mirror of https://github.com/milvus-io/milvus.git
Signed-off-by: kejiang <ke.jiang@zilliz.com>
Co-authored-by: kejiang <ke.jiang@zilliz.com>
pull/18603/head
parent
111c608513
commit
6c3dbf0a07
|
@ -20,13 +20,12 @@ import (
|
|||
"context"
|
||||
"io"
|
||||
|
||||
rc "github.com/milvus-io/milvus/internal/distributed/rootcoord"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/util/dependency"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
|
||||
rc "github.com/milvus-io/milvus/internal/distributed/rootcoord"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// RootCoord implements RootCoord grpc server
|
||||
|
@ -53,6 +52,7 @@ func NewRootCoord(ctx context.Context, factory dependency.Factory) (*RootCoord,
|
|||
// Run starts service
|
||||
func (rc *RootCoord) Run() error {
|
||||
if err := rc.svr.Run(); err != nil {
|
||||
log.Error("RootCoord starts error", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
log.Debug("RootCoord successfully started")
|
||||
|
|
|
@ -41,6 +41,7 @@ ignore:
|
|||
- "docs/.*"
|
||||
- "**/*.pb.go"
|
||||
- "**/*.proto"
|
||||
- "internal/metastore/db/dbmodel/mocks/.*"
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Related configuration of etcd, used to store Milvus metadata.
|
||||
# Related configuration of etcd, used to store Milvus metadata & service discovery.
|
||||
etcd:
|
||||
endpoints:
|
||||
- localhost:2379
|
||||
|
@ -47,6 +47,22 @@ etcd:
|
|||
# We recommend using version 1.2 and above
|
||||
tlsMinVersion: 1.3
|
||||
|
||||
# Default value: etcd
|
||||
# Valid values: [etcd, mysql]
|
||||
metastore:
|
||||
type: etcd
|
||||
|
||||
# Related configuration of mysql, used to store Milvus metadata.
|
||||
mysql:
|
||||
username: root
|
||||
password: 11111111
|
||||
address: localhost
|
||||
port: 3306
|
||||
dbName: milvus_meta
|
||||
driverName: mysql
|
||||
maxOpenConns: 20
|
||||
maxIdleConns: 5
|
||||
|
||||
# please adjust in embedded Milvus: /tmp/milvus/data/
|
||||
localStorage:
|
||||
path: /var/lib/milvus/data/
|
||||
|
@ -62,10 +78,10 @@ minio:
|
|||
rootPath: files # The root path where the message is stored in MinIO/S3
|
||||
# Whether to use AWS IAM role to access S3 instead of access/secret keys
|
||||
# For more information, refer to https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
|
||||
useIAM: false
|
||||
# Custom endpoint for fetch IAM role credentials.
|
||||
useIAM: false
|
||||
# Custom endpoint for fetch IAM role credentials.
|
||||
# Leave it empty if you want to use AWS default endpoint
|
||||
iamEndpoint: ""
|
||||
iamEndpoint: ""
|
||||
|
||||
# Milvus supports three MQ: rocksmq(based on RockDB), Pulsar and Kafka, which should be reserved in config what you use.
|
||||
# There is a note about enabling priority if we config multiple mq in this file
|
||||
|
|
6
go.mod
6
go.mod
|
@ -5,6 +5,7 @@ go 1.18
|
|||
require (
|
||||
github.com/99designs/keyring v1.2.1 // indirect
|
||||
github.com/BurntSushi/toml v1.0.0
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 // indirect
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect
|
||||
github.com/StackExchange/wmi v1.2.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e
|
||||
|
@ -24,6 +25,7 @@ require (
|
|||
github.com/google/btree v1.0.1
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
||||
github.com/jarcoal/httpmock v1.0.8
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/klauspost/compress v1.14.2
|
||||
github.com/lingdor/stackerror v0.0.0-20191119040541-976d8885ed76
|
||||
github.com/minio/minio-go/v7 v7.0.10
|
||||
|
@ -53,6 +55,8 @@ require (
|
|||
google.golang.org/grpc/examples v0.0.0-20220617181431-3e7b97febc7f
|
||||
google.golang.org/protobuf v1.28.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||
gorm.io/driver/mysql v1.3.5 // indirect
|
||||
gorm.io/gorm v1.23.8 // indirect
|
||||
stathat.com/c/consistent v1.0.0
|
||||
)
|
||||
|
||||
|
@ -90,6 +94,7 @@ require (
|
|||
github.com/go-playground/locales v0.13.0 // indirect
|
||||
github.com/go-playground/universal-translator v0.17.0 // indirect
|
||||
github.com/go-playground/validator/v10 v10.4.1 // indirect
|
||||
github.com/go-sql-driver/mysql v1.6.0 // indirect
|
||||
github.com/goccy/go-json v0.7.10 // indirect
|
||||
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
|
||||
github.com/godbus/dbus/v5 v5.0.4 // indirect
|
||||
|
@ -102,6 +107,7 @@ require (
|
|||
github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jonboulle/clockwork v0.2.2 // indirect
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/klauspost/asmfmt v1.3.1 // indirect
|
||||
|
|
15
go.sum
15
go.sum
|
@ -53,6 +53,8 @@ github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU
|
|||
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/DataDog/zstd v1.4.6-0.20210211175136-c6db21d202f4 h1:++HGU87uq9UsSTlFeiOV9uZR3NpYkndUXeYyLv2DTc8=
|
||||
github.com/DataDog/zstd v1.4.6-0.20210211175136-c6db21d202f4/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw=
|
||||
|
@ -246,6 +248,8 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
|
|||
github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
|
||||
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/goccy/go-json v0.7.10 h1:ulhbuNe1JqE68nMRXXTJRrUu0uhouf0VevLINxQq4Ec=
|
||||
github.com/goccy/go-json v0.7.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
|
@ -398,6 +402,11 @@ github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+
|
|||
github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ=
|
||||
github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
|
||||
github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
|
@ -451,6 +460,7 @@ github.com/kris-nova/lolgopher v0.0.0-20180921204813-313b3abb0d9b h1:xYEM2oBUhBE
|
|||
github.com/kris-nova/lolgopher v0.0.0-20180921204813-313b3abb0d9b/go.mod h1:V0HF/ZBlN86HqewcDC/cVxMmYDiRukWjSrgKLUAn9Js=
|
||||
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
|
||||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lingdor/stackerror v0.0.0-20191119040541-976d8885ed76 h1:IVlcvV0CjvfBYYod5ePe89l+3LBAl//6n9kJ9Vr2i0k=
|
||||
github.com/lingdor/stackerror v0.0.0-20191119040541-976d8885ed76/go.mod h1:Iu9BHUvTh8/KpbuSoKx/CaJEdJvFxSverxIy7I+nq7s=
|
||||
github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM=
|
||||
|
@ -473,6 +483,7 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
|
|||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0=
|
||||
github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
|
@ -1209,6 +1220,10 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
|||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/mysql v1.3.5 h1:iWBTVW/8Ij5AG4e0G/zqzaJblYkBI1VIL1LG2HUGsvY=
|
||||
gorm.io/driver/mysql v1.3.5/go.mod h1:sSIebwZAVPiT+27jK9HIwvsqOGKx3YMPmrA3mBJR10c=
|
||||
gorm.io/gorm v1.23.8 h1:h8sGJ+biDgBA1AD1Ha9gFCx7h8npU7AsLdlkX0n2TpE=
|
||||
gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
|
|
@ -23,7 +23,7 @@ type Catalog interface {
|
|||
CreateIndex(ctx context.Context, col *model.Collection, index *model.Index) error
|
||||
// AlterIndex newIndex only contains updated parts
|
||||
AlterIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index, alterType AlterType) error
|
||||
DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error
|
||||
DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID) error
|
||||
ListIndexes(ctx context.Context) ([]*model.Index, error)
|
||||
|
||||
CreateAlias(ctx context.Context, alias *model.Alias, ts typeutil.Timestamp) error
|
||||
|
|
|
@ -0,0 +1,93 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
type collectionDb struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func (s *collectionDb) GetCollectionIDTs(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*dbmodel.Collection, error) {
|
||||
var col dbmodel.Collection
|
||||
|
||||
err := s.db.Model(&dbmodel.Collection{}).Select("collection_id, ts").Where("tenant_id = ? AND collection_id = ? AND ts <= ?", tenantID, collectionID, ts).Order("ts desc").Take(&col).Error
|
||||
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
log.Warn("record not found", zap.Int64("collID", collectionID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, fmt.Errorf("record not found, collID=%d, ts=%d", collectionID, ts)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("get collection ts failed", zap.String("tenant", tenantID), zap.Int64("collID", collectionID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &col, nil
|
||||
}
|
||||
|
||||
func (s *collectionDb) ListCollectionIDTs(tenantID string, ts typeutil.Timestamp) ([]*dbmodel.Collection, error) {
|
||||
var r []*dbmodel.Collection
|
||||
|
||||
err := s.db.Model(&dbmodel.Collection{}).Select("collection_id, MAX(ts) ts").Where("tenant_id = ? AND ts <= ?", tenantID, ts).Group("collection_id").Find(&r).Error
|
||||
if err != nil {
|
||||
log.Error("list collection_id & latest ts pairs in collections failed", zap.String("tenant", tenantID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (s *collectionDb) Get(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*dbmodel.Collection, error) {
|
||||
var r dbmodel.Collection
|
||||
|
||||
err := s.db.Model(&dbmodel.Collection{}).Where("tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false", tenantID, collectionID, ts).Take(&r).Error
|
||||
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, fmt.Errorf("collection not found, collID=%d, ts=%d", collectionID, ts)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("get collection by collection_id and ts failed", zap.String("tenant", tenantID), zap.Int64("collID", collectionID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
func (s *collectionDb) GetCollectionIDByName(tenantID string, collectionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error) {
|
||||
var r dbmodel.Collection
|
||||
|
||||
err := s.db.Model(&dbmodel.Collection{}).Select("collection_id").Where("tenant_id = ? AND collection_name = ? AND ts <= ?", tenantID, collectionName, ts).Order("ts desc").Take(&r).Error
|
||||
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return 0, fmt.Errorf("get collection_id by collection_name not found, collName=%s, ts=%d", collectionName, ts)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("get collection_id by collection_name failed", zap.String("tenant", tenantID), zap.String("collName", collectionName), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return r.CollectionID, nil
|
||||
}
|
||||
|
||||
// Insert used in create & drop collection, needs be an idempotent operation, so we use DoNothing strategy here so it will not throw exception for retry, equivalent to kv catalog
|
||||
func (s *collectionDb) Insert(in *dbmodel.Collection) error {
|
||||
err := s.db.Clauses(clause.OnConflict{
|
||||
// constraint UNIQUE (tenant_id, collection_id, ts)
|
||||
DoNothing: true,
|
||||
}).Create(&in).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("insert collection failed", zap.String("tenant", in.TenantID), zap.Int64("collID", in.CollectionID), zap.Uint64("ts", in.Ts), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
type collAliasDb struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func (s *collAliasDb) Insert(in []*dbmodel.CollectionAlias) error {
|
||||
err := s.db.Clauses(clause.OnConflict{
|
||||
// constraint UNIQUE (tenant_id, collection_alias, ts)
|
||||
DoNothing: true,
|
||||
}).Create(&in).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("insert collection alias failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *collAliasDb) GetCollectionIDByAlias(tenantID string, alias string, ts typeutil.Timestamp) (typeutil.UniqueID, error) {
|
||||
var r dbmodel.CollectionAlias
|
||||
|
||||
err := s.db.Model(&dbmodel.CollectionAlias{}).Select("collection_id").Where("tenant_id = ? AND collection_alias = ? AND ts <= ?", tenantID, alias, ts).Order("ts desc").Take(&r).Error
|
||||
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return 0, fmt.Errorf("get collection_id by alias not found, alias=%s, ts=%d", alias, ts)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("get collection_id by alias failed", zap.String("tenant", tenantID), zap.String("alias", alias), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return r.CollectionID, nil
|
||||
}
|
||||
|
||||
func (s *collAliasDb) ListCollectionIDTs(tenantID string, ts typeutil.Timestamp) ([]*dbmodel.CollectionAlias, error) {
|
||||
var r []*dbmodel.CollectionAlias
|
||||
|
||||
err := s.db.Model(&dbmodel.CollectionAlias{}).Select("collection_id, MAX(ts) ts").Where("tenant_id = ? AND ts <= ?", tenantID, ts).Group("collection_id").Find(&r).Error
|
||||
if err != nil {
|
||||
log.Error("list collection_id & latest ts pairs in collection_aliases failed", zap.String("tenant", tenantID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (s *collAliasDb) List(tenantID string, cidTsPairs []*dbmodel.CollectionAlias) ([]*dbmodel.CollectionAlias, error) {
|
||||
var collAliases []*dbmodel.CollectionAlias
|
||||
|
||||
inValues := make([][]interface{}, 0, len(cidTsPairs))
|
||||
for _, pair := range cidTsPairs {
|
||||
in := []interface{}{pair.CollectionID, pair.Ts}
|
||||
inValues = append(inValues, in)
|
||||
}
|
||||
|
||||
err := s.db.Model(&dbmodel.CollectionAlias{}).Select("collection_id, collection_alias").
|
||||
Where("tenant_id = ? AND is_deleted = false AND (collection_id, ts) IN ?", tenantID, inValues).Find(&collAliases).Error
|
||||
if err != nil {
|
||||
log.Error("list alias by collection_id and alias pairs failed", zap.String("tenant", tenantID), zap.Any("collIdTs", inValues), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return collAliases, nil
|
||||
}
|
|
@ -0,0 +1,204 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
func TestCollectionAlias_Insert(t *testing.T) {
|
||||
var collAliases = []*dbmodel.CollectionAlias{
|
||||
{
|
||||
TenantID: "",
|
||||
CollectionID: collID1,
|
||||
CollectionAlias: "test_alias_1",
|
||||
Ts: ts,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `collection_aliases` (`tenant_id`,`collection_id`,`collection_alias`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`").
|
||||
WithArgs(collAliases[0].TenantID, collAliases[0].CollectionID, collAliases[0].CollectionAlias, collAliases[0].Ts, collAliases[0].IsDeleted, collAliases[0].CreatedAt, collAliases[0].UpdatedAt).
|
||||
WillReturnResult(sqlmock.NewResult(100, 2))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := aliasTestDb.Insert(collAliases)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestCollectionAlias_Insert_Error(t *testing.T) {
|
||||
var collAliases = []*dbmodel.CollectionAlias{
|
||||
{
|
||||
TenantID: "",
|
||||
CollectionID: collID1,
|
||||
CollectionAlias: "test_alias_1",
|
||||
Ts: ts,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `collection_aliases` (`tenant_id`,`collection_id`,`collection_alias`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`").
|
||||
WithArgs(collAliases[0].TenantID, collAliases[0].CollectionID, collAliases[0].CollectionAlias, collAliases[0].Ts, collAliases[0].IsDeleted, collAliases[0].CreatedAt, collAliases[0].UpdatedAt).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := aliasTestDb.Insert(collAliases)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCollectionAlias_GetCollectionIDByName(t *testing.T) {
|
||||
alias := "test_alias_name_1"
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT `collection_id` FROM `collection_aliases` WHERE tenant_id = ? AND collection_alias = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
|
||||
WithArgs(tenantID, alias, ts).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"collection_id"}).
|
||||
AddRow(collID1))
|
||||
|
||||
// actual
|
||||
res, err := aliasTestDb.GetCollectionIDByAlias(tenantID, alias, ts)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, collID1, res)
|
||||
}
|
||||
|
||||
func TestCollectionAlias_GetCollectionIDByName_Error(t *testing.T) {
|
||||
alias := "test_alias_name_1"
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT `collection_id` FROM `collection_aliases` WHERE tenant_id = ? AND collection_alias = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
|
||||
WithArgs(tenantID, alias, ts).
|
||||
WillReturnError(errors.New("test error"))
|
||||
|
||||
// actual
|
||||
res, err := aliasTestDb.GetCollectionIDByAlias(tenantID, alias, ts)
|
||||
assert.Equal(t, typeutil.UniqueID(0), res)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCollectionAlias_GetCollectionIDByName_ErrRecordNotFound(t *testing.T) {
|
||||
alias := "test_alias_name_1"
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT `collection_id` FROM `collection_aliases` WHERE tenant_id = ? AND collection_alias = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
|
||||
WithArgs(tenantID, alias, ts).
|
||||
WillReturnError(gorm.ErrRecordNotFound)
|
||||
|
||||
// actual
|
||||
res, err := aliasTestDb.GetCollectionIDByAlias(tenantID, alias, ts)
|
||||
assert.Equal(t, typeutil.UniqueID(0), res)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCollectionAlias_ListCidTs(t *testing.T) {
|
||||
var collAliases = []*dbmodel.CollectionAlias{
|
||||
{
|
||||
CollectionID: collID1,
|
||||
Ts: typeutil.Timestamp(2),
|
||||
},
|
||||
{
|
||||
CollectionID: collID2,
|
||||
Ts: typeutil.Timestamp(5),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT collection_id, MAX(ts) ts FROM `collection_aliases` WHERE tenant_id = ? AND ts <= ? GROUP BY `collection_id`").
|
||||
WithArgs(tenantID, ts).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"collection_id", "ts"}).
|
||||
AddRow(collID1, typeutil.Timestamp(2)).
|
||||
AddRow(collID2, typeutil.Timestamp(5)))
|
||||
|
||||
// actual
|
||||
res, err := aliasTestDb.ListCollectionIDTs(tenantID, ts)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, collAliases, res)
|
||||
}
|
||||
|
||||
func TestCollectionAlias_ListCidTs_Error(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT collection_id, MAX(ts) ts FROM `collection_aliases` WHERE tenant_id = ? AND ts <= ? GROUP BY `collection_id`").
|
||||
WithArgs(tenantID, ts).
|
||||
WillReturnError(errors.New("test error"))
|
||||
|
||||
// actual
|
||||
res, err := aliasTestDb.ListCollectionIDTs(tenantID, ts)
|
||||
assert.Nil(t, res)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCollectionAlias_List(t *testing.T) {
|
||||
var cidTsPairs = []*dbmodel.CollectionAlias{
|
||||
{
|
||||
CollectionID: collID1,
|
||||
Ts: typeutil.Timestamp(2),
|
||||
},
|
||||
{
|
||||
CollectionID: collID2,
|
||||
Ts: typeutil.Timestamp(5),
|
||||
},
|
||||
}
|
||||
var out = []*dbmodel.CollectionAlias{
|
||||
{
|
||||
CollectionID: collID1,
|
||||
CollectionAlias: "test_alias_1",
|
||||
},
|
||||
{
|
||||
CollectionID: collID2,
|
||||
CollectionAlias: "test_alias_2",
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT collection_id, collection_alias FROM `collection_aliases` WHERE tenant_id = ? AND is_deleted = false AND (collection_id, ts) IN ((?,?),(?,?))").
|
||||
WithArgs(tenantID, cidTsPairs[0].CollectionID, cidTsPairs[0].Ts, cidTsPairs[1].CollectionID, cidTsPairs[1].Ts).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"collection_id", "collection_alias"}).
|
||||
AddRow(collID1, "test_alias_1").
|
||||
AddRow(collID2, "test_alias_2"))
|
||||
|
||||
// actual
|
||||
res, err := aliasTestDb.List(tenantID, cidTsPairs)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, out, res)
|
||||
}
|
||||
|
||||
func TestCollectionAlias_List_Error(t *testing.T) {
|
||||
var cidTsPairs = []*dbmodel.CollectionAlias{
|
||||
{
|
||||
CollectionID: collID1,
|
||||
Ts: typeutil.Timestamp(2),
|
||||
},
|
||||
{
|
||||
CollectionID: collID2,
|
||||
Ts: typeutil.Timestamp(5),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT collection_id, collection_alias FROM `collection_aliases` WHERE tenant_id = ? AND is_deleted = false AND (collection_id, ts) IN ((?,?),(?,?))").
|
||||
WithArgs(tenantID, cidTsPairs[0].CollectionID, cidTsPairs[0].Ts, cidTsPairs[1].CollectionID, cidTsPairs[1].Ts).
|
||||
WillReturnError(errors.New("test error"))
|
||||
|
||||
// actual
|
||||
res, err := aliasTestDb.List(tenantID, cidTsPairs)
|
||||
assert.Nil(t, res)
|
||||
assert.Error(t, err)
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
type collChannelDb struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func (s *collChannelDb) GetByCollectionID(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) ([]*dbmodel.CollectionChannel, error) {
|
||||
var r []*dbmodel.CollectionChannel
|
||||
|
||||
err := s.db.Model(&dbmodel.CollectionChannel{}).Where("tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false", tenantID, collectionID, ts).Find(&r).Error
|
||||
if err != nil {
|
||||
log.Error("get channels by collection_id and ts failed", zap.String("tenant", tenantID), zap.Int64("collID", collectionID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (s *collChannelDb) Insert(in []*dbmodel.CollectionChannel) error {
|
||||
err := s.db.CreateInBatches(in, 100).Error
|
||||
if err != nil {
|
||||
log.Error("insert channel failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,105 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCollectionChannel_GetByCollID(t *testing.T) {
|
||||
var collChannels = []*dbmodel.CollectionChannel{
|
||||
{
|
||||
TenantID: tenantID,
|
||||
CollectionID: collID1,
|
||||
VirtualChannelName: "test_virtual_channel_1",
|
||||
PhysicalChannelName: "test_physical_channel_1",
|
||||
Removed: false,
|
||||
Ts: ts,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT * FROM `collection_channels` WHERE tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false").
|
||||
WithArgs(tenantID, collID1, ts).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"tenant_id", "collection_id", "virtual_channel_name", "physical_channel_name", "removed", "ts", "is_deleted", "created_at", "updated_at"}).
|
||||
AddRow(collChannels[0].TenantID, collChannels[0].CollectionID, collChannels[0].VirtualChannelName, collChannels[0].PhysicalChannelName, collChannels[0].Removed, collChannels[0].Ts, collChannels[0].IsDeleted, collChannels[0].CreatedAt, collChannels[0].UpdatedAt))
|
||||
|
||||
// actual
|
||||
res, err := channelTestDb.GetByCollectionID(tenantID, collID1, ts)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, collChannels, res)
|
||||
}
|
||||
|
||||
func TestCollectionChannel_GetByCollID_Error(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT * FROM `collection_channels` WHERE tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false").
|
||||
WithArgs(tenantID, collID1, ts).
|
||||
WillReturnError(errors.New("test error"))
|
||||
|
||||
// actual
|
||||
res, err := channelTestDb.GetByCollectionID(tenantID, collID1, ts)
|
||||
assert.Nil(t, res)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCollectionChannel_Insert(t *testing.T) {
|
||||
var collChannels = []*dbmodel.CollectionChannel{
|
||||
{
|
||||
TenantID: "",
|
||||
CollectionID: collID1,
|
||||
VirtualChannelName: "test_virtual_channel_1",
|
||||
PhysicalChannelName: "test_physical_channel_1",
|
||||
Removed: false,
|
||||
Ts: ts,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `collection_channels` (`tenant_id`,`collection_id`,`virtual_channel_name`,`physical_channel_name`,`removed`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?)").
|
||||
WithArgs(collChannels[0].TenantID, collChannels[0].CollectionID, collChannels[0].VirtualChannelName, collChannels[0].PhysicalChannelName, collChannels[0].Removed, collChannels[0].Ts, collChannels[0].IsDeleted, collChannels[0].CreatedAt, collChannels[0].UpdatedAt).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := channelTestDb.Insert(collChannels)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// TestCollectionChannel_Insert_Error verifies that collChannelDb.Insert propagates
// a driver error and that gorm rolls the surrounding transaction back.
func TestCollectionChannel_Insert_Error(t *testing.T) {
	var collChannels = []*dbmodel.CollectionChannel{
		{
			TenantID:            "",
			CollectionID:        collID1,
			VirtualChannelName:  "test_virtual_channel_1",
			PhysicalChannelName: "test_physical_channel_1",
			Removed:             false,
			Ts:                  ts,
			IsDeleted:           false,
			CreatedAt:           time.Now(),
			UpdatedAt:           time.Now(),
		},
	}

	// expectation: the INSERT fails, so a rollback (not a commit) must follow
	mock.ExpectBegin()
	mock.ExpectExec("INSERT INTO `collection_channels` (`tenant_id`,`collection_id`,`virtual_channel_name`,`physical_channel_name`,`removed`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?)").
		WithArgs(collChannels[0].TenantID, collChannels[0].CollectionID, collChannels[0].VirtualChannelName, collChannels[0].PhysicalChannelName, collChannels[0].Removed, collChannels[0].Ts, collChannels[0].IsDeleted, collChannels[0].CreatedAt, collChannels[0].UpdatedAt).
		WillReturnError(errors.New("test error"))
	mock.ExpectRollback()

	// actual
	err := channelTestDb.Insert(collChannels)
	assert.Error(t, err)
}
|
|
@ -0,0 +1,394 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbcore"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// Shared fixture identifiers and timestamps used across the dao unit tests.
const (
	tenantID = "test_tenant"
	noTs     = typeutil.Timestamp(0)  // zero timestamp
	ts       = typeutil.Timestamp(10) // non-zero snapshot timestamp
	collID1  = typeutil.UniqueID(101)
	collID2  = typeutil.UniqueID(102)
	fieldID1 = typeutil.UniqueID(501)
	indexID1 = typeutil.UniqueID(1001)
	indexID2 = typeutil.UniqueID(1002)
	segmentID1 = typeutil.UniqueID(2001)
	segmentID2 = typeutil.UniqueID(2002)
	partitionID1 = typeutil.UniqueID(3001)
	indexBuildID1 = typeutil.UniqueID(5001)
)
|
||||
|
||||
// Package-level handles shared by every test in this package.
// They are initialized exactly once in TestMain.
var (
	mock            sqlmock.Sqlmock // programs SQL expectations for the mocked connection
	collTestDb      dbmodel.ICollectionDb
	aliasTestDb     dbmodel.ICollAliasDb
	channelTestDb   dbmodel.ICollChannelDb
	fieldTestDb     dbmodel.IFieldDb
	partitionTestDb dbmodel.IPartitionDb
	indexTestDb     dbmodel.IIndexDb
	segIndexTestDb  dbmodel.ISegmentIndexDb
	userTestDb      dbmodel.IUserDb
)
|
||||
|
||||
// TestMain is the first function executed in current package: it wires a
// sqlmock-backed gorm session into the global DB handle, builds the DAO
// instances the tests exercise, and then runs the test suite.
func TestMain(m *testing.M) {
	var (
		db  *sql.DB
		err error
	)

	// setting sql MUST exact match
	db, mock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
	if err != nil {
		panic(err)
	}

	// Wrap the mocked *sql.DB in the gorm MySQL dialector.
	// SkipInitializeWithVersion stops the dialector from issuing its own
	// version lookup, which would otherwise hit the mock unexpectedly.
	DB, err := gorm.Open(mysql.New(mysql.Config{
		Conn:                      db,
		SkipInitializeWithVersion: true,
	}), &gorm.Config{})
	if err != nil {
		panic(err)
	}

	// set mocked database
	dbcore.SetGlobalDB(DB)

	// One DAO handle per table, all bound to the mocked session.
	collTestDb = NewMetaDomain().CollectionDb(context.TODO())
	aliasTestDb = NewMetaDomain().CollAliasDb(context.TODO())
	channelTestDb = NewMetaDomain().CollChannelDb(context.TODO())
	fieldTestDb = NewMetaDomain().FieldDb(context.TODO())
	partitionTestDb = NewMetaDomain().PartitionDb(context.TODO())
	indexTestDb = NewMetaDomain().IndexDb(context.TODO())
	segIndexTestDb = NewMetaDomain().SegmentIndexDb(context.TODO())
	userTestDb = NewMetaDomain().UserDb(context.TODO())

	// m.Run entry for executing tests
	os.Exit(m.Run())
}
|
||||
|
||||
// Notice: sql must be exactly matched, we can use debug() to print the sql
|
||||
|
||||
// TestCollection_GetCidTs_Ts0 exercises GetCollectionIDTs with the zero timestamp.
func TestCollection_GetCidTs_Ts0(t *testing.T) {
	var collection = &dbmodel.Collection{
		CollectionID: collID1,
		Ts:           noTs,
	}

	// expectation
	mock.ExpectQuery("SELECT collection_id, ts FROM `collections` WHERE tenant_id = ? AND collection_id = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
		WithArgs(tenantID, collID1, noTs).
		WillReturnRows(
			sqlmock.NewRows([]string{"collection_id", "ts"}).
				AddRow(collID1, noTs))

	// actual
	res, err := collTestDb.GetCollectionIDTs(tenantID, collID1, noTs)
	assert.Nil(t, err)
	assert.Equal(t, collection, res)
}

// TestCollection_GetCidTs_TsNot0 verifies that the newest row whose ts is at or
// below the requested timestamp is returned (ORDER BY ts desc LIMIT 1).
func TestCollection_GetCidTs_TsNot0(t *testing.T) {
	resultTs := typeutil.Timestamp(2)
	var collection = &dbmodel.Collection{
		CollectionID: collID1,
		Ts:           resultTs,
	}

	// expectation
	mock.ExpectQuery("SELECT collection_id, ts FROM `collections` WHERE tenant_id = ? AND collection_id = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
		WithArgs(tenantID, collID1, ts).
		WillReturnRows(
			sqlmock.NewRows([]string{"collection_id", "ts"}).
				AddRow(collID1, resultTs))

	// actual
	res, err := collTestDb.GetCollectionIDTs(tenantID, collID1, ts)
	assert.Nil(t, err)
	assert.Equal(t, collection, res)
}

// TestCollection_GetCidTs_TsNot0_Error verifies that a generic driver error is
// propagated and the result is nil.
func TestCollection_GetCidTs_TsNot0_Error(t *testing.T) {
	// expectation
	mock.ExpectQuery("SELECT collection_id, ts FROM `collections` WHERE tenant_id = ? AND collection_id = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
		WithArgs(tenantID, collID1, ts).
		WillReturnError(errors.New("test error"))

	// actual
	res, err := collTestDb.GetCollectionIDTs(tenantID, collID1, ts)
	assert.Nil(t, res)
	assert.Error(t, err)
}

// TestCollection_GetCidTs_TsNot0_ErrRecordNotFound verifies that
// gorm.ErrRecordNotFound is also surfaced to the caller as an error.
func TestCollection_GetCidTs_TsNot0_ErrRecordNotFound(t *testing.T) {
	// expectation
	mock.ExpectQuery("SELECT collection_id, ts FROM `collections` WHERE tenant_id = ? AND collection_id = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
		WithArgs(tenantID, collID1, ts).
		WillReturnError(gorm.ErrRecordNotFound)

	// actual
	res, err := collTestDb.GetCollectionIDTs(tenantID, collID1, ts)
	assert.Nil(t, res)
	assert.Error(t, err)
}
|
||||
|
||||
// TestCollection_ListCidTs_TsNot0 verifies that ListCollectionIDTs returns, for each
// collection, its maximum ts not exceeding the requested timestamp (GROUP BY + MAX).
func TestCollection_ListCidTs_TsNot0(t *testing.T) {
	var collection = []*dbmodel.Collection{
		{
			CollectionID: collID1,
			Ts:           typeutil.Timestamp(2),
		},
		{
			CollectionID: collID2,
			Ts:           typeutil.Timestamp(5),
		},
	}

	// expectation
	mock.ExpectQuery("SELECT collection_id, MAX(ts) ts FROM `collections` WHERE tenant_id = ? AND ts <= ? GROUP BY `collection_id`").
		WithArgs(tenantID, ts).
		WillReturnRows(
			sqlmock.NewRows([]string{"collection_id", "ts"}).
				AddRow(collID1, typeutil.Timestamp(2)).
				AddRow(collID2, typeutil.Timestamp(5)))

	// actual
	res, err := collTestDb.ListCollectionIDTs(tenantID, ts)
	assert.Nil(t, err)
	assert.Equal(t, collection, res)
}

// TestCollection_ListCidTs_TsNot0_Error verifies that a driver error is propagated.
func TestCollection_ListCidTs_TsNot0_Error(t *testing.T) {
	// expectation
	mock.ExpectQuery("SELECT collection_id, MAX(ts) ts FROM `collections` WHERE tenant_id = ? AND ts <= ? GROUP BY `collection_id`").
		WithArgs(tenantID, ts).
		WillReturnError(errors.New("test error"))

	// actual
	res, err := collTestDb.ListCollectionIDTs(tenantID, ts)
	assert.Nil(t, res)
	assert.Error(t, err)
}

// TestCollection_ListCidTs_Ts0 exercises the same listing with the zero timestamp.
func TestCollection_ListCidTs_Ts0(t *testing.T) {
	var collection = []*dbmodel.Collection{
		{
			CollectionID: collID1,
			Ts:           noTs,
		},
		{
			CollectionID: collID2,
			Ts:           noTs,
		},
	}

	// expectation
	mock.ExpectQuery("SELECT collection_id, MAX(ts) ts FROM `collections` WHERE tenant_id = ? AND ts <= ? GROUP BY `collection_id`").
		WithArgs(tenantID, noTs).
		WillReturnRows(
			sqlmock.NewRows([]string{"collection_id", "ts"}).
				AddRow(collID1, noTs).
				AddRow(collID2, noTs))

	// actual
	res, err := collTestDb.ListCollectionIDTs(tenantID, noTs)
	assert.Nil(t, err)
	assert.Equal(t, collection, res)
}
|
||||
|
||||
// TestCollection_Get verifies the happy path of collectionDb.Get: the selected
// columns are mapped back onto a dbmodel.Collection.
func TestCollection_Get(t *testing.T) {
	var collection = &dbmodel.Collection{
		TenantID:         "",
		CollectionID:     collID1,
		CollectionName:   "test_collection_name_1",
		Description:      "",
		AutoID:           false,
		ShardsNum:        int32(2),
		StartPosition:    "",
		ConsistencyLevel: int32(commonpb.ConsistencyLevel_Eventually),
		Ts:               ts,
	}

	// expectation
	mock.ExpectQuery("SELECT * FROM `collections` WHERE tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false LIMIT 1").
		WithArgs(tenantID, collection.CollectionID, collection.Ts).
		WillReturnRows(
			sqlmock.NewRows([]string{"tenant_id", "collection_id", "collection_name", "description", "auto_id", "shards_num", "start_position", "consistency_level", "ts"}).
				AddRow(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Ts))

	// actual
	res, err := collTestDb.Get(tenantID, collID1, ts)
	assert.Nil(t, err)
	assert.Equal(t, collection, res)
}

// TestCollection_Get_Error verifies that a driver error yields (nil, err).
func TestCollection_Get_Error(t *testing.T) {
	var collection = &dbmodel.Collection{
		TenantID:         "",
		CollectionID:     collID1,
		CollectionName:   "test_collection_name_1",
		Description:      "",
		AutoID:           false,
		ShardsNum:        int32(2),
		StartPosition:    "",
		ConsistencyLevel: int32(commonpb.ConsistencyLevel_Eventually),
		Ts:               ts,
	}

	// expectation
	mock.ExpectQuery("SELECT * FROM `collections` WHERE tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false LIMIT 1").
		WithArgs(tenantID, collection.CollectionID, collection.Ts).
		WillReturnError(errors.New("test error"))

	// actual
	res, err := collTestDb.Get(tenantID, collID1, ts)
	assert.Nil(t, res)
	assert.Error(t, err)
}

// TestCollection_Get_ErrRecordNotFound verifies that gorm.ErrRecordNotFound is
// reported as an error rather than an empty result.
func TestCollection_Get_ErrRecordNotFound(t *testing.T) {
	var collection = &dbmodel.Collection{
		TenantID:         "",
		CollectionID:     collID1,
		CollectionName:   "test_collection_name_1",
		Description:      "",
		AutoID:           false,
		ShardsNum:        int32(2),
		StartPosition:    "",
		ConsistencyLevel: int32(commonpb.ConsistencyLevel_Eventually),
		Ts:               ts,
	}

	// expectation
	mock.ExpectQuery("SELECT * FROM `collections` WHERE tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false LIMIT 1").
		WithArgs(tenantID, collection.CollectionID, collection.Ts).
		WillReturnError(gorm.ErrRecordNotFound)

	// actual
	res, err := collTestDb.Get(tenantID, collID1, ts)
	assert.Nil(t, res)
	assert.Error(t, err)
}
|
||||
|
||||
// TestCollection_GetCollectionIDByName verifies the name -> collection_id lookup.
func TestCollection_GetCollectionIDByName(t *testing.T) {
	collectionName := "test_collection_name_1"

	// expectation
	mock.ExpectQuery("SELECT `collection_id` FROM `collections` WHERE tenant_id = ? AND collection_name = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
		WithArgs(tenantID, collectionName, ts).
		WillReturnRows(
			sqlmock.NewRows([]string{"collection_id"}).
				AddRow(collID1))

	// actual
	res, err := collTestDb.GetCollectionIDByName(tenantID, collectionName, ts)
	assert.Nil(t, err)
	assert.Equal(t, collID1, res)
}

// TestCollection_GetCollectionIDByName_Error verifies that on a driver error the
// zero UniqueID is returned together with the error.
func TestCollection_GetCollectionIDByName_Error(t *testing.T) {
	collectionName := "test_collection_name_1"

	// expectation
	mock.ExpectQuery("SELECT `collection_id` FROM `collections` WHERE tenant_id = ? AND collection_name = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
		WithArgs(tenantID, collectionName, ts).
		WillReturnError(errors.New("test error"))

	// actual
	res, err := collTestDb.GetCollectionIDByName(tenantID, collectionName, ts)
	assert.Equal(t, typeutil.UniqueID(0), res)
	assert.Error(t, err)
}

// TestCollection_GetCollectionIDByName_ErrRecordNotFound verifies the same
// zero-ID + error contract for gorm.ErrRecordNotFound.
func TestCollection_GetCollectionIDByName_ErrRecordNotFound(t *testing.T) {
	collectionName := "test_collection_name_1"

	// expectation
	mock.ExpectQuery("SELECT `collection_id` FROM `collections` WHERE tenant_id = ? AND collection_name = ? AND ts <= ? ORDER BY ts desc LIMIT 1").
		WithArgs(tenantID, collectionName, ts).
		WillReturnError(gorm.ErrRecordNotFound)

	// actual
	res, err := collTestDb.GetCollectionIDByName(tenantID, collectionName, ts)
	assert.Equal(t, typeutil.UniqueID(0), res)
	assert.Error(t, err)
}
|
||||
|
||||
// TestCollection_Insert verifies the statement emitted for inserting a collection
// row; note the statement ends with "ON DUPLICATE KEY UPDATE `id`=`id`".
func TestCollection_Insert(t *testing.T) {
	var collection = &dbmodel.Collection{
		TenantID:         "",
		CollectionID:     collID1,
		CollectionName:   "test_collection_name_1",
		Description:      "",
		AutoID:           false,
		ShardsNum:        int32(2),
		StartPosition:    "",
		ConsistencyLevel: int32(commonpb.ConsistencyLevel_Eventually),
		Ts:               ts,
		IsDeleted:        false,
		CreatedAt:        time.Now(),
		UpdatedAt:        time.Now(),
	}

	// expectation
	mock.ExpectBegin()
	mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`").
		WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()

	// actual
	err := collTestDb.Insert(collection)
	assert.Nil(t, err)
}

// TestCollection_Insert_Error verifies that an insert failure rolls back the
// transaction and is returned to the caller.
func TestCollection_Insert_Error(t *testing.T) {
	var collection = &dbmodel.Collection{
		TenantID:         "",
		CollectionID:     collID1,
		CollectionName:   "test_collection_name_1",
		Description:      "",
		AutoID:           false,
		ShardsNum:        int32(2),
		StartPosition:    "",
		ConsistencyLevel: int32(commonpb.ConsistencyLevel_Eventually),
		Ts:               ts,
		IsDeleted:        false,
		CreatedAt:        time.Now(),
		UpdatedAt:        time.Now(),
	}

	// expectation
	mock.ExpectBegin()
	mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`").
		WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt).
		WillReturnError(errors.New("test error"))
	mock.ExpectRollback()

	// actual
	err := collTestDb.Insert(collection)
	assert.Error(t, err)
}
|
||||
|
||||
type AnyTime struct{}
|
||||
|
||||
func (a AnyTime) Match(v driver.Value) bool {
|
||||
_, ok := v.(time.Time)
|
||||
return ok
|
||||
}
|
|
@ -0,0 +1,46 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbcore"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
)
|
||||
|
||||
// metaDomain is a stateless factory that hands out one DAO per metadata table,
// each bound to the gorm session resolved from the supplied context.
type metaDomain struct{}

// NewMetaDomain creates a new DAO factory.
func NewMetaDomain() *metaDomain {
	return &metaDomain{}
}

// CollectionDb returns the DAO for the collections table.
func (*metaDomain) CollectionDb(ctx context.Context) dbmodel.ICollectionDb {
	return &collectionDb{dbcore.GetDB(ctx)}
}

// FieldDb returns the DAO for the field_schemas table.
func (*metaDomain) FieldDb(ctx context.Context) dbmodel.IFieldDb {
	return &fieldDb{dbcore.GetDB(ctx)}
}

// CollChannelDb returns the DAO for the collection_channels table.
func (*metaDomain) CollChannelDb(ctx context.Context) dbmodel.ICollChannelDb {
	return &collChannelDb{dbcore.GetDB(ctx)}
}

// CollAliasDb returns the DAO for collection aliases.
func (*metaDomain) CollAliasDb(ctx context.Context) dbmodel.ICollAliasDb {
	return &collAliasDb{dbcore.GetDB(ctx)}
}

// PartitionDb returns the DAO for partitions.
func (*metaDomain) PartitionDb(ctx context.Context) dbmodel.IPartitionDb {
	return &partitionDb{dbcore.GetDB(ctx)}
}

// IndexDb returns the DAO for the indexes table.
func (*metaDomain) IndexDb(ctx context.Context) dbmodel.IIndexDb {
	return &indexDb{dbcore.GetDB(ctx)}
}

// SegmentIndexDb returns the DAO for the segment_indexes table.
func (*metaDomain) SegmentIndexDb(ctx context.Context) dbmodel.ISegmentIndexDb {
	return &segmentIndexDb{dbcore.GetDB(ctx)}
}

// UserDb returns the DAO for users/credentials.
func (*metaDomain) UserDb(ctx context.Context) dbmodel.IUserDb {
	return &userDb{dbcore.GetDB(ctx)}
}
|
|
@ -0,0 +1,35 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// fieldDb implements field-schema persistence on top of a gorm session.
type fieldDb struct {
	db *gorm.DB
}

// GetByCollectionID returns the non-deleted field schemas of a collection at
// exactly the snapshot timestamp ts. A query error is logged and returned.
func (s *fieldDb) GetByCollectionID(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) ([]*dbmodel.Field, error) {
	var r []*dbmodel.Field

	err := s.db.Model(&dbmodel.Field{}).Where("tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false", tenantID, collectionID, ts).Find(&r).Error
	if err != nil {
		log.Error("get fields by collection_id and ts failed", zap.String("tenant", tenantID), zap.Int64("collID", collectionID), zap.Uint64("ts", ts), zap.Error(err))
		return nil, err
	}

	return r, nil
}

// Insert writes the given field rows, batching the INSERTs 100 rows at a time.
func (s *fieldDb) Insert(in []*dbmodel.Field) error {
	err := s.db.CreateInBatches(in, 100).Error
	if err != nil {
		log.Error("insert field failed", zap.Error(err))
		return err
	}

	return nil
}
|
|
@ -0,0 +1,118 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestField_GetByCollID verifies the happy path of fieldDb.GetByCollectionID.
func TestField_GetByCollID(t *testing.T) {
	var fields = []*dbmodel.Field{
		{
			TenantID:     tenantID,
			FieldID:      fieldID1,
			FieldName:    "test_field_1",
			IsPrimaryKey: false,
			Description:  "",
			DataType:     schemapb.DataType_FloatVector,
			TypeParams:   "",
			IndexParams:  "",
			AutoID:       false,
			CollectionID: collID1,
			Ts:           ts,
		},
	}

	// expectation
	mock.ExpectQuery("SELECT * FROM `field_schemas` WHERE tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false").
		WithArgs(tenantID, collID1, ts).
		WillReturnRows(
			sqlmock.NewRows([]string{"tenant_id", "field_id", "field_name", "is_primary_key", "description", "data_type", "type_params", "index_params", "auto_id", "collection_id", "ts"}).
				AddRow(fields[0].TenantID, fields[0].FieldID, fields[0].FieldName, fields[0].IsPrimaryKey, fields[0].Description, fields[0].DataType, fields[0].TypeParams, fields[0].IndexParams, fields[0].AutoID, fields[0].CollectionID, fields[0].Ts))

	// actual
	res, err := fieldTestDb.GetByCollectionID(tenantID, collID1, ts)
	assert.Nil(t, err)
	assert.Equal(t, fields, res)
}

// TestField_GetByCollID_Error verifies that a driver error is propagated.
func TestField_GetByCollID_Error(t *testing.T) {
	// expectation
	mock.ExpectQuery("SELECT * FROM `field_schemas` WHERE tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false").
		WithArgs(tenantID, collID1, ts).
		WillReturnError(errors.New("test error"))

	// actual
	res, err := fieldTestDb.GetByCollectionID(tenantID, collID1, ts)
	assert.Nil(t, res)
	assert.Error(t, err)
}

// TestField_Insert verifies the INSERT emitted for a field-schema row.
func TestField_Insert(t *testing.T) {
	var fields = []*dbmodel.Field{
		{
			TenantID:     tenantID,
			FieldID:      fieldID1,
			FieldName:    "test_field_1",
			IsPrimaryKey: false,
			Description:  "",
			DataType:     schemapb.DataType_FloatVector,
			TypeParams:   "",
			IndexParams:  "",
			AutoID:       false,
			CollectionID: collID1,
			Ts:           ts,
			IsDeleted:    false,
			CreatedAt:    time.Now(),
			UpdatedAt:    time.Now(),
		},
	}

	// expectation
	mock.ExpectBegin()
	mock.ExpectExec("INSERT INTO `field_schemas` (`tenant_id`,`field_id`,`field_name`,`is_primary_key`,`description`,`data_type`,`type_params`,`index_params`,`auto_id`,`collection_id`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
		WithArgs(fields[0].TenantID, fields[0].FieldID, fields[0].FieldName, fields[0].IsPrimaryKey, fields[0].Description, fields[0].DataType, fields[0].TypeParams, fields[0].IndexParams, fields[0].AutoID, fields[0].CollectionID, fields[0].Ts, fields[0].IsDeleted, fields[0].CreatedAt, fields[0].UpdatedAt).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()

	// actual
	err := fieldTestDb.Insert(fields)
	assert.Nil(t, err)
}

// TestField_Insert_Error verifies rollback and error propagation on insert failure.
func TestField_Insert_Error(t *testing.T) {
	var fields = []*dbmodel.Field{
		{
			TenantID:     tenantID,
			FieldID:      fieldID1,
			FieldName:    "test_field_1",
			IsPrimaryKey: false,
			Description:  "",
			DataType:     schemapb.DataType_FloatVector,
			TypeParams:   "",
			IndexParams:  "",
			AutoID:       false,
			CollectionID: collID1,
			Ts:           ts,
			IsDeleted:    false,
			CreatedAt:    time.Now(),
			UpdatedAt:    time.Now(),
		},
	}

	// expectation
	mock.ExpectBegin()
	mock.ExpectExec("INSERT INTO `field_schemas` (`tenant_id`,`field_id`,`field_name`,`is_primary_key`,`description`,`data_type`,`type_params`,`index_params`,`auto_id`,`collection_id`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
		WithArgs(fields[0].TenantID, fields[0].FieldID, fields[0].FieldName, fields[0].IsPrimaryKey, fields[0].Description, fields[0].DataType, fields[0].TypeParams, fields[0].IndexParams, fields[0].AutoID, fields[0].CollectionID, fields[0].Ts, fields[0].IsDeleted, fields[0].CreatedAt, fields[0].UpdatedAt).
		WillReturnError(errors.New("test error"))
	mock.ExpectRollback()

	// actual
	err := fieldTestDb.Insert(fields)
	assert.Error(t, err)
}
|
|
@ -0,0 +1,91 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// indexDb implements index metadata persistence on top of a gorm session.
type indexDb struct {
	db *gorm.DB
}

// Get returns the non-deleted indexes of a collection for the given tenant.
func (s *indexDb) Get(tenantID string, collectionID typeutil.UniqueID) ([]*dbmodel.Index, error) {
	var r []*dbmodel.Index

	err := s.db.Model(&dbmodel.Index{}).Where("tenant_id = ? AND collection_id = ? AND is_deleted = false", tenantID, collectionID).Find(&r).Error
	if err != nil {
		log.Error("get indexes by collection_id failed", zap.String("tenant", tenantID), zap.Int64("collID", collectionID), zap.Error(err))
		return nil, err
	}

	return r, nil
}

// List returns all non-deleted indexes of a tenant joined (LEFT JOIN) with their
// segment-level index rows, so indexes without segment builds still appear.
func (s *indexDb) List(tenantID string) ([]*dbmodel.IndexResult, error) {
	tx := s.db.Table("indexes").
		Select("indexes.field_id AS field_id, indexes.collection_id AS collection_id, indexes.index_id AS index_id, indexes.index_name AS index_name, indexes.index_params AS index_params, segment_indexes.segment_id AS segment_id, segment_indexes.partition_id AS partition_id, segment_indexes.enable_index AS enable_index, segment_indexes.index_build_id AS index_build_id, segment_indexes.index_size AS index_size, segment_indexes.index_file_paths AS index_file_paths").
		Joins("LEFT JOIN segment_indexes ON indexes.index_id = segment_indexes.index_id AND indexes.tenant_id = segment_indexes.tenant_id AND segment_indexes.tenant_id = ? AND segment_indexes.is_deleted = false", tenantID).
		Where("indexes.is_deleted = false").Where("indexes.tenant_id = ?", tenantID)

	var rs []*dbmodel.IndexResult
	err := tx.Scan(&rs).Error
	if err != nil {
		log.Error("list indexes by join failed", zap.String("tenant", tenantID), zap.Error(err))
		return nil, err
	}

	return rs, nil
}

// Insert writes the given index rows, batching the INSERTs 100 rows at a time.
func (s *indexDb) Insert(in []*dbmodel.Index) error {
	err := s.db.CreateInBatches(in, 100).Error
	if err != nil {
		log.Error("insert index failed", zap.Error(err))
		return err
	}

	return nil
}

// Update persists CreateTime and IsDeleted of the index identified by
// (tenant_id, collection_id, index_id). gorm's struct-based Updates skips
// zero-value fields, hence the note below.
func (s *indexDb) Update(in *dbmodel.Index) error {
	err := s.db.Model(&dbmodel.Index{}).Where("tenant_id = ? AND collection_id = ? AND index_id = ?", in.TenantID, in.CollectionID, in.IndexID).Updates(dbmodel.Index{
		CreateTime: in.CreateTime, // if in.CreateTime is not set, column CreateTime will not be updated
		IsDeleted:  in.IsDeleted,
	}).Error

	if err != nil {
		log.Error("update indexes failed", zap.String("tenant", in.TenantID), zap.Int64("collID", in.CollectionID), zap.Int64("indexID", in.IndexID), zap.Error(err))
		return err
	}

	return nil
}

// MarkDeletedByCollectionID soft-deletes every index of a collection by setting
// is_deleted = true (rows are kept; readers filter on is_deleted).
func (s *indexDb) MarkDeletedByCollectionID(tenantID string, collID typeutil.UniqueID) error {
	err := s.db.Model(&dbmodel.Index{}).Where("tenant_id = ? AND collection_id = ?", tenantID, collID).Updates(dbmodel.Index{
		IsDeleted: true,
	}).Error

	if err != nil {
		log.Error("update indexes is_deleted=true failed", zap.String("tenant", tenantID), zap.Int64("collID", collID), zap.Error(err))
		return err
	}

	return nil
}

// MarkDeletedByIndexID soft-deletes a single index by its index_id.
func (s *indexDb) MarkDeletedByIndexID(tenantID string, indexID typeutil.UniqueID) error {
	err := s.db.Model(&dbmodel.Index{}).Where("tenant_id = ? AND index_id = ?", tenantID, indexID).Updates(dbmodel.Index{
		IsDeleted: true,
	}).Error

	if err != nil {
		log.Error("update indexes is_deleted=true failed", zap.String("tenant", tenantID), zap.Int64("indexID", indexID), zap.Error(err))
		return err
	}

	return nil
}
|
|
@ -0,0 +1,242 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestIndex_Get verifies the happy path of indexDb.Get.
func TestIndex_Get(t *testing.T) {
	var indexes = []*dbmodel.Index{
		{
			TenantID:     "",
			FieldID:      fieldID1,
			CollectionID: collID1,
			IndexID:      indexID1,
			IndexName:    "test_index_1",
			IndexParams:  "",
		},
	}

	// expectation
	mock.ExpectQuery("SELECT * FROM `indexes` WHERE tenant_id = ? AND collection_id = ? AND is_deleted = false").
		WithArgs(tenantID, collID1).
		WillReturnRows(
			sqlmock.NewRows([]string{"tenant_id", "field_id", "collection_id", "index_id", "index_name", "index_params"}).
				AddRow(indexes[0].TenantID, indexes[0].FieldID, indexes[0].CollectionID, indexes[0].IndexID, indexes[0].IndexName, indexes[0].IndexParams))

	// actual
	res, err := indexTestDb.Get(tenantID, collID1)
	assert.Nil(t, err)
	assert.Equal(t, indexes, res)
}

// TestIndex_Get_Error verifies that a driver error is propagated.
func TestIndex_Get_Error(t *testing.T) {
	// expectation
	mock.ExpectQuery("SELECT * FROM `indexes` WHERE tenant_id = ? AND collection_id = ? AND is_deleted = false").
		WithArgs(tenantID, collID1).
		WillReturnError(errors.New("test error"))

	// actual
	res, err := indexTestDb.Get(tenantID, collID1)
	assert.Nil(t, res)
	assert.Error(t, err)
}

// TestIndex_List verifies the LEFT JOIN between indexes and segment_indexes and
// the column aliasing into dbmodel.IndexResult.
func TestIndex_List(t *testing.T) {
	var indexResults = []*dbmodel.IndexResult{
		{
			FieldID:        fieldID1,
			CollectionID:   collID1,
			IndexID:        indexID1,
			IndexName:      "test_index_1",
			IndexParams:    "",
			SegmentID:      segmentID1,
			PartitionID:    partitionID1,
			EnableIndex:    false,
			IndexBuildID:   indexBuildID1,
			IndexSize:      1024,
			IndexFilePaths: "",
		},
	}

	// expectation
	mock.ExpectQuery("SELECT indexes.field_id AS field_id, indexes.collection_id AS collection_id, indexes.index_id AS index_id, indexes.index_name AS index_name, indexes.index_params AS index_params, segment_indexes.segment_id AS segment_id, segment_indexes.partition_id AS partition_id, segment_indexes.enable_index AS enable_index, segment_indexes.index_build_id AS index_build_id, segment_indexes.index_size AS index_size, segment_indexes.index_file_paths AS index_file_paths FROM `indexes` LEFT JOIN segment_indexes ON indexes.index_id = segment_indexes.index_id AND indexes.tenant_id = segment_indexes.tenant_id AND segment_indexes.tenant_id = ? AND segment_indexes.is_deleted = false WHERE indexes.is_deleted = false AND indexes.tenant_id = ?").
		WithArgs(tenantID, tenantID).
		WillReturnRows(
			sqlmock.NewRows([]string{"field_id", "collection_id", "index_id", "index_name", "index_params", "segment_id", "partition_id", "enable_index", "index_build_id", "index_size", "index_file_paths"}).
				AddRow(indexResults[0].FieldID, indexResults[0].CollectionID, indexResults[0].IndexID, indexResults[0].IndexName, indexResults[0].IndexParams, indexResults[0].SegmentID, indexResults[0].PartitionID, indexResults[0].EnableIndex, indexResults[0].IndexBuildID, indexResults[0].IndexSize, indexResults[0].IndexFilePaths))

	// actual
	res, err := indexTestDb.List(tenantID)
	assert.Nil(t, err)
	assert.Equal(t, indexResults, res)
}

// TestIndex_List_Error verifies that a driver error during the join is propagated.
func TestIndex_List_Error(t *testing.T) {
	// expectation
	mock.ExpectQuery("SELECT indexes.field_id AS field_id, indexes.collection_id AS collection_id, indexes.index_id AS index_id, indexes.index_name AS index_name, indexes.index_params AS index_params, segment_indexes.segment_id AS segment_id, segment_indexes.partition_id AS partition_id, segment_indexes.enable_index AS enable_index, segment_indexes.index_build_id AS index_build_id, segment_indexes.index_size AS index_size, segment_indexes.index_file_paths AS index_file_paths FROM `indexes` LEFT JOIN segment_indexes ON indexes.index_id = segment_indexes.index_id AND indexes.tenant_id = segment_indexes.tenant_id AND segment_indexes.tenant_id = ? AND segment_indexes.is_deleted = false WHERE indexes.is_deleted = false AND indexes.tenant_id = ?").
		WithArgs(tenantID, tenantID).
		WillReturnError(errors.New("test error"))

	// actual
	res, err := indexTestDb.List(tenantID)
	assert.Nil(t, res)
	assert.Error(t, err)
}

// TestIndex_Insert verifies the INSERT emitted for an index row.
func TestIndex_Insert(t *testing.T) {
	var indexes = []*dbmodel.Index{
		{
			TenantID:     tenantID,
			FieldID:      fieldID1,
			CollectionID: collID1,
			IndexID:      indexID1,
			IndexName:    "test_index_1",
			IndexParams:  "",
			CreateTime:   uint64(1011),
			IsDeleted:    false,
			CreatedAt:    time.Now(),
			UpdatedAt:    time.Now(),
		},
	}

	// expectation
	mock.ExpectBegin()
	mock.ExpectExec("INSERT INTO `indexes` (`tenant_id`,`field_id`,`collection_id`,`index_id`,`index_name`,`index_params`,`create_time`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?)").
		WithArgs(indexes[0].TenantID, indexes[0].FieldID, indexes[0].CollectionID, indexes[0].IndexID, indexes[0].IndexName, indexes[0].IndexParams, indexes[0].CreateTime, indexes[0].IsDeleted, indexes[0].CreatedAt, indexes[0].UpdatedAt).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()

	// actual
	err := indexTestDb.Insert(indexes)
	assert.Nil(t, err)
}

// TestIndex_Insert_Error verifies rollback and error propagation on insert failure.
func TestIndex_Insert_Error(t *testing.T) {
	var indexes = []*dbmodel.Index{
		{
			TenantID:     tenantID,
			FieldID:      fieldID1,
			CollectionID: collID1,
			IndexID:      indexID1,
			IndexName:    "test_index_1",
			IndexParams:  "",
			CreateTime:   uint64(1011),
			IsDeleted:    false,
			CreatedAt:    time.Now(),
			UpdatedAt:    time.Now(),
		},
	}

	// expectation
	mock.ExpectBegin()
	mock.ExpectExec("INSERT INTO `indexes` (`tenant_id`,`field_id`,`collection_id`,`index_id`,`index_name`,`index_params`,`create_time`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?)").
		WithArgs(indexes[0].TenantID, indexes[0].FieldID, indexes[0].CollectionID, indexes[0].IndexID, indexes[0].IndexName, indexes[0].IndexParams, indexes[0].CreateTime, indexes[0].IsDeleted, indexes[0].CreatedAt, indexes[0].UpdatedAt).
		WillReturnError(errors.New("test error"))
	mock.ExpectRollback()

	// actual
	err := indexTestDb.Insert(indexes)
	assert.Error(t, err)
}

// TestIndex_Update verifies the UPDATE emitted by indexDb.Update; updated_at is
// populated at execution time, so it is matched with the AnyTime matcher.
func TestIndex_Update(t *testing.T) {
	var index = &dbmodel.Index{
		TenantID:    tenantID,
		IndexName:   "test_index_name_1",
		IndexID:     indexID1,
		IndexParams: "",
		IsDeleted:   true,
		CreateTime:  uint64(1111),
	}

	// expectation
	mock.ExpectBegin()
	mock.ExpectExec("UPDATE `indexes` SET `create_time`=?,`is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND collection_id = ? AND index_id = ?").
		WithArgs(index.CreateTime, index.IsDeleted, AnyTime{}, index.TenantID, index.CollectionID, index.IndexID).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()

	// actual
	err := indexTestDb.Update(index)
	assert.Nil(t, err)
}
|
||||
|
||||
func TestIndex_Update_Error(t *testing.T) {
|
||||
var index = &dbmodel.Index{
|
||||
TenantID: tenantID,
|
||||
IndexName: "test_index_name_1",
|
||||
IndexID: indexID1,
|
||||
IndexParams: "",
|
||||
IsDeleted: true,
|
||||
CreateTime: uint64(1111),
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `indexes` SET `create_time`=?,`is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND collection_id = ? AND index_id = ?").
|
||||
WithArgs(index.CreateTime, index.IsDeleted, AnyTime{}, index.TenantID, index.CollectionID, index.IndexID).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := indexTestDb.Update(index)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestIndex_MarkDeletedByCollID(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND collection_id = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, collID1).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := indexTestDb.MarkDeletedByCollectionID(tenantID, collID1)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestIndex_MarkDeletedByCollID_Error(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND collection_id = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, collID1).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := indexTestDb.MarkDeletedByCollectionID(tenantID, collID1)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestIndex_MarkDeletedByIdxID(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND index_id = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, indexID1).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := indexTestDb.MarkDeletedByIndexID(tenantID, indexID1)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestIndex_MarkDeletedByIdxID_Error(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND index_id = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, indexID1).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := indexTestDb.MarkDeletedByIndexID(tenantID, indexID1)
|
||||
assert.Error(t, err)
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
type partitionDb struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func (s *partitionDb) GetByCollectionID(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) ([]*dbmodel.Partition, error) {
|
||||
var r []*dbmodel.Partition
|
||||
|
||||
err := s.db.Model(&dbmodel.Partition{}).Where("tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false", tenantID, collectionID, ts).Find(&r).Error
|
||||
if err != nil {
|
||||
log.Error("get partitions by collection_id and ts failed", zap.String("tenant", tenantID), zap.Int64("collID", collectionID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (s *partitionDb) Insert(in []*dbmodel.Partition) error {
|
||||
err := s.db.CreateInBatches(in, 100).Error
|
||||
if err != nil {
|
||||
log.Error("insert partition failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,103 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPartition_GetByCollID(t *testing.T) {
|
||||
var partitions = []*dbmodel.Partition{
|
||||
{
|
||||
TenantID: tenantID,
|
||||
PartitionID: fieldID1,
|
||||
PartitionName: "test_field_1",
|
||||
PartitionCreatedTimestamp: typeutil.Timestamp(1000),
|
||||
CollectionID: collID1,
|
||||
Ts: ts,
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT * FROM `partitions` WHERE tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false").
|
||||
WithArgs(tenantID, collID1, ts).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"tenant_id", "partition_id", "partition_name", "partition_created_timestamp", "collection_id", "ts"}).
|
||||
AddRow(partitions[0].TenantID, partitions[0].PartitionID, partitions[0].PartitionName, partitions[0].PartitionCreatedTimestamp, partitions[0].CollectionID, partitions[0].Ts))
|
||||
|
||||
// actual
|
||||
res, err := partitionTestDb.GetByCollectionID(tenantID, collID1, ts)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, partitions, res)
|
||||
}
|
||||
|
||||
func TestPartition_GetByCollID_Error(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT * FROM `partitions` WHERE tenant_id = ? AND collection_id = ? AND ts = ? AND is_deleted = false").
|
||||
WithArgs(tenantID, collID1, ts).
|
||||
WillReturnError(errors.New("test error"))
|
||||
|
||||
// actual
|
||||
res, err := partitionTestDb.GetByCollectionID(tenantID, collID1, ts)
|
||||
assert.Nil(t, res)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestPartition_Insert(t *testing.T) {
|
||||
var partitions = []*dbmodel.Partition{
|
||||
{
|
||||
TenantID: tenantID,
|
||||
PartitionID: fieldID1,
|
||||
PartitionName: "test_field_1",
|
||||
PartitionCreatedTimestamp: typeutil.Timestamp(1000),
|
||||
CollectionID: collID1,
|
||||
Ts: ts,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `partitions` (`tenant_id`,`partition_id`,`partition_name`,`partition_created_timestamp`,`collection_id`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?)").
|
||||
WithArgs(partitions[0].TenantID, partitions[0].PartitionID, partitions[0].PartitionName, partitions[0].PartitionCreatedTimestamp, partitions[0].CollectionID, partitions[0].Ts, partitions[0].IsDeleted, partitions[0].CreatedAt, partitions[0].UpdatedAt).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := partitionTestDb.Insert(partitions)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestPartition_Insert_Error(t *testing.T) {
|
||||
var partitions = []*dbmodel.Partition{
|
||||
{
|
||||
TenantID: tenantID,
|
||||
PartitionID: fieldID1,
|
||||
PartitionName: "test_field_1",
|
||||
PartitionCreatedTimestamp: typeutil.Timestamp(1000),
|
||||
CollectionID: collID1,
|
||||
Ts: ts,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `partitions` (`tenant_id`,`partition_id`,`partition_name`,`partition_created_timestamp`,`collection_id`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?)").
|
||||
WithArgs(partitions[0].TenantID, partitions[0].PartitionID, partitions[0].PartitionName, partitions[0].PartitionCreatedTimestamp, partitions[0].CollectionID, partitions[0].Ts, partitions[0].IsDeleted, partitions[0].CreatedAt, partitions[0].UpdatedAt).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := partitionTestDb.Insert(partitions)
|
||||
assert.Error(t, err)
|
||||
}
|
|
@ -0,0 +1,83 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
type segmentIndexDb struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func (s *segmentIndexDb) Insert(in []*dbmodel.SegmentIndex) error {
|
||||
err := s.db.CreateInBatches(in, 100).Error
|
||||
if err != nil {
|
||||
log.Error("insert segment_indexes failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *segmentIndexDb) Upsert(in []*dbmodel.SegmentIndex) error {
|
||||
err := s.db.Clauses(clause.OnConflict{
|
||||
// constraint UNIQUE (tenant_id, segment_id, index_id)
|
||||
DoUpdates: clause.AssignmentColumns([]string{"index_build_id", "enable_index", "create_time"}),
|
||||
}).CreateInBatches(in, 100).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("upsert segment_indexes failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *segmentIndexDb) MarkDeleted(tenantID string, segIndexes []*dbmodel.SegmentIndex) error {
|
||||
inValues := make([][]interface{}, 0, len(segIndexes))
|
||||
for _, segIdx := range segIndexes {
|
||||
in := []interface{}{segIdx.SegmentID, segIdx.IndexID}
|
||||
inValues = append(inValues, in)
|
||||
}
|
||||
|
||||
err := s.db.Model(&dbmodel.SegmentIndex{}).Where("tenant_id = ? AND (segment_id, index_id) IN ?", tenantID, inValues).Updates(dbmodel.SegmentIndex{
|
||||
IsDeleted: true,
|
||||
}).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("update segment_indexes deleted failed", zap.String("tenant", tenantID), zap.Any("segmentIDIndexID", inValues), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *segmentIndexDb) MarkDeletedByCollectionID(tenantID string, collID typeutil.UniqueID) error {
|
||||
err := s.db.Model(&dbmodel.SegmentIndex{}).Where("tenant_id = ? AND collection_id = ?", tenantID, collID).Updates(dbmodel.SegmentIndex{
|
||||
IsDeleted: true,
|
||||
}).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("update segment_indexes deleted by collection id failed", zap.String("tenant", tenantID), zap.Int64("collID", collID), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *segmentIndexDb) MarkDeletedByIndexID(tenantID string, indexID typeutil.UniqueID) error {
|
||||
err := s.db.Model(&dbmodel.SegmentIndex{}).Where("tenant_id = ? AND index_id = ?", tenantID, indexID).Updates(dbmodel.SegmentIndex{
|
||||
IsDeleted: true,
|
||||
}).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("update segment_indexes deleted by index id failed", zap.String("tenant", tenantID), zap.Int64("indexID", indexID), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,225 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSegmentIndex_Insert(t *testing.T) {
|
||||
var segIndexes = []*dbmodel.SegmentIndex{
|
||||
{
|
||||
TenantID: tenantID,
|
||||
CollectionID: collID1,
|
||||
PartitionID: partitionID1,
|
||||
SegmentID: segmentID1,
|
||||
FieldID: fieldID1,
|
||||
IndexID: indexID1,
|
||||
IndexBuildID: indexBuildID1,
|
||||
EnableIndex: false,
|
||||
CreateTime: uint64(1011),
|
||||
IndexFilePaths: "",
|
||||
IndexSize: 1024,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`field_id`,`index_id`,`index_build_id`,`enable_index`,`create_time`,`index_file_paths`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
|
||||
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].FieldID, segIndexes[0].IndexID, segIndexes[0].IndexBuildID, segIndexes[0].EnableIndex, segIndexes[0].CreateTime, segIndexes[0].IndexFilePaths, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.Insert(segIndexes)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestSegmentIndex_Insert_Error(t *testing.T) {
|
||||
var segIndexes = []*dbmodel.SegmentIndex{
|
||||
{
|
||||
TenantID: tenantID,
|
||||
CollectionID: collID1,
|
||||
PartitionID: partitionID1,
|
||||
SegmentID: segmentID1,
|
||||
FieldID: fieldID1,
|
||||
IndexID: indexID1,
|
||||
IndexBuildID: indexBuildID1,
|
||||
EnableIndex: false,
|
||||
CreateTime: uint64(1011),
|
||||
IndexFilePaths: "",
|
||||
IndexSize: 1024,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`field_id`,`index_id`,`index_build_id`,`enable_index`,`create_time`,`index_file_paths`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)").
|
||||
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].FieldID, segIndexes[0].IndexID, segIndexes[0].IndexBuildID, segIndexes[0].EnableIndex, segIndexes[0].CreateTime, segIndexes[0].IndexFilePaths, segIndexes[0].IndexSize, segIndexes[0].IsDeleted, segIndexes[0].CreatedAt, segIndexes[0].UpdatedAt).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.Insert(segIndexes)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestSegmentIndex_Upsert(t *testing.T) {
|
||||
var segIndexes = []*dbmodel.SegmentIndex{
|
||||
{
|
||||
TenantID: tenantID,
|
||||
CollectionID: collID1,
|
||||
PartitionID: partitionID1,
|
||||
SegmentID: segmentID1,
|
||||
FieldID: fieldID1,
|
||||
IndexID: indexID1,
|
||||
IndexBuildID: indexBuildID1,
|
||||
EnableIndex: false,
|
||||
CreateTime: uint64(1011),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`field_id`,`index_id`,`index_build_id`,`enable_index`,`create_time`,`index_file_paths`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `index_build_id`=VALUES(`index_build_id`),`enable_index`=VALUES(`enable_index`),`create_time`=VALUES(`create_time`)").
|
||||
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].FieldID, segIndexes[0].IndexID, segIndexes[0].IndexBuildID, segIndexes[0].EnableIndex, segIndexes[0].CreateTime, "", uint64(0), false, AnyTime{}, AnyTime{}).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.Upsert(segIndexes)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestSegmentIndex_Upsert_Error(t *testing.T) {
|
||||
var segIndexes = []*dbmodel.SegmentIndex{
|
||||
{
|
||||
TenantID: tenantID,
|
||||
CollectionID: collID1,
|
||||
PartitionID: partitionID1,
|
||||
SegmentID: segmentID1,
|
||||
FieldID: fieldID1,
|
||||
IndexID: indexID1,
|
||||
IndexBuildID: indexBuildID1,
|
||||
EnableIndex: false,
|
||||
CreateTime: uint64(1011),
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `segment_indexes` (`tenant_id`,`collection_id`,`partition_id`,`segment_id`,`field_id`,`index_id`,`index_build_id`,`enable_index`,`create_time`,`index_file_paths`,`index_size`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `index_build_id`=VALUES(`index_build_id`),`enable_index`=VALUES(`enable_index`),`create_time`=VALUES(`create_time`)").
|
||||
WithArgs(segIndexes[0].TenantID, segIndexes[0].CollectionID, segIndexes[0].PartitionID, segIndexes[0].SegmentID, segIndexes[0].FieldID, segIndexes[0].IndexID, segIndexes[0].IndexBuildID, segIndexes[0].EnableIndex, segIndexes[0].CreateTime, "", uint64(0), false, AnyTime{}, AnyTime{}).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.Upsert(segIndexes)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestSegmentIndex_MarkDeleted(t *testing.T) {
|
||||
var segIndexes = []*dbmodel.SegmentIndex{
|
||||
{
|
||||
SegmentID: segmentID1,
|
||||
IndexID: indexID1,
|
||||
},
|
||||
{
|
||||
SegmentID: segmentID2,
|
||||
IndexID: indexID2,
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `segment_indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND (segment_id, index_id) IN ((?,?),(?,?))").
|
||||
WithArgs(true, AnyTime{}, tenantID, segIndexes[0].SegmentID, segIndexes[0].IndexID, segIndexes[1].SegmentID, segIndexes[1].IndexID).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.MarkDeleted(tenantID, segIndexes)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestSegmentIndex_MarkDeleted_Error(t *testing.T) {
|
||||
var segIndexes = []*dbmodel.SegmentIndex{
|
||||
{
|
||||
SegmentID: segmentID1,
|
||||
IndexID: indexID1,
|
||||
},
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `segment_indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND (segment_id, index_id) IN ((?,?))").
|
||||
WithArgs(true, AnyTime{}, tenantID, segIndexes[0].SegmentID, segIndexes[0].IndexID).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.MarkDeleted(tenantID, segIndexes)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestSegmentIndex_MarkDeletedByCollID(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `segment_indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND collection_id = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, collID1).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.MarkDeletedByCollectionID(tenantID, collID1)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestSegmentIndex_MarkDeletedByCollID_Error(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `segment_indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND collection_id = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, collID1).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.MarkDeletedByCollectionID(tenantID, collID1)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestSegmentIndex_MarkDeletedByIdxID(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `segment_indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND index_id = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, indexID1).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.MarkDeletedByIndexID(tenantID, indexID1)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestSegmentIndex_MarkDeletedByIdxID_Error(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `segment_indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND index_id = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, indexID1).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := segIndexTestDb.MarkDeletedByIndexID(tenantID, indexID1)
|
||||
assert.Error(t, err)
|
||||
}
|
|
@ -0,0 +1,63 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
type userDb struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func (s *userDb) GetByUsername(tenantID string, username string) (*dbmodel.User, error) {
|
||||
var r *dbmodel.User
|
||||
|
||||
err := s.db.Model(&dbmodel.User{}).Where("tenant_id = ? AND username = ? AND is_deleted = false", tenantID, username).Take(&r).Error
|
||||
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, fmt.Errorf("user %s not found", username)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("get user by username failed", zap.String("tenant", tenantID), zap.String("username", username), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (s *userDb) ListUsername(tenantID string) ([]string, error) {
|
||||
var usernames []string
|
||||
|
||||
err := s.db.Model(&dbmodel.User{}).Select("username").Where("tenant_id = ? AND is_deleted = false", tenantID).Find(&usernames).Error
|
||||
if err != nil {
|
||||
log.Error("list usernames failed", zap.String("tenant", tenantID), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return usernames, nil
|
||||
}
|
||||
|
||||
func (s *userDb) Insert(in *dbmodel.User) error {
|
||||
err := s.db.Create(in).Error
|
||||
if err != nil {
|
||||
log.Error("insert credential_users failed", zap.String("tenant", in.TenantID), zap.String("username", in.Username), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *userDb) MarkDeletedByUsername(tenantID string, username string) error {
|
||||
err := s.db.Model(&dbmodel.Index{}).Where("tenant_id = ? AND username = ?", tenantID, username).Update("is_deleted", true).Error
|
||||
if err != nil {
|
||||
log.Error("update credential_users is_deleted=true failed", zap.String("tenant", tenantID), zap.String("username", username), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,170 @@
|
|||
package dao
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
func TestUser_GetByUsername(t *testing.T) {
|
||||
username := "test_username_1"
|
||||
var user = &dbmodel.User{
|
||||
TenantID: tenantID,
|
||||
Username: username,
|
||||
EncryptedPassword: "xxx",
|
||||
IsSuper: false,
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT * FROM `credential_users` WHERE tenant_id = ? AND username = ? AND is_deleted = false LIMIT 1").
|
||||
WithArgs(tenantID, username).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"tenant_id", "username", "encrypted_password", "is_super"}).
|
||||
AddRow(user.TenantID, user.Username, user.EncryptedPassword, user.IsSuper))
|
||||
|
||||
// actual
|
||||
res, err := userTestDb.GetByUsername(tenantID, username)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, user, res)
|
||||
}
|
||||
|
||||
func TestUser_GetByUsername_ErrRecordNotFound(t *testing.T) {
|
||||
username := "test_username_1"
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT * FROM `credential_users` WHERE tenant_id = ? AND username = ? AND is_deleted = false LIMIT 1").
|
||||
WithArgs(tenantID, username).
|
||||
WillReturnError(gorm.ErrRecordNotFound)
|
||||
|
||||
// actual
|
||||
res, err := userTestDb.GetByUsername(tenantID, username)
|
||||
assert.Nil(t, res)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestUser_GetByUsername_Error(t *testing.T) {
|
||||
username := "test_username_1"
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT * FROM `credential_users` WHERE tenant_id = ? AND username = ? AND is_deleted = false LIMIT 1").
|
||||
WithArgs(tenantID, username).
|
||||
WillReturnError(errors.New("test error"))
|
||||
|
||||
// actual
|
||||
res, err := userTestDb.GetByUsername(tenantID, username)
|
||||
assert.Nil(t, res)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestUser_ListUsername(t *testing.T) {
|
||||
var usernames = []string{
|
||||
"test_username_1",
|
||||
"test_username_2",
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT `username` FROM `credential_users` WHERE tenant_id = ? AND is_deleted = false").
|
||||
WithArgs(tenantID).
|
||||
WillReturnRows(
|
||||
sqlmock.NewRows([]string{"username"}).
|
||||
AddRow(usernames[0]).
|
||||
AddRow(usernames[1]))
|
||||
|
||||
// actual
|
||||
res, err := userTestDb.ListUsername(tenantID)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, usernames, res)
|
||||
}
|
||||
|
||||
func TestUser_ListUsername_Error(t *testing.T) {
|
||||
// expectation
|
||||
mock.ExpectQuery("SELECT `username` FROM `credential_users` WHERE tenant_id = ? AND is_deleted = false").
|
||||
WithArgs(tenantID).
|
||||
WillReturnError(errors.New("test error"))
|
||||
|
||||
// actual
|
||||
res, err := userTestDb.ListUsername(tenantID)
|
||||
assert.Nil(t, res)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestUser_Insert(t *testing.T) {
|
||||
var user = &dbmodel.User{
|
||||
TenantID: tenantID,
|
||||
Username: "test_username",
|
||||
EncryptedPassword: "xxx",
|
||||
IsSuper: false,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `credential_users` (`tenant_id`,`username`,`encrypted_password`,`is_super`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?)").
|
||||
WithArgs(user.TenantID, user.Username, user.EncryptedPassword, user.IsSuper, user.IsDeleted, user.CreatedAt, user.UpdatedAt).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := userTestDb.Insert(user)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestUser_Insert_Error(t *testing.T) {
|
||||
var user = &dbmodel.User{
|
||||
TenantID: tenantID,
|
||||
Username: "test_username",
|
||||
EncryptedPassword: "xxx",
|
||||
IsSuper: false,
|
||||
IsDeleted: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("INSERT INTO `credential_users` (`tenant_id`,`username`,`encrypted_password`,`is_super`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?)").
|
||||
WithArgs(user.TenantID, user.Username, user.EncryptedPassword, user.IsSuper, user.IsDeleted, user.CreatedAt, user.UpdatedAt).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := userTestDb.Insert(user)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestUser_MarkDeletedByUsername(t *testing.T) {
|
||||
username := "test_username_1"
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND username = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, username).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
mock.ExpectCommit()
|
||||
|
||||
// actual
|
||||
err := userTestDb.MarkDeletedByUsername(tenantID, username)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestUser_MarkDeletedByUsername_Error(t *testing.T) {
|
||||
username := "test_username_1"
|
||||
|
||||
// expectation
|
||||
mock.ExpectBegin()
|
||||
mock.ExpectExec("UPDATE `indexes` SET `is_deleted`=?,`updated_at`=? WHERE tenant_id = ? AND username = ?").
|
||||
WithArgs(true, AnyTime{}, tenantID, username).
|
||||
WillReturnError(errors.New("test error"))
|
||||
mock.ExpectRollback()
|
||||
|
||||
// actual
|
||||
err := userTestDb.MarkDeletedByUsername(tenantID, username)
|
||||
assert.Error(t, err)
|
||||
}
|
|
@ -0,0 +1,105 @@
|
|||
package dbcore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/util/paramtable"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
globalDB *gorm.DB
|
||||
)
|
||||
|
||||
func Connect(cfg *paramtable.MetaDBConfig) error {
|
||||
// load config
|
||||
dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local", cfg.Username, cfg.Password, cfg.Address, cfg.Port, cfg.DBName)
|
||||
|
||||
var ormLogger logger.Interface
|
||||
if cfg.Base.Log.Level == "debug" {
|
||||
ormLogger = logger.Default.LogMode(logger.Info)
|
||||
} else {
|
||||
ormLogger = logger.Default
|
||||
}
|
||||
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
|
||||
Logger: ormLogger,
|
||||
CreateBatchSize: 100,
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("fail to connect db", zap.String("host", cfg.Address), zap.Int("port", cfg.Port), zap.String("database", cfg.DBName), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
idb, err := db.DB()
|
||||
if err != nil {
|
||||
log.Error("fail to create db instance", zap.String("host", cfg.Address), zap.Int("port", cfg.Port), zap.String("database", cfg.DBName), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
idb.SetMaxIdleConns(cfg.MaxIdleConns)
|
||||
idb.SetMaxOpenConns(cfg.MaxOpenConns)
|
||||
|
||||
globalDB = db
|
||||
|
||||
log.Info("db connected success", zap.String("host", cfg.Address), zap.Int("port", cfg.Port), zap.String("database", cfg.DBName))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetGlobalDB Only for test
|
||||
func SetGlobalDB(db *gorm.DB) {
|
||||
globalDB = db
|
||||
}
|
||||
|
||||
type ctxTransactionKey struct{}
|
||||
|
||||
func CtxWithTransaction(ctx context.Context, tx *gorm.DB) context.Context {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
return context.WithValue(ctx, ctxTransactionKey{}, tx)
|
||||
}
|
||||
|
||||
type txImpl struct{}
|
||||
|
||||
func NewTxImpl() *txImpl {
|
||||
return &txImpl{}
|
||||
}
|
||||
|
||||
func (*txImpl) Transaction(ctx context.Context, fn func(txctx context.Context) error) error {
|
||||
db := globalDB.WithContext(ctx)
|
||||
|
||||
return db.Transaction(func(tx *gorm.DB) error {
|
||||
txCtx := CtxWithTransaction(ctx, tx)
|
||||
return fn(txCtx)
|
||||
})
|
||||
}
|
||||
|
||||
func GetDB(ctx context.Context) *gorm.DB {
|
||||
iface := ctx.Value(ctxTransactionKey{})
|
||||
|
||||
if iface != nil {
|
||||
tx, ok := iface.(*gorm.DB)
|
||||
if !ok {
|
||||
log.Error("unexpect context value type", zap.Any("type", reflect.TypeOf(tx)))
|
||||
return nil
|
||||
}
|
||||
|
||||
return tx
|
||||
}
|
||||
|
||||
return globalDB.WithContext(ctx)
|
||||
}
|
||||
|
||||
//type CommonModel struct {
|
||||
// ID string `gorm:"primary_key"`
|
||||
// IsDeleted bool `gorm:"is_deleted"`
|
||||
// CreatedAt time.Time `gorm:"created_at"`
|
||||
// UpdatedAt time.Time `gorm:"updated_at"`
|
||||
//}
|
|
@ -0,0 +1,67 @@
|
|||
package dbmodel
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Collection struct {
|
||||
ID int64 `gorm:"id"`
|
||||
TenantID string `gorm:"tenant_id"`
|
||||
CollectionID int64 `gorm:"collection_id"`
|
||||
CollectionName string `gorm:"collection_name"`
|
||||
Description string `gorm:"description"`
|
||||
AutoID bool `gorm:"auto_id"`
|
||||
ShardsNum int32 `gorm:"shards_num"`
|
||||
StartPosition string `gorm:"start_position"`
|
||||
ConsistencyLevel int32 `gorm:"consistency_level"`
|
||||
Ts typeutil.Timestamp `gorm:"ts"`
|
||||
IsDeleted bool `gorm:"is_deleted"`
|
||||
CreatedAt time.Time `gorm:"created_at"`
|
||||
UpdatedAt time.Time `gorm:"updated_at"`
|
||||
}
|
||||
|
||||
func (v Collection) TableName() string {
|
||||
return "collections"
|
||||
}
|
||||
|
||||
//go:generate mockery --name=ICollectionDb
|
||||
type ICollectionDb interface {
|
||||
// GetCollectionIdTs get the largest timestamp that less than or equal to param ts, no matter is_deleted is true or false.
|
||||
GetCollectionIDTs(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*Collection, error)
|
||||
ListCollectionIDTs(tenantID string, ts typeutil.Timestamp) ([]*Collection, error)
|
||||
Get(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*Collection, error)
|
||||
GetCollectionIDByName(tenantID string, collectionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error)
|
||||
Insert(in *Collection) error
|
||||
}
|
||||
|
||||
// model <---> db
|
||||
|
||||
func UnmarshalCollectionModel(coll *Collection) (*model.Collection, error) {
|
||||
var startPositions []*commonpb.KeyDataPair
|
||||
if coll.StartPosition != "" {
|
||||
err := json.Unmarshal([]byte(coll.StartPosition), &startPositions)
|
||||
if err != nil {
|
||||
log.Error("unmarshal collection start positions error", zap.Int64("collID", coll.CollectionID), zap.Uint64("ts", coll.Ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &model.Collection{
|
||||
TenantID: coll.TenantID,
|
||||
CollectionID: coll.CollectionID,
|
||||
Name: coll.CollectionName,
|
||||
Description: coll.Description,
|
||||
AutoID: coll.AutoID,
|
||||
ShardsNum: coll.ShardsNum,
|
||||
StartPositions: startPositions,
|
||||
ConsistencyLevel: commonpb.ConsistencyLevel(coll.ConsistencyLevel),
|
||||
CreateTime: coll.Ts,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
package dbmodel
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type CollectionAlias struct {
|
||||
ID int64 `gorm:"id"`
|
||||
TenantID string `gorm:"tenant_id"`
|
||||
CollectionID int64 `gorm:"collection_id"`
|
||||
CollectionAlias string `gorm:"collection_alias"`
|
||||
Ts typeutil.Timestamp `gorm:"ts"`
|
||||
IsDeleted bool `gorm:"is_deleted"`
|
||||
CreatedAt time.Time `gorm:"created_at"`
|
||||
UpdatedAt time.Time `gorm:"updated_at"`
|
||||
}
|
||||
|
||||
func (v CollectionAlias) TableName() string {
|
||||
return "collection_aliases"
|
||||
}
|
||||
|
||||
//go:generate mockery --name=ICollAliasDb
|
||||
type ICollAliasDb interface {
|
||||
Insert(in []*CollectionAlias) error
|
||||
GetCollectionIDByAlias(tenantID string, alias string, ts typeutil.Timestamp) (typeutil.UniqueID, error)
|
||||
ListCollectionIDTs(tenantID string, ts typeutil.Timestamp) ([]*CollectionAlias, error)
|
||||
List(tenantID string, cidTsPairs []*CollectionAlias) ([]*CollectionAlias, error)
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
package dbmodel
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type CollectionChannel struct {
|
||||
ID int64 `gorm:"id"`
|
||||
TenantID string `gorm:"tenant_id"`
|
||||
CollectionID int64 `gorm:"collection_id"`
|
||||
VirtualChannelName string `gorm:"virtual_channel_name"`
|
||||
PhysicalChannelName string `gorm:"physical_channel_name"`
|
||||
Removed bool `gorm:"removed"`
|
||||
Ts typeutil.Timestamp `gorm:"ts"`
|
||||
IsDeleted bool `gorm:"is_deleted"`
|
||||
CreatedAt time.Time `gorm:"created_at"`
|
||||
UpdatedAt time.Time `gorm:"updated_at"`
|
||||
}
|
||||
|
||||
func (v CollectionChannel) TableName() string {
|
||||
return "collection_channels"
|
||||
}
|
||||
|
||||
//go:generate mockery --name=ICollChannelDb
|
||||
type ICollChannelDb interface {
|
||||
GetByCollectionID(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) ([]*CollectionChannel, error)
|
||||
Insert(in []*CollectionChannel) error
|
||||
}
|
||||
|
||||
func ExtractChannelNames(channels []*CollectionChannel) ([]string, []string) {
|
||||
vchans := make([]string, 0, len(channels))
|
||||
pchans := make([]string, 0, len(channels))
|
||||
for _, ch := range channels {
|
||||
vchans = append(vchans, ch.VirtualChannelName)
|
||||
pchans = append(pchans, ch.PhysicalChannelName)
|
||||
}
|
||||
return vchans, pchans
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
package dbmodel
|
||||
|
||||
import "context"
|
||||
|
||||
//go:generate mockery --name=IMetaDomain
|
||||
type IMetaDomain interface {
|
||||
CollectionDb(ctx context.Context) ICollectionDb
|
||||
FieldDb(ctx context.Context) IFieldDb
|
||||
CollChannelDb(ctx context.Context) ICollChannelDb
|
||||
CollAliasDb(ctx context.Context) ICollAliasDb
|
||||
PartitionDb(ctx context.Context) IPartitionDb
|
||||
IndexDb(ctx context.Context) IIndexDb
|
||||
SegmentIndexDb(ctx context.Context) ISegmentIndexDb
|
||||
UserDb(ctx context.Context) IUserDb
|
||||
}
|
||||
|
||||
type ITransaction interface {
|
||||
Transaction(ctx context.Context, fn func(txCtx context.Context) error) error
|
||||
}
|
|
@ -0,0 +1,90 @@
|
|||
package dbmodel
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Field struct {
|
||||
ID int64 `gorm:"id"`
|
||||
TenantID string `gorm:"tenant_id"`
|
||||
FieldID int64 `gorm:"field_id"`
|
||||
FieldName string `gorm:"field_name"`
|
||||
IsPrimaryKey bool `gorm:"is_primary_key"`
|
||||
Description string `gorm:"description"`
|
||||
DataType schemapb.DataType `gorm:"data_type"`
|
||||
TypeParams string `gorm:"type_params"`
|
||||
IndexParams string `gorm:"index_params"`
|
||||
AutoID bool `gorm:"auto_id"`
|
||||
CollectionID int64 `gorm:"collection_id"`
|
||||
Ts typeutil.Timestamp `gorm:"ts"`
|
||||
IsDeleted bool `gorm:"is_deleted"`
|
||||
CreatedAt time.Time `gorm:"created_at"`
|
||||
UpdatedAt time.Time `gorm:"updated_at"`
|
||||
}
|
||||
|
||||
func (v Field) TableName() string {
|
||||
return "field_schemas"
|
||||
}
|
||||
|
||||
//go:generate mockery --name=IFieldDb
|
||||
type IFieldDb interface {
|
||||
GetByCollectionID(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) ([]*Field, error)
|
||||
Insert(in []*Field) error
|
||||
}
|
||||
|
||||
// model <---> db
|
||||
|
||||
func UnmarshalFieldModel(fields []*Field) ([]*model.Field, error) {
|
||||
r := make([]*model.Field, 0, len(fields))
|
||||
for _, f := range fields {
|
||||
fd, err := ConvertFieldDBToModel(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r = append(r, fd)
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func ConvertFieldDBToModel(field *Field) (*model.Field, error) {
|
||||
var typeParams []commonpb.KeyValuePair
|
||||
if field.TypeParams != "" {
|
||||
err := json.Unmarshal([]byte(field.TypeParams), &typeParams)
|
||||
if err != nil {
|
||||
log.Error("unmarshal TypeParams of field failed", zap.Int64("collID", field.CollectionID),
|
||||
zap.Int64("fieldID", field.FieldID), zap.String("fieldName", field.FieldName), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var indexParams []commonpb.KeyValuePair
|
||||
if field.IndexParams != "" {
|
||||
err := json.Unmarshal([]byte(field.IndexParams), &indexParams)
|
||||
if err != nil {
|
||||
log.Error("unmarshal IndexParams of field failed", zap.Int64("collID", field.CollectionID),
|
||||
zap.Int64("fieldID", field.FieldID), zap.String("fieldName", field.FieldName), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &model.Field{
|
||||
FieldID: field.FieldID,
|
||||
Name: field.FieldName,
|
||||
IsPrimaryKey: field.IsPrimaryKey,
|
||||
Description: field.Description,
|
||||
DataType: field.DataType,
|
||||
TypeParams: funcutil.ConvertToKeyValuePairPointer(typeParams),
|
||||
IndexParams: funcutil.ConvertToKeyValuePairPointer(indexParams),
|
||||
AutoID: field.AutoID,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,126 @@
|
|||
package dbmodel
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/common"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Index struct {
|
||||
ID int64 `gorm:"id"`
|
||||
TenantID string `gorm:"tenant_id"`
|
||||
FieldID int64 `gorm:"field_id"`
|
||||
CollectionID int64 `gorm:"collection_id"`
|
||||
IndexID int64 `gorm:"index_id"`
|
||||
IndexName string `gorm:"index_name"`
|
||||
IndexParams string `gorm:"index_params"`
|
||||
CreateTime uint64 `gorm:"create_time"`
|
||||
IsDeleted bool `gorm:"is_deleted"`
|
||||
CreatedAt time.Time `gorm:"created_at"`
|
||||
UpdatedAt time.Time `gorm:"updated_at"`
|
||||
}
|
||||
|
||||
func (v Index) TableName() string {
|
||||
return "indexes"
|
||||
}
|
||||
|
||||
// ------------- search result -------------
|
||||
|
||||
type IndexResult struct {
|
||||
FieldID int64
|
||||
CollectionID int64
|
||||
IndexID int64
|
||||
IndexName string
|
||||
IndexParams string
|
||||
SegmentID int64
|
||||
PartitionID int64
|
||||
EnableIndex bool
|
||||
IndexBuildID int64
|
||||
IndexSize uint64
|
||||
IndexFilePaths string
|
||||
}
|
||||
|
||||
//go:generate mockery --name=IIndexDb
|
||||
type IIndexDb interface {
|
||||
Get(tenantID string, collectionID typeutil.UniqueID) ([]*Index, error)
|
||||
List(tenantID string) ([]*IndexResult, error)
|
||||
Insert(in []*Index) error
|
||||
Update(in *Index) error
|
||||
MarkDeletedByCollectionID(tenantID string, collID typeutil.UniqueID) error
|
||||
MarkDeletedByIndexID(tenantID string, idxID typeutil.UniqueID) error
|
||||
}
|
||||
|
||||
// model <---> db
|
||||
|
||||
func UnmarshalIndexModel(inputs []*IndexResult) ([]*model.Index, error) {
|
||||
result := make([]*model.Index, 0, len(inputs))
|
||||
|
||||
for _, ir := range inputs {
|
||||
var indexFilePaths []string
|
||||
if ir.IndexFilePaths != "" {
|
||||
err := json.Unmarshal([]byte(ir.IndexFilePaths), &indexFilePaths)
|
||||
if err != nil {
|
||||
log.Error("unmarshal IndexFilePaths of SegmentIndex failed", zap.Int64("collID", ir.CollectionID),
|
||||
zap.Int64("segmentID", ir.SegmentID), zap.Int64("indexID", ir.IndexID), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
segIndex := &model.SegmentIndex{
|
||||
Segment: model.Segment{
|
||||
SegmentID: ir.SegmentID,
|
||||
PartitionID: ir.PartitionID,
|
||||
},
|
||||
EnableIndex: ir.EnableIndex,
|
||||
BuildID: ir.IndexBuildID,
|
||||
IndexSize: ir.IndexSize,
|
||||
IndexFilePaths: indexFilePaths,
|
||||
}
|
||||
|
||||
var indexParams []commonpb.KeyValuePair
|
||||
if ir.IndexParams != "" {
|
||||
err := json.Unmarshal([]byte(ir.IndexParams), &indexParams)
|
||||
if err != nil {
|
||||
log.Error("unmarshal IndexParams of index failed", zap.Int64("collID", ir.CollectionID),
|
||||
zap.Int64("segmentID", ir.SegmentID), zap.Int64("indexID", ir.IndexID), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
idx := &model.Index{
|
||||
CollectionID: ir.CollectionID,
|
||||
FieldID: ir.FieldID,
|
||||
IndexID: ir.IndexID,
|
||||
IndexName: ir.IndexName,
|
||||
IndexParams: funcutil.ConvertToKeyValuePairPointer(indexParams),
|
||||
SegmentIndexes: map[int64]model.SegmentIndex{
|
||||
segIndex.SegmentID: *segIndex,
|
||||
},
|
||||
}
|
||||
|
||||
result = append(result, idx)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func ConvertIndexDBToModel(indexes []*Index) []common.Int64Tuple {
|
||||
r := make([]common.Int64Tuple, 0, len(indexes))
|
||||
|
||||
for _, idx := range indexes {
|
||||
tuple := common.Int64Tuple{
|
||||
Key: idx.FieldID,
|
||||
Value: idx.IndexID,
|
||||
}
|
||||
r = append(r, tuple)
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
|
@ -0,0 +1,109 @@
|
|||
// Code generated by mockery v2.14.0. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
dbmodel "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// ICollAliasDb is an autogenerated mock type for the ICollAliasDb type
|
||||
type ICollAliasDb struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// GetCollectionIDByAlias provides a mock function with given fields: tenantID, alias, ts
|
||||
func (_m *ICollAliasDb) GetCollectionIDByAlias(tenantID string, alias string, ts uint64) (int64, error) {
|
||||
ret := _m.Called(tenantID, alias, ts)
|
||||
|
||||
var r0 int64
|
||||
if rf, ok := ret.Get(0).(func(string, string, uint64) int64); ok {
|
||||
r0 = rf(tenantID, alias, ts)
|
||||
} else {
|
||||
r0 = ret.Get(0).(int64)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, string, uint64) error); ok {
|
||||
r1 = rf(tenantID, alias, ts)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Insert provides a mock function with given fields: in
|
||||
func (_m *ICollAliasDb) Insert(in []*dbmodel.CollectionAlias) error {
|
||||
ret := _m.Called(in)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func([]*dbmodel.CollectionAlias) error); ok {
|
||||
r0 = rf(in)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// List provides a mock function with given fields: tenantID, cidTsPairs
|
||||
func (_m *ICollAliasDb) List(tenantID string, cidTsPairs []*dbmodel.CollectionAlias) ([]*dbmodel.CollectionAlias, error) {
|
||||
ret := _m.Called(tenantID, cidTsPairs)
|
||||
|
||||
var r0 []*dbmodel.CollectionAlias
|
||||
if rf, ok := ret.Get(0).(func(string, []*dbmodel.CollectionAlias) []*dbmodel.CollectionAlias); ok {
|
||||
r0 = rf(tenantID, cidTsPairs)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*dbmodel.CollectionAlias)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, []*dbmodel.CollectionAlias) error); ok {
|
||||
r1 = rf(tenantID, cidTsPairs)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ListCollectionIDTs provides a mock function with given fields: tenantID, ts
|
||||
func (_m *ICollAliasDb) ListCollectionIDTs(tenantID string, ts uint64) ([]*dbmodel.CollectionAlias, error) {
|
||||
ret := _m.Called(tenantID, ts)
|
||||
|
||||
var r0 []*dbmodel.CollectionAlias
|
||||
if rf, ok := ret.Get(0).(func(string, uint64) []*dbmodel.CollectionAlias); ok {
|
||||
r0 = rf(tenantID, ts)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*dbmodel.CollectionAlias)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, uint64) error); ok {
|
||||
r1 = rf(tenantID, ts)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewICollAliasDb interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewICollAliasDb creates a new instance of ICollAliasDb. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewICollAliasDb(t mockConstructorTestingTNewICollAliasDb) *ICollAliasDb {
|
||||
mock := &ICollAliasDb{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
|
@ -0,0 +1,65 @@
|
|||
// Code generated by mockery v2.14.0. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
dbmodel "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// ICollChannelDb is an autogenerated mock type for the ICollChannelDb type
|
||||
type ICollChannelDb struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// GetByCollectionID provides a mock function with given fields: tenantID, collectionID, ts
|
||||
func (_m *ICollChannelDb) GetByCollectionID(tenantID string, collectionID int64, ts uint64) ([]*dbmodel.CollectionChannel, error) {
|
||||
ret := _m.Called(tenantID, collectionID, ts)
|
||||
|
||||
var r0 []*dbmodel.CollectionChannel
|
||||
if rf, ok := ret.Get(0).(func(string, int64, uint64) []*dbmodel.CollectionChannel); ok {
|
||||
r0 = rf(tenantID, collectionID, ts)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*dbmodel.CollectionChannel)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, int64, uint64) error); ok {
|
||||
r1 = rf(tenantID, collectionID, ts)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Insert provides a mock function with given fields: in
|
||||
func (_m *ICollChannelDb) Insert(in []*dbmodel.CollectionChannel) error {
|
||||
ret := _m.Called(in)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func([]*dbmodel.CollectionChannel) error); ok {
|
||||
r0 = rf(in)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewICollChannelDb interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewICollChannelDb creates a new instance of ICollChannelDb. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewICollChannelDb(t mockConstructorTestingTNewICollChannelDb) *ICollChannelDb {
|
||||
mock := &ICollChannelDb{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
|
@ -0,0 +1,132 @@
|
|||
// Code generated by mockery v2.14.0. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
dbmodel "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// ICollectionDb is an autogenerated mock type for the ICollectionDb type
|
||||
type ICollectionDb struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// Get provides a mock function with given fields: tenantID, collectionID, ts
|
||||
func (_m *ICollectionDb) Get(tenantID string, collectionID int64, ts uint64) (*dbmodel.Collection, error) {
|
||||
ret := _m.Called(tenantID, collectionID, ts)
|
||||
|
||||
var r0 *dbmodel.Collection
|
||||
if rf, ok := ret.Get(0).(func(string, int64, uint64) *dbmodel.Collection); ok {
|
||||
r0 = rf(tenantID, collectionID, ts)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*dbmodel.Collection)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, int64, uint64) error); ok {
|
||||
r1 = rf(tenantID, collectionID, ts)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetCollectionIDByName provides a mock function with given fields: tenantID, collectionName, ts
|
||||
func (_m *ICollectionDb) GetCollectionIDByName(tenantID string, collectionName string, ts uint64) (int64, error) {
|
||||
ret := _m.Called(tenantID, collectionName, ts)
|
||||
|
||||
var r0 int64
|
||||
if rf, ok := ret.Get(0).(func(string, string, uint64) int64); ok {
|
||||
r0 = rf(tenantID, collectionName, ts)
|
||||
} else {
|
||||
r0 = ret.Get(0).(int64)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, string, uint64) error); ok {
|
||||
r1 = rf(tenantID, collectionName, ts)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetCollectionIDTs provides a mock function with given fields: tenantID, collectionID, ts
|
||||
func (_m *ICollectionDb) GetCollectionIDTs(tenantID string, collectionID int64, ts uint64) (*dbmodel.Collection, error) {
|
||||
ret := _m.Called(tenantID, collectionID, ts)
|
||||
|
||||
var r0 *dbmodel.Collection
|
||||
if rf, ok := ret.Get(0).(func(string, int64, uint64) *dbmodel.Collection); ok {
|
||||
r0 = rf(tenantID, collectionID, ts)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*dbmodel.Collection)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, int64, uint64) error); ok {
|
||||
r1 = rf(tenantID, collectionID, ts)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Insert provides a mock function with given fields: in
|
||||
func (_m *ICollectionDb) Insert(in *dbmodel.Collection) error {
|
||||
ret := _m.Called(in)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(*dbmodel.Collection) error); ok {
|
||||
r0 = rf(in)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ListCollectionIDTs provides a mock function with given fields: tenantID, ts
|
||||
func (_m *ICollectionDb) ListCollectionIDTs(tenantID string, ts uint64) ([]*dbmodel.Collection, error) {
|
||||
ret := _m.Called(tenantID, ts)
|
||||
|
||||
var r0 []*dbmodel.Collection
|
||||
if rf, ok := ret.Get(0).(func(string, uint64) []*dbmodel.Collection); ok {
|
||||
r0 = rf(tenantID, ts)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*dbmodel.Collection)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, uint64) error); ok {
|
||||
r1 = rf(tenantID, ts)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewICollectionDb interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewICollectionDb creates a new instance of ICollectionDb. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewICollectionDb(t mockConstructorTestingTNewICollectionDb) *ICollectionDb {
|
||||
mock := &ICollectionDb{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
|
@ -0,0 +1,65 @@
|
|||
// Code generated by mockery v2.14.0. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
dbmodel "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// IFieldDb is an autogenerated mock type for the IFieldDb type
|
||||
type IFieldDb struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// GetByCollectionID provides a mock function with given fields: tenantID, collectionID, ts
|
||||
func (_m *IFieldDb) GetByCollectionID(tenantID string, collectionID int64, ts uint64) ([]*dbmodel.Field, error) {
|
||||
ret := _m.Called(tenantID, collectionID, ts)
|
||||
|
||||
var r0 []*dbmodel.Field
|
||||
if rf, ok := ret.Get(0).(func(string, int64, uint64) []*dbmodel.Field); ok {
|
||||
r0 = rf(tenantID, collectionID, ts)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*dbmodel.Field)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, int64, uint64) error); ok {
|
||||
r1 = rf(tenantID, collectionID, ts)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Insert provides a mock function with given fields: in
|
||||
func (_m *IFieldDb) Insert(in []*dbmodel.Field) error {
|
||||
ret := _m.Called(in)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func([]*dbmodel.Field) error); ok {
|
||||
r0 = rf(in)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewIFieldDb interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewIFieldDb creates a new instance of IFieldDb. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewIFieldDb(t mockConstructorTestingTNewIFieldDb) *IFieldDb {
|
||||
mock := &IFieldDb{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
|
@ -0,0 +1,130 @@
|
|||
// Code generated by mockery v2.14.0. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
dbmodel "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// IIndexDb is an autogenerated mock type for the IIndexDb type
|
||||
type IIndexDb struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// Get provides a mock function with given fields: tenantID, collectionID
|
||||
func (_m *IIndexDb) Get(tenantID string, collectionID int64) ([]*dbmodel.Index, error) {
|
||||
ret := _m.Called(tenantID, collectionID)
|
||||
|
||||
var r0 []*dbmodel.Index
|
||||
if rf, ok := ret.Get(0).(func(string, int64) []*dbmodel.Index); ok {
|
||||
r0 = rf(tenantID, collectionID)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*dbmodel.Index)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string, int64) error); ok {
|
||||
r1 = rf(tenantID, collectionID)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Insert provides a mock function with given fields: in
|
||||
func (_m *IIndexDb) Insert(in []*dbmodel.Index) error {
|
||||
ret := _m.Called(in)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func([]*dbmodel.Index) error); ok {
|
||||
r0 = rf(in)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// List provides a mock function with given fields: tenantID
|
||||
func (_m *IIndexDb) List(tenantID string) ([]*dbmodel.IndexResult, error) {
|
||||
ret := _m.Called(tenantID)
|
||||
|
||||
var r0 []*dbmodel.IndexResult
|
||||
if rf, ok := ret.Get(0).(func(string) []*dbmodel.IndexResult); ok {
|
||||
r0 = rf(tenantID)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*dbmodel.IndexResult)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string) error); ok {
|
||||
r1 = rf(tenantID)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MarkDeletedByCollectionID provides a mock function with given fields: tenantID, collID
|
||||
func (_m *IIndexDb) MarkDeletedByCollectionID(tenantID string, collID int64) error {
|
||||
ret := _m.Called(tenantID, collID)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, int64) error); ok {
|
||||
r0 = rf(tenantID, collID)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MarkDeletedByIndexID provides a mock function with given fields: tenantID, idxID
|
||||
func (_m *IIndexDb) MarkDeletedByIndexID(tenantID string, idxID int64) error {
|
||||
ret := _m.Called(tenantID, idxID)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, int64) error); ok {
|
||||
r0 = rf(tenantID, idxID)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Update provides a mock function with given fields: in
|
||||
func (_m *IIndexDb) Update(in *dbmodel.Index) error {
|
||||
ret := _m.Called(in)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(*dbmodel.Index) error); ok {
|
||||
r0 = rf(in)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewIIndexDb interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewIIndexDb creates a new instance of IIndexDb. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewIIndexDb(t mockConstructorTestingTNewIIndexDb) *IIndexDb {
|
||||
mock := &IIndexDb{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
|
@ -0,0 +1,158 @@
|
|||
// Code generated by mockery v2.14.0. DO NOT EDIT.

package mocks

import (
	context "context"

	dbmodel "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
	mock "github.com/stretchr/testify/mock"
)

// IMetaDomain is an autogenerated mock type for the IMetaDomain type
// Each accessor returns the per-table DAO interface configured on the mock.
type IMetaDomain struct {
	mock.Mock
}

// CollAliasDb provides a mock function with given fields: ctx
// All accessors below share the same generated pattern: if the test stubbed a
// function value, call it; otherwise return the recorded static value. The
// nil check avoids a panic from type-asserting an unset (nil) return.
func (_m *IMetaDomain) CollAliasDb(ctx context.Context) dbmodel.ICollAliasDb {
	ret := _m.Called(ctx)

	var r0 dbmodel.ICollAliasDb
	if rf, ok := ret.Get(0).(func(context.Context) dbmodel.ICollAliasDb); ok {
		r0 = rf(ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(dbmodel.ICollAliasDb)
		}
	}

	return r0
}

// CollChannelDb provides a mock function with given fields: ctx
func (_m *IMetaDomain) CollChannelDb(ctx context.Context) dbmodel.ICollChannelDb {
	ret := _m.Called(ctx)

	var r0 dbmodel.ICollChannelDb
	if rf, ok := ret.Get(0).(func(context.Context) dbmodel.ICollChannelDb); ok {
		r0 = rf(ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(dbmodel.ICollChannelDb)
		}
	}

	return r0
}

// CollectionDb provides a mock function with given fields: ctx
func (_m *IMetaDomain) CollectionDb(ctx context.Context) dbmodel.ICollectionDb {
	ret := _m.Called(ctx)

	var r0 dbmodel.ICollectionDb
	if rf, ok := ret.Get(0).(func(context.Context) dbmodel.ICollectionDb); ok {
		r0 = rf(ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(dbmodel.ICollectionDb)
		}
	}

	return r0
}

// FieldDb provides a mock function with given fields: ctx
func (_m *IMetaDomain) FieldDb(ctx context.Context) dbmodel.IFieldDb {
	ret := _m.Called(ctx)

	var r0 dbmodel.IFieldDb
	if rf, ok := ret.Get(0).(func(context.Context) dbmodel.IFieldDb); ok {
		r0 = rf(ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(dbmodel.IFieldDb)
		}
	}

	return r0
}

// IndexDb provides a mock function with given fields: ctx
func (_m *IMetaDomain) IndexDb(ctx context.Context) dbmodel.IIndexDb {
	ret := _m.Called(ctx)

	var r0 dbmodel.IIndexDb
	if rf, ok := ret.Get(0).(func(context.Context) dbmodel.IIndexDb); ok {
		r0 = rf(ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(dbmodel.IIndexDb)
		}
	}

	return r0
}

// PartitionDb provides a mock function with given fields: ctx
func (_m *IMetaDomain) PartitionDb(ctx context.Context) dbmodel.IPartitionDb {
	ret := _m.Called(ctx)

	var r0 dbmodel.IPartitionDb
	if rf, ok := ret.Get(0).(func(context.Context) dbmodel.IPartitionDb); ok {
		r0 = rf(ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(dbmodel.IPartitionDb)
		}
	}

	return r0
}

// SegmentIndexDb provides a mock function with given fields: ctx
func (_m *IMetaDomain) SegmentIndexDb(ctx context.Context) dbmodel.ISegmentIndexDb {
	ret := _m.Called(ctx)

	var r0 dbmodel.ISegmentIndexDb
	if rf, ok := ret.Get(0).(func(context.Context) dbmodel.ISegmentIndexDb); ok {
		r0 = rf(ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(dbmodel.ISegmentIndexDb)
		}
	}

	return r0
}

// UserDb provides a mock function with given fields: ctx
func (_m *IMetaDomain) UserDb(ctx context.Context) dbmodel.IUserDb {
	ret := _m.Called(ctx)

	var r0 dbmodel.IUserDb
	if rf, ok := ret.Get(0).(func(context.Context) dbmodel.IUserDb); ok {
		r0 = rf(ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(dbmodel.IUserDb)
		}
	}

	return r0
}

// mockConstructorTestingTNewIMetaDomain is the minimal testing contract the
// constructor needs (failure reporting + Cleanup registration).
type mockConstructorTestingTNewIMetaDomain interface {
	mock.TestingT
	Cleanup(func())
}

// NewIMetaDomain creates a new instance of IMetaDomain. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewIMetaDomain(t mockConstructorTestingTNewIMetaDomain) *IMetaDomain {
	mock := &IMetaDomain{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
|
|
@ -0,0 +1,65 @@
|
|||
// Code generated by mockery v2.14.0. DO NOT EDIT.

package mocks

import (
	dbmodel "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
	mock "github.com/stretchr/testify/mock"
)

// IPartitionDb is an autogenerated mock type for the IPartitionDb type
type IPartitionDb struct {
	mock.Mock
}

// GetByCollectionID provides a mock function with given fields: tenantID, collectionID, ts
// Generated pattern: prefer a stubbed function return; otherwise fall back to
// the recorded static values (nil-checked to avoid a failed type assertion).
func (_m *IPartitionDb) GetByCollectionID(tenantID string, collectionID int64, ts uint64) ([]*dbmodel.Partition, error) {
	ret := _m.Called(tenantID, collectionID, ts)

	var r0 []*dbmodel.Partition
	if rf, ok := ret.Get(0).(func(string, int64, uint64) []*dbmodel.Partition); ok {
		r0 = rf(tenantID, collectionID, ts)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*dbmodel.Partition)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string, int64, uint64) error); ok {
		r1 = rf(tenantID, collectionID, ts)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Insert provides a mock function with given fields: in
func (_m *IPartitionDb) Insert(in []*dbmodel.Partition) error {
	ret := _m.Called(in)

	var r0 error
	if rf, ok := ret.Get(0).(func([]*dbmodel.Partition) error); ok {
		r0 = rf(in)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// mockConstructorTestingTNewIPartitionDb is the minimal testing contract the
// constructor needs (failure reporting + Cleanup registration).
type mockConstructorTestingTNewIPartitionDb interface {
	mock.TestingT
	Cleanup(func())
}

// NewIPartitionDb creates a new instance of IPartitionDb. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewIPartitionDb(t mockConstructorTestingTNewIPartitionDb) *IPartitionDb {
	mock := &IPartitionDb{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
|
|
@ -0,0 +1,98 @@
|
|||
// Code generated by mockery v2.14.0. DO NOT EDIT.

package mocks

import (
	dbmodel "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
	mock "github.com/stretchr/testify/mock"
)

// ISegmentIndexDb is an autogenerated mock type for the ISegmentIndexDb type
type ISegmentIndexDb struct {
	mock.Mock
}

// Insert provides a mock function with given fields: in
// Generated pattern: prefer a stubbed function return; otherwise fall back to
// the recorded static error value.
func (_m *ISegmentIndexDb) Insert(in []*dbmodel.SegmentIndex) error {
	ret := _m.Called(in)

	var r0 error
	if rf, ok := ret.Get(0).(func([]*dbmodel.SegmentIndex) error); ok {
		r0 = rf(in)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// MarkDeleted provides a mock function with given fields: tenantID, in
func (_m *ISegmentIndexDb) MarkDeleted(tenantID string, in []*dbmodel.SegmentIndex) error {
	ret := _m.Called(tenantID, in)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, []*dbmodel.SegmentIndex) error); ok {
		r0 = rf(tenantID, in)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// MarkDeletedByCollectionID provides a mock function with given fields: tenantID, collID
func (_m *ISegmentIndexDb) MarkDeletedByCollectionID(tenantID string, collID int64) error {
	ret := _m.Called(tenantID, collID)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, int64) error); ok {
		r0 = rf(tenantID, collID)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// MarkDeletedByIndexID provides a mock function with given fields: tenantID, idxID
func (_m *ISegmentIndexDb) MarkDeletedByIndexID(tenantID string, idxID int64) error {
	ret := _m.Called(tenantID, idxID)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, int64) error); ok {
		r0 = rf(tenantID, idxID)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Upsert provides a mock function with given fields: in
func (_m *ISegmentIndexDb) Upsert(in []*dbmodel.SegmentIndex) error {
	ret := _m.Called(in)

	var r0 error
	if rf, ok := ret.Get(0).(func([]*dbmodel.SegmentIndex) error); ok {
		r0 = rf(in)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// mockConstructorTestingTNewISegmentIndexDb is the minimal testing contract
// the constructor needs (failure reporting + Cleanup registration).
type mockConstructorTestingTNewISegmentIndexDb interface {
	mock.TestingT
	Cleanup(func())
}

// NewISegmentIndexDb creates a new instance of ISegmentIndexDb. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewISegmentIndexDb(t mockConstructorTestingTNewISegmentIndexDb) *ISegmentIndexDb {
	mock := &ISegmentIndexDb{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
|
|
@ -0,0 +1,102 @@
|
|||
// Code generated by mockery v2.14.0. DO NOT EDIT.

package mocks

import (
	dbmodel "github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
	mock "github.com/stretchr/testify/mock"
)

// IUserDb is an autogenerated mock type for the IUserDb type
type IUserDb struct {
	mock.Mock
}

// GetByUsername provides a mock function with given fields: tenantID, username
// Generated pattern: prefer a stubbed function return; otherwise fall back to
// the recorded static values (nil-checked to avoid a failed type assertion).
func (_m *IUserDb) GetByUsername(tenantID string, username string) (*dbmodel.User, error) {
	ret := _m.Called(tenantID, username)

	var r0 *dbmodel.User
	if rf, ok := ret.Get(0).(func(string, string) *dbmodel.User); ok {
		r0 = rf(tenantID, username)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*dbmodel.User)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string, string) error); ok {
		r1 = rf(tenantID, username)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Insert provides a mock function with given fields: in
func (_m *IUserDb) Insert(in *dbmodel.User) error {
	ret := _m.Called(in)

	var r0 error
	if rf, ok := ret.Get(0).(func(*dbmodel.User) error); ok {
		r0 = rf(in)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// ListUsername provides a mock function with given fields: tenantID
func (_m *IUserDb) ListUsername(tenantID string) ([]string, error) {
	ret := _m.Called(tenantID)

	var r0 []string
	if rf, ok := ret.Get(0).(func(string) []string); ok {
		r0 = rf(tenantID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]string)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string) error); ok {
		r1 = rf(tenantID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// MarkDeletedByUsername provides a mock function with given fields: tenantID, username
func (_m *IUserDb) MarkDeletedByUsername(tenantID string, username string) error {
	ret := _m.Called(tenantID, username)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, string) error); ok {
		r0 = rf(tenantID, username)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// mockConstructorTestingTNewIUserDb is the minimal testing contract the
// constructor needs (failure reporting + Cleanup registration).
type mockConstructorTestingTNewIUserDb interface {
	mock.TestingT
	Cleanup(func())
}

// NewIUserDb creates a new instance of IUserDb. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewIUserDb(t mockConstructorTestingTNewIUserDb) *IUserDb {
	mock := &IUserDb{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
|
|
@ -0,0 +1,52 @@
|
|||
package dbmodel
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type Partition struct {
|
||||
ID int64 `gorm:"id"`
|
||||
TenantID string `gorm:"tenant_id"`
|
||||
PartitionID int64 `gorm:"partition_id"`
|
||||
PartitionName string `gorm:"partition_name"`
|
||||
PartitionCreatedTimestamp uint64 `gorm:"partition_created_timestamp"`
|
||||
CollectionID int64 `gorm:"collection_id"`
|
||||
Ts typeutil.Timestamp `gorm:"ts"`
|
||||
IsDeleted bool `gorm:"is_deleted"`
|
||||
CreatedAt time.Time `gorm:"created_at"`
|
||||
UpdatedAt time.Time `gorm:"updated_at"`
|
||||
}
|
||||
|
||||
func (v Partition) TableName() string {
|
||||
return "partitions"
|
||||
}
|
||||
|
||||
//go:generate mockery --name=IPartitionDb
|
||||
type IPartitionDb interface {
|
||||
GetByCollectionID(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) ([]*Partition, error)
|
||||
Insert(in []*Partition) error
|
||||
//MarkDeleted(tenantID string, collID typeutil.UniqueID) error
|
||||
}
|
||||
|
||||
// model <---> db
|
||||
|
||||
func UnmarshalPartitionModel(partitons []*Partition) []*model.Partition {
|
||||
r := make([]*model.Partition, 0, len(partitons))
|
||||
for _, p := range partitons {
|
||||
partition := ConvertPartitionDBToModel(p)
|
||||
r = append(r, partition)
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func ConvertPartitionDBToModel(partiton *Partition) *model.Partition {
|
||||
return &model.Partition{
|
||||
PartitionID: partiton.PartitionID,
|
||||
PartitionName: partiton.PartitionName,
|
||||
PartitionCreatedTimestamp: partiton.PartitionCreatedTimestamp,
|
||||
}
|
||||
}
|
|
@ -0,0 +1,41 @@
|
|||
package dbmodel
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
)
|
||||
|
||||
// SegmentIndex is the gorm row model for the `segment_indexes` table,
// linking a segment to the index built on one of its fields. Rows are
// soft-deleted via IsDeleted.
type SegmentIndex struct {
	ID       int64  `gorm:"id"`
	TenantID string `gorm:"tenant_id"`
	// SegmentIndexInfo (CollectionID & PartitionID & SegmentID & FieldID & IndexID & BuildID & EnableIndex)
	CollectionID int64 `gorm:"collection_id"`
	PartitionID  int64 `gorm:"partition_id"`
	SegmentID    int64 `gorm:"segment_id"`
	// FieldIndexInfo (FieldID & IndexID)
	FieldID int64 `gorm:"field_id"`
	// IndexInfo (IndexID & IndexName & IndexParams)
	IndexID      int64  `gorm:"index_id"`
	IndexBuildID int64  `gorm:"index_build_id"`
	EnableIndex  bool   `gorm:"enable_index"`
	CreateTime   uint64 `gorm:"create_time"`
	// IndexFilePaths is stored serialized as a single string column.
	IndexFilePaths string    `gorm:"index_file_paths"`
	IndexSize      uint64    `gorm:"index_size"`
	IsDeleted      bool      `gorm:"is_deleted"`
	CreatedAt      time.Time `gorm:"created_at"`
	UpdatedAt      time.Time `gorm:"updated_at"`
}

// TableName tells gorm which table backs the SegmentIndex model.
func (v SegmentIndex) TableName() string {
	return "segment_indexes"
}

//go:generate mockery --name=ISegmentIndexDb
// ISegmentIndexDb is the data-access interface for segment-index rows.
// The MarkDeleted* methods soft-delete (set is_deleted) rather than remove rows.
type ISegmentIndexDb interface {
	Insert(in []*SegmentIndex) error
	Upsert(in []*SegmentIndex) error
	MarkDeleted(tenantID string, in []*SegmentIndex) error
	MarkDeletedByCollectionID(tenantID string, collID typeutil.UniqueID) error
	MarkDeletedByIndexID(tenantID string, idxID typeutil.UniqueID) error
}
|
|
@ -0,0 +1,43 @@
|
|||
package dbmodel
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
)
|
||||
|
||||
type User struct {
|
||||
ID int64 `gorm:"id"`
|
||||
TenantID string `gorm:"tenant_id"`
|
||||
Username string `gorm:"username"`
|
||||
EncryptedPassword string `gorm:"encrypted_password"`
|
||||
IsSuper bool `gorm:"is_super"`
|
||||
IsDeleted bool `gorm:"is_deleted"`
|
||||
CreatedAt time.Time `gorm:"created_at"`
|
||||
UpdatedAt time.Time `gorm:"updated_at"`
|
||||
}
|
||||
|
||||
func (v User) TableName() string {
|
||||
return "credential_users"
|
||||
}
|
||||
|
||||
//go:generate mockery --name=IUserDb
|
||||
type IUserDb interface {
|
||||
GetByUsername(tenantID string, username string) (*User, error)
|
||||
ListUsername(tenantID string) ([]string, error)
|
||||
Insert(in *User) error
|
||||
MarkDeletedByUsername(tenantID string, username string) error
|
||||
}
|
||||
|
||||
// model <---> db
|
||||
|
||||
func UnmarshalUserModel(user *User) *model.Credential {
|
||||
if user == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &model.Credential{
|
||||
Username: user.Username,
|
||||
EncryptedPassword: user.EncryptedPassword,
|
||||
}
|
||||
}
|
|
@ -0,0 +1,821 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/proto/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/util/contextutil"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Catalog struct {
|
||||
metaDomain dbmodel.IMetaDomain
|
||||
txImpl dbmodel.ITransaction
|
||||
}
|
||||
|
||||
func NewTableCatalog(txImpl dbmodel.ITransaction, metaDomain dbmodel.IMetaDomain) *Catalog {
|
||||
return &Catalog{
|
||||
txImpl: txImpl,
|
||||
metaDomain: metaDomain,
|
||||
}
|
||||
}
|
||||
|
||||
func (tc *Catalog) CreateCollection(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
|
||||
// insert collection
|
||||
var startPositionsStr string
|
||||
if collection.StartPositions != nil {
|
||||
startPositionsBytes, err := json.Marshal(collection.StartPositions)
|
||||
if err != nil {
|
||||
log.Error("marshal collection start positions error", zap.Int64("collID", collection.CollectionID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
startPositionsStr = string(startPositionsBytes)
|
||||
}
|
||||
|
||||
err := tc.metaDomain.CollectionDb(txCtx).Insert(&dbmodel.Collection{
|
||||
TenantID: tenantID,
|
||||
CollectionID: collection.CollectionID,
|
||||
CollectionName: collection.Name,
|
||||
Description: collection.Description,
|
||||
AutoID: collection.AutoID,
|
||||
ShardsNum: collection.ShardsNum,
|
||||
StartPosition: startPositionsStr,
|
||||
ConsistencyLevel: int32(collection.ConsistencyLevel),
|
||||
Ts: ts,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// insert field
|
||||
var fields = make([]*dbmodel.Field, 0, len(collection.Fields))
|
||||
for _, field := range collection.Fields {
|
||||
typeParamsBytes, err := json.Marshal(field.TypeParams)
|
||||
if err != nil {
|
||||
log.Error("marshal TypeParams of field failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
typeParamsStr := string(typeParamsBytes)
|
||||
|
||||
indexParamsBytes, err := json.Marshal(field.IndexParams)
|
||||
if err != nil {
|
||||
log.Error("marshal IndexParams of field failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
indexParamsStr := string(indexParamsBytes)
|
||||
|
||||
f := &dbmodel.Field{
|
||||
TenantID: collection.TenantID,
|
||||
FieldID: field.FieldID,
|
||||
FieldName: field.Name,
|
||||
IsPrimaryKey: field.IsPrimaryKey,
|
||||
Description: field.Description,
|
||||
DataType: field.DataType,
|
||||
TypeParams: typeParamsStr,
|
||||
IndexParams: indexParamsStr,
|
||||
AutoID: field.AutoID,
|
||||
CollectionID: collection.CollectionID,
|
||||
Ts: ts,
|
||||
}
|
||||
|
||||
fields = append(fields, f)
|
||||
}
|
||||
|
||||
err = tc.metaDomain.FieldDb(txCtx).Insert(fields)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// insert partition
|
||||
var partitions = make([]*dbmodel.Partition, 0, len(collection.Partitions))
|
||||
for _, partition := range collection.Partitions {
|
||||
p := &dbmodel.Partition{
|
||||
TenantID: collection.TenantID,
|
||||
PartitionID: partition.PartitionID,
|
||||
PartitionName: partition.PartitionName,
|
||||
PartitionCreatedTimestamp: partition.PartitionCreatedTimestamp,
|
||||
CollectionID: collection.CollectionID,
|
||||
Ts: ts,
|
||||
}
|
||||
partitions = append(partitions, p)
|
||||
}
|
||||
|
||||
err = tc.metaDomain.PartitionDb(txCtx).Insert(partitions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// insert channel
|
||||
var channels = make([]*dbmodel.CollectionChannel, 0, len(collection.VirtualChannelNames))
|
||||
for i, vChannelName := range collection.VirtualChannelNames {
|
||||
collChannel := &dbmodel.CollectionChannel{
|
||||
TenantID: collection.TenantID,
|
||||
CollectionID: collection.CollectionID,
|
||||
VirtualChannelName: vChannelName,
|
||||
PhysicalChannelName: collection.PhysicalChannelNames[i],
|
||||
Ts: ts,
|
||||
}
|
||||
channels = append(channels, collChannel)
|
||||
}
|
||||
|
||||
err = tc.metaDomain.CollChannelDb(txCtx).Insert(channels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (tc *Catalog) GetCollectionByID(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
// get latest timestamp less than or equals to param ts
|
||||
cidTsPair, err := tc.metaDomain.CollectionDb(ctx).GetCollectionIDTs(tenantID, collectionID, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cidTsPair.IsDeleted {
|
||||
log.Error("not found collection", zap.Int64("collID", collectionID), zap.Uint64("ts", ts))
|
||||
return nil, fmt.Errorf("not found collection, collID=%d, ts=%d", collectionID, ts)
|
||||
}
|
||||
|
||||
queryTs := cidTsPair.Ts
|
||||
|
||||
return tc.populateCollection(ctx, collectionID, queryTs)
|
||||
}
|
||||
|
||||
func (tc *Catalog) populateCollection(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
// get collection by collection_id and ts
|
||||
collection, err := tc.metaDomain.CollectionDb(ctx).Get(tenantID, collectionID, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get fields by collection_id and ts
|
||||
fields, err := tc.metaDomain.FieldDb(ctx).GetByCollectionID(tenantID, collectionID, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get partitions by collection_id and ts
|
||||
partitions, err := tc.metaDomain.PartitionDb(ctx).GetByCollectionID(tenantID, collectionID, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get channels by collection_id and ts
|
||||
channels, err := tc.metaDomain.CollChannelDb(ctx).GetByCollectionID(tenantID, collectionID, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get indexes by collection_id
|
||||
indexes, err := tc.metaDomain.IndexDb(ctx).Get(tenantID, collectionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// merge as collection attributes
|
||||
|
||||
mCollection, err := dbmodel.UnmarshalCollectionModel(collection)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mFields, err := dbmodel.UnmarshalFieldModel(fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mCollection.Fields = mFields
|
||||
mCollection.Partitions = dbmodel.UnmarshalPartitionModel(partitions)
|
||||
mCollection.VirtualChannelNames, mCollection.PhysicalChannelNames = dbmodel.ExtractChannelNames(channels)
|
||||
mCollection.FieldIDToIndexID = dbmodel.ConvertIndexDBToModel(indexes)
|
||||
|
||||
return mCollection, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) GetCollectionByName(ctx context.Context, collectionName string, ts typeutil.Timestamp) (*model.Collection, error) {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
// Since collection name will not change for different ts
|
||||
collectionID, err := tc.metaDomain.CollectionDb(ctx).GetCollectionIDByName(tenantID, collectionName, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tc.GetCollectionByID(ctx, collectionID, ts)
|
||||
}
|
||||
|
||||
// ListCollections For time travel (ts > 0), find only one record respectively for each collection no matter `is_deleted` is true or false
|
||||
// i.e. there are 3 collections in total,
|
||||
// [collection1, t1, is_deleted=true]
|
||||
// [collection2, t2, is_deleted=false]
|
||||
// [collection3, t3, is_deleted=false]
|
||||
// t1, t2, t3 are the largest timestamp that less than or equal to @param ts
|
||||
// the final result will only return collection2 and collection3 since collection1 is deleted
|
||||
func (tc *Catalog) ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
// 1. find each collection_id with latest ts <= @param ts
|
||||
cidTsPairs, err := tc.metaDomain.CollectionDb(ctx).ListCollectionIDTs(tenantID, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(cidTsPairs) == 0 {
|
||||
return map[string]*model.Collection{}, nil
|
||||
}
|
||||
|
||||
// 2. populate each collection
|
||||
collections := make([]*model.Collection, len(cidTsPairs))
|
||||
|
||||
reloadCollectionByCollectionIDTsFunc := func(idx int) error {
|
||||
collIDTsPair := cidTsPairs[idx]
|
||||
collection, err := tc.populateCollection(ctx, collIDTsPair.CollectionID, collIDTsPair.Ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
collections[idx] = collection
|
||||
return nil
|
||||
}
|
||||
|
||||
concurrency := len(cidTsPairs)
|
||||
if concurrency > runtime.NumCPU() {
|
||||
concurrency = runtime.NumCPU()
|
||||
}
|
||||
err = funcutil.ProcessFuncParallel(len(cidTsPairs), concurrency, reloadCollectionByCollectionIDTsFunc, "ListCollectionByCollectionIDTs")
|
||||
if err != nil {
|
||||
log.Error("list collections by collection_id & ts pair failed", zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := map[string]*model.Collection{}
|
||||
for _, c := range collections {
|
||||
r[c.Name] = c
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
// get latest timestamp less than or equals to param ts
|
||||
cidTsPair, err := tc.metaDomain.CollectionDb(ctx).GetCollectionIDTs(tenantID, collectionID, ts)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if cidTsPair.IsDeleted {
|
||||
return false
|
||||
}
|
||||
|
||||
queryTs := cidTsPair.Ts
|
||||
|
||||
col, err := tc.metaDomain.CollectionDb(ctx).Get(tenantID, collectionID, queryTs)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if col != nil {
|
||||
return !col.IsDeleted
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// DropCollection soft-deletes a collection and everything attached to it in a
// single transaction at timestamp ts. The versioned tables (collections,
// collection_aliases, collection_channels, field_schemas, partitions) get a
// new tombstone row (IsDeleted=true at ts) so history is preserved for time
// travel; the non-versioned tables (indexes, segment_indexes) are flipped to
// is_deleted in place.
func (tc *Catalog) DropCollection(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error {
	tenantID := contextutil.TenantID(ctx)

	return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
		// 1. insert a mark-deleted record for collections
		coll := &dbmodel.Collection{
			TenantID:     tenantID,
			CollectionID: collection.CollectionID,
			Ts:           ts,
			IsDeleted:    true,
		}
		err := tc.metaDomain.CollectionDb(txCtx).Insert(coll)
		if err != nil {
			log.Error("insert tombstone record for collections failed", zap.String("tenant", tenantID), zap.Int64("collID", collection.CollectionID), zap.Uint64("ts", ts), zap.Error(err))
			return err
		}

		// 2. insert a mark-deleted record for collection_aliases
		// (one tombstone per alias; skipped entirely when there are no aliases)
		if len(collection.Aliases) > 0 {
			collAliases := make([]*dbmodel.CollectionAlias, 0, len(collection.Aliases))

			for _, alias := range collection.Aliases {
				collAliases = append(collAliases, &dbmodel.CollectionAlias{
					TenantID:        tenantID,
					CollectionID:    collection.CollectionID,
					CollectionAlias: alias,
					Ts:              ts,
					IsDeleted:       true,
				})
			}

			err = tc.metaDomain.CollAliasDb(txCtx).Insert(collAliases)
			if err != nil {
				log.Error("insert tombstone record for collection_aliases failed", zap.String("tenant", tenantID), zap.Int64("collID", collection.CollectionID), zap.Uint64("ts", ts), zap.Error(err))
				return err
			}
		}

		// 3. insert a mark-deleted record for collection_channels
		// (a single collection-level tombstone, not one per channel)
		collChannel := &dbmodel.CollectionChannel{
			TenantID:     tenantID,
			CollectionID: collection.CollectionID,
			Ts:           ts,
			IsDeleted:    true,
		}
		err = tc.metaDomain.CollChannelDb(txCtx).Insert([]*dbmodel.CollectionChannel{collChannel})
		if err != nil {
			log.Error("insert tombstone record for collection_channels failed", zap.String("tenant", tenantID), zap.Int64("collID", collection.CollectionID), zap.Uint64("ts", ts), zap.Error(err))
			return err
		}

		// 4. insert a mark-deleted record for field_schemas
		// (a single collection-level tombstone, not one per field)
		field := &dbmodel.Field{
			TenantID:     tenantID,
			CollectionID: collection.CollectionID,
			Ts:           ts,
			IsDeleted:    true,
		}
		err = tc.metaDomain.FieldDb(txCtx).Insert([]*dbmodel.Field{field})
		if err != nil {
			log.Error("insert tombstone record for field_schemas failed", zap.String("tenant", tenantID), zap.Int64("collID", collection.CollectionID), zap.Uint64("ts", ts), zap.Error(err))
			return err
		}

		// 5. insert a mark-deleted record for partitions
		// (a single collection-level tombstone, not one per partition)
		partition := &dbmodel.Partition{
			TenantID:     tenantID,
			CollectionID: collection.CollectionID,
			Ts:           ts,
			IsDeleted:    true,
		}
		err = tc.metaDomain.PartitionDb(txCtx).Insert([]*dbmodel.Partition{partition})
		if err != nil {
			log.Error("insert tombstone record for partitions failed", zap.String("tenant", tenantID), zap.Int64("collID", collection.CollectionID), zap.Uint64("ts", ts), zap.Error(err))
			return err
		}

		// 6. mark deleted for indexes
		err = tc.metaDomain.IndexDb(txCtx).MarkDeletedByCollectionID(tenantID, collection.CollectionID)
		if err != nil {
			log.Error("mark deleted for indexes failed", zap.String("tenant", tenantID), zap.Int64("collID", collection.CollectionID), zap.Error(err))
			return err
		}

		// 7. mark deleted for segment_indexes
		err = tc.metaDomain.SegmentIndexDb(txCtx).MarkDeletedByCollectionID(tenantID, collection.CollectionID)
		if err != nil {
			log.Error("mark deleted for segment_indexes failed", zap.String("tenant", tenantID), zap.Int64("collID", collection.CollectionID), zap.Error(err))
			return err
		}

		// err is nil here (every failure path returned above)
		return err
	})
}
|
||||
|
||||
func (tc *Catalog) CreatePartition(ctx context.Context, partition *model.Partition, ts typeutil.Timestamp) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
p := &dbmodel.Partition{
|
||||
TenantID: tenantID,
|
||||
PartitionID: partition.PartitionID,
|
||||
PartitionName: partition.PartitionName,
|
||||
PartitionCreatedTimestamp: partition.PartitionCreatedTimestamp,
|
||||
CollectionID: partition.CollectionID,
|
||||
Ts: ts,
|
||||
}
|
||||
err := tc.metaDomain.PartitionDb(ctx).Insert([]*dbmodel.Partition{p})
|
||||
if err != nil {
|
||||
log.Error("insert partitions failed", zap.String("tenant", tenantID), zap.Int64("collID", partition.CollectionID), zap.Int64("partitionID", partition.PartitionID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) DropPartition(ctx context.Context, collectionID typeutil.UniqueID, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
p := &dbmodel.Partition{
|
||||
TenantID: tenantID,
|
||||
PartitionID: partitionID,
|
||||
CollectionID: collectionID,
|
||||
Ts: ts,
|
||||
IsDeleted: true,
|
||||
}
|
||||
err := tc.metaDomain.PartitionDb(ctx).Insert([]*dbmodel.Partition{p})
|
||||
if err != nil {
|
||||
log.Error("insert tombstone record for partition failed", zap.String("tenant", tenantID), zap.Int64("collID", collectionID), zap.Int64("partitionID", partitionID), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) CreateIndex(ctx context.Context, col *model.Collection, index *model.Index) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
|
||||
// insert index
|
||||
indexParamsBytes, err := json.Marshal(index.IndexParams)
|
||||
if err != nil {
|
||||
log.Error("marshal IndexParams of index failed", zap.String("tenant", tenantID), zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName), zap.Error(err))
|
||||
}
|
||||
indexParamsStr := string(indexParamsBytes)
|
||||
|
||||
idx := &dbmodel.Index{
|
||||
TenantID: tenantID,
|
||||
CollectionID: index.CollectionID,
|
||||
FieldID: index.FieldID,
|
||||
IndexID: index.IndexID,
|
||||
IndexName: index.IndexName,
|
||||
IndexParams: indexParamsStr,
|
||||
}
|
||||
|
||||
err = tc.metaDomain.IndexDb(txCtx).Insert([]*dbmodel.Index{idx})
|
||||
if err != nil {
|
||||
log.Error("insert indexes failed", zap.String("tenant", tenantID), zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// insert segment_indexes
|
||||
if len(index.SegmentIndexes) > 0 {
|
||||
segIndexes := make([]*dbmodel.SegmentIndex, 0, len(index.SegmentIndexes))
|
||||
|
||||
for _, segIndex := range index.SegmentIndexes {
|
||||
indexFilePaths, err := json.Marshal(segIndex.IndexFilePaths)
|
||||
if err != nil {
|
||||
log.Error("marshal IndexFilePaths failed", zap.String("tenant", tenantID), zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID), zap.Int64("segmentID", segIndex.SegmentID), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
indexFilePathsStr := string(indexFilePaths)
|
||||
si := &dbmodel.SegmentIndex{
|
||||
TenantID: tenantID,
|
||||
CollectionID: index.CollectionID,
|
||||
PartitionID: segIndex.PartitionID,
|
||||
SegmentID: segIndex.SegmentID,
|
||||
FieldID: index.FieldID,
|
||||
IndexID: index.IndexID,
|
||||
IndexBuildID: segIndex.BuildID,
|
||||
EnableIndex: segIndex.EnableIndex,
|
||||
IndexFilePaths: indexFilePathsStr,
|
||||
IndexSize: segIndex.IndexSize,
|
||||
}
|
||||
segIndexes = append(segIndexes, si)
|
||||
}
|
||||
|
||||
err := tc.metaDomain.SegmentIndexDb(txCtx).Insert(segIndexes)
|
||||
if err != nil {
|
||||
log.Error("insert segment_indexes failed", zap.String("tenant", tenantID), zap.Int64("collID", index.CollectionID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (tc *Catalog) AlterIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index, alterType metastore.AlterType) error {
|
||||
switch alterType {
|
||||
case metastore.ADD:
|
||||
return tc.alterAddIndex(ctx, oldIndex, newIndex)
|
||||
case metastore.DELETE:
|
||||
return tc.alterDeleteIndex(ctx, oldIndex, newIndex)
|
||||
default:
|
||||
return errors.New("Unknown alter type:" + fmt.Sprintf("%d", alterType))
|
||||
}
|
||||
}
|
||||
|
||||
// alterAddIndex applies an ADD-type index alteration in one transaction:
// it upserts every segment-index record that is new or changed relative to
// oldIndex, and refreshes the index row itself when its CreateTime or
// IsDeleted flag changed.
func (tc *Catalog) alterAddIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index) error {
	tenantID := contextutil.TenantID(ctx)

	return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
		adds := make([]*dbmodel.SegmentIndex, 0, len(newIndex.SegmentIndexes))

		// Collect only the segment indexes that are absent from oldIndex or
		// whose content differs — unchanged rows are not rewritten.
		for segID, newSegIndex := range newIndex.SegmentIndexes {
			oldSegIndex, ok := oldIndex.SegmentIndexes[segID]
			if !ok || !reflect.DeepEqual(oldSegIndex, newSegIndex) {
				segment := newSegIndex.Segment
				segIdxInfo := &dbmodel.SegmentIndex{
					TenantID:     tenantID,
					CollectionID: newIndex.CollectionID,
					PartitionID:  segment.PartitionID,
					SegmentID:    segment.SegmentID,
					FieldID:      newIndex.FieldID,
					IndexID:      newIndex.IndexID,
					IndexBuildID: newSegIndex.BuildID,
					EnableIndex:  newSegIndex.EnableIndex,
					CreateTime:   newSegIndex.CreateTime,
					// File paths and size are intentionally not written here;
					// presumably they are filled by a later build-completion
					// update — TODO confirm.
					//IndexFilePaths: indexFilePathsStr,
					//IndexSize: newSegIndex.IndexSize,
				}

				adds = append(adds, segIdxInfo)
			}
		}

		// Nothing changed: skip the transaction body entirely.
		if len(adds) == 0 {
			return nil
		}

		// upsert segment_indexes
		err := tc.metaDomain.SegmentIndexDb(txCtx).Upsert(adds)
		if err != nil {
			log.Error("upsert segment_indexes failed", zap.String("tenant", tenantID), zap.Int64("collectionID", newIndex.CollectionID), zap.Int64("indexID", newIndex.IndexID), zap.Error(err))
			return err
		}

		// update index info — only when the index row itself changed
		// (creation time or deletion flag), to avoid redundant writes.
		if oldIndex.CreateTime != newIndex.CreateTime || oldIndex.IsDeleted != newIndex.IsDeleted {
			indexParamsBytes, err := json.Marshal(newIndex.IndexParams)
			if err != nil {
				log.Error("marshal IndexParams of index failed", zap.String("tenant", tenantID), zap.Int64("collectionID", newIndex.CollectionID), zap.Int64("indexID", newIndex.IndexID), zap.Error(err))
				return err
			}
			indexParamsStr := string(indexParamsBytes)

			index := &dbmodel.Index{
				TenantID:     tenantID,
				CollectionID: newIndex.CollectionID,
				IndexName:    newIndex.IndexName,
				IndexID:      newIndex.IndexID,
				IndexParams:  indexParamsStr,
				IsDeleted:    newIndex.IsDeleted,
				CreateTime:   newIndex.CreateTime,
			}
			err = tc.metaDomain.IndexDb(txCtx).Update(index)
			if err != nil {
				return err
			}
		}

		return nil
	})
}
|
||||
|
||||
func (tc *Catalog) alterDeleteIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
delSegIndexes := make([]*dbmodel.SegmentIndex, 0, len(newIndex.SegmentIndexes))
|
||||
|
||||
for _, segIdx := range newIndex.SegmentIndexes {
|
||||
segIndex := &dbmodel.SegmentIndex{
|
||||
SegmentID: segIdx.SegmentID,
|
||||
IndexID: newIndex.IndexID,
|
||||
}
|
||||
delSegIndexes = append(delSegIndexes, segIndex)
|
||||
}
|
||||
|
||||
if len(delSegIndexes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := tc.metaDomain.SegmentIndexDb(ctx).MarkDeleted(tenantID, delSegIndexes)
|
||||
if err != nil {
|
||||
log.Error("mark SegmentIndex deleted failed", zap.Int64("collID", newIndex.CollectionID), zap.Int64("indexID", newIndex.IndexID), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (tc *Catalog) DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
return tc.txImpl.Transaction(ctx, func(txCtx context.Context) error {
|
||||
// mark deleted for index
|
||||
err := tc.metaDomain.IndexDb(txCtx).MarkDeletedByIndexID(tenantID, dropIdxID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// mark deleted for segment_indexes
|
||||
err = tc.metaDomain.SegmentIndexDb(txCtx).MarkDeletedByIndexID(tenantID, dropIdxID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (tc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
rs, err := tc.metaDomain.IndexDb(ctx).List(tenantID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result, err := dbmodel.UnmarshalIndexModel(rs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) CreateAlias(ctx context.Context, alias *model.Alias, ts typeutil.Timestamp) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
collAlias := &dbmodel.CollectionAlias{
|
||||
TenantID: tenantID,
|
||||
CollectionID: alias.CollectionID,
|
||||
CollectionAlias: alias.Name,
|
||||
Ts: ts,
|
||||
}
|
||||
err := tc.metaDomain.CollAliasDb(ctx).Insert([]*dbmodel.CollectionAlias{collAlias})
|
||||
if err != nil {
|
||||
log.Error("insert collection_aliases failed", zap.Int64("collID", alias.CollectionID), zap.String("alias", alias.Name), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) DropAlias(ctx context.Context, alias string, ts typeutil.Timestamp) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
collectionID, err := tc.metaDomain.CollAliasDb(ctx).GetCollectionIDByAlias(tenantID, alias, ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
collAlias := &dbmodel.CollectionAlias{
|
||||
TenantID: tenantID,
|
||||
CollectionID: collectionID,
|
||||
CollectionAlias: alias,
|
||||
Ts: ts,
|
||||
IsDeleted: true,
|
||||
}
|
||||
err = tc.metaDomain.CollAliasDb(ctx).Insert([]*dbmodel.CollectionAlias{collAlias})
|
||||
if err != nil {
|
||||
log.Error("insert tombstone record for collection_aliases failed", zap.Int64("collID", collectionID), zap.String("collAlias", alias), zap.Uint64("ts", ts), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) AlterAlias(ctx context.Context, alias *model.Alias, ts typeutil.Timestamp) error {
|
||||
//if ts == 0 {
|
||||
// tenantID := contextutil.TenantID(ctx)
|
||||
// alias := collection.Aliases[0]
|
||||
//
|
||||
// return tc.metaDomain.CollAliasDb(ctx).Update(tenantID, collection.CollectionID, alias, ts)
|
||||
//}
|
||||
|
||||
return tc.CreateAlias(ctx, alias, ts)
|
||||
}
|
||||
|
||||
// ListAliases query collection ID and aliases only, other information are not needed
|
||||
func (tc *Catalog) ListAliases(ctx context.Context, ts typeutil.Timestamp) ([]*model.Alias, error) {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
// 1. find each collection with latest ts
|
||||
cidTsPairs, err := tc.metaDomain.CollAliasDb(ctx).ListCollectionIDTs(tenantID, ts)
|
||||
if err != nil {
|
||||
log.Error("list latest ts and corresponding collectionID in collection_aliases failed", zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
if len(cidTsPairs) == 0 {
|
||||
return []*model.Alias{}, nil
|
||||
}
|
||||
|
||||
// 2. select with IN clause
|
||||
collAliases, err := tc.metaDomain.CollAliasDb(ctx).List(tenantID, cidTsPairs)
|
||||
if err != nil {
|
||||
log.Error("list collection alias failed", zap.Uint64("ts", ts), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := make([]*model.Alias, 0, len(collAliases))
|
||||
for _, record := range collAliases {
|
||||
r = append(r, &model.Alias{
|
||||
CollectionID: record.CollectionID,
|
||||
Name: record.CollectionAlias,
|
||||
})
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) GetCredential(ctx context.Context, username string) (*model.Credential, error) {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
user, err := tc.metaDomain.UserDb(ctx).GetByUsername(tenantID, username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dbmodel.UnmarshalUserModel(user), nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) CreateCredential(ctx context.Context, credential *model.Credential) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
user := &dbmodel.User{
|
||||
TenantID: tenantID,
|
||||
Username: credential.Username,
|
||||
EncryptedPassword: credential.EncryptedPassword,
|
||||
}
|
||||
|
||||
err := tc.metaDomain.UserDb(ctx).Insert(user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) DropCredential(ctx context.Context, username string) error {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
err := tc.metaDomain.UserDb(ctx).MarkDeletedByUsername(tenantID, username)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) ListCredentials(ctx context.Context) ([]string, error) {
|
||||
tenantID := contextutil.TenantID(ctx)
|
||||
|
||||
usernames, err := tc.metaDomain.UserDb(ctx).ListUsername(tenantID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return usernames, nil
|
||||
}
|
||||
|
||||
// CreateRole is not yet implemented for this catalog; it currently succeeds
// as a no-op.
func (tc *Catalog) CreateRole(ctx context.Context, tenant string, entity *milvuspb.RoleEntity) error {
	//TODO implement me
	return nil
}
|
||||
|
||||
// DropRole is not yet implemented for this catalog; it currently succeeds
// as a no-op.
func (tc *Catalog) DropRole(ctx context.Context, tenant string, roleName string) error {
	//TODO implement me
	return nil
}
|
||||
|
||||
// OperateUserRole is not yet implemented for this catalog; it currently
// succeeds as a no-op.
func (tc *Catalog) OperateUserRole(ctx context.Context, tenant string, userEntity *milvuspb.UserEntity, roleEntity *milvuspb.RoleEntity, operateType milvuspb.OperateUserRoleType) error {
	//TODO implement me
	return nil
}
|
||||
|
||||
// SelectRole is not yet implemented for this catalog; it currently returns
// no results and no error.
func (tc *Catalog) SelectRole(ctx context.Context, tenant string, entity *milvuspb.RoleEntity, includeUserInfo bool) ([]*milvuspb.RoleResult, error) {
	//TODO implement me
	return nil, nil
}
|
||||
|
||||
// SelectUser is not yet implemented for this catalog; it currently returns
// no results and no error.
func (tc *Catalog) SelectUser(ctx context.Context, tenant string, entity *milvuspb.UserEntity, includeRoleInfo bool) ([]*milvuspb.UserResult, error) {
	//TODO implement me
	return nil, nil
}
|
||||
|
||||
// OperatePrivilege is not yet implemented for this catalog; it currently
// succeeds as a no-op.
func (tc *Catalog) OperatePrivilege(ctx context.Context, tenant string, entity *milvuspb.GrantEntity, operateType milvuspb.OperatePrivilegeType) error {
	//TODO implement me
	return nil
}
|
||||
|
||||
// SelectGrant is not yet implemented for this catalog; it currently returns
// no results and no error.
func (tc *Catalog) SelectGrant(ctx context.Context, tenant string, entity *milvuspb.GrantEntity) ([]*milvuspb.GrantEntity, error) {
	//TODO implement me
	return nil, nil
}
|
||||
|
||||
// ListPolicy is not yet implemented for this catalog; it currently returns
// no results and no error.
func (tc *Catalog) ListPolicy(ctx context.Context, tenant string) ([]string, error) {
	//TODO implement me
	return nil, nil
}
|
||||
|
||||
// ListUserRole is not yet implemented for this catalog; it currently returns
// no results and no error.
func (tc *Catalog) ListUserRole(ctx context.Context, tenant string) ([]string, error) {
	//TODO implement me
	return nil, nil
}
|
||||
|
||||
// Close satisfies the metastore.Catalog interface. The db-backed catalog
// holds no per-instance resources of its own, so this is a no-op.
func (tc *Catalog) Close() {

}
|
File diff suppressed because it is too large
Load Diff
|
@ -10,22 +10,18 @@ import (
|
|||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/common"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/util"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/metastore"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/milvus-io/milvus/internal/common"
|
||||
"github.com/milvus-io/milvus/internal/kv"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
||||
"github.com/milvus-io/milvus/internal/util"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@ -520,7 +516,7 @@ func (kc *Catalog) DropPartition(ctx context.Context, collectionID typeutil.Uniq
|
|||
return kc.Snapshot.Save(k, string(v), ts)
|
||||
}
|
||||
|
||||
func (kc *Catalog) DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error {
|
||||
func (kc *Catalog) DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID) error {
|
||||
collMeta := model.MarshalCollectionModel(collectionInfo)
|
||||
k := path.Join(CollectionMetaPrefix, strconv.FormatInt(collectionInfo.CollectionID, 10))
|
||||
v, err := proto.Marshal(collMeta)
|
||||
|
|
|
@ -1,91 +0,0 @@
|
|||
package table
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type Catalog struct {
|
||||
}
|
||||
|
||||
func (tc *Catalog) CreateCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) GetCollectionByID(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) GetCollectionByName(ctx context.Context, collectionName string, ts typeutil.Timestamp) (*model.Collection, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (tc *Catalog) DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) CreatePartition(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) DropPartition(ctx context.Context, collectionInfo *model.Collection, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) AlterIndex(ctx context.Context, index *model.Index) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) CreateAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) DropAlias(ctx context.Context, collectionID typeutil.UniqueID, alias string, ts typeutil.Timestamp) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) AlterAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) ListAliases(ctx context.Context) ([]*model.Collection, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) GetCredential(ctx context.Context, username string) (*model.Credential, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) CreateCredential(ctx context.Context, credential *model.Credential) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) DropCredential(ctx context.Context, username string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) ListCredentials(ctx context.Context) ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (tc *Catalog) Close() {
|
||||
|
||||
}
|
|
@ -21,11 +21,7 @@ import (
|
|||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/common"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/kv"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore"
|
||||
kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv"
|
||||
|
@ -34,6 +30,8 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
||||
"github.com/milvus-io/milvus/internal/util/contextutil"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@ -69,10 +67,8 @@ const (
|
|||
|
||||
// MetaTable store all rootCoord meta info
|
||||
type MetaTable struct {
|
||||
ctx context.Context
|
||||
txn kv.TxnKV // client of a reliable txnkv service, i.e. etcd client
|
||||
snapshot kv.SnapShotKV // client of a reliable snapshotkv service, i.e. etcd client
|
||||
catalog metastore.Catalog
|
||||
ctx context.Context
|
||||
catalog metastore.Catalog
|
||||
|
||||
collID2Meta map[typeutil.UniqueID]model.Collection // collection id -> collection meta
|
||||
collName2ID map[string]typeutil.UniqueID // collection name to collection id
|
||||
|
@ -81,20 +77,16 @@ type MetaTable struct {
|
|||
segID2IndexID map[typeutil.UniqueID]typeutil.UniqueID // segment_id -> index_id
|
||||
indexID2Meta map[typeutil.UniqueID]*model.Index // collection id/index_id -> meta
|
||||
|
||||
ddLock sync.RWMutex
|
||||
credLock sync.RWMutex
|
||||
ddLock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewMetaTable creates meta table for rootcoord, which stores all in-memory information
|
||||
// for collection, partition, segment, index etc.
|
||||
func NewMetaTable(ctx context.Context, txn kv.TxnKV, snap kv.SnapShotKV) (*MetaTable, error) {
|
||||
func NewMetaTable(ctx context.Context, catalog metastore.Catalog) (*MetaTable, error) {
|
||||
mt := &MetaTable{
|
||||
ctx: ctx,
|
||||
txn: txn,
|
||||
snapshot: snap,
|
||||
catalog: &kvmetestore.Catalog{Txn: txn, Snapshot: snap},
|
||||
ddLock: sync.RWMutex{},
|
||||
credLock: sync.RWMutex{},
|
||||
ctx: contextutil.WithTenantID(ctx, Params.CommonCfg.ClusterName),
|
||||
catalog: catalog,
|
||||
ddLock: sync.RWMutex{},
|
||||
}
|
||||
err := mt.reloadFromCatalog()
|
||||
if err != nil {
|
||||
|
@ -689,7 +681,7 @@ func (mt *MetaTable) DropIndex(collName, fieldName, indexName string) (typeutil.
|
|||
col.FieldIDToIndexID = fieldIDToIndexID
|
||||
|
||||
// update metastore
|
||||
err = mt.catalog.DropIndex(mt.ctx, &col, dropIdxID, 0)
|
||||
err = mt.catalog.DropIndex(mt.ctx, &col, dropIdxID)
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
}
|
||||
|
@ -875,7 +867,7 @@ func (mt *MetaTable) RecycleDroppedIndex() error {
|
|||
|
||||
// update metastore
|
||||
newColMeta := colMeta
|
||||
if err := mt.catalog.DropIndex(mt.ctx, &newColMeta, dropIdxID, 0); err != nil {
|
||||
if err := mt.catalog.DropIndex(mt.ctx, &newColMeta, dropIdxID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -28,30 +28,27 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/milvus-io/milvus/internal/proto/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/util"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/common"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/metastore"
|
||||
"github.com/stretchr/testify/mock"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/kv"
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
memkv "github.com/milvus-io/milvus/internal/kv/mem"
|
||||
"github.com/milvus-io/milvus/internal/metastore"
|
||||
kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
||||
"github.com/milvus-io/milvus/internal/util"
|
||||
"github.com/milvus-io/milvus/internal/util/etcd"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/typeutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type mockTestKV struct {
|
||||
|
@ -134,16 +131,16 @@ func generateMetaTable(t *testing.T) (*MetaTable, *mockTestKV, *mockTestTxnKV, f
|
|||
assert.NotNil(t, skv)
|
||||
|
||||
txnkv := etcdkv.NewEtcdKV(etcdCli, rootPath)
|
||||
mt, err := NewMetaTable(context.TODO(), txnkv, skv)
|
||||
_, err = NewMetaTable(context.TODO(), &kvmetestore.Catalog{Txn: txnkv, Snapshot: skv})
|
||||
assert.Nil(t, err)
|
||||
mockSnapshotKV := &mockTestKV{
|
||||
SnapShotKV: mt.snapshot,
|
||||
SnapShotKV: skv,
|
||||
loadWithPrefix: func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
|
||||
return skv.LoadWithPrefix(key, ts)
|
||||
},
|
||||
}
|
||||
mockTxnKV := &mockTestTxnKV{
|
||||
TxnKV: mt.txn,
|
||||
TxnKV: txnkv,
|
||||
loadWithPrefix: func(key string) ([]string, []string, error) { return txnkv.LoadWithPrefix(key) },
|
||||
save: func(key, value string) error { return txnkv.Save(key, value) },
|
||||
multiSave: func(kvs map[string]string) error { return txnkv.MultiSave(kvs) },
|
||||
|
@ -153,7 +150,7 @@ func generateMetaTable(t *testing.T) (*MetaTable, *mockTestKV, *mockTestTxnKV, f
|
|||
remove: func(key string) error { return txnkv.Remove(key) },
|
||||
}
|
||||
|
||||
mockMt, err := NewMetaTable(context.TODO(), mockTxnKV, mockSnapshotKV)
|
||||
mockMt, err := NewMetaTable(context.TODO(), &kvmetestore.Catalog{Txn: mockTxnKV, Snapshot: mockSnapshotKV})
|
||||
assert.Nil(t, err)
|
||||
return mockMt, mockSnapshotKV, mockTxnKV, func() {
|
||||
etcdCli.Close()
|
||||
|
@ -201,7 +198,7 @@ func TestMetaTable(t *testing.T) {
|
|||
assert.Nil(t, err)
|
||||
assert.NotNil(t, skv)
|
||||
txnKV := etcdkv.NewEtcdKV(etcdCli, rootPath)
|
||||
mt, err := NewMetaTable(context.TODO(), txnKV, skv)
|
||||
mt, err := NewMetaTable(context.TODO(), &kvmetestore.Catalog{Txn: txnKV, Snapshot: skv})
|
||||
assert.Nil(t, err)
|
||||
|
||||
collInfo := &model.Collection{
|
||||
|
@ -251,6 +248,14 @@ func TestMetaTable(t *testing.T) {
|
|||
PartitionCreatedTimestamp: 0,
|
||||
},
|
||||
},
|
||||
VirtualChannelNames: []string{
|
||||
fmt.Sprintf("dmChannel_%dv%d", collID, 0),
|
||||
fmt.Sprintf("dmChannel_%dv%d", collID, 1),
|
||||
},
|
||||
PhysicalChannelNames: []string{
|
||||
funcutil.ToPhysicalChannel(fmt.Sprintf("dmChannel_%dv%d", collID, 0)),
|
||||
funcutil.ToPhysicalChannel(fmt.Sprintf("dmChannel_%dv%d", collID, 1)),
|
||||
},
|
||||
}
|
||||
|
||||
idxInfo := []*model.Index{
|
||||
|
@ -539,7 +544,7 @@ func TestMetaTable(t *testing.T) {
|
|||
},
|
||||
}
|
||||
mockTxnKV := &mockTestTxnKV{
|
||||
TxnKV: mt.txn,
|
||||
TxnKV: txnkv,
|
||||
loadWithPrefix: func(key string) ([]string, []string, error) { return txnkv.LoadWithPrefix(key) },
|
||||
save: func(key, value string) error { return txnkv.Save(key, value) },
|
||||
multiSave: func(kvs map[string]string) error { return txnkv.MultiSave(kvs) },
|
||||
|
@ -549,7 +554,7 @@ func TestMetaTable(t *testing.T) {
|
|||
remove: func(key string) error { return txnkv.Remove(key) },
|
||||
}
|
||||
|
||||
mt, err = NewMetaTable(context.TODO(), mockTxnKV, mockKV)
|
||||
mt, err = NewMetaTable(context.TODO(), &kvmetestore.Catalog{Txn: mockTxnKV, Snapshot: mockKV})
|
||||
assert.Nil(t, err)
|
||||
|
||||
wg.Add(1)
|
||||
|
@ -1560,7 +1565,7 @@ func TestMetaWithTimestamp(t *testing.T) {
|
|||
assert.Nil(t, err)
|
||||
assert.NotNil(t, skv)
|
||||
txnKV := etcdkv.NewEtcdKV(etcdCli, rootPath)
|
||||
mt, err := NewMetaTable(context.TODO(), txnKV, skv)
|
||||
mt, err := NewMetaTable(context.TODO(), &kvmetestore.Catalog{Txn: txnKV, Snapshot: skv})
|
||||
assert.Nil(t, err)
|
||||
|
||||
collInfo := &model.Collection{
|
||||
|
@ -1729,7 +1734,7 @@ func TestFixIssue10540(t *testing.T) {
|
|||
txnKV.Save(path.Join(kvmetestore.SegmentIndexMetaPrefix, "2"), string(kvmetestore.SuffixSnapshotTombstone))
|
||||
txnKV.Save(path.Join(kvmetestore.IndexMetaPrefix, "3"), string(kvmetestore.SuffixSnapshotTombstone))
|
||||
|
||||
_, err = NewMetaTable(context.TODO(), txnKV, skv)
|
||||
_, err = NewMetaTable(context.TODO(), &kvmetestore.Catalog{Txn: txnKV, Snapshot: skv})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
|
@ -2069,9 +2074,11 @@ func TestMetaTable_AlignSegmentsMeta(t *testing.T) {
|
|||
indexID = UniqueID(1000)
|
||||
)
|
||||
mt := &MetaTable{
|
||||
txn: &mockTestTxnKV{
|
||||
multiRemove: func(keys []string) error {
|
||||
return nil
|
||||
catalog: &kvmetestore.Catalog{
|
||||
Txn: &mockTestTxnKV{
|
||||
multiRemove: func(keys []string) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
collID2Meta: map[typeutil.UniqueID]model.Collection{
|
||||
|
@ -2109,7 +2116,7 @@ func TestMetaTable_AlignSegmentsMeta(t *testing.T) {
|
|||
return fmt.Errorf("error occurred")
|
||||
},
|
||||
}
|
||||
mt.txn = txn
|
||||
mt.catalog = &kvmetestore.Catalog{Txn: txn}
|
||||
mt.AlignSegmentsMeta(collID, partID, map[UniqueID]struct{}{103: {}, 104: {}, 105: {}})
|
||||
})
|
||||
}
|
||||
|
@ -2232,7 +2239,7 @@ type MockedCatalog struct {
|
|||
metastore.Catalog
|
||||
alterIndexParamsVerification func(ctx context.Context, oldIndex *model.Index, newIndex *model.Index, alterType metastore.AlterType)
|
||||
createIndexParamsVerification func(ctx context.Context, col *model.Collection, index *model.Index)
|
||||
dropIndexParamsVerification func(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp)
|
||||
dropIndexParamsVerification func(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID)
|
||||
}
|
||||
|
||||
func (mc *MockedCatalog) ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) {
|
||||
|
@ -2275,9 +2282,9 @@ func (mc *MockedCatalog) CreateIndex(ctx context.Context, col *model.Collection,
|
|||
}
|
||||
|
||||
func (mc *MockedCatalog) DropIndex(ctx context.Context, collectionInfo *model.Collection,
|
||||
dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error {
|
||||
dropIdxID typeutil.UniqueID) error {
|
||||
if mc.dropIndexParamsVerification != nil {
|
||||
mc.dropIndexParamsVerification(ctx, collectionInfo, dropIdxID, ts)
|
||||
mc.dropIndexParamsVerification(ctx, collectionInfo, dropIdxID)
|
||||
}
|
||||
|
||||
args := mc.Called()
|
||||
|
@ -2634,7 +2641,7 @@ func TestMetaTable_RecycleDroppedIndex(t *testing.T) {
|
|||
|
||||
mc := &MockedCatalog{}
|
||||
mc.On("DropIndex").Return(nil)
|
||||
mc.dropIndexParamsVerification = func(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) {
|
||||
mc.dropIndexParamsVerification = func(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID) {
|
||||
assert.NotNil(t, collectionInfo)
|
||||
assert.Equal(t, int64(1), dropIdxID)
|
||||
assert.Equal(t, int64(1), collectionInfo.CollectionID)
|
||||
|
|
|
@ -18,28 +18,25 @@ package rootcoord
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/util/errorutil"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/allocator"
|
||||
"github.com/milvus-io/milvus/internal/common"
|
||||
"github.com/milvus-io/milvus/internal/kv"
|
||||
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/metastore"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dao"
|
||||
"github.com/milvus-io/milvus/internal/metastore/db/dbcore"
|
||||
kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/metrics"
|
||||
|
@ -57,6 +54,8 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/util"
|
||||
"github.com/milvus-io/milvus/internal/util/crypto"
|
||||
"github.com/milvus-io/milvus/internal/util/dependency"
|
||||
"github.com/milvus-io/milvus/internal/util/errorutil"
|
||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||
"github.com/milvus-io/milvus/internal/util/metricsinfo"
|
||||
"github.com/milvus-io/milvus/internal/util/paramtable"
|
||||
"github.com/milvus-io/milvus/internal/util/retry"
|
||||
|
@ -1157,19 +1156,37 @@ func (c *Core) Init() error {
|
|||
log.Error("RootCoord failed to new EtcdKV for MetaKV", zap.Any("reason", initError))
|
||||
return initError
|
||||
}
|
||||
var metaKV kv.TxnKV
|
||||
metaKV, initError = c.kvBaseCreate(Params.EtcdCfg.MetaRootPath)
|
||||
if initError != nil {
|
||||
log.Error("RootCoord failed to new EtcdKV", zap.Any("reason", initError))
|
||||
return initError
|
||||
|
||||
var catalog metastore.Catalog
|
||||
switch Params.MetaStoreCfg.MetaStoreType {
|
||||
case util.MetaStoreTypeEtcd:
|
||||
var metaKV kv.TxnKV
|
||||
metaKV, initError = c.kvBaseCreate(Params.EtcdCfg.MetaRootPath)
|
||||
if initError != nil {
|
||||
log.Error("RootCoord failed to new EtcdKV", zap.Any("reason", initError))
|
||||
return initError
|
||||
}
|
||||
|
||||
var ss *kvmetestore.SuffixSnapshot
|
||||
if ss, initError = kvmetestore.NewSuffixSnapshot(metaKV, "_ts", Params.EtcdCfg.MetaRootPath, "snapshots"); initError != nil {
|
||||
log.Error("RootCoord failed to new suffixSnapshot", zap.Error(initError))
|
||||
return initError
|
||||
}
|
||||
|
||||
catalog = &kvmetestore.Catalog{Txn: metaKV, Snapshot: ss}
|
||||
case util.MetaStoreTypeMysql:
|
||||
// connect to database
|
||||
err := dbcore.Connect(&Params.DBCfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
catalog = db.NewTableCatalog(dbcore.NewTxImpl(), dao.NewMetaDomain())
|
||||
default:
|
||||
return fmt.Errorf("not supported meta store: %s", Params.MetaStoreCfg.MetaStoreType)
|
||||
}
|
||||
|
||||
var ss *kvmetestore.SuffixSnapshot
|
||||
if ss, initError = kvmetestore.NewSuffixSnapshot(metaKV, "_ts", Params.EtcdCfg.MetaRootPath, "snapshots"); initError != nil {
|
||||
log.Error("RootCoord failed to new suffixSnapshot", zap.Error(initError))
|
||||
return initError
|
||||
}
|
||||
if c.MetaTable, initError = NewMetaTable(c.ctx, metaKV, ss); initError != nil {
|
||||
if c.MetaTable, initError = NewMetaTable(c.ctx, catalog); initError != nil {
|
||||
log.Error("RootCoord failed to new MetaTable", zap.Any("reason", initError))
|
||||
return initError
|
||||
}
|
||||
|
@ -1327,132 +1344,6 @@ func (c *Core) initRbac() (initError error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
// reSendDdMsg replays the last persisted DD (data definition) operation onto
// the message stream after a restart, so downstream consumers do not miss a
// DD message that was written to meta storage but never actually sent.
//
// When force is false, the DDMsgSendPrefix flag in the meta KV store is
// consulted first and the method returns early if the message is already
// known to have been sent (or if the flag key does not exist at all).
// On success the flag is saved back as "true".
func (c *Core) reSendDdMsg(ctx context.Context, force bool) error {
	if !force {
		flag, err := c.MetaTable.txn.Load(DDMsgSendPrefix)
		if err != nil {
			// TODO, this is super ugly hack but our kv interface does not support loadWithExist
			// leave it for later
			if strings.Contains(err.Error(), "there is no value on key") {
				log.Debug("skip reSendDdMsg with no dd-msg-send key")
				return nil
			}
			return err
		}
		value, err := strconv.ParseBool(flag)
		if err != nil {
			return err
		}
		if value {
			log.Debug("skip reSendDdMsg with dd-msg-send set to true")
			return nil
		}
	}

	// Load the serialized DD operation; a missing key means there is nothing
	// to replay, which is not an error.
	ddOpStr, err := c.MetaTable.txn.Load(DDOperationPrefix)
	if err != nil {
		log.Debug("DdOperation key does not exist")
		return nil
	}
	var ddOp DdOperation
	if err = json.Unmarshal([]byte(ddOpStr), &ddOp); err != nil {
		return err
	}

	invalidateCache := false
	var ts typeutil.Timestamp
	var collName string
	var collectionID UniqueID

	// Each branch re-sends only when the meta state shows the operation has
	// not completed (e.g. the collection/partition still/no longer exists).
	switch ddOp.Type {
	// TODO remove create collection resend
	// since create collection needs a start position to succeed
	case CreateCollectionDDType:
		var ddReq = internalpb.CreateCollectionRequest{}
		if err = proto.Unmarshal(ddOp.Body, &ddReq); err != nil {
			return err
		}
		if _, err := c.MetaTable.GetCollectionByName(ddReq.CollectionName, 0); err != nil {
			if _, err = c.SendDdCreateCollectionReq(ctx, &ddReq, ddReq.PhysicalChannelNames); err != nil {
				return err
			}
		} else {
			// NOTE(review): collName is never assigned in this branch, so this
			// logs an empty collection name — confirm whether ddReq.CollectionName
			// was intended here.
			log.Debug("collection has been created, skip re-send CreateCollection",
				zap.String("collection name", collName))
		}
	case DropCollectionDDType:
		var ddReq = internalpb.DropCollectionRequest{}
		if err = proto.Unmarshal(ddOp.Body, &ddReq); err != nil {
			return err
		}
		ts = ddReq.Base.Timestamp
		collName = ddReq.CollectionName
		if collInfo, err := c.MetaTable.GetCollectionByName(ddReq.CollectionName, 0); err == nil {
			if err = c.SendDdDropCollectionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil {
				return err
			}
			invalidateCache = true
			collectionID = ddReq.CollectionID
		} else {
			log.Debug("collection has been removed, skip re-send DropCollection",
				zap.String("collection name", collName))
		}
	case CreatePartitionDDType:
		var ddReq = internalpb.CreatePartitionRequest{}
		if err = proto.Unmarshal(ddOp.Body, &ddReq); err != nil {
			return err
		}
		ts = ddReq.Base.Timestamp
		collName = ddReq.CollectionName
		collInfo, err := c.MetaTable.GetCollectionByName(ddReq.CollectionName, 0)
		if err != nil {
			return err
		}
		if _, err = c.MetaTable.GetPartitionByName(collInfo.CollectionID, ddReq.PartitionName, 0); err != nil {
			if err = c.SendDdCreatePartitionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil {
				return err
			}
			invalidateCache = true
			collectionID = ddReq.CollectionID
		} else {
			log.Debug("partition has been created, skip re-send CreatePartition",
				zap.String("collection name", collName), zap.String("partition name", ddReq.PartitionName))
		}
	case DropPartitionDDType:
		var ddReq = internalpb.DropPartitionRequest{}
		if err = proto.Unmarshal(ddOp.Body, &ddReq); err != nil {
			return err
		}
		ts = ddReq.Base.Timestamp
		collName = ddReq.CollectionName
		collInfo, err := c.MetaTable.GetCollectionByName(ddReq.CollectionName, 0)
		if err != nil {
			return err
		}
		if _, err = c.MetaTable.GetPartitionByName(collInfo.CollectionID, ddReq.PartitionName, 0); err == nil {
			if err = c.SendDdDropPartitionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil {
				return err
			}
			invalidateCache = true
			collectionID = ddReq.CollectionID
		} else {
			log.Debug("partition has been removed, skip re-send DropPartition",
				zap.String("collection name", collName), zap.String("partition name", ddReq.PartitionName))
		}
	default:
		return fmt.Errorf("invalid DdOperation %s", ddOp.Type)
	}

	// A re-sent drop/create must also expire proxy meta caches for the
	// affected collection.
	if invalidateCache {
		if err = c.ExpireMetaCache(ctx, nil, collectionID, ts); err != nil {
			return err
		}
	}

	// Update DDOperation in etcd
	return c.MetaTable.txn.Save(DDMsgSendPrefix, strconv.FormatBool(true))
}
|
||||
|
||||
func (c *Core) getCollectionName(collID, partitionID typeutil.UniqueID) (string, string, error) {
|
||||
colName, err := c.MetaTable.GetCollectionNameByID(collID)
|
||||
if err != nil {
|
||||
|
@ -1486,10 +1377,6 @@ func (c *Core) Start() error {
|
|||
// you can not just stuck here,
|
||||
panic(err)
|
||||
}
|
||||
if err := c.reSendDdMsg(c.ctx, false); err != nil {
|
||||
log.Fatal("RootCoord Start reSendDdMsg failed", zap.Error(err))
|
||||
panic(err)
|
||||
}
|
||||
c.wg.Add(7)
|
||||
go c.startTimeTickLoop()
|
||||
go c.tsLoop()
|
||||
|
|
|
@ -788,10 +788,10 @@ func TestRootCoordInitData(t *testing.T) {
|
|||
assert.NotNil(t, snapshotKV)
|
||||
assert.NoError(t, err)
|
||||
txnKV := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
|
||||
mt, err := NewMetaTable(context.TODO(), txnKV, snapshotKV)
|
||||
mt, err := NewMetaTable(context.TODO(), &kvmetestore.Catalog{Txn: txnKV, Snapshot: snapshotKV})
|
||||
assert.NoError(t, err)
|
||||
mockTxnKV := &mockTestTxnKV{
|
||||
TxnKV: mt.txn,
|
||||
TxnKV: txnKV,
|
||||
save: func(key, value string) error {
|
||||
return fmt.Errorf("save error")
|
||||
},
|
||||
|
@ -1030,9 +1030,6 @@ func TestRootCoord_Base(t *testing.T) {
|
|||
status, err = core.CreateCollection(ctx, req)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
|
||||
|
||||
err = core.reSendDdMsg(core.ctx, true)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
wg.Add(1)
|
||||
|
@ -1164,9 +1161,6 @@ func TestRootCoord_Base(t *testing.T) {
|
|||
|
||||
assert.Equal(t, 1, len(pnm.GetCollIDs()))
|
||||
assert.Equal(t, collMeta.CollectionID, pnm.GetCollIDs()[0])
|
||||
|
||||
err = core.reSendDdMsg(core.ctx, true)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
wg.Add(1)
|
||||
|
@ -1804,9 +1798,6 @@ func TestRootCoord_Base(t *testing.T) {
|
|||
|
||||
assert.Equal(t, 2, len(pnm.GetCollIDs()))
|
||||
assert.Equal(t, collMeta.CollectionID, pnm.GetCollIDs()[1])
|
||||
|
||||
err = core.reSendDdMsg(core.ctx, true)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
wg.Add(1)
|
||||
|
@ -1881,9 +1872,6 @@ func TestRootCoord_Base(t *testing.T) {
|
|||
collIDs = pnm.GetCollIDs()
|
||||
assert.Equal(t, 3, len(collIDs))
|
||||
assert.Equal(t, collMeta.CollectionID, collIDs[2])
|
||||
|
||||
err = core.reSendDdMsg(core.ctx, true)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
wg.Add(1)
|
||||
|
|
|
@ -19,12 +19,11 @@ package rootcoord
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/milvus-io/milvus/internal/common"
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
model "github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/etcdpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/internalpb"
|
||||
|
@ -287,8 +286,7 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Update DDOperation in etcd
|
||||
return t.core.MetaTable.txn.Save(DDMsgSendPrefix, strconv.FormatBool(true))
|
||||
return nil
|
||||
}
|
||||
|
||||
// DropCollectionReqTask drop collection request task
|
||||
|
@ -413,8 +411,7 @@ func (t *DropCollectionReqTask) Execute(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Update DDOperation in etcd
|
||||
return t.core.MetaTable.txn.Save(DDMsgSendPrefix, strconv.FormatBool(true))
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasCollectionReqTask has collection request task
|
||||
|
@ -617,8 +614,7 @@ func (t *CreatePartitionReqTask) Execute(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Update DDOperation in etcd
|
||||
return t.core.MetaTable.txn.Save(DDMsgSendPrefix, strconv.FormatBool(true))
|
||||
return nil
|
||||
}
|
||||
|
||||
// DropPartitionReqTask drop partition request task
|
||||
|
@ -715,8 +711,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
|
|||
// return err
|
||||
//}
|
||||
|
||||
// Update DDOperation in etcd
|
||||
return t.core.MetaTable.txn.Save(DDMsgSendPrefix, strconv.FormatBool(true))
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasPartitionReqTask has partition request task
|
||||
|
|
|
@ -100,11 +100,6 @@ func EncodeDdOperation(m proto.Message, ddType string) (string, error) {
|
|||
return string(ddOpByte), nil
|
||||
}
|
||||
|
||||
// DecodeDdOperation deserialize string to DdOperation
|
||||
func DecodeDdOperation(str string, ddOp *DdOperation) error {
|
||||
return json.Unmarshal([]byte(str), ddOp)
|
||||
}
|
||||
|
||||
// EncodeMsgPositions serialize []*MsgPosition into string
|
||||
func EncodeMsgPositions(msgPositions []*msgstream.MsgPosition) (string, error) {
|
||||
if len(msgPositions) == 0 {
|
||||
|
|
|
@ -24,6 +24,9 @@ import (
|
|||
|
||||
// Meta Prefix consts
|
||||
const (
|
||||
MetaStoreTypeEtcd = "etcd"
|
||||
MetaStoreTypeMysql = "mysql"
|
||||
|
||||
SegmentMetaPrefix = "queryCoord-segmentMeta"
|
||||
ChangeInfoMetaPrefix = "queryCoord-sealedSegmentChangeInfo"
|
||||
HandoffSegmentPrefix = "querycoord-handoff"
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
// Licensed to the LF AI & Data foundation under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package contextutil
|
||||
|
||||
import "context"
|
||||
|
||||
type ctxTenantKey struct{}
|
||||
|
||||
// WithTenantID creates a new context that has tenantID injected.
|
||||
func WithTenantID(ctx context.Context, tenantID string) context.Context {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
return context.WithValue(ctx, ctxTenantKey{}, tenantID)
|
||||
}
|
||||
|
||||
// TenantID tries to retrieve tenantID from the given context.
|
||||
// If it doesn't exist, an empty string is returned.
|
||||
func TenantID(ctx context.Context) string {
|
||||
if requestID, ok := ctx.Value(ctxTenantKey{}).(string); ok {
|
||||
return requestID
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
|
@ -218,6 +218,14 @@ func KeyValuePair2Map(datas []*commonpb.KeyValuePair) map[string]string {
|
|||
return results
|
||||
}
|
||||
|
||||
func ConvertToKeyValuePairPointer(datas []commonpb.KeyValuePair) []*commonpb.KeyValuePair {
|
||||
var kvs []*commonpb.KeyValuePair
|
||||
for i := 0; i < len(datas); i++ {
|
||||
kvs = append(kvs, &datas[i])
|
||||
}
|
||||
return kvs
|
||||
}
|
||||
|
||||
// GenChannelSubName generate subName to watch channel
|
||||
func GenChannelSubName(prefix string, collectionID int64, nodeID int64) string {
|
||||
return fmt.Sprintf("%s-%d-%d", prefix, collectionID, nodeID)
|
||||
|
|
|
@ -20,9 +20,8 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -130,6 +129,8 @@ type commonConfig struct {
|
|||
SimdType string
|
||||
|
||||
AuthorizationEnabled bool
|
||||
|
||||
ClusterName string
|
||||
}
|
||||
|
||||
func (p *commonConfig) init(base *BaseTable) {
|
||||
|
@ -167,6 +168,8 @@ func (p *commonConfig) init(base *BaseTable) {
|
|||
p.initStorageType()
|
||||
|
||||
p.initEnableAuthorization()
|
||||
|
||||
p.initClusterName()
|
||||
}
|
||||
|
||||
func (p *commonConfig) initClusterPrefix() {
|
||||
|
@ -370,6 +373,10 @@ func (p *commonConfig) initEnableAuthorization() {
|
|||
p.AuthorizationEnabled = p.Base.ParseBool("common.security.authorizationEnabled", false)
|
||||
}
|
||||
|
||||
func (p *commonConfig) initClusterName() {
|
||||
p.ClusterName = p.Base.LoadWithDefault("common.cluster.name", "")
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// --- rootcoord ---
|
||||
type rootCoordConfig struct {
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/log"
|
||||
"github.com/milvus-io/milvus/internal/util"
|
||||
"github.com/milvus-io/milvus/internal/util/metricsinfo"
|
||||
"github.com/streamnative/pulsarctl/pkg/cmdutils"
|
||||
"go.uber.org/zap"
|
||||
|
@ -44,7 +45,9 @@ type ServiceParam struct {
|
|||
BaseTable
|
||||
|
||||
LocalStorageCfg LocalStorageConfig
|
||||
MetaStoreCfg MetaStoreConfig
|
||||
EtcdCfg EtcdConfig
|
||||
DBCfg MetaDBConfig
|
||||
PulsarCfg PulsarConfig
|
||||
KafkaCfg KafkaConfig
|
||||
RocksmqCfg RocksmqConfig
|
||||
|
@ -55,7 +58,12 @@ func (p *ServiceParam) Init() {
|
|||
p.BaseTable.Init()
|
||||
|
||||
p.LocalStorageCfg.init(&p.BaseTable)
|
||||
p.MetaStoreCfg.init(&p.BaseTable)
|
||||
p.EtcdCfg.init(&p.BaseTable)
|
||||
if p.MetaStoreCfg.MetaStoreType == util.MetaStoreTypeMysql {
|
||||
log.Debug("Mysql protocol is used as meta store")
|
||||
p.DBCfg.init(&p.BaseTable)
|
||||
}
|
||||
p.PulsarCfg.init(&p.BaseTable)
|
||||
p.KafkaCfg.init(&p.BaseTable)
|
||||
p.RocksmqCfg.init(&p.BaseTable)
|
||||
|
@ -201,6 +209,97 @@ func (p *LocalStorageConfig) initPath() {
|
|||
p.Path = p.Base.LoadWithDefault("localStorage.path", "/var/lib/milvus/data")
|
||||
}
|
||||
|
||||
type MetaStoreConfig struct {
|
||||
Base *BaseTable
|
||||
|
||||
MetaStoreType string
|
||||
}
|
||||
|
||||
func (p *MetaStoreConfig) init(base *BaseTable) {
|
||||
p.Base = base
|
||||
p.initMetaStoreType()
|
||||
}
|
||||
|
||||
func (p *MetaStoreConfig) initMetaStoreType() {
|
||||
p.MetaStoreType = p.Base.LoadWithDefault("metastore.type", util.MetaStoreTypeEtcd)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// --- meta db ---
|
||||
type MetaDBConfig struct {
|
||||
Base *BaseTable
|
||||
|
||||
Username string
|
||||
Password string
|
||||
Address string
|
||||
Port int
|
||||
DBName string
|
||||
MaxOpenConns int
|
||||
MaxIdleConns int
|
||||
}
|
||||
|
||||
func (p *MetaDBConfig) init(base *BaseTable) {
|
||||
p.Base = base
|
||||
p.LoadCfgToMemory()
|
||||
}
|
||||
|
||||
func (p *MetaDBConfig) LoadCfgToMemory() {
|
||||
p.initUsername()
|
||||
p.initPassword()
|
||||
p.initAddress()
|
||||
p.initPort()
|
||||
p.initDbName()
|
||||
p.initMaxOpenConns()
|
||||
p.initMaxIdleConns()
|
||||
}
|
||||
|
||||
func (p *MetaDBConfig) initUsername() {
|
||||
username, err := p.Base.Load("mysql.username")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.Username = username
|
||||
}
|
||||
|
||||
func (p *MetaDBConfig) initPassword() {
|
||||
password, err := p.Base.Load("mysql.password")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.Password = password
|
||||
}
|
||||
|
||||
func (p *MetaDBConfig) initAddress() {
|
||||
address, err := p.Base.Load("mysql.address")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.Address = address
|
||||
}
|
||||
|
||||
func (p *MetaDBConfig) initPort() {
|
||||
port := p.Base.ParseIntWithDefault("mysql.port", 3306)
|
||||
p.Port = port
|
||||
}
|
||||
|
||||
func (p *MetaDBConfig) initDbName() {
|
||||
dbName, err := p.Base.Load("mysql.dbName")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.DBName = dbName
|
||||
}
|
||||
|
||||
func (p *MetaDBConfig) initMaxOpenConns() {
|
||||
maxOpenConns := p.Base.ParseIntWithDefault("mysql.maxOpenConns", 20)
|
||||
p.MaxOpenConns = maxOpenConns
|
||||
}
|
||||
|
||||
func (p *MetaDBConfig) initMaxIdleConns() {
|
||||
maxIdleConns := p.Base.ParseIntWithDefault("mysql.maxIdleConns", 5)
|
||||
p.MaxIdleConns = maxIdleConns
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// --- pulsar ---
|
||||
type PulsarConfig struct {
|
||||
|
|
|
@ -0,0 +1,224 @@
|
|||
-- create database
CREATE DATABASE if not exists milvus_meta CHARACTER SET utf8mb4;

/*
create tables script

Notices:
    1. id, tenant_id, is_deleted, created_at, updated_at are 5 common columns for all collections.
    2. Query index in community version CANNOT includes tenant_id, since tenant_id is not existed and will miss query index.
*/

-- collections
-- NOTE(review): the UNIQUE key on (tenant_id, collection_id, ts) suggests rows
-- are versioned by the ts column rather than updated in place — confirm.
CREATE TABLE if not exists milvus_meta.collections (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    collection_id BIGINT NOT NULL,
    collection_name VARCHAR(128),
    description VARCHAR(2048) DEFAULT NULL,
    auto_id BOOL DEFAULT FALSE,
    shards_num INT,
    start_position TEXT,
    consistency_level INT,
    ts BIGINT UNSIGNED DEFAULT 0,
    is_deleted BOOL DEFAULT FALSE,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    UNIQUE (tenant_id, collection_id, ts),
    INDEX idx_collection_id_ts (collection_id, ts)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- collection aliases
CREATE TABLE if not exists milvus_meta.collection_aliases (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    collection_id BIGINT NOT NULL,
    collection_alias VARCHAR(128),
    ts BIGINT UNSIGNED DEFAULT 0,
    is_deleted BOOL DEFAULT FALSE,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    UNIQUE (tenant_id, collection_alias, ts),
    INDEX idx_tenant_id_collection_id_ts (tenant_id, collection_id, ts),
    INDEX idx_collection_id_ts (collection_id, ts)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- channels
CREATE TABLE if not exists milvus_meta.collection_channels (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    collection_id BIGINT NOT NULL,
    virtual_channel_name VARCHAR(256) NOT NULL,
    physical_channel_name VARCHAR(256) NOT NULL,
    removed BOOL DEFAULT FALSE,
    ts BIGINT UNSIGNED DEFAULT 0,
    is_deleted BOOL DEFAULT FALSE,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    UNIQUE (tenant_id, collection_id, virtual_channel_name, ts),
    INDEX idx_tenant_id_collection_id_ts (tenant_id, collection_id, ts),
    INDEX idx_collection_id_ts (collection_id, ts)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- fields
CREATE TABLE if not exists milvus_meta.field_schemas (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    field_id BIGINT NOT NULL,
    field_name VARCHAR(128) NOT NULL,
    is_primary_key BOOL NOT NULL,
    description VARCHAR(2048) DEFAULT NULL,
    data_type INT UNSIGNED NOT NULL,
    type_params VARCHAR(2048),
    index_params VARCHAR(2048),
    auto_id BOOL NOT NULL,
    collection_id BIGINT NOT NULL,
    ts BIGINT UNSIGNED DEFAULT 0,
    is_deleted BOOL DEFAULT FALSE,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    UNIQUE (tenant_id, collection_id, field_name, ts),
    INDEX idx_tenant_id_collection_id_field_id_ts (tenant_id, collection_id, field_id, ts),
    INDEX idx_collection_id_field_id_ts (collection_id, field_id, ts)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- partitions
-- `partitions` is backquoted because PARTITIONS is a reserved word in MySQL 8.
CREATE TABLE if not exists milvus_meta.`partitions` (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    partition_id BIGINT NOT NULL,
    partition_name VARCHAR(128),
    partition_created_timestamp bigint unsigned,
    collection_id BIGINT NOT NULL,
    ts BIGINT UNSIGNED DEFAULT 0,
    is_deleted BOOL DEFAULT FALSE,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    UNIQUE (tenant_id, collection_id, partition_name, ts),
    INDEX idx_tenant_id_collection_id_partition_id_ts (tenant_id, collection_id, partition_id, ts),
    INDEX idx_collection_id_partition_id_ts (collection_id, partition_id, ts)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- indexes
CREATE TABLE if not exists milvus_meta.`indexes` (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    field_id BIGINT NOT NULL,
    collection_id BIGINT NOT NULL,
    index_id BIGINT NOT NULL,
    index_name VARCHAR(128),
    index_params VARCHAR(2048),
    create_time bigint unsigned,
    is_deleted BOOL DEFAULT FALSE,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    UNIQUE (tenant_id, collection_id, index_name),
    INDEX idx_tenant_id_collection_id_index_id (tenant_id, collection_id, index_id),
    INDEX idx_collection_id_index_id (collection_id, index_id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- index file paths
CREATE TABLE if not exists milvus_meta.index_file_paths (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    index_build_id BIGINT NOT NULL,
    index_file_path VARCHAR(256),
    is_deleted BOOL DEFAULT FALSE,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    INDEX idx_tenant_id_index_build_id (tenant_id, index_build_id),
    INDEX idx_index_build_id (index_build_id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- segments
CREATE TABLE if not exists milvus_meta.segments (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    segment_id BIGINT NOT NULL,
    collection_id BIGINT NOT NULL,
    partition_id BIGINT NOT NULL,
    num_rows BIGINT NOT NULL,
    max_row_num INT COMMENT 'estimate max rows',
    dm_channel VARCHAR(128) NOT NULL,
    dml_position TEXT COMMENT 'checkpoint',
    start_position TEXT,
    compaction_from VARCHAR(4096) COMMENT 'old segment IDs',
    created_by_compaction BOOL,
    segment_state TINYINT UNSIGNED NOT NULL,
    last_expire_time bigint unsigned COMMENT 'segment assignment expiration time',
    dropped_at bigint unsigned,
    is_deleted BOOL DEFAULT FALSE,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    INDEX idx_tenant_id_collection_id_segment_id (tenant_id, collection_id, segment_id),
    INDEX idx_collection_id_segment_id (collection_id, segment_id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- segment indexes
CREATE TABLE if not exists milvus_meta.segment_indexes (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    collection_id BIGINT NOT NULL,
    partition_id BIGINT NOT NULL,
    segment_id BIGINT NOT NULL,
    field_id BIGINT NOT NULL,
    index_id BIGINT NOT NULL,
    index_build_id BIGINT,
    enable_index BOOL NOT NULL,
    create_time bigint unsigned,
    index_file_paths VARCHAR(4096),
    index_size BIGINT UNSIGNED,
    `version` INT UNSIGNED,
    is_deleted BOOL DEFAULT FALSE COMMENT 'as mark_deleted',
    recycled BOOL DEFAULT FALSE COMMENT 'binlog files truly deleted',
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    UNIQUE (tenant_id, segment_id, index_id),
    INDEX idx_tenant_id_collection_id_segment_id_index_id (tenant_id, collection_id, segment_id, index_id),
    INDEX idx_collection_id_segment_id_index_id (collection_id, segment_id, index_id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- binlog files info
CREATE TABLE if not exists milvus_meta.binlogs (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    field_id BIGINT NOT NULL,
    segment_id BIGINT NOT NULL,
    collection_id BIGINT NOT NULL,
    log_type SMALLINT UNSIGNED NOT NULL COMMENT 'binlog、stats binlog、delta binlog',
    num_entries BIGINT,
    timestamp_from BIGINT UNSIGNED,
    timestamp_to BIGINT UNSIGNED,
    log_path VARCHAR(256) NOT NULL,
    log_size BIGINT NOT NULL,
    is_deleted BOOL DEFAULT FALSE,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    INDEX idx_tenant_id_segment_id_log_type (tenant_id, segment_id, log_type),
    INDEX idx_segment_id_log_type (segment_id, log_type)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- users
CREATE TABLE if not exists milvus_meta.credential_users (
    id BIGINT NOT NULL AUTO_INCREMENT,
    tenant_id VARCHAR(128) DEFAULT NULL,
    username VARCHAR(128) NOT NULL,
    encrypted_password VARCHAR(256) NOT NULL,
    is_super BOOL NOT NULL DEFAULT false,
    is_deleted BOOL NOT NULL DEFAULT false,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on update current_timestamp,
    PRIMARY KEY (id),
    INDEX idx_tenant_id_username (tenant_id, username),
    INDEX idx_username (username)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
|
Loading…
Reference in New Issue