chore: update to go 1.20 (#24088)
* build: upgrade to go 1.19 * chore: bump go.mod * chore: `gofmt` changes for doc comments https://tip.golang.org/doc/comment * test: update tests for new sort order * chore: make generate-sources * chore: make generate-sources * chore: go 1.20 * chore: handle rand.Seed deprecation * chore: handle rand.Seed deprecation in tests --------- Co-authored-by: DStrand1 <dstrandboge@influxdata.com>test/monitor-ci-415
parent
8ad6e17265
commit
f74c69c5e4
|
@ -7,7 +7,7 @@ orbs:
|
|||
parameters:
|
||||
cross-container-tag:
|
||||
type: string
|
||||
default: go1.18.9-cb1343dd74ecba8ec07fe810195530a0b9055aa9
|
||||
default: go1.20-0492ad609850ef223390d36ae41a226fe806e83c
|
||||
|
||||
workflow:
|
||||
type: string
|
||||
|
|
|
@ -26,12 +26,12 @@ func (s *tenantService) FindUser(ctx context.Context, filter influxdb.UserFilter
|
|||
return s.FindUserFn(ctx, filter)
|
||||
}
|
||||
|
||||
// FindOrganizationByID calls FindOrganizationByIDF, the stubbed
// implementation injected by the test that constructed this mock.
func (s *tenantService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) {
	return s.FindOrganizationByIDF(ctx, id)
}
|
||||
|
||||
// FindOrganization calls FindOrganizationF, the stubbed
// implementation injected by the test that constructed this mock.
func (s *tenantService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) {
	return s.FindOrganizationF(ctx, filter)
}
|
||||
|
|
|
@ -97,9 +97,12 @@ func authorizeReadSystemBucket(ctx context.Context, bid, oid platform.ID) (influ
|
|||
|
||||
// AuthorizeReadBucket exists because buckets are a special case and should use this method.
|
||||
// I.e., instead of:
|
||||
// AuthorizeRead(ctx, influxdb.BucketsResourceType, b.ID, b.OrgID)
|
||||
//
|
||||
// AuthorizeRead(ctx, influxdb.BucketsResourceType, b.ID, b.OrgID)
|
||||
//
|
||||
// use:
|
||||
// AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID)
|
||||
//
|
||||
// AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID)
|
||||
func AuthorizeReadBucket(ctx context.Context, bt influxdb.BucketType, bid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) {
|
||||
switch bt {
|
||||
case influxdb.BucketTypeSystem:
|
||||
|
|
22
go.mod
22
go.mod
|
@ -1,9 +1,9 @@
|
|||
module github.com/influxdata/influxdb/v2
|
||||
|
||||
go 1.18
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v0.4.1
|
||||
github.com/BurntSushi/toml v1.2.1
|
||||
github.com/Masterminds/squirrel v1.5.0
|
||||
github.com/NYTimes/gziphandler v1.0.1
|
||||
github.com/RoaringBitmap/roaring v0.4.16
|
||||
|
@ -64,15 +64,15 @@ require (
|
|||
go.uber.org/multierr v1.6.0
|
||||
go.uber.org/zap v1.16.0
|
||||
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8
|
||||
golang.org/x/text v0.3.7
|
||||
golang.org/x/sync v0.1.0
|
||||
golang.org/x/sys v0.4.0
|
||||
golang.org/x/text v0.6.0
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
|
||||
golang.org/x/tools v0.1.11
|
||||
golang.org/x/tools v0.5.0
|
||||
google.golang.org/protobuf v1.28.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
honnef.co/go/tools v0.3.0
|
||||
honnef.co/go/tools v0.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
|
@ -206,12 +206,12 @@ require (
|
|||
go.opencensus.io v0.23.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 // indirect
|
||||
golang.org/x/mod v0.7.0 // indirect
|
||||
golang.org/x/net v0.5.0 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
|
||||
golang.org/x/term v0.4.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
|
||||
gonum.org/v1/gonum v0.11.0 // indirect
|
||||
google.golang.org/api v0.47.0 // indirect
|
||||
|
|
39
go.sum
39
go.sum
|
@ -80,8 +80,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
|
|||
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
|
||||
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
|
||||
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM=
|
||||
github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
|
@ -1324,8 +1324,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH
|
|||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 h1:/5Bs7sWi0i3rOVO5KnM55OwugpsD4bRW1zywKoZjbkI=
|
||||
golang.org/x/exp v0.0.0-20211216164055-b2b84827b756/go.mod h1:b9TAUYHmRtqA6klRHApnXMnj+OyLce4yF5cZCUbk2ps=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE=
|
||||
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
|
@ -1364,8 +1364,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -1425,8 +1425,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc=
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1453,8 +1453,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1564,14 +1564,14 @@ golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1581,8 +1581,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1664,8 +1665,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=
|
||||
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
|
||||
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -1880,8 +1881,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
|
|||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
|
||||
honnef.co/go/tools v0.3.0 h1:2LdYUZ7CIxnYgskbUZfY7FPggmqnh6shBqfWa8Tn3XU=
|
||||
honnef.co/go/tools v0.3.0/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70=
|
||||
honnef.co/go/tools v0.4.0 h1:lyXVV1c8wUBJRKqI8JgIpT8TW1VDagfYYaxbKa/HoL8=
|
||||
honnef.co/go/tools v0.4.0/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA=
|
||||
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
|
||||
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
|
||||
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
|
||||
|
|
|
@ -841,7 +841,8 @@ func (s *CheckService) DeleteCheck(ctx context.Context, id platform.ID) error {
|
|||
}
|
||||
|
||||
// TODO(gavincabbage): These structures should be in a common place, like other models,
|
||||
// but the common influxdb.Check is an interface that is not appropriate for an API client.
|
||||
//
|
||||
// but the common influxdb.Check is an interface that is not appropriate for an API client.
|
||||
type Checks struct {
|
||||
Checks []*Check `json:"checks"`
|
||||
Links *influxdb.PagingLinks `json:"links"`
|
||||
|
|
|
@ -611,7 +611,8 @@ func (s *NotificationEndpointService) FindNotificationEndpoints(ctx context.Cont
|
|||
|
||||
// CreateNotificationEndpoint creates a new notification endpoint and sets b.ID with the new identifier.
|
||||
// TODO(@jsteenb2): this is unsatisfactory, we have no way of grabbing the new notification endpoint without
|
||||
// serious hacky hackertoning. Put it on the list...
|
||||
//
|
||||
// serious hacky hackertoning. Put it on the list...
|
||||
func (s *NotificationEndpointService) CreateNotificationEndpoint(ctx context.Context, ne influxdb.NotificationEndpoint, userID platform.ID) error {
|
||||
var resp notificationEndpointDecoder
|
||||
err := s.Client.
|
||||
|
@ -667,9 +668,10 @@ func (s *NotificationEndpointService) PatchNotificationEndpoint(ctx context.Cont
|
|||
|
||||
// DeleteNotificationEndpoint removes a notification endpoint by ID, returns secret fields, orgID for further deletion.
|
||||
// TODO: axe this delete design, makes little sense in how its currently being done. Right now, as an http client,
|
||||
// I am forced to know how the store handles this and then figure out what the server does in between me and that store,
|
||||
// then see what falls out :flushed... for now returning nothing for secrets, orgID, and only returning an error. This makes
|
||||
// the code/design smell super obvious imo
|
||||
//
|
||||
// I am forced to know how the store handles this and then figure out what the server does in between me and that store,
|
||||
// then see what falls out :flushed... for now returning nothing for secrets, orgID, and only returning an error. This makes
|
||||
// the code/design smell super obvious imo
|
||||
func (s *NotificationEndpointService) DeleteNotificationEndpoint(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) {
|
||||
if !id.Valid() {
|
||||
return nil, 0, fmt.Errorf("invalid ID: please provide a valid ID")
|
||||
|
|
|
@ -1212,8 +1212,8 @@ func (r *UnsignedCumulativeSumReducer) Emit() []UnsignedPoint {
|
|||
|
||||
// FloatHoltWintersReducer forecasts a series into the future.
|
||||
// This is done using the Holt-Winters damped method.
|
||||
// 1. Using the series the initial values are calculated using a SSE.
|
||||
// 2. The series is forecasted into the future using the iterative relations.
|
||||
// 1. Using the series the initial values are calculated using a SSE.
|
||||
// 2. The series is forecasted into the future using the iterative relations.
|
||||
type FloatHoltWintersReducer struct {
|
||||
// Season period
|
||||
m int
|
||||
|
|
|
@ -404,11 +404,10 @@ func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) {
|
|||
|
||||
// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems.
|
||||
// Items are sorted with the following priority:
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
//
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
type floatSortedMergeHeap struct {
|
||||
opt IteratorOptions
|
||||
items []*floatSortedMergeHeapItem
|
||||
|
@ -3068,11 +3067,10 @@ func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) {
|
|||
|
||||
// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems.
|
||||
// Items are sorted with the following priority:
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
//
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
type integerSortedMergeHeap struct {
|
||||
opt IteratorOptions
|
||||
items []*integerSortedMergeHeapItem
|
||||
|
@ -5732,11 +5730,10 @@ func (itr *unsignedSortedMergeIterator) pop() (*UnsignedPoint, error) {
|
|||
|
||||
// unsignedSortedMergeHeap represents a heap of unsignedSortedMergeHeapItems.
|
||||
// Items are sorted with the following priority:
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
//
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
type unsignedSortedMergeHeap struct {
|
||||
opt IteratorOptions
|
||||
items []*unsignedSortedMergeHeapItem
|
||||
|
@ -8396,11 +8393,10 @@ func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) {
|
|||
|
||||
// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems.
|
||||
// Items are sorted with the following priority:
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
//
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
type stringSortedMergeHeap struct {
|
||||
opt IteratorOptions
|
||||
items []*stringSortedMergeHeapItem
|
||||
|
@ -11046,11 +11042,10 @@ func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) {
|
|||
|
||||
// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems.
|
||||
// Items are sorted with the following priority:
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
//
|
||||
// - By their measurement name;
|
||||
// - By their tag keys/values;
|
||||
// - By time; or
|
||||
// - By their Aux field values.
|
||||
type booleanSortedMergeHeap struct {
|
||||
opt IteratorOptions
|
||||
items []*booleanSortedMergeHeapItem
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
package rand
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// LockedSource is a goroutine-safe rand.Source, modeled on the unexported
// locked source inside the Go "math/rand" package. The package-level rand
// functions guard their shared source the same way, so routing randomness
// through a LockedSource introduces no locking beyond what those default
// functions already incur.
type LockedSource struct {
	lk  sync.Mutex
	src rand.Source
}

// NewLockedSourceFromSeed returns a LockedSource whose underlying
// source has been seeded with the provided value.
func NewLockedSourceFromSeed(seed int64) *LockedSource {
	src := rand.NewSource(seed)
	return &LockedSource{src: src}
}

// Int63 returns the next pseudo-random 63-bit integer from the
// underlying source, serializing access with the mutex.
func (s *LockedSource) Int63() int64 {
	s.lk.Lock()
	defer s.lk.Unlock()
	return s.src.Int63()
}

// Seed reseeds the underlying source to a deterministic state,
// serializing access with the mutex.
func (s *LockedSource) Seed(seed int64) {
	s.lk.Lock()
	defer s.lk.Unlock()
	s.src.Seed(seed)
}
|
|
@ -3,46 +3,45 @@
|
|||
// This is a small simplification over viper to move most of the boilerplate
|
||||
// into one place.
|
||||
//
|
||||
//
|
||||
// In this example the flags can be set with MYPROGRAM_MONITOR_HOST and
|
||||
// MYPROGRAM_NUMBER or with the flags --monitor-host and --number
|
||||
//
|
||||
// var flags struct {
|
||||
// monitorHost string
|
||||
// number int
|
||||
// }
|
||||
// var flags struct {
|
||||
// monitorHost string
|
||||
// number int
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// cmd := cli.NewCommand(&cli.Program{
|
||||
// Run: run,
|
||||
// Name: "myprogram",
|
||||
// Opts: []cli.Opt{
|
||||
// {
|
||||
// DestP: &flags.monitorHost,
|
||||
// Flag: "monitor-host",
|
||||
// Default: "http://localhost:8086",
|
||||
// Desc: "host to send influxdb metrics",
|
||||
// },
|
||||
// {
|
||||
// DestP: &flags.number,
|
||||
// Flag: "number",
|
||||
// Default: 2,
|
||||
// Desc: "number of times to loop",
|
||||
// func main() {
|
||||
// cmd := cli.NewCommand(&cli.Program{
|
||||
// Run: run,
|
||||
// Name: "myprogram",
|
||||
// Opts: []cli.Opt{
|
||||
// {
|
||||
// DestP: &flags.monitorHost,
|
||||
// Flag: "monitor-host",
|
||||
// Default: "http://localhost:8086",
|
||||
// Desc: "host to send influxdb metrics",
|
||||
// },
|
||||
// {
|
||||
// DestP: &flags.number,
|
||||
// Flag: "number",
|
||||
// Default: 2,
|
||||
// Desc: "number of times to loop",
|
||||
//
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
//
|
||||
// if err := cmd.Execute(); err != nil {
|
||||
// fmt.Fprintln(os.Stderr, err)
|
||||
// os.Exit(1)
|
||||
// }
|
||||
// }
|
||||
// if err := cmd.Execute(); err != nil {
|
||||
// fmt.Fprintln(os.Stderr, err)
|
||||
// os.Exit(1)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// func run() error {
|
||||
// for i := 0; i < number; i++ {
|
||||
// fmt.Printf("%d\n", i)
|
||||
// return nil
|
||||
// }
|
||||
// }
|
||||
// func run() error {
|
||||
// for i := 0; i < number; i++ {
|
||||
// fmt.Printf("%d\n", i)
|
||||
// return nil
|
||||
// }
|
||||
// }
|
||||
package cli
|
||||
|
|
|
@ -32,33 +32,39 @@
|
|||
// First, I add an entry to `flags.yml`.
|
||||
//
|
||||
// ```yaml
|
||||
// - name: My Feature
|
||||
// description: My feature is awesome
|
||||
// key: myFeature
|
||||
// default: false
|
||||
// expose: true
|
||||
// contact: My Name
|
||||
// - name: My Feature
|
||||
// description: My feature is awesome
|
||||
// key: myFeature
|
||||
// default: false
|
||||
// expose: true
|
||||
// contact: My Name
|
||||
//
|
||||
// ```
|
||||
//
|
||||
// My flag type is inferred to be boolean by my default of `false` when I run
|
||||
// `make flags` and the `feature` package now includes `func MyFeature() BoolFlag`.
|
||||
//
|
||||
// I use this to control my backend code with
|
||||
// I use this to control my backend code with
|
||||
//
|
||||
// ```go
|
||||
// if feature.MyFeature.Enabled(ctx) {
|
||||
// // new code...
|
||||
// } else {
|
||||
// // old code...
|
||||
// }
|
||||
//
|
||||
// if feature.MyFeature.Enabled(ctx) {
|
||||
// // new code...
|
||||
// } else {
|
||||
//
|
||||
// // old code...
|
||||
// }
|
||||
//
|
||||
// ```
|
||||
//
|
||||
// and the `/api/v2/flags` response provides the same information to the frontend.
|
||||
//
|
||||
// ```json
|
||||
// {
|
||||
// "myFeature": false
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "myFeature": false
|
||||
// }
|
||||
//
|
||||
// ```
|
||||
//
|
||||
// While `false` by default, I can turn on my experimental feature by starting
|
||||
|
@ -71,5 +77,4 @@
|
|||
// ```
|
||||
// influxd --feature-flags flag1=value1,flag2=value2
|
||||
// ```
|
||||
//
|
||||
package feature
|
||||
|
|
|
@ -78,8 +78,9 @@ func ExposedFlagsFromContext(ctx context.Context, byKey ByKeyFn) map[string]inte
|
|||
// to be removed, e.g. enabling debug tracing for an organization.
|
||||
//
|
||||
// TODO(gavincabbage): This may become a stale date, which can then
|
||||
// be used to trigger a notification to the contact when the flag
|
||||
// has become stale, to encourage flag cleanup.
|
||||
//
|
||||
// be used to trigger a notification to the contact when the flag
|
||||
// has become stale, to encourage flag cleanup.
|
||||
type Lifetime int
|
||||
|
||||
const (
|
||||
|
|
|
@ -39,24 +39,31 @@ const (
|
|||
// further help operators.
|
||||
//
|
||||
// To create a simple error,
|
||||
// &Error{
|
||||
// Code:ENotFound,
|
||||
// }
|
||||
//
|
||||
// &Error{
|
||||
// Code:ENotFound,
|
||||
// }
|
||||
//
|
||||
// To show where the error happens, add Op.
|
||||
// &Error{
|
||||
// Code: ENotFound,
|
||||
// Op: "bolt.FindUserByID"
|
||||
// }
|
||||
//
|
||||
// &Error{
|
||||
// Code: ENotFound,
|
||||
// Op: "bolt.FindUserByID"
|
||||
// }
|
||||
//
|
||||
// To show an error with a unpredictable value, add the value in Msg.
|
||||
// &Error{
|
||||
// Code: EConflict,
|
||||
// Message: fmt.Sprintf("organization with name %s already exist", aName),
|
||||
// }
|
||||
//
|
||||
// &Error{
|
||||
// Code: EConflict,
|
||||
// Message: fmt.Sprintf("organization with name %s already exist", aName),
|
||||
// }
|
||||
//
|
||||
// To show an error wrapped with another error.
|
||||
// &Error{
|
||||
// Code:EInternal,
|
||||
// Err: err,
|
||||
// }.
|
||||
//
|
||||
// &Error{
|
||||
// Code:EInternal,
|
||||
// Err: err,
|
||||
// }.
|
||||
type Error struct {
|
||||
Code string
|
||||
Msg string
|
||||
|
|
|
@ -19,7 +19,8 @@ import (
|
|||
|
||||
// LogError adds a span log for an error.
|
||||
// Returns unchanged error, so useful to wrap as in:
|
||||
// return 0, tracing.LogError(err)
|
||||
//
|
||||
// return 0, tracing.LogError(err)
|
||||
func LogError(span opentracing.Span, err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
|
@ -115,24 +116,25 @@ func (s *Span) Finish() {
|
|||
// Context without parent span reference triggers root span construction.
|
||||
// This function never returns nil values.
|
||||
//
|
||||
// Performance
|
||||
// # Performance
|
||||
//
|
||||
// This function incurs a small performance penalty, roughly 1000 ns/op, 376 B/op, 6 allocs/op.
|
||||
// Jaeger timestamp and duration precision is only µs, so this is pretty negligible.
|
||||
//
|
||||
// Alternatives
|
||||
// # Alternatives
|
||||
//
|
||||
// If this performance penalty is too much, try these, which are also demonstrated in benchmark tests:
|
||||
// // Create a root span
|
||||
// span := opentracing.StartSpan("operation name")
|
||||
// ctx := opentracing.ContextWithSpan(context.Background(), span)
|
||||
//
|
||||
// // Create a child span
|
||||
// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc))
|
||||
// ctx := opentracing.ContextWithSpan(context.Background(), span)
|
||||
// // Create a root span
|
||||
// span := opentracing.StartSpan("operation name")
|
||||
// ctx := opentracing.ContextWithSpan(context.Background(), span)
|
||||
//
|
||||
// // Sugar to create a child span
|
||||
// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name")
|
||||
// // Create a child span
|
||||
// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc))
|
||||
// ctx := opentracing.ContextWithSpan(context.Background(), span)
|
||||
//
|
||||
// // Sugar to create a child span
|
||||
// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name")
|
||||
func StartSpanFromContext(ctx context.Context, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
|
||||
if ctx == nil {
|
||||
panic("StartSpanFromContext called with nil context")
|
||||
|
|
44
kv/index.go
44
kv/index.go
|
@ -17,37 +17,37 @@ import (
|
|||
//
|
||||
// The following is an illustration of its use:
|
||||
//
|
||||
// byUserID := func(v []byte) ([]byte, error) {
|
||||
// auth := &influxdb.Authorization{}
|
||||
// byUserID := func(v []byte) ([]byte, error) {
|
||||
// auth := &influxdb.Authorization{}
|
||||
//
|
||||
// if err := json.Unmarshal(v, auth); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := json.Unmarshal(v, auth); err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// return auth.UserID.Encode()
|
||||
// }
|
||||
// return auth.UserID.Encode()
|
||||
// }
|
||||
//
|
||||
// // configure a write only index
|
||||
// indexByUser := NewIndex(NewSource([]byte(`authorizationsbyuserv1/), byUserID))
|
||||
// // configure a write only index
|
||||
// indexByUser := NewIndex(NewSource([]byte(`authorizationsbyuserv1/), byUserID))
|
||||
//
|
||||
// indexByUser.Insert(tx, someUserID, someAuthID)
|
||||
// indexByUser.Insert(tx, someUserID, someAuthID)
|
||||
//
|
||||
// indexByUser.Delete(tx, someUserID, someAuthID)
|
||||
// indexByUser.Delete(tx, someUserID, someAuthID)
|
||||
//
|
||||
// indexByUser.Walk(tx, someUserID, func(k, v []byte) error {
|
||||
// auth := &influxdb.Authorization{}
|
||||
// if err := json.Unmarshal(v, auth); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// indexByUser.Walk(tx, someUserID, func(k, v []byte) error {
|
||||
// auth := &influxdb.Authorization{}
|
||||
// if err := json.Unmarshal(v, auth); err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// // do something with auth
|
||||
// // do something with auth
|
||||
//
|
||||
// return nil
|
||||
// })
|
||||
// return nil
|
||||
// })
|
||||
//
|
||||
// // verify the current index against the source and return the differences
|
||||
// // found in each
|
||||
// diff, err := indexByUser.Verify(ctx, tx)
|
||||
// // verify the current index against the source and return the differences
|
||||
// // found in each
|
||||
// diff, err := indexByUser.Verify(ctx, tx)
|
||||
type Index struct {
|
||||
IndexMapping
|
||||
|
||||
|
|
|
@ -9,11 +9,11 @@
|
|||
//
|
||||
// This package is arranged like so:
|
||||
//
|
||||
// doc.go - this piece of documentation.
|
||||
// all.go - definition of Migration array referencing each of the name migrations in number migration files (below).
|
||||
// migration.go - an implementation of migration.Spec for convenience.
|
||||
// 000X_migration_name.go (example) - N files contains the specific implementations of each migration enumerated in `all.go`.
|
||||
// ...
|
||||
// doc.go - this piece of documentation.
|
||||
// all.go - definition of Migration array referencing each of the name migrations in number migration files (below).
|
||||
// migration.go - an implementation of migration.Spec for convenience.
|
||||
// 000X_migration_name.go (example) - N files contains the specific implementations of each migration enumerated in `all.go`.
|
||||
// ...
|
||||
//
|
||||
// Managing this list of files and all.go can be fiddly.
|
||||
// There is a buildable cli utility called `kvmigrate` in the `internal/cmd/kvmigrate` package.
|
||||
|
|
|
@ -39,17 +39,17 @@ func NewOrganizationService() *OrganizationService {
|
|||
}
|
||||
}
|
||||
|
||||
//FindOrganizationByID calls FindOrganizationByIDF.
|
||||
// FindOrganizationByID calls FindOrganizationByIDF.
|
||||
func (s *OrganizationService) FindOrganizationByID(ctx context.Context, id platform2.ID) (*platform.Organization, error) {
|
||||
return s.FindOrganizationByIDF(ctx, id)
|
||||
}
|
||||
|
||||
//FindOrganization calls FindOrganizationF.
|
||||
// FindOrganization calls FindOrganizationF.
|
||||
func (s *OrganizationService) FindOrganization(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
|
||||
return s.FindOrganizationF(ctx, filter)
|
||||
}
|
||||
|
||||
//FindOrganizations calls FindOrganizationsF.
|
||||
// FindOrganizations calls FindOrganizationsF.
|
||||
func (s *OrganizationService) FindOrganizations(ctx context.Context, filter platform.OrganizationFilter, opt ...platform.FindOptions) ([]*platform.Organization, int, error) {
|
||||
return s.FindOrganizationsF(ctx, filter, opt...)
|
||||
}
|
||||
|
|
|
@ -230,7 +230,7 @@ func BenchmarkTagKeysSet_UnionBytes(b *testing.B) {
|
|||
bytes.Split([]byte("tag04,tag05"), commaB),
|
||||
}
|
||||
|
||||
rand.Seed(20040409)
|
||||
seededRand := rand.New(rand.NewSource(20040409))
|
||||
|
||||
tests := []int{
|
||||
10,
|
||||
|
@ -245,7 +245,7 @@ func BenchmarkTagKeysSet_UnionBytes(b *testing.B) {
|
|||
var km models.TagKeysSet
|
||||
for i := 0; i < b.N; i++ {
|
||||
for j := 0; j < n; j++ {
|
||||
km.UnionBytes(keys[rand.Int()%len(keys)])
|
||||
km.UnionBytes(keys[seededRand.Int()%len(keys)])
|
||||
}
|
||||
km.Clear()
|
||||
}
|
||||
|
|
|
@ -47,20 +47,20 @@ const MaxWritesPending = 1024
|
|||
// queues can have a max size configured such that when the size of all
|
||||
// segments on disk exceeds the size, write will fail.
|
||||
//
|
||||
// ┌─────┐
|
||||
// │Head │
|
||||
// ├─────┘
|
||||
// │
|
||||
// ▼
|
||||
// ┌─────────────────┐ ┌─────────────────┐┌─────────────────┐
|
||||
// │Segment 1 - 10MB │ │Segment 2 - 10MB ││Segment 3 - 10MB │
|
||||
// └─────────────────┘ └─────────────────┘└─────────────────┘
|
||||
// ▲
|
||||
// │
|
||||
// │
|
||||
// ┌─────┐
|
||||
// │Tail │
|
||||
// └─────┘
|
||||
// ┌─────┐
|
||||
// │Head │
|
||||
// ├─────┘
|
||||
// │
|
||||
// ▼
|
||||
// ┌─────────────────┐ ┌─────────────────┐┌─────────────────┐
|
||||
// │Segment 1 - 10MB │ │Segment 2 - 10MB ││Segment 3 - 10MB │
|
||||
// └─────────────────┘ └─────────────────┘└─────────────────┘
|
||||
// ▲
|
||||
// │
|
||||
// │
|
||||
// ┌─────┐
|
||||
// │Tail │
|
||||
// └─────┘
|
||||
type Queue struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
|
@ -609,13 +609,13 @@ func (l *Queue) trimHead(force bool) error {
|
|||
// lengths + block with a single footer point to the position in the segment of the
|
||||
// current Head block.
|
||||
//
|
||||
// ┌──────────────────────────┐ ┌──────────────────────────┐ ┌────────────┐
|
||||
// │ Block 1 │ │ Block 2 │ │ Footer │
|
||||
// └──────────────────────────┘ └──────────────────────────┘ └────────────┘
|
||||
// ┌────────────┐┌────────────┐ ┌────────────┐┌────────────┐ ┌────────────┐
|
||||
// │Block 1 Len ││Block 1 Body│ │Block 2 Len ││Block 2 Body│ │Head Offset │
|
||||
// │ 8 bytes ││ N bytes │ │ 8 bytes ││ N bytes │ │ 8 bytes │
|
||||
// └────────────┘└────────────┘ └────────────┘└────────────┘ └────────────┘
|
||||
// ┌──────────────────────────┐ ┌──────────────────────────┐ ┌────────────┐
|
||||
// │ Block 1 │ │ Block 2 │ │ Footer │
|
||||
// └──────────────────────────┘ └──────────────────────────┘ └────────────┘
|
||||
// ┌────────────┐┌────────────┐ ┌────────────┐┌────────────┐ ┌────────────┐
|
||||
// │Block 1 Len ││Block 1 Body│ │Block 2 Len ││Block 2 Body│ │Head Offset │
|
||||
// │ 8 bytes ││ N bytes │ │ 8 bytes ││ N bytes │ │ 8 bytes │
|
||||
// └────────────┘└────────────┘ └────────────┘└────────────┘ └────────────┘
|
||||
//
|
||||
// The footer holds the pointer to the Head entry at the end of the segment to allow writes
|
||||
// to seek to the end and write sequentially (vs having to seek back to the beginning of
|
||||
|
|
|
@ -396,11 +396,11 @@ func TestQueue_TotalBytes(t *testing.T) {
|
|||
|
||||
// This test verifies the queue will advance in the following scenario:
|
||||
//
|
||||
// * There is one segment
|
||||
// * The segment is not full
|
||||
// * The segment record size entry is corrupted, resulting in
|
||||
// currentRecordSize + pos > fileSize and
|
||||
// therefore the Advance would fail.
|
||||
// - There is one segment
|
||||
// - The segment is not full
|
||||
// - The segment record size entry is corrupted, resulting in
|
||||
// currentRecordSize + pos > fileSize and
|
||||
// therefore the Advance would fail.
|
||||
func TestQueue_AdvanceSingleCorruptSegment(t *testing.T) {
|
||||
q, dir := newTestQueue(t, withVerify(func([]byte) error { return nil }))
|
||||
defer os.RemoveAll(dir)
|
||||
|
|
|
@ -468,8 +468,9 @@ func Decode(dst *[240]uint64, v uint64) (n int, err error) {
|
|||
|
||||
// Decode writes the uncompressed values from src to dst. It returns the number
|
||||
// of values written or an error.
|
||||
//go:nocheckptr
|
||||
// nocheckptr while the underlying struct layout doesn't change
|
||||
//
|
||||
//go:nocheckptr
|
||||
func DecodeAll(dst, src []uint64) (value int, err error) {
|
||||
j := 0
|
||||
for _, v := range src {
|
||||
|
@ -482,8 +483,9 @@ func DecodeAll(dst, src []uint64) (value int, err error) {
|
|||
|
||||
// DecodeBytesBigEndian writes the compressed, big-endian values from src to dst. It returns the number
|
||||
// of values written or an error.
|
||||
//go:nocheckptr
|
||||
// nocheckptr while the underlying struct layout doesn't change
|
||||
//
|
||||
//go:nocheckptr
|
||||
func DecodeBytesBigEndian(dst []uint64, src []byte) (value int, err error) {
|
||||
if len(src)&7 != 0 {
|
||||
return 0, errors.New("src length is not multiple of 8")
|
||||
|
|
|
@ -76,6 +76,8 @@ func combine(fns ...func() []uint64) func() []uint64 {
|
|||
// TestEncodeAll ensures 100% test coverage of simple8b.EncodeAll and
|
||||
// verifies all output by comparing the original input with the output of simple8b.DecodeAll
|
||||
func TestEncodeAll(t *testing.T) {
|
||||
//lint:ignore SA1019 This function was deprecated for good reasons that aren't important to us since its just used for testing.
|
||||
// Ignoring seems better than all the effort to address the underlying concern. https://github.com/golang/go/issues/56319
|
||||
rand.Seed(0)
|
||||
|
||||
tests := []struct {
|
||||
|
|
|
@ -2,11 +2,12 @@ package errors
|
|||
|
||||
// Capture is a wrapper function which can be used to capture errors from closing via a defer.
|
||||
// An example:
|
||||
// func Example() (err error) {
|
||||
// f, _ := os.Open(...)
|
||||
// defer errors.Capture(&err, f.Close)()
|
||||
// ...
|
||||
// return
|
||||
//
|
||||
// func Example() (err error) {
|
||||
// f, _ := os.Open(...)
|
||||
// defer errors.Capture(&err, f.Close)()
|
||||
// ...
|
||||
// return
|
||||
//
|
||||
// Doing this will result in the error from the f.Close() call being
|
||||
// put in the error via a ptr, if the error is not nil
|
||||
|
|
|
@ -4,10 +4,10 @@
|
|||
//
|
||||
// The differences are that the implementation in this package:
|
||||
//
|
||||
// * uses an AMD64 optimised xxhash algorithm instead of murmur;
|
||||
// * uses some AMD64 optimisations for things like clz;
|
||||
// * works with []byte rather than a Hash64 interface, to reduce allocations;
|
||||
// * implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
|
||||
// - uses an AMD64 optimised xxhash algorithm instead of murmur;
|
||||
// - uses some AMD64 optimisations for things like clz;
|
||||
// - works with []byte rather than a Hash64 interface, to reduce allocations;
|
||||
// - implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
|
||||
//
|
||||
// Based on some rough benchmarking, this implementation of HyperLogLog++ is
|
||||
// around twice as fast as the github.com/clarkduvall/hyperloglog implementation.
|
||||
|
|
|
@ -3,7 +3,7 @@ Package tracing provides a way for capturing hierarchical traces.
|
|||
|
||||
To start a new trace with a root span named select
|
||||
|
||||
trace, span := tracing.NewTrace("select")
|
||||
trace, span := tracing.NewTrace("select")
|
||||
|
||||
It is recommended that a span be forwarded to callees using the
|
||||
context package. Firstly, create a new context with the span associated
|
||||
|
@ -21,6 +21,5 @@ Once the trace is complete, it may be converted to a graph with the Tree method.
|
|||
|
||||
The tree is intended to be used with the Walk function in order to generate
|
||||
different presentations. The default Tree#String method returns a tree.
|
||||
|
||||
*/
|
||||
package tracing
|
||||
|
|
|
@ -50,7 +50,7 @@ func Bool(key string, val bool) Field {
|
|||
}
|
||||
}
|
||||
|
||||
/// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
|
||||
// / Int64 adds an int64-valued key:value pair to a Span.LogFields() record
|
||||
func Int64(key string, val int64) Field {
|
||||
return Field{
|
||||
key: key,
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//Package wire is used to serialize a trace.
|
||||
// Package wire is used to serialize a trace.
|
||||
package wire
|
||||
|
||||
//go:generate protoc --go_out=. binary.proto
|
||||
|
|
|
@ -276,6 +276,7 @@ type Field struct {
|
|||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
FieldType FieldType `protobuf:"varint,2,opt,name=FieldType,proto3,enum=wire.FieldType" json:"FieldType,omitempty"`
|
||||
// Types that are assignable to Value:
|
||||
//
|
||||
// *Field_NumericVal
|
||||
// *Field_StringVal
|
||||
Value isField_Value `protobuf_oneof:"value"`
|
||||
|
|
|
@ -4,10 +4,15 @@ import (
|
|||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
rand2 "github.com/influxdata/influxdb/v2/internal/rand"
|
||||
)
|
||||
|
||||
var seededRand *rand.Rand
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
lockedSource := rand2.NewLockedSourceFromSeed(time.Now().UnixNano())
|
||||
seededRand = rand.New(lockedSource)
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -878,5 +883,5 @@ var (
|
|||
// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random
|
||||
// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3`
|
||||
func GetRandomName() string {
|
||||
return fmt.Sprintf("%s-%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))])
|
||||
return fmt.Sprintf("%s-%s", left[seededRand.Intn(len(left))], right[seededRand.Intn(len(right))])
|
||||
}
|
||||
|
|
|
@ -1182,8 +1182,8 @@ type color struct {
|
|||
}
|
||||
|
||||
// TODO:
|
||||
// - verify templates are desired
|
||||
// - template colors so references can be shared
|
||||
// - verify templates are desired
|
||||
// - template colors so references can be shared
|
||||
type colors []*color
|
||||
|
||||
func (c colors) influxViewColors() []influxdb.ViewColor {
|
||||
|
@ -1218,8 +1218,9 @@ func (c colors) strings() []string {
|
|||
}
|
||||
|
||||
// TODO: looks like much of these are actually getting defaults in
|
||||
// the UI. looking at system charts, seeing lots of failures for missing
|
||||
// color types or no colors at all.
|
||||
//
|
||||
// the UI. looking at system charts, seeing lots of failures for missing
|
||||
// color types or no colors at all.
|
||||
func (c colors) hasTypes(types ...string) []validationErr {
|
||||
tMap := make(map[string]bool)
|
||||
for _, cc := range c {
|
||||
|
|
|
@ -27,13 +27,13 @@ func SetGlobalProfiling(enabled bool) {
|
|||
}
|
||||
|
||||
// collectAllProfiles generates a tarball containing:
|
||||
// - goroutine profile
|
||||
// - blocking profile
|
||||
// - mutex profile
|
||||
// - heap profile
|
||||
// - allocations profile
|
||||
// - (optionally) trace profile
|
||||
// - (optionally) CPU profile
|
||||
// - goroutine profile
|
||||
// - blocking profile
|
||||
// - mutex profile
|
||||
// - heap profile
|
||||
// - allocations profile
|
||||
// - (optionally) trace profile
|
||||
// - (optionally) CPU profile
|
||||
//
|
||||
// All information is added to a tar archive and then compressed, before being
|
||||
// returned to the requester as an archive file. Where profiles support debug
|
||||
|
|
|
@ -20,7 +20,7 @@ type EventRecorder struct {
|
|||
// descriptive of the type of metric being recorded. Possible values may include write, query,
|
||||
// task, dashboard, etc.
|
||||
//
|
||||
// The general structure of the metrics produced from the metric recorder should be
|
||||
// # The general structure of the metrics produced from the metric recorder should be
|
||||
//
|
||||
// http_<subsystem>_request_count{org_id=<org_id>, status=<status>, endpoint=<endpoint>} ...
|
||||
// http_<subsystem>_request_bytes{org_id=<org_id>, status=<status>, endpoint=<endpoint>} ...
|
||||
|
|
|
@ -72,9 +72,11 @@ func (e *NoContentEncoder) Encode(w io.Writer, results flux.ResultIterator) (int
|
|||
// Otherwise one can decode the response body to get the error. For example:
|
||||
// ```
|
||||
// _, err = csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(bytes.NewReader(res))
|
||||
// if err != nil {
|
||||
// // we got some runtime error
|
||||
// }
|
||||
//
|
||||
// if err != nil {
|
||||
// // we got some runtime error
|
||||
// }
|
||||
//
|
||||
// ```
|
||||
type NoContentWithErrorDialect struct {
|
||||
csv.ResultEncoderConfig
|
||||
|
|
|
@ -656,10 +656,8 @@ func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bo
|
|||
return pn, false, nil
|
||||
}
|
||||
|
||||
//
|
||||
// Push Down of window aggregates.
|
||||
// ReadRangePhys |> window |> { min, max, mean, count, sum }
|
||||
//
|
||||
type PushDownWindowAggregateRule struct{}
|
||||
|
||||
func (PushDownWindowAggregateRule) Name() string {
|
||||
|
@ -1040,10 +1038,8 @@ func (p GroupWindowAggregateTransposeRule) Rewrite(ctx context.Context, pn plan.
|
|||
return fnNode, true, nil
|
||||
}
|
||||
|
||||
//
|
||||
// Push Down of group aggregates.
|
||||
// ReadGroupPhys |> { count }
|
||||
//
|
||||
type PushDownGroupAggregateRule struct{}
|
||||
|
||||
func (PushDownGroupAggregateRule) Name() string {
|
||||
|
|
|
@ -6,13 +6,17 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
rand2 "github.com/influxdata/influxdb/v2/internal/rand"
|
||||
platform2 "github.com/influxdata/influxdb/v2/kit/platform"
|
||||
"github.com/influxdata/influxdb/v2/pkg/snowflake"
|
||||
)
|
||||
|
||||
var seededRand *rand.Rand
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
SetGlobalMachineID(rand.Intn(1023))
|
||||
lockedSource := rand2.NewLockedSourceFromSeed(time.Now().UnixNano())
|
||||
seededRand = rand.New(lockedSource)
|
||||
SetGlobalMachineID(seededRand.Intn(1023))
|
||||
}
|
||||
|
||||
var globalmachineID struct {
|
||||
|
@ -79,7 +83,8 @@ func NewIDGenerator(opts ...IDGeneratorOp) *IDGenerator {
|
|||
f(gen)
|
||||
}
|
||||
if gen.Generator == nil {
|
||||
gen.Generator = snowflake.New(rand.Intn(1023))
|
||||
machineId := seededRand.Intn(1023)
|
||||
gen.Generator = snowflake.New(machineId)
|
||||
}
|
||||
return gen
|
||||
}
|
||||
|
|
|
@ -200,6 +200,7 @@ type Node struct {
|
|||
NodeType Node_Type `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=influxdata.platform.storage.Node_Type" json:"node_type,omitempty"` // [(gogoproto.customname) = "NodeType", (gogoproto.jsontag) = "nodeType"];
|
||||
Children []*Node `protobuf:"bytes,2,rep,name=children,proto3" json:"children,omitempty"`
|
||||
// Types that are assignable to Value:
|
||||
//
|
||||
// *Node_StringValue
|
||||
// *Node_BooleanValue
|
||||
// *Node_IntegerValue
|
||||
|
|
|
@ -1620,6 +1620,7 @@ type ReadResponse_Frame struct {
|
|||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Types that are assignable to Data:
|
||||
//
|
||||
// *ReadResponse_Frame_Group
|
||||
// *ReadResponse_Frame_Series
|
||||
// *ReadResponse_Frame_FloatPoints
|
||||
|
|
|
@ -151,13 +151,13 @@ group:
|
|||
group:
|
||||
tag key : _m,tag0,tag1
|
||||
partition key: val11
|
||||
series: _m=cpu,tag0=val01,tag1=val11
|
||||
series: _m=cpu,tag0=val00,tag1=val11
|
||||
series: _m=cpu,tag0=val01,tag1=val11
|
||||
group:
|
||||
tag key : _m,tag0,tag1
|
||||
partition key: val12
|
||||
series: _m=cpu,tag0=val01,tag1=val12
|
||||
series: _m=cpu,tag0=val00,tag1=val12
|
||||
series: _m=cpu,tag0=val01,tag1=val12
|
||||
group:
|
||||
tag key : _m,tag0
|
||||
partition key: <nil>
|
||||
|
@ -347,8 +347,8 @@ group:
|
|||
exp: `group:
|
||||
tag key : _m,tag1,tag2
|
||||
partition key: <nil>,val20
|
||||
series: _m=mem,tag1=val11,tag2=val20
|
||||
series: _m=mem,tag1=val10,tag2=val20
|
||||
series: _m=mem,tag1=val11,tag2=val20
|
||||
group:
|
||||
tag key : _m,tag1,tag2
|
||||
partition key: <nil>,val21
|
||||
|
@ -356,10 +356,10 @@ group:
|
|||
group:
|
||||
tag key : _m,tag0,tag1
|
||||
partition key: val00,<nil>
|
||||
series: _m=aaa,tag0=val00
|
||||
series: _m=cpu,tag0=val00,tag1=val10
|
||||
series: _m=cpu,tag0=val00,tag1=val11
|
||||
series: _m=cpu,tag0=val00,tag1=val12
|
||||
series: _m=aaa,tag0=val00
|
||||
group:
|
||||
tag key : _m,tag0
|
||||
partition key: val01,<nil>
|
||||
|
|
|
@ -146,7 +146,7 @@ func BenchmarkKeyMerger_MergeKeys(b *testing.B) {
|
|||
bytes.Split([]byte("tag04,tag05"), commaB),
|
||||
}
|
||||
|
||||
rand.Seed(20040409)
|
||||
seededRand := rand.New(rand.NewSource(20040409))
|
||||
|
||||
tests := []int{
|
||||
10,
|
||||
|
@ -161,7 +161,7 @@ func BenchmarkKeyMerger_MergeKeys(b *testing.B) {
|
|||
var km KeyMerger
|
||||
for i := 0; i < b.N; i++ {
|
||||
for j := 0; j < n; j++ {
|
||||
km.MergeKeys(keys[rand.Int()%len(keys)])
|
||||
km.MergeKeys(keys[seededRand.Int()%len(keys)])
|
||||
}
|
||||
km.Clear()
|
||||
}
|
||||
|
@ -180,7 +180,7 @@ func BenchmarkKeyMerger_MergeTagKeys(b *testing.B) {
|
|||
models.ParseTags([]byte("foo,tag04=v0,tag05=v0")),
|
||||
}
|
||||
|
||||
rand.Seed(20040409)
|
||||
seededRand := rand.New(rand.NewSource(20040409))
|
||||
|
||||
tests := []int{
|
||||
10,
|
||||
|
@ -195,7 +195,7 @@ func BenchmarkKeyMerger_MergeTagKeys(b *testing.B) {
|
|||
var km KeyMerger
|
||||
for i := 0; i < b.N; i++ {
|
||||
for j := 0; j < n; j++ {
|
||||
km.MergeTagKeys(tags[rand.Int()%len(tags)])
|
||||
km.MergeTagKeys(tags[seededRand.Int()%len(tags)])
|
||||
}
|
||||
km.Clear()
|
||||
}
|
||||
|
|
|
@ -15,7 +15,6 @@ func Modulo(dividend, modulus int64) int64 {
|
|||
// normalized timestamp. If it lies to the left we know it represents
|
||||
// the start time. Otherwise it represents the stop time, in which case
|
||||
// we decrement by the window period to get the start time.
|
||||
//
|
||||
func WindowStart(t, every, offset int64) int64 {
|
||||
mod := Modulo(t, every)
|
||||
off := Modulo(offset, every)
|
||||
|
@ -33,7 +32,6 @@ func WindowStart(t, every, offset int64) int64 {
|
|||
// normalized timestamp. If it lies to the right we know it represents
|
||||
// the stop time. Otherwise it represents the start time, in which case
|
||||
// we increment by the window period to get the stop time.
|
||||
//
|
||||
func WindowStop(t, every, offset int64) int64 {
|
||||
mod := Modulo(t, every)
|
||||
off := Modulo(offset, every)
|
||||
|
|
|
@ -42,7 +42,7 @@ const (
|
|||
// Distribution is handled by hashing the TaskID (to ensure uniform distribution) and then distributing over those channels
|
||||
// evenly based on the hashed ID. This is to ensure that all tasks of the same ID go to the same worker.
|
||||
//
|
||||
//The workers call ExecutorFunc handle any errors and update the LastScheduled time internally and also via the Checkpointer.
|
||||
// The workers call ExecutorFunc handle any errors and update the LastScheduled time internally and also via the Checkpointer.
|
||||
//
|
||||
// The main loop:
|
||||
//
|
||||
|
|
|
@ -662,9 +662,9 @@ from(bucket: "b")
|
|||
}
|
||||
}
|
||||
|
||||
//Create a new task with a Cron and Offset option
|
||||
//Update the task to remove the Offset option, and change Cron to Every
|
||||
//Retrieve the task again to ensure the options are now Every, without Cron or Offset
|
||||
// Create a new task with a Cron and Offset option
|
||||
// Update the task to remove the Offset option, and change Cron to Every
|
||||
// Retrieve the task again to ensure the options are now Every, without Cron or Offset
|
||||
func testTaskOptionsUpdateFull(t *testing.T, sys *System) {
|
||||
|
||||
script := `option task = {name: "task-Options-Update", cron: "* * * * *", concurrency: 100, offset: 10s}
|
||||
|
|
|
@ -11,13 +11,19 @@ allow us to make changes to this service without effecting any dependant service
|
|||
|
||||
When a new request for the tenant service comes in it should follow this pattern:
|
||||
1 http_server_resource - this is where the request is parsed and rejected if the client didn't send
|
||||
|
||||
the right information
|
||||
|
||||
2 middleware_resource_auth - We now confirm the user that generated the request has sufficient permission
|
||||
|
||||
to accomplish this task, in some cases we adjust the request if the user is without the correct permissions
|
||||
|
||||
3 middleware_resource_metrics - Track RED metrics for this request
|
||||
4 middleware_resource_logging - add logging around request duration and status.
|
||||
5 service_resource - When a request reaches the service we verify the content for compatibility with the existing dataset,
|
||||
|
||||
for instance if a resource has a "orgID" we will ensure the organization exists
|
||||
|
||||
6 storage_resource - Basic CRUD actions for the system.
|
||||
|
||||
This pattern of api -> middleware -> service -> basic crud helps us to break down the responsibilities into digestible
|
||||
|
|
|
@ -333,15 +333,15 @@ func (c *Client) MustCreateDBRPMapping(t *testing.T) platform.ID {
|
|||
// MustCreateResource will create a generic resource via the API.
|
||||
// Used in tests where the content of the resource does not matter.
|
||||
//
|
||||
// // Create one of each org resource
|
||||
// for _, r := range influxdb.OrgResourceTypes {
|
||||
// client.MustCreateResource(t, r)
|
||||
// }
|
||||
// // Create one of each org resource
|
||||
// for _, r := range influxdb.OrgResourceTypes {
|
||||
// client.MustCreateResource(t, r)
|
||||
// }
|
||||
//
|
||||
//
|
||||
// // Create a variable:
|
||||
// id := client.MustCreateResource(t, influxdb.VariablesResourceType)
|
||||
// defer client.MustDeleteResource(t, influxdb.VariablesResourceType, id)
|
||||
// // Create a variable:
|
||||
// id := client.MustCreateResource(t, influxdb.VariablesResourceType)
|
||||
// defer client.MustDeleteResource(t, influxdb.VariablesResourceType, id)
|
||||
func (c *Client) MustCreateResource(t *testing.T, r influxdb.ResourceType) platform.ID {
|
||||
t.Helper()
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
Package tests contains a set of integration tests, which run in-memory versions
|
||||
of various 2.0 services. They're not intended to be full end-to-end tests,
|
||||
but are a suitable place to write tests that need to flex the logic of
|
||||
multiple 2.0 components.
|
||||
Package tests contains a set of integration tests, which run in-memory versions
|
||||
of various 2.0 services. They're not intended to be full end-to-end tests,
|
||||
but are a suitable place to write tests that need to flex the logic of
|
||||
multiple 2.0 components.
|
||||
*/
|
||||
package tests
|
||||
|
|
|
@ -169,9 +169,9 @@ func TestMeasurementFieldSliceSort(t *testing.T) {
|
|||
|
||||
// randomize order using fixed seed to
|
||||
// ensure tests are deterministic on a given platform
|
||||
rand.Seed(100)
|
||||
seededRand := rand.New(rand.NewSource(100))
|
||||
for i := 0; i < 5; i++ {
|
||||
rand.Shuffle(len(got), func(i, j int) {
|
||||
seededRand.Shuffle(len(got), func(i, j int) {
|
||||
got[i], got[j] = got[j], got[i]
|
||||
})
|
||||
|
||||
|
|
|
@ -111,12 +111,12 @@ func BenchmarkDecodeFloatArrayBlock(b *testing.B) {
|
|||
}
|
||||
|
||||
func BenchmarkDecodeIntegerArrayBlock(b *testing.B) {
|
||||
rle := func(i int) int64 { return int64(i) }
|
||||
s8b := func(i int) int64 { return int64(i + int(rand.Int31n(10))) }
|
||||
rle := func(_ *rand.Rand, i int) int64 { return int64(i) }
|
||||
s8b := func(r *rand.Rand, i int) int64 { return int64(i + int(r.Int31n(10))) }
|
||||
|
||||
cases := []struct {
|
||||
enc string
|
||||
gen func(i int) int64
|
||||
gen func(r *rand.Rand, i int) int64
|
||||
n int
|
||||
}{
|
||||
{enc: "rle", gen: rle, n: 5},
|
||||
|
@ -130,13 +130,13 @@ func BenchmarkDecodeIntegerArrayBlock(b *testing.B) {
|
|||
}
|
||||
for _, bm := range cases {
|
||||
b.Run(fmt.Sprintf("%s_%d", bm.enc, bm.n), func(b *testing.B) {
|
||||
rand.Seed(int64(bm.n * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
|
||||
|
||||
valueCount := bm.n
|
||||
times := getTimes(valueCount, 60, time.Second)
|
||||
values := make([]tsm1.Value, len(times))
|
||||
for i, t := range times {
|
||||
values[i] = tsm1.NewValue(t, bm.gen(i))
|
||||
values[i] = tsm1.NewValue(t, bm.gen(seededRand, i))
|
||||
}
|
||||
|
||||
bytes, err := tsm1.Values(values).Encode(nil)
|
||||
|
|
|
@ -180,12 +180,12 @@ func Test_BooleanArrayDecodeAll_Multi_Compressed(t *testing.T) {
|
|||
|
||||
for _, tc := range cases {
|
||||
t.Run(fmt.Sprintf("%d_%0.2f", tc.n, tc.p), func(t *testing.T) {
|
||||
rand.Seed(int64(tc.n * tc.n))
|
||||
seededRand := rand.New(rand.NewSource(int64(tc.n * tc.n)))
|
||||
|
||||
enc := tsm1.NewBooleanEncoder(tc.n)
|
||||
values := make([]bool, tc.n)
|
||||
for i := range values {
|
||||
values[i] = rand.Float64() < tc.p
|
||||
values[i] = seededRand.Float64() < tc.p
|
||||
enc.Write(values[i])
|
||||
}
|
||||
|
||||
|
|
|
@ -1092,11 +1092,11 @@ func BenchmarkIntegerArrayDecodeAllUncompressed(b *testing.B) {
|
|||
}
|
||||
|
||||
for _, size := range benchmarks {
|
||||
rand.Seed(int64(size * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
|
||||
|
||||
enc := NewIntegerEncoder(size)
|
||||
for i := 0; i < size; i++ {
|
||||
enc.Write(values[rand.Int()%len(values)])
|
||||
enc.Write(values[seededRand.Int()%len(values)])
|
||||
}
|
||||
bytes, _ := enc.Bytes()
|
||||
|
||||
|
@ -1120,12 +1120,12 @@ func BenchmarkIntegerArrayDecodeAllPackedSimple(b *testing.B) {
|
|||
1000,
|
||||
}
|
||||
for _, size := range benchmarks {
|
||||
rand.Seed(int64(size * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
|
||||
|
||||
enc := NewIntegerEncoder(size)
|
||||
for i := 0; i < size; i++ {
|
||||
// Small amount of randomness prevents RLE from being used
|
||||
enc.Write(int64(i) + int64(rand.Intn(10)))
|
||||
enc.Write(int64(i) + int64(seededRand.Intn(10)))
|
||||
}
|
||||
bytes, _ := enc.Bytes()
|
||||
|
||||
|
@ -1153,8 +1153,6 @@ func BenchmarkIntegerArrayDecodeAllRLE(b *testing.B) {
|
|||
{1000, 0},
|
||||
}
|
||||
for _, bm := range benchmarks {
|
||||
rand.Seed(int64(bm.n * 1e3))
|
||||
|
||||
enc := NewIntegerEncoder(bm.n)
|
||||
acc := int64(0)
|
||||
for i := 0; i < bm.n; i++ {
|
||||
|
|
|
@ -3,7 +3,6 @@ package tsm1
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -373,8 +372,6 @@ func BenchmarkStringArrayDecodeAll(b *testing.B) {
|
|||
{1000, 10},
|
||||
}
|
||||
for _, bm := range benchmarks {
|
||||
rand.Seed(int64(bm.n * 1e3))
|
||||
|
||||
s := NewStringEncoder(bm.n)
|
||||
for c := 0; c < bm.n; c++ {
|
||||
s.Write(testutil.MakeSentence(bm.w))
|
||||
|
|
|
@ -1090,11 +1090,11 @@ func BenchmarkTimeArrayDecodeAllUncompressed(b *testing.B) {
|
|||
}
|
||||
|
||||
for _, size := range benchmarks {
|
||||
rand.Seed(int64(size * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
|
||||
|
||||
enc := NewTimeEncoder(size)
|
||||
for i := 0; i < size; i++ {
|
||||
enc.Write(values[rand.Int()%len(values)])
|
||||
enc.Write(values[seededRand.Int()%len(values)])
|
||||
}
|
||||
bytes, _ := enc.Bytes()
|
||||
|
||||
|
@ -1118,12 +1118,12 @@ func BenchmarkTimeArrayDecodeAllPackedSimple(b *testing.B) {
|
|||
1000,
|
||||
}
|
||||
for _, size := range benchmarks {
|
||||
rand.Seed(int64(size * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
|
||||
|
||||
enc := NewTimeEncoder(size)
|
||||
for i := 0; i < size; i++ {
|
||||
// Small amount of randomness prevents RLE from being used
|
||||
enc.Write(int64(i*1000) + int64(rand.Intn(10)))
|
||||
enc.Write(int64(i*1000) + int64(seededRand.Intn(10)))
|
||||
}
|
||||
bytes, _ := enc.Bytes()
|
||||
|
||||
|
|
|
@ -1554,12 +1554,12 @@ func BenchmarkDecodeFloatBlock(b *testing.B) {
|
|||
}
|
||||
|
||||
func BenchmarkDecodeIntegerBlock(b *testing.B) {
|
||||
rle := func(i int) int64 { return int64(i) }
|
||||
s8b := func(i int) int64 { return int64(i + int(rand.Int31n(10))) }
|
||||
rle := func(_ *rand.Rand, i int) int64 { return int64(i) }
|
||||
s8b := func(r *rand.Rand, i int) int64 { return int64(i + int(r.Int31n(10))) }
|
||||
|
||||
cases := []struct {
|
||||
enc string
|
||||
gen func(i int) int64
|
||||
gen func(r *rand.Rand, i int) int64
|
||||
n int
|
||||
}{
|
||||
{enc: "rle", gen: rle, n: 5},
|
||||
|
@ -1573,13 +1573,13 @@ func BenchmarkDecodeIntegerBlock(b *testing.B) {
|
|||
}
|
||||
for _, bm := range cases {
|
||||
b.Run(fmt.Sprintf("%s_%d", bm.enc, bm.n), func(b *testing.B) {
|
||||
rand.Seed(int64(bm.n * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
|
||||
|
||||
valueCount := bm.n
|
||||
times := getTimes(valueCount, 60, time.Second)
|
||||
values := make([]tsm1.Value, len(times))
|
||||
for i, t := range times {
|
||||
values[i] = tsm1.NewValue(t, bm.gen(i))
|
||||
values[i] = tsm1.NewValue(t, bm.gen(seededRand, i))
|
||||
}
|
||||
|
||||
bytes, err := tsm1.Values(values).Encode(nil)
|
||||
|
|
|
@ -626,11 +626,11 @@ func BenchmarkIntegerBatch_DecodeAllUncompressed(b *testing.B) {
|
|||
}
|
||||
|
||||
for _, bm := range benchmarks {
|
||||
rand.Seed(int64(bm.n * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
|
||||
|
||||
enc := NewIntegerEncoder(bm.n)
|
||||
for i := 0; i < bm.n; i++ {
|
||||
enc.Write(values[rand.Int()%len(values)])
|
||||
enc.Write(values[seededRand.Int()%len(values)])
|
||||
}
|
||||
bytes, _ := enc.Bytes()
|
||||
|
||||
|
@ -662,12 +662,12 @@ func BenchmarkIntegerBatch_DecodeAllPackedSimple(b *testing.B) {
|
|||
{1000},
|
||||
}
|
||||
for _, bm := range benchmarks {
|
||||
rand.Seed(int64(bm.n * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
|
||||
|
||||
enc := NewIntegerEncoder(bm.n)
|
||||
for i := 0; i < bm.n; i++ {
|
||||
// Small amount of randomness prevents RLE from being used
|
||||
enc.Write(int64(i) + int64(rand.Intn(10)))
|
||||
enc.Write(int64(i) + int64(seededRand.Intn(10)))
|
||||
}
|
||||
bytes, _ := enc.Bytes()
|
||||
|
||||
|
|
|
@ -29,7 +29,6 @@ const partitions = 16
|
|||
//
|
||||
// To determine the partition that a series key should be added to, the series
|
||||
// key is hashed and the first 8 bits are used as an index to the ring.
|
||||
//
|
||||
type ring struct {
|
||||
// The unique set of partitions in the ring.
|
||||
// len(partitions) <= len(continuum)
|
||||
|
@ -40,8 +39,7 @@ type ring struct {
|
|||
// power of 2, and for performance reasons should be larger than the number of
|
||||
// cores on the host. The supported set of values for n is:
|
||||
//
|
||||
// {1, 2, 4, 8, 16}.
|
||||
//
|
||||
// {1, 2, 4, 8, 16}.
|
||||
func newring(n int) (*ring, error) {
|
||||
if n <= 0 || n > partitions {
|
||||
return nil, fmt.Errorf("invalid number of partitions: %d", n)
|
||||
|
|
|
@ -2,7 +2,6 @@ package tsm1
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
@ -190,8 +189,6 @@ func BenchmarkStringDecoder_DecodeAll(b *testing.B) {
|
|||
{1000, 10},
|
||||
}
|
||||
for _, bm := range benchmarks {
|
||||
rand.Seed(int64(bm.n * 1e3))
|
||||
|
||||
s := NewStringEncoder(bm.n)
|
||||
for c := 0; c < bm.n; c++ {
|
||||
s.Write(testutil.MakeSentence(bm.w))
|
||||
|
|
|
@ -625,11 +625,11 @@ func BenchmarkTimeBatch_DecodeAllUncompressed(b *testing.B) {
|
|||
}
|
||||
|
||||
for _, size := range benchmarks {
|
||||
rand.Seed(int64(size * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
|
||||
|
||||
enc := NewTimeEncoder(size)
|
||||
for i := 0; i < size; i++ {
|
||||
enc.Write(values[rand.Int()%len(values)])
|
||||
enc.Write(values[seededRand.Int()%len(values)])
|
||||
}
|
||||
bytes, _ := enc.Bytes()
|
||||
|
||||
|
@ -661,12 +661,12 @@ func BenchmarkTimeBatch_DecodeAllPackedSimple(b *testing.B) {
|
|||
{1000},
|
||||
}
|
||||
for _, bm := range benchmarks {
|
||||
rand.Seed(int64(bm.n * 1e3))
|
||||
seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
|
||||
|
||||
enc := NewTimeEncoder(bm.n)
|
||||
for i := 0; i < bm.n; i++ {
|
||||
// Small amount of randomness prevents RLE from being used
|
||||
enc.Write(int64(i*1000) + int64(rand.Intn(10)))
|
||||
enc.Write(int64(i*1000) + int64(seededRand.Intn(10)))
|
||||
}
|
||||
bytes, _ := enc.Bytes()
|
||||
|
||||
|
|
|
@ -11,8 +11,8 @@ import (
|
|||
const MaxFieldValueLength = 1048576
|
||||
|
||||
// ValidateFields will return a PartialWriteError if:
|
||||
// - the point has inconsistent fields, or
|
||||
// - the point has fields that are too long
|
||||
// - the point has inconsistent fields, or
|
||||
// - the point has fields that are too long
|
||||
func ValidateFields(mf *MeasurementFields, point models.Point, skipSizeValidation bool) error {
|
||||
pointSize := point.StringSize()
|
||||
iter := point.FieldIterator()
|
||||
|
|
|
@ -1,9 +1,8 @@
|
|||
/*
|
||||
|
||||
Package tsi1 provides a memory-mapped index implementation that supports
|
||||
high cardinality series.
|
||||
|
||||
Overview
|
||||
# Overview
|
||||
|
||||
The top-level object in tsi1 is the Index. It is the primary access point from
|
||||
the rest of the system. The Index is composed of LogFile and IndexFile objects.
|
||||
|
@ -17,8 +16,7 @@ Index files also contain series information, however, they are highly indexed
|
|||
so that reads can be performed quickly. Index files are built through a process
|
||||
called compaction where a log file or multiple index files are merged together.
|
||||
|
||||
|
||||
Operations
|
||||
# Operations
|
||||
|
||||
The index can perform many tasks related to series, measurement, & tag data.
|
||||
All data is inserted by adding a series to the index. When adding a series,
|
||||
|
@ -34,8 +32,7 @@ as by measurement name, by tag value, or by using regular expressions. The
|
|||
index provides an API to iterate over subsets of series and perform set
|
||||
operations such as unions and intersections.
|
||||
|
||||
|
||||
Log File Layout
|
||||
# Log File Layout
|
||||
|
||||
The write-ahead file that series initially are inserted into simply appends
|
||||
all new operations sequentially. It is simply composed of a series of log
|
||||
|
@ -61,15 +58,13 @@ name, the tag set, and a checksum.
|
|||
When the log file is replayed, if the checksum is incorrect or the entry is
|
||||
incomplete (because of a partially failed write) then the log is truncated.
|
||||
|
||||
|
||||
Index File Layout
|
||||
# Index File Layout
|
||||
|
||||
The index file is composed of 3 main block types: one series block, one or more
|
||||
tag blocks, and one measurement block. At the end of the index file is a
|
||||
trailer that records metadata such as the offsets to these blocks.
|
||||
|
||||
|
||||
Series Block Layout
|
||||
# Series Block Layout
|
||||
|
||||
The series block stores raw series keys in sorted order. It also provides hash
|
||||
indexes so that series can be looked up quickly. Hash indexes are inserted
|
||||
|
@ -111,8 +106,7 @@ a trailer which contains metadata about the block.
|
|||
┃ └──────────────────────┘ ┃
|
||||
┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
||||
|
||||
|
||||
Tag Block Layout
|
||||
# Tag Block Layout
|
||||
|
||||
After the series block is one or more tag blocks. One of these blocks exists
|
||||
for every measurement in the index file. The block is structured as a sorted
|
||||
|
@ -159,8 +153,7 @@ that value. Series iterators can be built around a single tag key value or
|
|||
multiple iterators can be merged with set operators such as union or
|
||||
intersection.
|
||||
|
||||
|
||||
Measurement block
|
||||
# Measurement block
|
||||
|
||||
The measurement block stores a sorted list of measurements, their associated
|
||||
series offsets, and the offset to their tag block. This allows all series for
|
||||
|
@ -188,8 +181,7 @@ measurements.
|
|||
┃ └──────────────────────┘ ┃
|
||||
┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
||||
|
||||
|
||||
Manifest file
|
||||
# Manifest file
|
||||
|
||||
The index is simply an ordered set of log and index files. These files can be
|
||||
merged together or rewritten but their order must always be the same. This is
|
||||
|
@ -200,8 +192,7 @@ Whenever the set of active files is changed, a manifest file is written to
|
|||
track the set. The manifest specifies the ordering of files and, on startup,
|
||||
all files not in the manifest are removed from the index directory.
|
||||
|
||||
|
||||
Compacting index files
|
||||
# Compacting index files
|
||||
|
||||
Compaction is the process of taking files and merging them together into a
|
||||
single file. There are two stages of compaction within TSI.
|
||||
|
@ -216,8 +207,7 @@ they are all merged together into a single index file and the old files are
|
|||
discarded. Because all blocks are written in sorted order, the new index file
|
||||
can be streamed and minimize memory use.
|
||||
|
||||
|
||||
Concurrency
|
||||
# Concurrency
|
||||
|
||||
Index files are immutable so they do not require fine grained locks, however,
|
||||
compactions require that we track which files are in use so they are not
|
||||
|
@ -232,7 +222,5 @@ returns to zero.
|
|||
Besides the reference counting, there are no other locking mechanisms when
|
||||
reading or writing index files. Log files, however, do require a lock whenever
|
||||
they are accessed. This is another reason to minimize log file size.
|
||||
|
||||
|
||||
*/
|
||||
package tsi1
|
||||
|
|
|
@ -55,7 +55,6 @@ func init() {
|
|||
//
|
||||
// NOTE: Currently, this must not be change once a database is created. Further,
|
||||
// it must also be a power of 2.
|
||||
//
|
||||
var DefaultPartitionN uint64 = 8
|
||||
|
||||
// An IndexOption is a functional option for changing the configuration of
|
||||
|
|
|
@ -123,9 +123,9 @@ func TestLogFile_SeriesStoredInOrder(t *testing.T) {
|
|||
|
||||
// Generate and add test data
|
||||
tvm := make(map[string]struct{})
|
||||
rand.Seed(time.Now().Unix())
|
||||
seededRand := rand.New(rand.NewSource(time.Now().Unix()))
|
||||
for i := 0; i < 100; i++ {
|
||||
tv := fmt.Sprintf("server-%d", rand.Intn(50)) // Encourage adding duplicate series.
|
||||
tv := fmt.Sprintf("server-%d", seededRand.Intn(50)) // Encourage adding duplicate series.
|
||||
tvm[tv] = struct{}{}
|
||||
|
||||
if _, err := f.AddSeriesList(seriesSet, [][]byte{
|
||||
|
|
|
@ -162,8 +162,6 @@ var set *SeriesIDSet
|
|||
// BenchmarkSeriesIDSet_Add/10-4 5000000 348 ns/op 0 B/op 0 allocs/op
|
||||
// BenchmarkSeriesIDSet_Add/100-4 5000000 373 ns/op 0 B/op 0 allocs/op
|
||||
// BenchmarkSeriesIDSet_Add/1000-4 5000000 342 ns/op 0 B/op 0 allocs/op
|
||||
//
|
||||
//
|
||||
func BenchmarkSeriesIDSet_AddMore(b *testing.B) {
|
||||
cardinalities := []uint64{1, 2, 10, 100, 1000, 10000, 100000, 1000000, 10000000}
|
||||
|
||||
|
@ -202,7 +200,6 @@ func BenchmarkSeriesIDSet_AddMore(b *testing.B) {
|
|||
// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_global_lock-8 2000000 914 ns/op 0 B/op 0 allocs/op
|
||||
// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/same_multi_lock-8 30000000 39.7 ns/op 0 B/op 0 allocs/op
|
||||
// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_multi_lock-8 1000000 1002 ns/op 0 B/op 0 allocs/op
|
||||
//
|
||||
func BenchmarkSeriesIDSet_Add(b *testing.B) {
|
||||
// Setup...
|
||||
set = NewSeriesIDSet()
|
||||
|
@ -523,7 +520,6 @@ func BenchmarkSeriesIDSet_AddMany(b *testing.B) {
|
|||
// BenchmarkSeriesIDSet_Remove/cardinality_1000000_remove_same-4 20000000 99.1 ns/op 0 B/op 0 allocs/op
|
||||
// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_global_lock-4 20000000 57.7 ns/op 0 B/op 0 allocs/op
|
||||
// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_multi_lock-4 20000000 80.1 ns/op 0 B/op 0 allocs/op
|
||||
//
|
||||
func BenchmarkSeriesIDSet_Remove(b *testing.B) {
|
||||
// Setup...
|
||||
set = NewSeriesIDSet()
|
||||
|
|
|
@ -190,7 +190,7 @@ func (s *Shard) SetEnabled(enabled bool) {
|
|||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
//! setEnabledNoLock performs actual work of SetEnabled. Must hold s.mu before calling.
|
||||
// ! setEnabledNoLock performs actual work of SetEnabled. Must hold s.mu before calling.
|
||||
func (s *Shard) setEnabledNoLock(enabled bool) {
|
||||
// Prevent writes and queries
|
||||
s.enabled = enabled
|
||||
|
|
|
@ -26,12 +26,12 @@ func (s *tenantService) FindUser(ctx context.Context, filter influxdb.UserFilter
|
|||
return s.FindUserFn(ctx, filter)
|
||||
}
|
||||
|
||||
//FindOrganizationByID calls FindOrganizationByIDF.
|
||||
// FindOrganizationByID calls FindOrganizationByIDF.
|
||||
func (s *tenantService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) {
|
||||
return s.FindOrganizationByIDF(ctx, id)
|
||||
}
|
||||
|
||||
//FindOrganization calls FindOrganizationF.
|
||||
// FindOrganization calls FindOrganizationF.
|
||||
func (s *tenantService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) {
|
||||
return s.FindOrganizationF(ctx, filter)
|
||||
}
|
||||
|
|
|
@ -293,8 +293,8 @@ func (l sgList) Covers(t time.Time) bool {
|
|||
// to start time. Therefore, if there are multiple shard groups that match
|
||||
// this point's time they will be preferred in this order:
|
||||
//
|
||||
// - a shard group with the earliest end time;
|
||||
// - (assuming identical end times) the shard group with the earliest start time.
|
||||
// - a shard group with the earliest end time;
|
||||
// - (assuming identical end times) the shard group with the earliest start time.
|
||||
func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo {
|
||||
if l.items.Len() == 0 {
|
||||
return nil
|
||||
|
|
|
@ -197,7 +197,6 @@ func (c *Client) CreateDatabase(name string) (*DatabaseInfo, error) {
|
|||
// This call is only idempotent when the caller provides the exact same
|
||||
// retention policy, and that retention policy is already the default for the
|
||||
// database.
|
||||
//
|
||||
func (c *Client) CreateDatabaseWithRetentionPolicy(name string, spec *RetentionPolicySpec) (*DatabaseInfo, error) {
|
||||
if spec == nil {
|
||||
return nil, errors.New("CreateDatabaseWithRetentionPolicy called with nil spec")
|
||||
|
|
|
@ -13,8 +13,10 @@ import (
|
|||
"github.com/influxdata/influxql"
|
||||
)
|
||||
|
||||
var seededRand *rand.Rand
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
}
|
||||
|
||||
func Test_Data_DropDatabase(t *testing.T) {
|
||||
|
@ -469,7 +471,7 @@ func randString(n int) string {
|
|||
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = letters[rand.Intn(len(letters))]
|
||||
b[i] = letters[seededRand.Intn(len(letters))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue