chore: update to go 1.20 (#24088)

* build: upgrade to go 1.19

* chore: bump go.mod

* chore: `gofmt` changes for doc comments

https://tip.golang.org/doc/comment

* test: update tests for new sort order

* chore: make generate-sources

* chore: make generate-sources

* chore: go 1.20

* chore: handle rand.Seed deprecation

* chore: handle rand.Seed deprecation in tests

---------

Co-authored-by: DStrand1 <dstrandboge@influxdata.com>
test/monitor-ci-415
Jeffrey Smith II 2023-02-09 14:14:35 -05:00 committed by GitHub
parent 8ad6e17265
commit f74c69c5e4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
67 changed files with 391 additions and 348 deletions

View File

@ -7,7 +7,7 @@ orbs:
parameters: parameters:
cross-container-tag: cross-container-tag:
type: string type: string
default: go1.18.9-cb1343dd74ecba8ec07fe810195530a0b9055aa9 default: go1.20-0492ad609850ef223390d36ae41a226fe806e83c
workflow: workflow:
type: string type: string

View File

@ -26,12 +26,12 @@ func (s *tenantService) FindUser(ctx context.Context, filter influxdb.UserFilter
return s.FindUserFn(ctx, filter) return s.FindUserFn(ctx, filter)
} }
//FindOrganizationByID calls FindOrganizationByIDF. // FindOrganizationByID calls FindOrganizationByIDF.
func (s *tenantService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { func (s *tenantService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) {
return s.FindOrganizationByIDF(ctx, id) return s.FindOrganizationByIDF(ctx, id)
} }
//FindOrganization calls FindOrganizationF. // FindOrganization calls FindOrganizationF.
func (s *tenantService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { func (s *tenantService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) {
return s.FindOrganizationF(ctx, filter) return s.FindOrganizationF(ctx, filter)
} }

View File

@ -97,9 +97,12 @@ func authorizeReadSystemBucket(ctx context.Context, bid, oid platform.ID) (influ
// AuthorizeReadBucket exists because buckets are a special case and should use this method. // AuthorizeReadBucket exists because buckets are a special case and should use this method.
// I.e., instead of: // I.e., instead of:
// AuthorizeRead(ctx, influxdb.BucketsResourceType, b.ID, b.OrgID) //
// AuthorizeRead(ctx, influxdb.BucketsResourceType, b.ID, b.OrgID)
//
// use: // use:
// AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID) //
// AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID)
func AuthorizeReadBucket(ctx context.Context, bt influxdb.BucketType, bid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { func AuthorizeReadBucket(ctx context.Context, bt influxdb.BucketType, bid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) {
switch bt { switch bt {
case influxdb.BucketTypeSystem: case influxdb.BucketTypeSystem:

22
go.mod
View File

@ -1,9 +1,9 @@
module github.com/influxdata/influxdb/v2 module github.com/influxdata/influxdb/v2
go 1.18 go 1.20
require ( require (
github.com/BurntSushi/toml v0.4.1 github.com/BurntSushi/toml v1.2.1
github.com/Masterminds/squirrel v1.5.0 github.com/Masterminds/squirrel v1.5.0
github.com/NYTimes/gziphandler v1.0.1 github.com/NYTimes/gziphandler v1.0.1
github.com/RoaringBitmap/roaring v0.4.16 github.com/RoaringBitmap/roaring v0.4.16
@ -64,15 +64,15 @@ require (
go.uber.org/multierr v1.6.0 go.uber.org/multierr v1.6.0
go.uber.org/zap v1.16.0 go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f golang.org/x/sync v0.1.0
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 golang.org/x/sys v0.4.0
golang.org/x/text v0.3.7 golang.org/x/text v0.6.0
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
golang.org/x/tools v0.1.11 golang.org/x/tools v0.5.0
google.golang.org/protobuf v1.28.1 google.golang.org/protobuf v1.28.1
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
honnef.co/go/tools v0.3.0 honnef.co/go/tools v0.4.0
) )
require ( require (
@ -206,12 +206,12 @@ require (
go.opencensus.io v0.23.0 // indirect go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.7.0 // indirect go.uber.org/atomic v1.7.0 // indirect
golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 // indirect golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 // indirect
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 // indirect golang.org/x/net v0.5.0 // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/term v0.4.0 // indirect
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
gonum.org/v1/gonum v0.11.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect
google.golang.org/api v0.47.0 // indirect google.golang.org/api v0.47.0 // indirect

39
go.sum
View File

@ -80,8 +80,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM=
github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@ -1324,8 +1324,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 h1:/5Bs7sWi0i3rOVO5KnM55OwugpsD4bRW1zywKoZjbkI= golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 h1:/5Bs7sWi0i3rOVO5KnM55OwugpsD4bRW1zywKoZjbkI=
golang.org/x/exp v0.0.0-20211216164055-b2b84827b756/go.mod h1:b9TAUYHmRtqA6klRHApnXMnj+OyLce4yF5cZCUbk2ps= golang.org/x/exp v0.0.0-20211216164055-b2b84827b756/go.mod h1:b9TAUYHmRtqA6klRHApnXMnj+OyLce4yF5cZCUbk2ps=
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE=
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@ -1364,8 +1364,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1425,8 +1425,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc= golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1453,8 +1453,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1564,14 +1564,14 @@ golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1581,8 +1581,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1664,8 +1665,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1880,8 +1881,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
honnef.co/go/tools v0.3.0 h1:2LdYUZ7CIxnYgskbUZfY7FPggmqnh6shBqfWa8Tn3XU= honnef.co/go/tools v0.4.0 h1:lyXVV1c8wUBJRKqI8JgIpT8TW1VDagfYYaxbKa/HoL8=
honnef.co/go/tools v0.3.0/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70= honnef.co/go/tools v0.4.0/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=

View File

@ -841,7 +841,8 @@ func (s *CheckService) DeleteCheck(ctx context.Context, id platform.ID) error {
} }
// TODO(gavincabbage): These structures should be in a common place, like other models, // TODO(gavincabbage): These structures should be in a common place, like other models,
// but the common influxdb.Check is an interface that is not appropriate for an API client. //
// but the common influxdb.Check is an interface that is not appropriate for an API client.
type Checks struct { type Checks struct {
Checks []*Check `json:"checks"` Checks []*Check `json:"checks"`
Links *influxdb.PagingLinks `json:"links"` Links *influxdb.PagingLinks `json:"links"`

View File

@ -611,7 +611,8 @@ func (s *NotificationEndpointService) FindNotificationEndpoints(ctx context.Cont
// CreateNotificationEndpoint creates a new notification endpoint and sets b.ID with the new identifier. // CreateNotificationEndpoint creates a new notification endpoint and sets b.ID with the new identifier.
// TODO(@jsteenb2): this is unsatisfactory, we have no way of grabbing the new notification endpoint without // TODO(@jsteenb2): this is unsatisfactory, we have no way of grabbing the new notification endpoint without
// serious hacky hackertoning. Put it on the list... //
// serious hacky hackertoning. Put it on the list...
func (s *NotificationEndpointService) CreateNotificationEndpoint(ctx context.Context, ne influxdb.NotificationEndpoint, userID platform.ID) error { func (s *NotificationEndpointService) CreateNotificationEndpoint(ctx context.Context, ne influxdb.NotificationEndpoint, userID platform.ID) error {
var resp notificationEndpointDecoder var resp notificationEndpointDecoder
err := s.Client. err := s.Client.
@ -667,9 +668,10 @@ func (s *NotificationEndpointService) PatchNotificationEndpoint(ctx context.Cont
// DeleteNotificationEndpoint removes a notification endpoint by ID, returns secret fields, orgID for further deletion. // DeleteNotificationEndpoint removes a notification endpoint by ID, returns secret fields, orgID for further deletion.
// TODO: axe this delete design, makes little sense in how its currently being done. Right now, as an http client, // TODO: axe this delete design, makes little sense in how its currently being done. Right now, as an http client,
// I am forced to know how the store handles this and then figure out what the server does in between me and that store, //
// then see what falls out :flushed... for now returning nothing for secrets, orgID, and only returning an error. This makes // I am forced to know how the store handles this and then figure out what the server does in between me and that store,
// the code/design smell super obvious imo // then see what falls out :flushed... for now returning nothing for secrets, orgID, and only returning an error. This makes
// the code/design smell super obvious imo
func (s *NotificationEndpointService) DeleteNotificationEndpoint(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { func (s *NotificationEndpointService) DeleteNotificationEndpoint(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) {
if !id.Valid() { if !id.Valid() {
return nil, 0, fmt.Errorf("invalid ID: please provide a valid ID") return nil, 0, fmt.Errorf("invalid ID: please provide a valid ID")

View File

@ -1212,8 +1212,8 @@ func (r *UnsignedCumulativeSumReducer) Emit() []UnsignedPoint {
// FloatHoltWintersReducer forecasts a series into the future. // FloatHoltWintersReducer forecasts a series into the future.
// This is done using the Holt-Winters damped method. // This is done using the Holt-Winters damped method.
// 1. Using the series the initial values are calculated using a SSE. // 1. Using the series the initial values are calculated using a SSE.
// 2. The series is forecasted into the future using the iterative relations. // 2. The series is forecasted into the future using the iterative relations.
type FloatHoltWintersReducer struct { type FloatHoltWintersReducer struct {
// Season period // Season period
m int m int

View File

@ -404,11 +404,10 @@ func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) {
// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems. // floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems.
// Items are sorted with the following priority: // Items are sorted with the following priority:
// - By their measurement name; // - By their measurement name;
// - By their tag keys/values; // - By their tag keys/values;
// - By time; or // - By time; or
// - By their Aux field values. // - By their Aux field values.
//
type floatSortedMergeHeap struct { type floatSortedMergeHeap struct {
opt IteratorOptions opt IteratorOptions
items []*floatSortedMergeHeapItem items []*floatSortedMergeHeapItem
@ -3068,11 +3067,10 @@ func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) {
// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems. // integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems.
// Items are sorted with the following priority: // Items are sorted with the following priority:
// - By their measurement name; // - By their measurement name;
// - By their tag keys/values; // - By their tag keys/values;
// - By time; or // - By time; or
// - By their Aux field values. // - By their Aux field values.
//
type integerSortedMergeHeap struct { type integerSortedMergeHeap struct {
opt IteratorOptions opt IteratorOptions
items []*integerSortedMergeHeapItem items []*integerSortedMergeHeapItem
@ -5732,11 +5730,10 @@ func (itr *unsignedSortedMergeIterator) pop() (*UnsignedPoint, error) {
// unsignedSortedMergeHeap represents a heap of unsignedSortedMergeHeapItems. // unsignedSortedMergeHeap represents a heap of unsignedSortedMergeHeapItems.
// Items are sorted with the following priority: // Items are sorted with the following priority:
// - By their measurement name; // - By their measurement name;
// - By their tag keys/values; // - By their tag keys/values;
// - By time; or // - By time; or
// - By their Aux field values. // - By their Aux field values.
//
type unsignedSortedMergeHeap struct { type unsignedSortedMergeHeap struct {
opt IteratorOptions opt IteratorOptions
items []*unsignedSortedMergeHeapItem items []*unsignedSortedMergeHeapItem
@ -8396,11 +8393,10 @@ func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) {
// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems. // stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems.
// Items are sorted with the following priority: // Items are sorted with the following priority:
// - By their measurement name; // - By their measurement name;
// - By their tag keys/values; // - By their tag keys/values;
// - By time; or // - By time; or
// - By their Aux field values. // - By their Aux field values.
//
type stringSortedMergeHeap struct { type stringSortedMergeHeap struct {
opt IteratorOptions opt IteratorOptions
items []*stringSortedMergeHeapItem items []*stringSortedMergeHeapItem
@ -11046,11 +11042,10 @@ func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) {
// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems. // booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems.
// Items are sorted with the following priority: // Items are sorted with the following priority:
// - By their measurement name; // - By their measurement name;
// - By their tag keys/values; // - By their tag keys/values;
// - By time; or // - By time; or
// - By their Aux field values. // - By their Aux field values.
//
type booleanSortedMergeHeap struct { type booleanSortedMergeHeap struct {
opt IteratorOptions opt IteratorOptions
items []*booleanSortedMergeHeapItem items []*booleanSortedMergeHeapItem

View File

@ -0,0 +1,33 @@
package rand
import (
"math/rand"
"sync"
)
// LockedSource is taken from the Go "math/rand" package.
// The default rand functions use a similar type under the hood, this does not introduce any additional
// locking than using the default functions.
type LockedSource struct {
lk sync.Mutex
src rand.Source
}
func NewLockedSourceFromSeed(seed int64) *LockedSource {
return &LockedSource{
src: rand.NewSource(seed),
}
}
func (r *LockedSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}
func (r *LockedSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
}

View File

@ -3,46 +3,45 @@
// This is a small simplification over viper to move most of the boilerplate // This is a small simplification over viper to move most of the boilerplate
// into one place. // into one place.
// //
//
// In this example the flags can be set with MYPROGRAM_MONITOR_HOST and // In this example the flags can be set with MYPROGRAM_MONITOR_HOST and
// MYPROGRAM_NUMBER or with the flags --monitor-host and --number // MYPROGRAM_NUMBER or with the flags --monitor-host and --number
// //
// var flags struct { // var flags struct {
// monitorHost string // monitorHost string
// number int // number int
// } // }
// //
// func main() { // func main() {
// cmd := cli.NewCommand(&cli.Program{ // cmd := cli.NewCommand(&cli.Program{
// Run: run, // Run: run,
// Name: "myprogram", // Name: "myprogram",
// Opts: []cli.Opt{ // Opts: []cli.Opt{
// { // {
// DestP: &flags.monitorHost, // DestP: &flags.monitorHost,
// Flag: "monitor-host", // Flag: "monitor-host",
// Default: "http://localhost:8086", // Default: "http://localhost:8086",
// Desc: "host to send influxdb metrics", // Desc: "host to send influxdb metrics",
// }, // },
// { // {
// DestP: &flags.number, // DestP: &flags.number,
// Flag: "number", // Flag: "number",
// Default: 2, // Default: 2,
// Desc: "number of times to loop", // Desc: "number of times to loop",
// //
// }, // },
// }, // },
// }) // })
// //
// if err := cmd.Execute(); err != nil { // if err := cmd.Execute(); err != nil {
// fmt.Fprintln(os.Stderr, err) // fmt.Fprintln(os.Stderr, err)
// os.Exit(1) // os.Exit(1)
// } // }
// } // }
// //
// func run() error { // func run() error {
// for i := 0; i < number; i++ { // for i := 0; i < number; i++ {
// fmt.Printf("%d\n", i) // fmt.Printf("%d\n", i)
// return nil // return nil
// } // }
// } // }
package cli package cli

View File

@ -32,33 +32,39 @@
// First, I add an entry to `flags.yml`. // First, I add an entry to `flags.yml`.
// //
// ```yaml // ```yaml
// - name: My Feature // - name: My Feature
// description: My feature is awesome // description: My feature is awesome
// key: myFeature // key: myFeature
// default: false // default: false
// expose: true // expose: true
// contact: My Name // contact: My Name
//
// ``` // ```
// //
// My flag type is inferred to be boolean by my default of `false` when I run // My flag type is inferred to be boolean by my default of `false` when I run
// `make flags` and the `feature` package now includes `func MyFeature() BoolFlag`. // `make flags` and the `feature` package now includes `func MyFeature() BoolFlag`.
// //
// I use this to control my backend code with // # I use this to control my backend code with
// //
// ```go // ```go
// if feature.MyFeature.Enabled(ctx) { //
// // new code... // if feature.MyFeature.Enabled(ctx) {
// } else { // // new code...
// // new code... // } else {
// } //
// // new code...
// }
//
// ``` // ```
// //
// and the `/api/v2/flags` response provides the same information to the frontend. // and the `/api/v2/flags` response provides the same information to the frontend.
// //
// ```json // ```json
// { //
// "myFeature": false // {
// } // "myFeature": false
// }
//
// ``` // ```
// //
// While `false` by default, I can turn on my experimental feature by starting // While `false` by default, I can turn on my experimental feature by starting
@ -71,5 +77,4 @@
// ``` // ```
// influxd --feature-flags flag1=value1,flag2=value2 // influxd --feature-flags flag1=value1,flag2=value2
// ``` // ```
//
package feature package feature

View File

@ -78,8 +78,9 @@ func ExposedFlagsFromContext(ctx context.Context, byKey ByKeyFn) map[string]inte
// to be removed, e.g. enabling debug tracing for an organization. // to be removed, e.g. enabling debug tracing for an organization.
// //
// TODO(gavincabbage): This may become a stale date, which can then // TODO(gavincabbage): This may become a stale date, which can then
// be used to trigger a notification to the contact when the flag //
// has become stale, to encourage flag cleanup. // be used to trigger a notification to the contact when the flag
// has become stale, to encourage flag cleanup.
type Lifetime int type Lifetime int
const ( const (

View File

@ -39,24 +39,31 @@ const (
// further help operators. // further help operators.
// //
// To create a simple error, // To create a simple error,
// &Error{ //
// Code:ENotFound, // &Error{
// } // Code:ENotFound,
// }
//
// To show where the error happens, add Op. // To show where the error happens, add Op.
// &Error{ //
// Code: ENotFound, // &Error{
// Op: "bolt.FindUserByID" // Code: ENotFound,
// } // Op: "bolt.FindUserByID"
// }
//
// To show an error with a unpredictable value, add the value in Msg. // To show an error with a unpredictable value, add the value in Msg.
// &Error{ //
// Code: EConflict, // &Error{
// Message: fmt.Sprintf("organization with name %s already exist", aName), // Code: EConflict,
// } // Message: fmt.Sprintf("organization with name %s already exist", aName),
// }
//
// To show an error wrapped with another error. // To show an error wrapped with another error.
// &Error{ //
// Code:EInternal, // &Error{
// Err: err, // Code:EInternal,
// }. // Err: err,
// }.
type Error struct { type Error struct {
Code string Code string
Msg string Msg string

View File

@ -19,7 +19,8 @@ import (
// LogError adds a span log for an error. // LogError adds a span log for an error.
// Returns unchanged error, so useful to wrap as in: // Returns unchanged error, so useful to wrap as in:
// return 0, tracing.LogError(err) //
// return 0, tracing.LogError(err)
func LogError(span opentracing.Span, err error) error { func LogError(span opentracing.Span, err error) error {
if err == nil { if err == nil {
return nil return nil
@ -115,24 +116,25 @@ func (s *Span) Finish() {
// Context without parent span reference triggers root span construction. // Context without parent span reference triggers root span construction.
// This function never returns nil values. // This function never returns nil values.
// //
// Performance // # Performance
// //
// This function incurs a small performance penalty, roughly 1000 ns/op, 376 B/op, 6 allocs/op. // This function incurs a small performance penalty, roughly 1000 ns/op, 376 B/op, 6 allocs/op.
// Jaeger timestamp and duration precision is only µs, so this is pretty negligible. // Jaeger timestamp and duration precision is only µs, so this is pretty negligible.
// //
// Alternatives // # Alternatives
// //
// If this performance penalty is too much, try these, which are also demonstrated in benchmark tests: // If this performance penalty is too much, try these, which are also demonstrated in benchmark tests:
// // Create a root span
// span := opentracing.StartSpan("operation name")
// ctx := opentracing.ContextWithSpan(context.Background(), span)
// //
// // Create a child span // // Create a root span
// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc)) // span := opentracing.StartSpan("operation name")
// ctx := opentracing.ContextWithSpan(context.Background(), span) // ctx := opentracing.ContextWithSpan(context.Background(), span)
// //
// // Sugar to create a child span // // Create a child span
// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name") // span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc))
// ctx := opentracing.ContextWithSpan(context.Background(), span)
//
// // Sugar to create a child span
// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name")
func StartSpanFromContext(ctx context.Context, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { func StartSpanFromContext(ctx context.Context, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
if ctx == nil { if ctx == nil {
panic("StartSpanFromContext called with nil context") panic("StartSpanFromContext called with nil context")

View File

@ -17,37 +17,37 @@ import (
// //
// The following is an illustration of its use: // The following is an illustration of its use:
// //
// byUserID := func(v []byte) ([]byte, error) { // byUserID := func(v []byte) ([]byte, error) {
// auth := &influxdb.Authorization{} // auth := &influxdb.Authorization{}
// //
// if err := json.Unmarshal(v, auth); err != nil { // if err := json.Unmarshal(v, auth); err != nil {
// return err // return err
// } // }
// //
// return auth.UserID.Encode() // return auth.UserID.Encode()
// } // }
// //
// // configure a write only index // // configure a write only index
// indexByUser := NewIndex(NewSource([]byte(`authorizationsbyuserv1/), byUserID)) // indexByUser := NewIndex(NewSource([]byte(`authorizationsbyuserv1/), byUserID))
// //
// indexByUser.Insert(tx, someUserID, someAuthID) // indexByUser.Insert(tx, someUserID, someAuthID)
// //
// indexByUser.Delete(tx, someUserID, someAuthID) // indexByUser.Delete(tx, someUserID, someAuthID)
// //
// indexByUser.Walk(tx, someUserID, func(k, v []byte) error { // indexByUser.Walk(tx, someUserID, func(k, v []byte) error {
// auth := &influxdb.Authorization{} // auth := &influxdb.Authorization{}
// if err := json.Unmarshal(v, auth); err != nil { // if err := json.Unmarshal(v, auth); err != nil {
// return err // return err
// } // }
// //
// // do something with auth // // do something with auth
// //
// return nil // return nil
// }) // })
// //
// // verify the current index against the source and return the differences // // verify the current index against the source and return the differences
// // found in each // // found in each
// diff, err := indexByUser.Verify(ctx, tx) // diff, err := indexByUser.Verify(ctx, tx)
type Index struct { type Index struct {
IndexMapping IndexMapping

View File

@ -9,11 +9,11 @@
// //
// This package is arranged like so: // This package is arranged like so:
// //
// doc.go - this piece of documentation. // doc.go - this piece of documentation.
// all.go - definition of Migration array referencing each of the name migrations in number migration files (below). // all.go - definition of Migration array referencing each of the name migrations in number migration files (below).
// migration.go - an implementation of migration.Spec for convenience. // migration.go - an implementation of migration.Spec for convenience.
// 000X_migration_name.go (example) - N files contains the specific implementations of each migration enumerated in `all.go`. // 000X_migration_name.go (example) - N files contains the specific implementations of each migration enumerated in `all.go`.
// ... // ...
// //
// Managing this list of files and all.go can be fiddly. // Managing this list of files and all.go can be fiddly.
// There is a buildable cli utility called `kvmigrate` in the `internal/cmd/kvmigrate` package. // There is a buildable cli utility called `kvmigrate` in the `internal/cmd/kvmigrate` package.

View File

@ -39,17 +39,17 @@ func NewOrganizationService() *OrganizationService {
} }
} }
//FindOrganizationByID calls FindOrganizationByIDF. // FindOrganizationByID calls FindOrganizationByIDF.
func (s *OrganizationService) FindOrganizationByID(ctx context.Context, id platform2.ID) (*platform.Organization, error) { func (s *OrganizationService) FindOrganizationByID(ctx context.Context, id platform2.ID) (*platform.Organization, error) {
return s.FindOrganizationByIDF(ctx, id) return s.FindOrganizationByIDF(ctx, id)
} }
//FindOrganization calls FindOrganizationF. // FindOrganization calls FindOrganizationF.
func (s *OrganizationService) FindOrganization(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { func (s *OrganizationService) FindOrganization(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return s.FindOrganizationF(ctx, filter) return s.FindOrganizationF(ctx, filter)
} }
//FindOrganizations calls FindOrganizationsF. // FindOrganizations calls FindOrganizationsF.
func (s *OrganizationService) FindOrganizations(ctx context.Context, filter platform.OrganizationFilter, opt ...platform.FindOptions) ([]*platform.Organization, int, error) { func (s *OrganizationService) FindOrganizations(ctx context.Context, filter platform.OrganizationFilter, opt ...platform.FindOptions) ([]*platform.Organization, int, error) {
return s.FindOrganizationsF(ctx, filter, opt...) return s.FindOrganizationsF(ctx, filter, opt...)
} }

View File

@ -230,7 +230,7 @@ func BenchmarkTagKeysSet_UnionBytes(b *testing.B) {
bytes.Split([]byte("tag04,tag05"), commaB), bytes.Split([]byte("tag04,tag05"), commaB),
} }
rand.Seed(20040409) seededRand := rand.New(rand.NewSource(20040409))
tests := []int{ tests := []int{
10, 10,
@ -245,7 +245,7 @@ func BenchmarkTagKeysSet_UnionBytes(b *testing.B) {
var km models.TagKeysSet var km models.TagKeysSet
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
for j := 0; j < n; j++ { for j := 0; j < n; j++ {
km.UnionBytes(keys[rand.Int()%len(keys)]) km.UnionBytes(keys[seededRand.Int()%len(keys)])
} }
km.Clear() km.Clear()
} }

View File

@ -47,20 +47,20 @@ const MaxWritesPending = 1024
// queues can have a max size configured such that when the size of all // queues can have a max size configured such that when the size of all
// segments on disk exceeds the size, write will fail. // segments on disk exceeds the size, write will fail.
// //
// ┌─────┐ // ┌─────┐
// │Head │ // │Head │
// ├─────┘ // ├─────┘
// //
// //
// ┌─────────────────┐ ┌─────────────────┐┌─────────────────┐ // ┌─────────────────┐ ┌─────────────────┐┌─────────────────┐
// │Segment 1 - 10MB │ │Segment 2 - 10MB ││Segment 3 - 10MB │ // │Segment 1 - 10MB │ │Segment 2 - 10MB ││Segment 3 - 10MB │
// └─────────────────┘ └─────────────────┘└─────────────────┘ // └─────────────────┘ └─────────────────┘└─────────────────┘
// //
// //
// //
// ┌─────┐ // ┌─────┐
// │Tail │ // │Tail │
// └─────┘ // └─────┘
type Queue struct { type Queue struct {
mu sync.RWMutex mu sync.RWMutex
@ -609,13 +609,13 @@ func (l *Queue) trimHead(force bool) error {
// lengths + block with a single footer point to the position in the segment of the // lengths + block with a single footer point to the position in the segment of the
// current Head block. // current Head block.
// //
// ┌──────────────────────────┐ ┌──────────────────────────┐ ┌────────────┐ // ┌──────────────────────────┐ ┌──────────────────────────┐ ┌────────────┐
// │ Block 1 │ │ Block 2 │ │ Footer │ // │ Block 1 │ │ Block 2 │ │ Footer │
// └──────────────────────────┘ └──────────────────────────┘ └────────────┘ // └──────────────────────────┘ └──────────────────────────┘ └────────────┘
// ┌────────────┐┌────────────┐ ┌────────────┐┌────────────┐ ┌────────────┐ // ┌────────────┐┌────────────┐ ┌────────────┐┌────────────┐ ┌────────────┐
// │Block 1 Len ││Block 1 Body│ │Block 2 Len ││Block 2 Body│ │Head Offset │ // │Block 1 Len ││Block 1 Body│ │Block 2 Len ││Block 2 Body│ │Head Offset │
// │ 8 bytes ││ N bytes │ │ 8 bytes ││ N bytes │ │ 8 bytes │ // │ 8 bytes ││ N bytes │ │ 8 bytes ││ N bytes │ │ 8 bytes │
// └────────────┘└────────────┘ └────────────┘└────────────┘ └────────────┘ // └────────────┘└────────────┘ └────────────┘└────────────┘ └────────────┘
// //
// The footer holds the pointer to the Head entry at the end of the segment to allow writes // The footer holds the pointer to the Head entry at the end of the segment to allow writes
// to seek to the end and write sequentially (vs having to seek back to the beginning of // to seek to the end and write sequentially (vs having to seek back to the beginning of

View File

@ -396,11 +396,11 @@ func TestQueue_TotalBytes(t *testing.T) {
// This test verifies the queue will advance in the following scenario: // This test verifies the queue will advance in the following scenario:
// //
// * There is one segment // - There is one segment
// * The segment is not full // - The segment is not full
// * The segment record size entry is corrupted, resulting in // - The segment record size entry is corrupted, resulting in
// currentRecordSize + pos > fileSize and // currentRecordSize + pos > fileSize and
// therefore the Advance would fail. // therefore the Advance would fail.
func TestQueue_AdvanceSingleCorruptSegment(t *testing.T) { func TestQueue_AdvanceSingleCorruptSegment(t *testing.T) {
q, dir := newTestQueue(t, withVerify(func([]byte) error { return nil })) q, dir := newTestQueue(t, withVerify(func([]byte) error { return nil }))
defer os.RemoveAll(dir) defer os.RemoveAll(dir)

View File

@ -468,8 +468,9 @@ func Decode(dst *[240]uint64, v uint64) (n int, err error) {
// Decode writes the uncompressed values from src to dst. It returns the number // Decode writes the uncompressed values from src to dst. It returns the number
// of values written or an error. // of values written or an error.
//go:nocheckptr
// nocheckptr while the underlying struct layout doesn't change // nocheckptr while the underlying struct layout doesn't change
//
//go:nocheckptr
func DecodeAll(dst, src []uint64) (value int, err error) { func DecodeAll(dst, src []uint64) (value int, err error) {
j := 0 j := 0
for _, v := range src { for _, v := range src {
@ -482,8 +483,9 @@ func DecodeAll(dst, src []uint64) (value int, err error) {
// DecodeBytesBigEndian writes the compressed, big-endian values from src to dst. It returns the number // DecodeBytesBigEndian writes the compressed, big-endian values from src to dst. It returns the number
// of values written or an error. // of values written or an error.
//go:nocheckptr
// nocheckptr while the underlying struct layout doesn't change // nocheckptr while the underlying struct layout doesn't change
//
//go:nocheckptr
func DecodeBytesBigEndian(dst []uint64, src []byte) (value int, err error) { func DecodeBytesBigEndian(dst []uint64, src []byte) (value int, err error) {
if len(src)&7 != 0 { if len(src)&7 != 0 {
return 0, errors.New("src length is not multiple of 8") return 0, errors.New("src length is not multiple of 8")

View File

@ -76,6 +76,8 @@ func combine(fns ...func() []uint64) func() []uint64 {
// TestEncodeAll ensures 100% test coverage of simple8b.EncodeAll and // TestEncodeAll ensures 100% test coverage of simple8b.EncodeAll and
// verifies all output by comparing the original input with the output of simple8b.DecodeAll // verifies all output by comparing the original input with the output of simple8b.DecodeAll
func TestEncodeAll(t *testing.T) { func TestEncodeAll(t *testing.T) {
//lint:ignore SA1019 This function was deprecated for good reasons that aren't important to us since its just used for testing.
// Ignoring seems better than all the effort to address the underlying concern. https://github.com/golang/go/issues/56319
rand.Seed(0) rand.Seed(0)
tests := []struct { tests := []struct {

View File

@ -2,11 +2,12 @@ package errors
// Capture is a wrapper function which can be used to capture errors from closing via a defer. // Capture is a wrapper function which can be used to capture errors from closing via a defer.
// An example: // An example:
// func Example() (err error) { //
// f, _ := os.Open(...) // func Example() (err error) {
// defer errors.Capture(&err, f.Close)() // f, _ := os.Open(...)
// ... // defer errors.Capture(&err, f.Close)()
// return // ...
// return
// //
// Doing this will result in the error from the f.Close() call being // Doing this will result in the error from the f.Close() call being
// put in the error via a ptr, if the error is not nil // put in the error via a ptr, if the error is not nil

View File

@ -4,10 +4,10 @@
// //
// The differences are that the implementation in this package: // The differences are that the implementation in this package:
// //
// * uses an AMD64 optimised xxhash algorithm instead of murmur; // - uses an AMD64 optimised xxhash algorithm instead of murmur;
// * uses some AMD64 optimisations for things like clz; // - uses some AMD64 optimisations for things like clz;
// * works with []byte rather than a Hash64 interface, to reduce allocations; // - works with []byte rather than a Hash64 interface, to reduce allocations;
// * implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler // - implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
// //
// Based on some rough benchmarking, this implementation of HyperLogLog++ is // Based on some rough benchmarking, this implementation of HyperLogLog++ is
// around twice as fast as the github.com/clarkduvall/hyperloglog implementation. // around twice as fast as the github.com/clarkduvall/hyperloglog implementation.

View File

@ -3,7 +3,7 @@ Package tracing provides a way for capturing hierarchical traces.
To start a new trace with a root span named select To start a new trace with a root span named select
trace, span := tracing.NewTrace("select") trace, span := tracing.NewTrace("select")
It is recommended that a span be forwarded to callees using the It is recommended that a span be forwarded to callees using the
context package. Firstly, create a new context with the span associated context package. Firstly, create a new context with the span associated
@ -21,6 +21,5 @@ Once the trace is complete, it may be converted to a graph with the Tree method.
The tree is intended to be used with the Walk function in order to generate The tree is intended to be used with the Walk function in order to generate
different presentations. The default Tree#String method returns a tree. different presentations. The default Tree#String method returns a tree.
*/ */
package tracing package tracing

View File

@ -50,7 +50,7 @@ func Bool(key string, val bool) Field {
} }
} }
/// Int64 adds an int64-valued key:value pair to a Span.LogFields() record // / Int64 adds an int64-valued key:value pair to a Span.LogFields() record
func Int64(key string, val int64) Field { func Int64(key string, val int64) Field {
return Field{ return Field{
key: key, key: key,

View File

@ -1,4 +1,4 @@
//Package wire is used to serialize a trace. // Package wire is used to serialize a trace.
package wire package wire
//go:generate protoc --go_out=. binary.proto //go:generate protoc --go_out=. binary.proto

View File

@ -276,6 +276,7 @@ type Field struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
FieldType FieldType `protobuf:"varint,2,opt,name=FieldType,proto3,enum=wire.FieldType" json:"FieldType,omitempty"` FieldType FieldType `protobuf:"varint,2,opt,name=FieldType,proto3,enum=wire.FieldType" json:"FieldType,omitempty"`
// Types that are assignable to Value: // Types that are assignable to Value:
//
// *Field_NumericVal // *Field_NumericVal
// *Field_StringVal // *Field_StringVal
Value isField_Value `protobuf_oneof:"value"` Value isField_Value `protobuf_oneof:"value"`

View File

@ -4,10 +4,15 @@ import (
"fmt" "fmt"
"math/rand" "math/rand"
"time" "time"
rand2 "github.com/influxdata/influxdb/v2/internal/rand"
) )
var seededRand *rand.Rand
func init() { func init() {
rand.Seed(time.Now().UnixNano()) lockedSource := rand2.NewLockedSourceFromSeed(time.Now().UnixNano())
seededRand = rand.New(lockedSource)
} }
var ( var (
@ -878,5 +883,5 @@ var (
// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random // formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random
// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` // integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3`
func GetRandomName() string { func GetRandomName() string {
return fmt.Sprintf("%s-%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) return fmt.Sprintf("%s-%s", left[seededRand.Intn(len(left))], right[seededRand.Intn(len(right))])
} }

View File

@ -1182,8 +1182,8 @@ type color struct {
} }
// TODO: // TODO:
// - verify templates are desired // - verify templates are desired
// - template colors so references can be shared // - template colors so references can be shared
type colors []*color type colors []*color
func (c colors) influxViewColors() []influxdb.ViewColor { func (c colors) influxViewColors() []influxdb.ViewColor {
@ -1218,8 +1218,9 @@ func (c colors) strings() []string {
} }
// TODO: looks like much of these are actually getting defaults in // TODO: looks like much of these are actually getting defaults in
// the UI. looking at system charts, seeing lots of failures for missing //
// color types or no colors at all. // the UI. looking at system charts, seeing lots of failures for missing
// color types or no colors at all.
func (c colors) hasTypes(types ...string) []validationErr { func (c colors) hasTypes(types ...string) []validationErr {
tMap := make(map[string]bool) tMap := make(map[string]bool)
for _, cc := range c { for _, cc := range c {

View File

@ -27,13 +27,13 @@ func SetGlobalProfiling(enabled bool) {
} }
// collectAllProfiles generates a tarball containing: // collectAllProfiles generates a tarball containing:
// - goroutine profile // - goroutine profile
// - blocking profile // - blocking profile
// - mutex profile // - mutex profile
// - heap profile // - heap profile
// - allocations profile // - allocations profile
// - (optionally) trace profile // - (optionally) trace profile
// - (optionally) CPU profile // - (optionally) CPU profile
// //
// All information is added to a tar archive and then compressed, before being // All information is added to a tar archive and then compressed, before being
// returned to the requester as an archive file. Where profiles support debug // returned to the requester as an archive file. Where profiles support debug

View File

@ -20,7 +20,7 @@ type EventRecorder struct {
// descriptive of the type of metric being recorded. Possible values may include write, query, // descriptive of the type of metric being recorded. Possible values may include write, query,
// task, dashboard, etc. // task, dashboard, etc.
// //
// The general structure of the metrics produced from the metric recorder should be // # The general structure of the metrics produced from the metric recorder should be
// //
// http_<subsystem>_request_count{org_id=<org_id>, status=<status>, endpoint=<endpoint>} ... // http_<subsystem>_request_count{org_id=<org_id>, status=<status>, endpoint=<endpoint>} ...
// http_<subsystem>_request_bytes{org_id=<org_id>, status=<status>, endpoint=<endpoint>} ... // http_<subsystem>_request_bytes{org_id=<org_id>, status=<status>, endpoint=<endpoint>} ...

View File

@ -72,9 +72,11 @@ func (e *NoContentEncoder) Encode(w io.Writer, results flux.ResultIterator) (int
// Otherwise one can decode the response body to get the error. For example: // Otherwise one can decode the response body to get the error. For example:
// ``` // ```
// _, err = csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(bytes.NewReader(res)) // _, err = csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(bytes.NewReader(res))
// if err != nil { //
// // we got some runtime error // if err != nil {
// } // // we got some runtime error
// }
//
// ``` // ```
type NoContentWithErrorDialect struct { type NoContentWithErrorDialect struct {
csv.ResultEncoderConfig csv.ResultEncoderConfig

View File

@ -656,10 +656,8 @@ func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bo
return pn, false, nil return pn, false, nil
} }
//
// Push Down of window aggregates. // Push Down of window aggregates.
// ReadRangePhys |> window |> { min, max, mean, count, sum } // ReadRangePhys |> window |> { min, max, mean, count, sum }
//
type PushDownWindowAggregateRule struct{} type PushDownWindowAggregateRule struct{}
func (PushDownWindowAggregateRule) Name() string { func (PushDownWindowAggregateRule) Name() string {
@ -1040,10 +1038,8 @@ func (p GroupWindowAggregateTransposeRule) Rewrite(ctx context.Context, pn plan.
return fnNode, true, nil return fnNode, true, nil
} }
//
// Push Down of group aggregates. // Push Down of group aggregates.
// ReadGroupPhys |> { count } // ReadGroupPhys |> { count }
//
type PushDownGroupAggregateRule struct{} type PushDownGroupAggregateRule struct{}
func (PushDownGroupAggregateRule) Name() string { func (PushDownGroupAggregateRule) Name() string {

View File

@ -6,13 +6,17 @@ import (
"sync" "sync"
"time" "time"
rand2 "github.com/influxdata/influxdb/v2/internal/rand"
platform2 "github.com/influxdata/influxdb/v2/kit/platform" platform2 "github.com/influxdata/influxdb/v2/kit/platform"
"github.com/influxdata/influxdb/v2/pkg/snowflake" "github.com/influxdata/influxdb/v2/pkg/snowflake"
) )
var seededRand *rand.Rand
func init() { func init() {
rand.Seed(time.Now().UnixNano()) lockedSource := rand2.NewLockedSourceFromSeed(time.Now().UnixNano())
SetGlobalMachineID(rand.Intn(1023)) seededRand = rand.New(lockedSource)
SetGlobalMachineID(seededRand.Intn(1023))
} }
var globalmachineID struct { var globalmachineID struct {
@ -79,7 +83,8 @@ func NewIDGenerator(opts ...IDGeneratorOp) *IDGenerator {
f(gen) f(gen)
} }
if gen.Generator == nil { if gen.Generator == nil {
gen.Generator = snowflake.New(rand.Intn(1023)) machineId := seededRand.Intn(1023)
gen.Generator = snowflake.New(machineId)
} }
return gen return gen
} }

View File

@ -200,6 +200,7 @@ type Node struct {
NodeType Node_Type `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=influxdata.platform.storage.Node_Type" json:"node_type,omitempty"` // [(gogoproto.customname) = "NodeType", (gogoproto.jsontag) = "nodeType"]; NodeType Node_Type `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=influxdata.platform.storage.Node_Type" json:"node_type,omitempty"` // [(gogoproto.customname) = "NodeType", (gogoproto.jsontag) = "nodeType"];
Children []*Node `protobuf:"bytes,2,rep,name=children,proto3" json:"children,omitempty"` Children []*Node `protobuf:"bytes,2,rep,name=children,proto3" json:"children,omitempty"`
// Types that are assignable to Value: // Types that are assignable to Value:
//
// *Node_StringValue // *Node_StringValue
// *Node_BooleanValue // *Node_BooleanValue
// *Node_IntegerValue // *Node_IntegerValue

View File

@ -1620,6 +1620,7 @@ type ReadResponse_Frame struct {
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
// Types that are assignable to Data: // Types that are assignable to Data:
//
// *ReadResponse_Frame_Group // *ReadResponse_Frame_Group
// *ReadResponse_Frame_Series // *ReadResponse_Frame_Series
// *ReadResponse_Frame_FloatPoints // *ReadResponse_Frame_FloatPoints

View File

@ -151,13 +151,13 @@ group:
group: group:
tag key : _m,tag0,tag1 tag key : _m,tag0,tag1
partition key: val11 partition key: val11
series: _m=cpu,tag0=val01,tag1=val11
series: _m=cpu,tag0=val00,tag1=val11 series: _m=cpu,tag0=val00,tag1=val11
series: _m=cpu,tag0=val01,tag1=val11
group: group:
tag key : _m,tag0,tag1 tag key : _m,tag0,tag1
partition key: val12 partition key: val12
series: _m=cpu,tag0=val01,tag1=val12
series: _m=cpu,tag0=val00,tag1=val12 series: _m=cpu,tag0=val00,tag1=val12
series: _m=cpu,tag0=val01,tag1=val12
group: group:
tag key : _m,tag0 tag key : _m,tag0
partition key: <nil> partition key: <nil>
@ -347,8 +347,8 @@ group:
exp: `group: exp: `group:
tag key : _m,tag1,tag2 tag key : _m,tag1,tag2
partition key: <nil>,val20 partition key: <nil>,val20
series: _m=mem,tag1=val11,tag2=val20
series: _m=mem,tag1=val10,tag2=val20 series: _m=mem,tag1=val10,tag2=val20
series: _m=mem,tag1=val11,tag2=val20
group: group:
tag key : _m,tag1,tag2 tag key : _m,tag1,tag2
partition key: <nil>,val21 partition key: <nil>,val21
@ -356,10 +356,10 @@ group:
group: group:
tag key : _m,tag0,tag1 tag key : _m,tag0,tag1
partition key: val00,<nil> partition key: val00,<nil>
series: _m=aaa,tag0=val00
series: _m=cpu,tag0=val00,tag1=val10 series: _m=cpu,tag0=val00,tag1=val10
series: _m=cpu,tag0=val00,tag1=val11 series: _m=cpu,tag0=val00,tag1=val11
series: _m=cpu,tag0=val00,tag1=val12 series: _m=cpu,tag0=val00,tag1=val12
series: _m=aaa,tag0=val00
group: group:
tag key : _m,tag0 tag key : _m,tag0
partition key: val01,<nil> partition key: val01,<nil>

View File

@ -146,7 +146,7 @@ func BenchmarkKeyMerger_MergeKeys(b *testing.B) {
bytes.Split([]byte("tag04,tag05"), commaB), bytes.Split([]byte("tag04,tag05"), commaB),
} }
rand.Seed(20040409) seededRand := rand.New(rand.NewSource(20040409))
tests := []int{ tests := []int{
10, 10,
@ -161,7 +161,7 @@ func BenchmarkKeyMerger_MergeKeys(b *testing.B) {
var km KeyMerger var km KeyMerger
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
for j := 0; j < n; j++ { for j := 0; j < n; j++ {
km.MergeKeys(keys[rand.Int()%len(keys)]) km.MergeKeys(keys[seededRand.Int()%len(keys)])
} }
km.Clear() km.Clear()
} }
@ -180,7 +180,7 @@ func BenchmarkKeyMerger_MergeTagKeys(b *testing.B) {
models.ParseTags([]byte("foo,tag04=v0,tag05=v0")), models.ParseTags([]byte("foo,tag04=v0,tag05=v0")),
} }
rand.Seed(20040409) seededRand := rand.New(rand.NewSource(20040409))
tests := []int{ tests := []int{
10, 10,
@ -195,7 +195,7 @@ func BenchmarkKeyMerger_MergeTagKeys(b *testing.B) {
var km KeyMerger var km KeyMerger
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
for j := 0; j < n; j++ { for j := 0; j < n; j++ {
km.MergeTagKeys(tags[rand.Int()%len(tags)]) km.MergeTagKeys(tags[seededRand.Int()%len(tags)])
} }
km.Clear() km.Clear()
} }

View File

@ -15,7 +15,6 @@ func Modulo(dividend, modulus int64) int64 {
// normalized timestamp. If it lies to the left we know it represents // normalized timestamp. If it lies to the left we know it represents
// the start time. Otherwise it represents the stop time, in which case // the start time. Otherwise it represents the stop time, in which case
// we decrement by the window period to get the start time. // we decrement by the window period to get the start time.
//
func WindowStart(t, every, offset int64) int64 { func WindowStart(t, every, offset int64) int64 {
mod := Modulo(t, every) mod := Modulo(t, every)
off := Modulo(offset, every) off := Modulo(offset, every)
@ -33,7 +32,6 @@ func WindowStart(t, every, offset int64) int64 {
// normalized timestamp. If it lies to the right we know it represents // normalized timestamp. If it lies to the right we know it represents
// the stop time. Otherwise it represents the start time, in which case // the stop time. Otherwise it represents the start time, in which case
// we increment by the window period to get the stop time. // we increment by the window period to get the stop time.
//
func WindowStop(t, every, offset int64) int64 { func WindowStop(t, every, offset int64) int64 {
mod := Modulo(t, every) mod := Modulo(t, every)
off := Modulo(offset, every) off := Modulo(offset, every)

View File

@ -42,7 +42,7 @@ const (
// Distribution is handled by hashing the TaskID (to ensure uniform distribution) and then distributing over those channels // Distribution is handled by hashing the TaskID (to ensure uniform distribution) and then distributing over those channels
// evenly based on the hashed ID. This is to ensure that all tasks of the same ID go to the same worker. // evenly based on the hashed ID. This is to ensure that all tasks of the same ID go to the same worker.
// //
//The workers call ExecutorFunc handle any errors and update the LastScheduled time internally and also via the Checkpointer. // The workers call ExecutorFunc handle any errors and update the LastScheduled time internally and also via the Checkpointer.
// //
// The main loop: // The main loop:
// //

View File

@ -662,9 +662,9 @@ from(bucket: "b")
} }
} }
//Create a new task with a Cron and Offset option // Create a new task with a Cron and Offset option
//Update the task to remove the Offset option, and change Cron to Every // Update the task to remove the Offset option, and change Cron to Every
//Retrieve the task again to ensure the options are now Every, without Cron or Offset // Retrieve the task again to ensure the options are now Every, without Cron or Offset
func testTaskOptionsUpdateFull(t *testing.T, sys *System) { func testTaskOptionsUpdateFull(t *testing.T, sys *System) {
script := `option task = {name: "task-Options-Update", cron: "* * * * *", concurrency: 100, offset: 10s} script := `option task = {name: "task-Options-Update", cron: "* * * * *", concurrency: 100, offset: 10s}

View File

@ -11,13 +11,19 @@ allow us to make changes to this service without effecting any dependant service
When a new request for the tenant service comes in it should follow this pattern: When a new request for the tenant service comes in it should follow this pattern:
1 http_server_resource - this is where the request is parsed and rejected if the client didn't send 1 http_server_resource - this is where the request is parsed and rejected if the client didn't send
the right information the right information
2 middleware_resource_auth - We now confirm the user that generated the request has sufficient permission 2 middleware_resource_auth - We now confirm the user that generated the request has sufficient permission
to accomplish this task, in some cases we adjust the request if the user is without the correct permissions to accomplish this task, in some cases we adjust the request if the user is without the correct permissions
3 middleware_resource_metrics - Track RED metrics for this request 3 middleware_resource_metrics - Track RED metrics for this request
4 middleware_resource_logging - add logging around request duration and status. 4 middleware_resource_logging - add logging around request duration and status.
5 service_resource - When a request reaches the service we verify the content for compatibility with the existing dataset, 5 service_resource - When a request reaches the service we verify the content for compatibility with the existing dataset,
for instance if a resource has a "orgID" we will ensure the organization exists for instance if a resource has a "orgID" we will ensure the organization exists
6 storage_resource - Basic CRUD actions for the system. 6 storage_resource - Basic CRUD actions for the system.
This pattern of api -> middleware -> service -> basic crud helps us to break down the responsibilities into digestible This pattern of api -> middleware -> service -> basic crud helps us to break down the responsibilities into digestible

View File

@ -333,15 +333,15 @@ func (c *Client) MustCreateDBRPMapping(t *testing.T) platform.ID {
// MustCreateResource will create a generic resource via the API. // MustCreateResource will create a generic resource via the API.
// Used in tests where the content of the resource does not matter. // Used in tests where the content of the resource does not matter.
// //
// // Create one of each org resource // // Create one of each org resource
// for _, r := range influxdb.OrgResourceTypes { // for _, r := range influxdb.OrgResourceTypes {
// client.MustCreateResource(t, r) // client.MustCreateResource(t, r)
// } // }
// //
// //
// // Create a variable: // // Create a variable:
// id := client.MustCreateResource(t, influxdb.VariablesResourceType) // id := client.MustCreateResource(t, influxdb.VariablesResourceType)
// defer client.MustDeleteResource(t, influxdb.VariablesResourceType, id) // defer client.MustDeleteResource(t, influxdb.VariablesResourceType, id)
func (c *Client) MustCreateResource(t *testing.T, r influxdb.ResourceType) platform.ID { func (c *Client) MustCreateResource(t *testing.T, r influxdb.ResourceType) platform.ID {
t.Helper() t.Helper()

View File

@ -1,7 +1,7 @@
/* /*
Package tests contains a set of integration tests, which run in-memory versions Package tests contains a set of integration tests, which run in-memory versions
of various 2.0 services. They're not intended to be full end-to-end tests, of various 2.0 services. They're not intended to be full end-to-end tests,
but are a suitable place to write tests that need to flex the logic of but are a suitable place to write tests that need to flex the logic of
multiple 2.0 components. multiple 2.0 components.
*/ */
package tests package tests

View File

@ -169,9 +169,9 @@ func TestMeasurementFieldSliceSort(t *testing.T) {
// randomize order using fixed seed to // randomize order using fixed seed to
// ensure tests are deterministic on a given platform // ensure tests are deterministic on a given platform
rand.Seed(100) seededRand := rand.New(rand.NewSource(100))
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
rand.Shuffle(len(got), func(i, j int) { seededRand.Shuffle(len(got), func(i, j int) {
got[i], got[j] = got[j], got[i] got[i], got[j] = got[j], got[i]
}) })

View File

@ -111,12 +111,12 @@ func BenchmarkDecodeFloatArrayBlock(b *testing.B) {
} }
func BenchmarkDecodeIntegerArrayBlock(b *testing.B) { func BenchmarkDecodeIntegerArrayBlock(b *testing.B) {
rle := func(i int) int64 { return int64(i) } rle := func(_ *rand.Rand, i int) int64 { return int64(i) }
s8b := func(i int) int64 { return int64(i + int(rand.Int31n(10))) } s8b := func(r *rand.Rand, i int) int64 { return int64(i + int(r.Int31n(10))) }
cases := []struct { cases := []struct {
enc string enc string
gen func(i int) int64 gen func(r *rand.Rand, i int) int64
n int n int
}{ }{
{enc: "rle", gen: rle, n: 5}, {enc: "rle", gen: rle, n: 5},
@ -130,13 +130,13 @@ func BenchmarkDecodeIntegerArrayBlock(b *testing.B) {
} }
for _, bm := range cases { for _, bm := range cases {
b.Run(fmt.Sprintf("%s_%d", bm.enc, bm.n), func(b *testing.B) { b.Run(fmt.Sprintf("%s_%d", bm.enc, bm.n), func(b *testing.B) {
rand.Seed(int64(bm.n * 1e3)) seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
valueCount := bm.n valueCount := bm.n
times := getTimes(valueCount, 60, time.Second) times := getTimes(valueCount, 60, time.Second)
values := make([]tsm1.Value, len(times)) values := make([]tsm1.Value, len(times))
for i, t := range times { for i, t := range times {
values[i] = tsm1.NewValue(t, bm.gen(i)) values[i] = tsm1.NewValue(t, bm.gen(seededRand, i))
} }
bytes, err := tsm1.Values(values).Encode(nil) bytes, err := tsm1.Values(values).Encode(nil)

View File

@ -180,12 +180,12 @@ func Test_BooleanArrayDecodeAll_Multi_Compressed(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
t.Run(fmt.Sprintf("%d_%0.2f", tc.n, tc.p), func(t *testing.T) { t.Run(fmt.Sprintf("%d_%0.2f", tc.n, tc.p), func(t *testing.T) {
rand.Seed(int64(tc.n * tc.n)) seededRand := rand.New(rand.NewSource(int64(tc.n * tc.n)))
enc := tsm1.NewBooleanEncoder(tc.n) enc := tsm1.NewBooleanEncoder(tc.n)
values := make([]bool, tc.n) values := make([]bool, tc.n)
for i := range values { for i := range values {
values[i] = rand.Float64() < tc.p values[i] = seededRand.Float64() < tc.p
enc.Write(values[i]) enc.Write(values[i])
} }

View File

@ -1092,11 +1092,11 @@ func BenchmarkIntegerArrayDecodeAllUncompressed(b *testing.B) {
} }
for _, size := range benchmarks { for _, size := range benchmarks {
rand.Seed(int64(size * 1e3)) seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
enc := NewIntegerEncoder(size) enc := NewIntegerEncoder(size)
for i := 0; i < size; i++ { for i := 0; i < size; i++ {
enc.Write(values[rand.Int()%len(values)]) enc.Write(values[seededRand.Int()%len(values)])
} }
bytes, _ := enc.Bytes() bytes, _ := enc.Bytes()
@ -1120,12 +1120,12 @@ func BenchmarkIntegerArrayDecodeAllPackedSimple(b *testing.B) {
1000, 1000,
} }
for _, size := range benchmarks { for _, size := range benchmarks {
rand.Seed(int64(size * 1e3)) seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
enc := NewIntegerEncoder(size) enc := NewIntegerEncoder(size)
for i := 0; i < size; i++ { for i := 0; i < size; i++ {
// Small amount of randomness prevents RLE from being used // Small amount of randomness prevents RLE from being used
enc.Write(int64(i) + int64(rand.Intn(10))) enc.Write(int64(i) + int64(seededRand.Intn(10)))
} }
bytes, _ := enc.Bytes() bytes, _ := enc.Bytes()
@ -1153,8 +1153,6 @@ func BenchmarkIntegerArrayDecodeAllRLE(b *testing.B) {
{1000, 0}, {1000, 0},
} }
for _, bm := range benchmarks { for _, bm := range benchmarks {
rand.Seed(int64(bm.n * 1e3))
enc := NewIntegerEncoder(bm.n) enc := NewIntegerEncoder(bm.n)
acc := int64(0) acc := int64(0)
for i := 0; i < bm.n; i++ { for i := 0; i < bm.n; i++ {

View File

@ -3,7 +3,6 @@ package tsm1
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"math/rand"
"reflect" "reflect"
"strings" "strings"
"testing" "testing"
@ -373,8 +372,6 @@ func BenchmarkStringArrayDecodeAll(b *testing.B) {
{1000, 10}, {1000, 10},
} }
for _, bm := range benchmarks { for _, bm := range benchmarks {
rand.Seed(int64(bm.n * 1e3))
s := NewStringEncoder(bm.n) s := NewStringEncoder(bm.n)
for c := 0; c < bm.n; c++ { for c := 0; c < bm.n; c++ {
s.Write(testutil.MakeSentence(bm.w)) s.Write(testutil.MakeSentence(bm.w))

View File

@ -1090,11 +1090,11 @@ func BenchmarkTimeArrayDecodeAllUncompressed(b *testing.B) {
} }
for _, size := range benchmarks { for _, size := range benchmarks {
rand.Seed(int64(size * 1e3)) seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
enc := NewTimeEncoder(size) enc := NewTimeEncoder(size)
for i := 0; i < size; i++ { for i := 0; i < size; i++ {
enc.Write(values[rand.Int()%len(values)]) enc.Write(values[seededRand.Int()%len(values)])
} }
bytes, _ := enc.Bytes() bytes, _ := enc.Bytes()
@ -1118,12 +1118,12 @@ func BenchmarkTimeArrayDecodeAllPackedSimple(b *testing.B) {
1000, 1000,
} }
for _, size := range benchmarks { for _, size := range benchmarks {
rand.Seed(int64(size * 1e3)) seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
enc := NewTimeEncoder(size) enc := NewTimeEncoder(size)
for i := 0; i < size; i++ { for i := 0; i < size; i++ {
// Small amount of randomness prevents RLE from being used // Small amount of randomness prevents RLE from being used
enc.Write(int64(i*1000) + int64(rand.Intn(10))) enc.Write(int64(i*1000) + int64(seededRand.Intn(10)))
} }
bytes, _ := enc.Bytes() bytes, _ := enc.Bytes()

View File

@ -1554,12 +1554,12 @@ func BenchmarkDecodeFloatBlock(b *testing.B) {
} }
func BenchmarkDecodeIntegerBlock(b *testing.B) { func BenchmarkDecodeIntegerBlock(b *testing.B) {
rle := func(i int) int64 { return int64(i) } rle := func(_ *rand.Rand, i int) int64 { return int64(i) }
s8b := func(i int) int64 { return int64(i + int(rand.Int31n(10))) } s8b := func(r *rand.Rand, i int) int64 { return int64(i + int(r.Int31n(10))) }
cases := []struct { cases := []struct {
enc string enc string
gen func(i int) int64 gen func(r *rand.Rand, i int) int64
n int n int
}{ }{
{enc: "rle", gen: rle, n: 5}, {enc: "rle", gen: rle, n: 5},
@ -1573,13 +1573,13 @@ func BenchmarkDecodeIntegerBlock(b *testing.B) {
} }
for _, bm := range cases { for _, bm := range cases {
b.Run(fmt.Sprintf("%s_%d", bm.enc, bm.n), func(b *testing.B) { b.Run(fmt.Sprintf("%s_%d", bm.enc, bm.n), func(b *testing.B) {
rand.Seed(int64(bm.n * 1e3)) seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
valueCount := bm.n valueCount := bm.n
times := getTimes(valueCount, 60, time.Second) times := getTimes(valueCount, 60, time.Second)
values := make([]tsm1.Value, len(times)) values := make([]tsm1.Value, len(times))
for i, t := range times { for i, t := range times {
values[i] = tsm1.NewValue(t, bm.gen(i)) values[i] = tsm1.NewValue(t, bm.gen(seededRand, i))
} }
bytes, err := tsm1.Values(values).Encode(nil) bytes, err := tsm1.Values(values).Encode(nil)

View File

@ -626,11 +626,11 @@ func BenchmarkIntegerBatch_DecodeAllUncompressed(b *testing.B) {
} }
for _, bm := range benchmarks { for _, bm := range benchmarks {
rand.Seed(int64(bm.n * 1e3)) seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
enc := NewIntegerEncoder(bm.n) enc := NewIntegerEncoder(bm.n)
for i := 0; i < bm.n; i++ { for i := 0; i < bm.n; i++ {
enc.Write(values[rand.Int()%len(values)]) enc.Write(values[seededRand.Int()%len(values)])
} }
bytes, _ := enc.Bytes() bytes, _ := enc.Bytes()
@ -662,12 +662,12 @@ func BenchmarkIntegerBatch_DecodeAllPackedSimple(b *testing.B) {
{1000}, {1000},
} }
for _, bm := range benchmarks { for _, bm := range benchmarks {
rand.Seed(int64(bm.n * 1e3)) seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
enc := NewIntegerEncoder(bm.n) enc := NewIntegerEncoder(bm.n)
for i := 0; i < bm.n; i++ { for i := 0; i < bm.n; i++ {
// Small amount of randomness prevents RLE from being used // Small amount of randomness prevents RLE from being used
enc.Write(int64(i) + int64(rand.Intn(10))) enc.Write(int64(i) + int64(seededRand.Intn(10)))
} }
bytes, _ := enc.Bytes() bytes, _ := enc.Bytes()

View File

@ -29,7 +29,6 @@ const partitions = 16
// //
// To determine the partition that a series key should be added to, the series // To determine the partition that a series key should be added to, the series
// key is hashed and the first 8 bits are used as an index to the ring. // key is hashed and the first 8 bits are used as an index to the ring.
//
type ring struct { type ring struct {
// The unique set of partitions in the ring. // The unique set of partitions in the ring.
// len(partitions) <= len(continuum) // len(partitions) <= len(continuum)
@ -40,8 +39,7 @@ type ring struct {
// power of 2, and for performance reasons should be larger than the number of // power of 2, and for performance reasons should be larger than the number of
// cores on the host. The supported set of values for n is: // cores on the host. The supported set of values for n is:
// //
// {1, 2, 4, 8, 16}. // {1, 2, 4, 8, 16}.
//
func newring(n int) (*ring, error) { func newring(n int) (*ring, error) {
if n <= 0 || n > partitions { if n <= 0 || n > partitions {
return nil, fmt.Errorf("invalid number of partitions: %d", n) return nil, fmt.Errorf("invalid number of partitions: %d", n)

View File

@ -2,7 +2,6 @@ package tsm1
import ( import (
"fmt" "fmt"
"math/rand"
"reflect" "reflect"
"testing" "testing"
"testing/quick" "testing/quick"
@ -190,8 +189,6 @@ func BenchmarkStringDecoder_DecodeAll(b *testing.B) {
{1000, 10}, {1000, 10},
} }
for _, bm := range benchmarks { for _, bm := range benchmarks {
rand.Seed(int64(bm.n * 1e3))
s := NewStringEncoder(bm.n) s := NewStringEncoder(bm.n)
for c := 0; c < bm.n; c++ { for c := 0; c < bm.n; c++ {
s.Write(testutil.MakeSentence(bm.w)) s.Write(testutil.MakeSentence(bm.w))

View File

@ -625,11 +625,11 @@ func BenchmarkTimeBatch_DecodeAllUncompressed(b *testing.B) {
} }
for _, size := range benchmarks { for _, size := range benchmarks {
rand.Seed(int64(size * 1e3)) seededRand := rand.New(rand.NewSource(int64(size * 1e3)))
enc := NewTimeEncoder(size) enc := NewTimeEncoder(size)
for i := 0; i < size; i++ { for i := 0; i < size; i++ {
enc.Write(values[rand.Int()%len(values)]) enc.Write(values[seededRand.Int()%len(values)])
} }
bytes, _ := enc.Bytes() bytes, _ := enc.Bytes()
@ -661,12 +661,12 @@ func BenchmarkTimeBatch_DecodeAllPackedSimple(b *testing.B) {
{1000}, {1000},
} }
for _, bm := range benchmarks { for _, bm := range benchmarks {
rand.Seed(int64(bm.n * 1e3)) seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3)))
enc := NewTimeEncoder(bm.n) enc := NewTimeEncoder(bm.n)
for i := 0; i < bm.n; i++ { for i := 0; i < bm.n; i++ {
// Small amount of randomness prevents RLE from being used // Small amount of randomness prevents RLE from being used
enc.Write(int64(i*1000) + int64(rand.Intn(10))) enc.Write(int64(i*1000) + int64(seededRand.Intn(10)))
} }
bytes, _ := enc.Bytes() bytes, _ := enc.Bytes()

View File

@ -11,8 +11,8 @@ import (
const MaxFieldValueLength = 1048576 const MaxFieldValueLength = 1048576
// ValidateFields will return a PartialWriteError if: // ValidateFields will return a PartialWriteError if:
// - the point has inconsistent fields, or // - the point has inconsistent fields, or
// - the point has fields that are too long // - the point has fields that are too long
func ValidateFields(mf *MeasurementFields, point models.Point, skipSizeValidation bool) error { func ValidateFields(mf *MeasurementFields, point models.Point, skipSizeValidation bool) error {
pointSize := point.StringSize() pointSize := point.StringSize()
iter := point.FieldIterator() iter := point.FieldIterator()

View File

@ -1,9 +1,8 @@
/* /*
Package tsi1 provides a memory-mapped index implementation that supports Package tsi1 provides a memory-mapped index implementation that supports
high cardinality series. high cardinality series.
Overview # Overview
The top-level object in tsi1 is the Index. It is the primary access point from The top-level object in tsi1 is the Index. It is the primary access point from
the rest of the system. The Index is composed of LogFile and IndexFile objects. the rest of the system. The Index is composed of LogFile and IndexFile objects.
@ -17,8 +16,7 @@ Index files also contain series information, however, they are highly indexed
so that reads can be performed quickly. Index files are built through a process so that reads can be performed quickly. Index files are built through a process
called compaction where a log file or multiple index files are merged together. called compaction where a log file or multiple index files are merged together.
# Operations
Operations
The index can perform many tasks related to series, measurement, & tag data. The index can perform many tasks related to series, measurement, & tag data.
All data is inserted by adding a series to the index. When adding a series, All data is inserted by adding a series to the index. When adding a series,
@ -34,8 +32,7 @@ as by measurement name, by tag value, or by using regular expressions. The
index provides an API to iterate over subsets of series and perform set index provides an API to iterate over subsets of series and perform set
operations such as unions and intersections. operations such as unions and intersections.
# Log File Layout
Log File Layout
The write-ahead file that series initially are inserted into simply appends The write-ahead file that series initially are inserted into simply appends
all new operations sequentially. It is simply composed of a series of log all new operations sequentially. It is simply composed of a series of log
@ -61,15 +58,13 @@ name, the tag set, and a checksum.
When the log file is replayed, if the checksum is incorrect or the entry is When the log file is replayed, if the checksum is incorrect or the entry is
incomplete (because of a partially failed write) then the log is truncated. incomplete (because of a partially failed write) then the log is truncated.
# Index File Layout
Index File Layout
The index file is composed of 3 main block types: one series block, one or more The index file is composed of 3 main block types: one series block, one or more
tag blocks, and one measurement block. At the end of the index file is a tag blocks, and one measurement block. At the end of the index file is a
trailer that records metadata such as the offsets to these blocks. trailer that records metadata such as the offsets to these blocks.
# Series Block Layout
Series Block Layout
The series block stores raw series keys in sorted order. It also provides hash The series block stores raw series keys in sorted order. It also provides hash
indexes so that series can be looked up quickly. Hash indexes are inserted indexes so that series can be looked up quickly. Hash indexes are inserted
@ -111,8 +106,7 @@ a trailer which contains metadata about the block.
# Tag Block Layout
Tag Block Layout
After the series block is one or more tag blocks. One of these blocks exists After the series block is one or more tag blocks. One of these blocks exists
for every measurement in the index file. The block is structured as a sorted for every measurement in the index file. The block is structured as a sorted
@ -159,8 +153,7 @@ that value. Series iterators can be built around a single tag key value or
multiple iterators can be merged with set operators such as union or multiple iterators can be merged with set operators such as union or
intersection. intersection.
# Measurement block
Measurement block
The measurement block stores a sorted list of measurements, their associated The measurement block stores a sorted list of measurements, their associated
series offsets, and the offset to their tag block. This allows all series for series offsets, and the offset to their tag block. This allows all series for
@ -188,8 +181,7 @@ measurements.
# Manifest file
Manifest file
The index is simply an ordered set of log and index files. These files can be The index is simply an ordered set of log and index files. These files can be
merged together or rewritten but their order must always be the same. This is merged together or rewritten but their order must always be the same. This is
@ -200,8 +192,7 @@ Whenever the set of active files is changed, a manifest file is written to
track the set. The manifest specifies the ordering of files and, on startup, track the set. The manifest specifies the ordering of files and, on startup,
all files not in the manifest are removed from the index directory. all files not in the manifest are removed from the index directory.
# Compacting index files
Compacting index files
Compaction is the process of taking files and merging them together into a Compaction is the process of taking files and merging them together into a
single file. There are two stages of compaction within TSI. single file. There are two stages of compaction within TSI.
@ -216,8 +207,7 @@ they are all merged together into a single index file and the old files are
discarded. Because all blocks are written in sorted order, the new index file discarded. Because all blocks are written in sorted order, the new index file
can be streamed and minimize memory use. can be streamed and minimize memory use.
# Concurrency
Concurrency
Index files are immutable so they do not require fine grained locks, however, Index files are immutable so they do not require fine grained locks, however,
compactions require that we track which files are in use so they are not compactions require that we track which files are in use so they are not
@ -232,7 +222,5 @@ returns to zero.
Besides the reference counting, there are no other locking mechanisms when Besides the reference counting, there are no other locking mechanisms when
reading or writing index files. Log files, however, do require a lock whenever reading or writing index files. Log files, however, do require a lock whenever
they are accessed. This is another reason to minimize log file size. they are accessed. This is another reason to minimize log file size.
*/ */
package tsi1 package tsi1

View File

@ -55,7 +55,6 @@ func init() {
// //
// NOTE: Currently, this must not be change once a database is created. Further, // NOTE: Currently, this must not be change once a database is created. Further,
// it must also be a power of 2. // it must also be a power of 2.
//
var DefaultPartitionN uint64 = 8 var DefaultPartitionN uint64 = 8
// An IndexOption is a functional option for changing the configuration of // An IndexOption is a functional option for changing the configuration of

View File

@ -123,9 +123,9 @@ func TestLogFile_SeriesStoredInOrder(t *testing.T) {
// Generate and add test data // Generate and add test data
tvm := make(map[string]struct{}) tvm := make(map[string]struct{})
rand.Seed(time.Now().Unix()) seededRand := rand.New(rand.NewSource(time.Now().Unix()))
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
tv := fmt.Sprintf("server-%d", rand.Intn(50)) // Encourage adding duplicate series. tv := fmt.Sprintf("server-%d", seededRand.Intn(50)) // Encourage adding duplicate series.
tvm[tv] = struct{}{} tvm[tv] = struct{}{}
if _, err := f.AddSeriesList(seriesSet, [][]byte{ if _, err := f.AddSeriesList(seriesSet, [][]byte{

View File

@ -162,8 +162,6 @@ var set *SeriesIDSet
// BenchmarkSeriesIDSet_Add/10-4 5000000 348 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/10-4 5000000 348 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Add/100-4 5000000 373 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/100-4 5000000 373 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Add/1000-4 5000000 342 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/1000-4 5000000 342 ns/op 0 B/op 0 allocs/op
//
//
func BenchmarkSeriesIDSet_AddMore(b *testing.B) { func BenchmarkSeriesIDSet_AddMore(b *testing.B) {
cardinalities := []uint64{1, 2, 10, 100, 1000, 10000, 100000, 1000000, 10000000} cardinalities := []uint64{1, 2, 10, 100, 1000, 10000, 100000, 1000000, 10000000}
@ -202,7 +200,6 @@ func BenchmarkSeriesIDSet_AddMore(b *testing.B) {
// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_global_lock-8 2000000 914 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_global_lock-8 2000000 914 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/same_multi_lock-8 30000000 39.7 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/same_multi_lock-8 30000000 39.7 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_multi_lock-8 1000000 1002 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_multi_lock-8 1000000 1002 ns/op 0 B/op 0 allocs/op
//
func BenchmarkSeriesIDSet_Add(b *testing.B) { func BenchmarkSeriesIDSet_Add(b *testing.B) {
// Setup... // Setup...
set = NewSeriesIDSet() set = NewSeriesIDSet()
@ -523,7 +520,6 @@ func BenchmarkSeriesIDSet_AddMany(b *testing.B) {
// BenchmarkSeriesIDSet_Remove/cardinality_1000000_remove_same-4 20000000 99.1 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Remove/cardinality_1000000_remove_same-4 20000000 99.1 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_global_lock-4 20000000 57.7 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_global_lock-4 20000000 57.7 ns/op 0 B/op 0 allocs/op
// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_multi_lock-4 20000000 80.1 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_multi_lock-4 20000000 80.1 ns/op 0 B/op 0 allocs/op
//
func BenchmarkSeriesIDSet_Remove(b *testing.B) { func BenchmarkSeriesIDSet_Remove(b *testing.B) {
// Setup... // Setup...
set = NewSeriesIDSet() set = NewSeriesIDSet()

View File

@ -190,7 +190,7 @@ func (s *Shard) SetEnabled(enabled bool) {
s.mu.Unlock() s.mu.Unlock()
} }
//! setEnabledNoLock performs actual work of SetEnabled. Must hold s.mu before calling. // ! setEnabledNoLock performs actual work of SetEnabled. Must hold s.mu before calling.
func (s *Shard) setEnabledNoLock(enabled bool) { func (s *Shard) setEnabledNoLock(enabled bool) {
// Prevent writes and queries // Prevent writes and queries
s.enabled = enabled s.enabled = enabled

View File

@ -26,12 +26,12 @@ func (s *tenantService) FindUser(ctx context.Context, filter influxdb.UserFilter
return s.FindUserFn(ctx, filter) return s.FindUserFn(ctx, filter)
} }
//FindOrganizationByID calls FindOrganizationByIDF. // FindOrganizationByID calls FindOrganizationByIDF.
func (s *tenantService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { func (s *tenantService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) {
return s.FindOrganizationByIDF(ctx, id) return s.FindOrganizationByIDF(ctx, id)
} }
//FindOrganization calls FindOrganizationF. // FindOrganization calls FindOrganizationF.
func (s *tenantService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { func (s *tenantService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) {
return s.FindOrganizationF(ctx, filter) return s.FindOrganizationF(ctx, filter)
} }

View File

@ -293,8 +293,8 @@ func (l sgList) Covers(t time.Time) bool {
// to start time. Therefore, if there are multiple shard groups that match // to start time. Therefore, if there are multiple shard groups that match
// this point's time they will be preferred in this order: // this point's time they will be preferred in this order:
// //
// - a shard group with the earliest end time; // - a shard group with the earliest end time;
// - (assuming identical end times) the shard group with the earliest start time. // - (assuming identical end times) the shard group with the earliest start time.
func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo { func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo {
if l.items.Len() == 0 { if l.items.Len() == 0 {
return nil return nil

View File

@ -197,7 +197,6 @@ func (c *Client) CreateDatabase(name string) (*DatabaseInfo, error) {
// This call is only idempotent when the caller provides the exact same // This call is only idempotent when the caller provides the exact same
// retention policy, and that retention policy is already the default for the // retention policy, and that retention policy is already the default for the
// database. // database.
//
func (c *Client) CreateDatabaseWithRetentionPolicy(name string, spec *RetentionPolicySpec) (*DatabaseInfo, error) { func (c *Client) CreateDatabaseWithRetentionPolicy(name string, spec *RetentionPolicySpec) (*DatabaseInfo, error) {
if spec == nil { if spec == nil {
return nil, errors.New("CreateDatabaseWithRetentionPolicy called with nil spec") return nil, errors.New("CreateDatabaseWithRetentionPolicy called with nil spec")

View File

@ -13,8 +13,10 @@ import (
"github.com/influxdata/influxql" "github.com/influxdata/influxql"
) )
var seededRand *rand.Rand
func init() { func init() {
rand.Seed(time.Now().UnixNano()) seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))
} }
func Test_Data_DropDatabase(t *testing.T) { func Test_Data_DropDatabase(t *testing.T) {
@ -469,7 +471,7 @@ func randString(n int) string {
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]rune, n) b := make([]rune, n)
for i := range b { for i := range b {
b[i] = letters[rand.Intn(len(letters))] b[i] = letters[seededRand.Intn(len(letters))]
} }
return string(b) return string(b)
} }