From 3e4c4028e6f758d446a3b161160cbe80b03e29ed Mon Sep 17 00:00:00 2001 From: Roger Peppe Date: Thu, 10 Sep 2020 13:37:16 +0100 Subject: [PATCH 01/34] fix: http: add required name to LabelCreateRequest The label creation operation always requires a name, so make the OpenAPI specification reflect that. --- http/swagger.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/http/swagger.yml b/http/swagger.yml index 2c39a59879..28bb533619 100644 --- a/http/swagger.yml +++ b/http/swagger.yml @@ -7022,15 +7022,15 @@ components: maxLength: 1 minLength: 1 annotations: - description: Https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns + description: https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns type: array + uniqueItems: true items: type: string enum: - "group" - "datatype" - "default" - uniqueItems: true commentPrefix: description: Character prefixed to comment strings type: string @@ -7126,7 +7126,7 @@ components: description: ID of org that authorization is scoped to. permissions: type: array - minLength: 1 + minItems: 1 description: List of permissions for an auth. An auth must have at least one Permission. items: $ref: "#/components/schemas/Permission" @@ -10852,7 +10852,7 @@ components: example: { "color": "ffb3b3", "description": "this is a description" } LabelCreateRequest: type: object - required: [orgID] + required: [orgID, name] properties: orgID: type: string From 48f72e0dfd202e7c80e1331e2c22b611fd7dd3fc Mon Sep 17 00:00:00 2001 From: Yoofi Quansah Date: Thu, 10 Sep 2020 12:35:25 -0700 Subject: [PATCH 02/34] chore: remove hardcoded constants for ids --- authorizer/authorize.go | 5 ----- bucket.go | 5 ----- kv/bucket.go | 4 ---- mock/bucket_service.go | 2 +- task/backend/analytical_storage_test.go | 2 +- tenant/storage_bucket.go | 2 -- 6 files changed, 2 insertions(+), 18 deletions(-) diff --git a/authorizer/authorize.go b/authorizer/authorize.go index 044db92bc9..ab96810109 100644 --- a/authorizer/authorize.go +++ b/authorizer/authorize.go @@ -90,11 +90,6 @@ func authorize(ctx context.Context, a influxdb.Action, rt influxdb.ResourceType, } func authorizeReadSystemBucket(ctx context.Context, bid, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) { - // HACK: remove once system buckets are migrated away from hard coded values - if !oid.Valid() && (bid == influxdb.TasksSystemBucketID || bid == influxdb.MonitoringSystemBucketID) { - a, _ := icontext.GetAuthorizer(ctx) - return a, influxdb.Permission{}, nil - } return AuthorizeReadOrg(ctx, oid) } diff --git a/bucket.go b/bucket.go index 2eab49672c..0d092e214a 100644 --- a/bucket.go +++ b/bucket.go @@ -8,11 +8,6 @@ import ( ) const ( - // TasksSystemBucketID is the fixed ID for our tasks system bucket - TasksSystemBucketID = ID(10) - // MonitoringSystemBucketID is the fixed ID for our monitoring system bucket - MonitoringSystemBucketID = ID(11) - // BucketTypeUser is a user created bucket BucketTypeUser = BucketType(0) // BucketTypeSystem is an internally created bucket that cannot be deleted/renamed. 
diff --git a/kv/bucket.go b/kv/bucket.go index e652672bf1..477d9c9eb1 100644 --- a/kv/bucket.go +++ b/kv/bucket.go @@ -182,7 +182,6 @@ func (s *Service) findBucketByName(ctx context.Context, tx Tx, orgID influxdb.ID switch n { case influxdb.TasksSystemBucketName: return &influxdb.Bucket{ - ID: influxdb.TasksSystemBucketID, Type: influxdb.BucketTypeSystem, Name: influxdb.TasksSystemBucketName, RetentionPeriod: influxdb.TasksSystemBucketRetention, @@ -191,7 +190,6 @@ func (s *Service) findBucketByName(ctx context.Context, tx Tx, orgID influxdb.ID }, nil case influxdb.MonitoringSystemBucketName: return &influxdb.Bucket{ - ID: influxdb.MonitoringSystemBucketID, Type: influxdb.BucketTypeSystem, Name: influxdb.MonitoringSystemBucketName, RetentionPeriod: influxdb.MonitoringSystemBucketRetention, @@ -361,7 +359,6 @@ func (s *Service) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, if needsSystemBuckets { tb := &influxdb.Bucket{ - ID: influxdb.TasksSystemBucketID, Type: influxdb.BucketTypeSystem, Name: influxdb.TasksSystemBucketName, RetentionPeriod: influxdb.TasksSystemBucketRetention, @@ -371,7 +368,6 @@ func (s *Service) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, bs = append(bs, tb) mb := &influxdb.Bucket{ - ID: influxdb.MonitoringSystemBucketID, Type: influxdb.BucketTypeSystem, Name: influxdb.MonitoringSystemBucketName, RetentionPeriod: influxdb.MonitoringSystemBucketRetention, diff --git a/mock/bucket_service.go b/mock/bucket_service.go index 3de766e70d..f2b080237c 100644 --- a/mock/bucket_service.go +++ b/mock/bucket_service.go @@ -40,7 +40,7 @@ func NewBucketService() *BucketService { FindBucketByIDFn: func(context.Context, platform.ID) (*platform.Bucket, error) { return nil, nil }, FindBucketByNameFn: func(context.Context, platform.ID, string) (*platform.Bucket, error) { return &platform.Bucket{ - ID: platform.TasksSystemBucketID, + ID: platform.ID(10), Type: platform.BucketTypeSystem, Name: "_tasks", RetentionPeriod: time.Hour * 24 * 3, diff --git a/task/backend/analytical_storage_test.go b/task/backend/analytical_storage_test.go index d8bb789da2..89e76ab2b4 100644 --- a/task/backend/analytical_storage_test.go +++ b/task/backend/analytical_storage_test.go @@ -102,7 +102,7 @@ func TestDeduplicateRuns(t *testing.T) { metaClient := meta.NewClient(meta.NewConfig(), store) require.NoError(t, metaClient.Open()) - _, err := metaClient.CreateDatabase(influxdb.TasksSystemBucketID.String()) + _, err := metaClient.CreateDatabase(influxdb.ID(10).String()) require.NoError(t, err) ab := newAnalyticalBackend(t, ts.OrganizationService, ts.BucketService, metaClient) diff --git a/tenant/storage_bucket.go b/tenant/storage_bucket.go index 7bd9d3dacd..9ef0a4a1a4 100644 --- a/tenant/storage_bucket.go +++ b/tenant/storage_bucket.go @@ -120,7 +120,6 @@ func (s *Store) GetBucketByName(ctx context.Context, tx kv.Tx, orgID influxdb.ID switch n { case influxdb.TasksSystemBucketName: return &influxdb.Bucket{ - ID: influxdb.TasksSystemBucketID, Type: influxdb.BucketTypeSystem, Name: influxdb.TasksSystemBucketName, RetentionPeriod: influxdb.TasksSystemBucketRetention, @@ -129,7 +128,6 @@ func (s *Store) GetBucketByName(ctx context.Context, tx kv.Tx, orgID influxdb.ID }, nil case influxdb.MonitoringSystemBucketName: return &influxdb.Bucket{ - ID: influxdb.MonitoringSystemBucketID, Type: influxdb.BucketTypeSystem, Name: influxdb.MonitoringSystemBucketName, RetentionPeriod: influxdb.MonitoringSystemBucketRetention, From 2099b6457852fa5d792df4fa6f058a3882f4c15d Mon Sep 17 00:00:00 2001 
From: Yoofi Quansah Date: Fri, 11 Sep 2020 12:29:00 -0700 Subject: [PATCH 03/34] chore: total removal of references to hardcoded IDs --- kv/bucket.go | 61 +++++----------------------------------- tenant/storage_bucket.go | 21 +------------- 2 files changed, 8 insertions(+), 74 deletions(-) diff --git a/kv/bucket.go b/kv/bucket.go index 477d9c9eb1..cac85cbb84 100644 --- a/kv/bucket.go +++ b/kv/bucket.go @@ -179,28 +179,9 @@ func (s *Service) findBucketByName(ctx context.Context, tx Tx, orgID influxdb.ID buf, err := idx.Get(key) if IsNotFound(err) { - switch n { - case influxdb.TasksSystemBucketName: - return &influxdb.Bucket{ - Type: influxdb.BucketTypeSystem, - Name: influxdb.TasksSystemBucketName, - RetentionPeriod: influxdb.TasksSystemBucketRetention, - Description: "System bucket for task logs", - OrgID: orgID, - }, nil - case influxdb.MonitoringSystemBucketName: - return &influxdb.Bucket{ - Type: influxdb.BucketTypeSystem, - Name: influxdb.MonitoringSystemBucketName, - RetentionPeriod: influxdb.MonitoringSystemBucketRetention, - Description: "System bucket for monitoring logs", - OrgID: orgID, - }, nil - default: - return nil, &influxdb.Error{ - Code: influxdb.ENotFound, - Msg: fmt.Sprintf("bucket %q not found", n), - } + return nil, &influxdb.Error{ + Code: influxdb.ENotFound, + Msg: fmt.Sprintf("bucket %q not found", n), } } @@ -341,6 +322,10 @@ func (s *Service) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, return nil }) + if err != nil { + return nil, 0, err + } + // Don't append system buckets if Name is set. Users who don't have real // system buckets won't get mocked buckets if they query for a bucket by name // without the orgID, but this is a vanishing small number of users and has @@ -349,38 +334,6 @@ func (s *Service) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, return bs, len(bs), nil } - needsSystemBuckets := true - for _, b := range bs { - if b.Type == influxdb.BucketTypeSystem { - needsSystemBuckets = false - break - } - } - - if needsSystemBuckets { - tb := &influxdb.Bucket{ - Type: influxdb.BucketTypeSystem, - Name: influxdb.TasksSystemBucketName, - RetentionPeriod: influxdb.TasksSystemBucketRetention, - Description: "System bucket for task logs", - } - - bs = append(bs, tb) - - mb := &influxdb.Bucket{ - Type: influxdb.BucketTypeSystem, - Name: influxdb.MonitoringSystemBucketName, - RetentionPeriod: influxdb.MonitoringSystemBucketRetention, - Description: "System bucket for monitoring logs", - } - - bs = append(bs, mb) - } - - if err != nil { - return nil, 0, err - } - return bs, len(bs), nil } diff --git a/tenant/storage_bucket.go b/tenant/storage_bucket.go index 9ef0a4a1a4..1acdb6933c 100644 --- a/tenant/storage_bucket.go +++ b/tenant/storage_bucket.go @@ -117,26 +117,7 @@ func (s *Store) GetBucketByName(ctx context.Context, tx kv.Tx, orgID influxdb.ID // allow for hard coded bucket names that dont exist in the system if kv.IsNotFound(err) { - switch n { - case influxdb.TasksSystemBucketName: - return &influxdb.Bucket{ - Type: influxdb.BucketTypeSystem, - Name: influxdb.TasksSystemBucketName, - RetentionPeriod: influxdb.TasksSystemBucketRetention, - Description: "System bucket for task logs", - OrgID: orgID, - }, nil - case influxdb.MonitoringSystemBucketName: - return &influxdb.Bucket{ - Type: influxdb.BucketTypeSystem, - Name: influxdb.MonitoringSystemBucketName, - RetentionPeriod: influxdb.MonitoringSystemBucketRetention, - Description: "System bucket for monitoring logs", - OrgID: orgID, - }, nil - default: - return nil, 
ErrBucketNotFoundByName(n) - } + return nil, ErrBucketNotFoundByName(n) } if err != nil { From 238ba1990ea68eb64bdfc6b31efebad2acb01ad6 Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Mon, 31 Aug 2020 11:46:51 +0200 Subject: [PATCH 04/34] fix(pkg/csv2lp): don't allow duplicate tags #19453 --- pkg/csv2lp/csv_table.go | 15 ++++++++++++--- pkg/csv2lp/csv_table_test.go | 9 +++++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/pkg/csv2lp/csv_table.go b/pkg/csv2lp/csv_table.go index 56d7579c5a..9c98fd2574 100644 --- a/pkg/csv2lp/csv_table.go +++ b/pkg/csv2lp/csv_table.go @@ -328,6 +328,8 @@ func (t *CsvTable) recomputeLineProtocolColumns() { t.cachedFieldValue = nil t.cachedTags = nil t.cachedFields = nil + // collect unique tag names (#19453) + var tags = make(map[string]*CsvTableColumn) // having a _field column indicates fields without a line type are ignored defaultIsField := t.Column(labelFieldName) == nil @@ -353,8 +355,11 @@ func (t *CsvTable) recomputeLineProtocolColumns() { case col.Label == labelFieldValue: t.cachedFieldValue = col case col.LinePart == linePartTag: + if val, found := tags[col.Label]; found { + log.Printf("WARNING: ignoring duplicate tag '%s' at column index %d, using column at index %d\n", col.Label, val.Index, col.Index) + } col.escapedLabel = escapeTag(col.Label) - t.cachedTags = append(t.cachedTags, col) + tags[col.Label] = col case col.LinePart == linePartField: col.escapedLabel = escapeTag(col.Label) t.cachedFields = append(t.cachedFields, col) @@ -365,8 +370,12 @@ func (t *CsvTable) recomputeLineProtocolColumns() { } } } - // line protocol requires sorted tags - if t.cachedTags != nil && len(t.cachedTags) > 0 { + // line protocol requires sorted unique tags + if len(tags) > 0 { + t.cachedTags = make([]*CsvTableColumn, 0, len(tags)) + for _, v := range tags { + t.cachedTags = append(t.cachedTags, v) + } sort.Slice(t.cachedTags, func(i, j int) bool { return t.cachedTags[i].Label < t.cachedTags[j].Label }) diff --git a/pkg/csv2lp/csv_table_test.go b/pkg/csv2lp/csv_table_test.go index 08df866c30..70d93f47e1 100644 --- a/pkg/csv2lp/csv_table_test.go +++ b/pkg/csv2lp/csv_table_test.go @@ -255,6 +255,15 @@ func Test_CsvTableProcessing(t *testing.T) { "#default cpu,yes,0,1\n#datatype ,tag,,\n_measurement,test,col1,_time\n,,,", "cpu,test=yes col1=0 1", }, + { + "no duplicate tags", // duplicate tags are ignored, the last column wins, https://github.com/influxdata/influxdb/issues/19453 + "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string,string,string,string,string,string,string,string\n" + + "#group,true,true,false,false,false,false,true,true,true,true,true,true,true,true,true,true\n" + + "#default,_result,,,,,,,,,,,,,,,\n" + + ",result,table,_start,_stop,_time,_value,_field,_measurement,env,host,hostname,nodename,org,result,table,url\n" + + ",,0,2020-08-26T23:10:54.023607624Z,2020-08-26T23:15:54.023607624Z,2020-08-26T23:11:00Z,0,0.001,something,host,pod,node,host,,success,role,http://127.0.0.1:8099/metrics\n", + "something,env=host,host=pod,hostname=node,nodename=host,result=success,table=role,url=http://127.0.0.1:8099/metrics 0.001=0 1598483460000000000", + }, } for _, test := range tests { From e749b8cedf957349988c767763408cb862a41b2b Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Sat, 12 Sep 2020 10:36:54 +0200 Subject: [PATCH 05/34] chore: update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
4a3c503d66..8ae7c66e08 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -37,7 +37,8 @@ need to update any InfluxDB CLI config profiles with the new port number.
 ### Bug Fixes
 
 1. [19331](https://github.com/influxdata/influxdb/pull/19331): Add description to auth influx command outputs.
-1. [19392](https://github.com/influxdata/influxdb/pull/19392) Include the edge of the boundary we are observing.
+1. [19392](https://github.com/influxdata/influxdb/pull/19392): Include the edge of the boundary we are observing.
+1. [19453](https://github.com/influxdata/influxdb/pull/19453): Warn about duplicate tag names during influx write csv.
 
 ## v2.0.0-beta.16 [2020-08-07]
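The duplicate-tag handling from PATCH 04 can be tried end to end through the csv2lp package. A minimal sketch, not part of the patches themselves: the v2 module import path, the `label|datatype` column syntax, and the exact output are assumptions based on the tests above.

    package main

    import (
    	"io"
    	"os"
    	"strings"

    	"github.com/influxdata/influxdb/v2/pkg/csv2lp"
    )

    func main() {
    	// The 'host' tag is annotated twice; the converter keeps the last
    	// column (value "b") and logs a warning instead of emitting a line
    	// with a duplicate tag.
    	data := "m|measurement,host|tag,host|tag,f|long\n" +
    		"cpu,a,b,1\n"
    	converter := csv2lp.CsvToLineProtocol(strings.NewReader(data))
    	io.Copy(os.Stdout, converter) // should print: cpu,host=b f=1i
    }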
From 001343d958de265be936f4cd4e97343e598ec2f8 Mon Sep 17 00:00:00 2001
From: Pavel Zavora
Date: Mon, 29 Jun 2020 17:04:38 +0200
Subject: [PATCH 06/34] feat(pkg/csv2lp): log a warning when losing precision #18744

---
 pkg/csv2lp/csv2lp.go               | 18 +++++++++++--
 pkg/csv2lp/csv2lp_test.go          |  4 +++
 pkg/csv2lp/csv_table.go            | 10 +++----
 pkg/csv2lp/data_conversion.go      | 32 +++++++++++++---------
 pkg/csv2lp/data_conversion_test.go | 43 +++++++++++++++++++++---------
 5 files changed, 75 insertions(+), 32 deletions(-)

diff --git a/pkg/csv2lp/csv2lp.go b/pkg/csv2lp/csv2lp.go
index 25bf3f7c9a..12030d53ac 100644
--- a/pkg/csv2lp/csv2lp.go
+++ b/pkg/csv2lp/csv2lp.go
@@ -17,7 +17,21 @@ type CsvLineError struct {
 }
 
 func (e CsvLineError) Error() string {
-	return fmt.Sprintf("line %d: %v", e.Line, e.Err)
+	if e.Line > 0 {
+		return fmt.Sprintf("line %d: %v", e.Line, e.Err)
+	}
+	return fmt.Sprintf("%v", e.Err)
+}
+
+// CreateRowColumnError creates adds row number and column name to the error supplied
+func CreateRowColumnError(line int, columnLabel string, err error) CsvLineError {
+	return CsvLineError{
+		Line: line,
+		Err: CsvColumnError{
+			Column: columnLabel,
+			Err:    err,
+		},
+	}
 }
 
 // CsvToLineReader represents state of transformation from csv data to line protocol reader
@@ -98,7 +112,7 @@ func (state *CsvToLineReader) Read(p []byte) (n int, err error) {
 		if state.Table.AddRow(row) {
 			var err error
 			state.lineBuffer = state.lineBuffer[:0] // reuse line buffer
-			state.lineBuffer, err = state.Table.AppendLine(state.lineBuffer, row)
+			state.lineBuffer, err = state.Table.AppendLine(state.lineBuffer, row, state.LineNumber)
 			if !state.dataRowAdded && state.logTableDataColumns {
 				log.Println(state.Table.DataColumnsInfo())
 			}
diff --git a/pkg/csv2lp/csv2lp_test.go b/pkg/csv2lp/csv2lp_test.go
index 01d5885367..cf7b3f9d65 100644
--- a/pkg/csv2lp/csv2lp_test.go
+++ b/pkg/csv2lp/csv2lp_test.go
@@ -218,6 +218,10 @@ func Test_CsvLineError(t *testing.T) {
 			CsvLineError{Line: 2, Err: CsvColumnError{"a", errors.New("cause")}},
 			"line 2: column 'a': cause",
 		},
+		{
+			CsvLineError{Line: -1, Err: CsvColumnError{"a", errors.New("cause")}},
+			"column 'a': cause",
+		},
 	}
 	for _, test := range tests {
 		require.Equal(t, test.value, test.err.Error())
diff --git a/pkg/csv2lp/csv_table.go b/pkg/csv2lp/csv_table.go
index 9c98fd2574..4e791cdbbb 100644
--- a/pkg/csv2lp/csv_table.go
+++ b/pkg/csv2lp/csv_table.go
@@ -391,7 +391,7 @@ func (t *CsvTable) recomputeLineProtocolColumns() {
 // CreateLine produces a protocol line out of the supplied row or returns error
 func (t *CsvTable) CreateLine(row []string) (line string, err error) {
 	buffer := make([]byte, 100)[:0]
-	buffer, err = t.AppendLine(buffer, row)
+	buffer, err = t.AppendLine(buffer, row, -1)
 	if err != nil {
 		return "", err
 	}
 	return string(buffer), nil
 }
 
 // AppendLine appends a protocol line to the supplied buffer using a CSV row and returns appended buffer or an error if any
-func (t *CsvTable) AppendLine(buffer []byte, row []string) ([]byte, error) {
+func (t *CsvTable) AppendLine(buffer []byte, row []string, lineNumber int) ([]byte, error) {
 	if t.computeLineProtocolColumns() {
 		// validate column data types
 		if t.cachedFieldValue != nil && !IsTypeSupported(t.cachedFieldValue.DataType) {
@@ -447,7 +447,7 @@ func (t *CsvTable) AppendLine(buffer []byte, row []string) ([]byte, error) {
 			buffer = append(buffer, escapeTag(field)...)
 			buffer = append(buffer, '=')
 			var err error
-			buffer, err = appendConverted(buffer, value, t.cachedFieldValue)
+			buffer, err = appendConverted(buffer, value, t.cachedFieldValue, lineNumber)
 			if err != nil {
 				return buffer, CsvColumnError{
 					t.cachedFieldName.Label,
@@ -468,7 +468,7 @@ func (t *CsvTable) AppendLine(buffer []byte, row []string) ([]byte, error) {
 			buffer = append(buffer, field.LineLabel()...)
 			buffer = append(buffer, '=')
 			var err error
-			buffer, err = appendConverted(buffer, value, field)
+			buffer, err = appendConverted(buffer, value, field, lineNumber)
 			if err != nil {
 				return buffer, CsvColumnError{
 					field.Label,
@@ -491,7 +491,7 @@ func (t *CsvTable) AppendLine(buffer []byte, row []string) ([]byte, error) {
 	}
 	buffer = append(buffer, ' ')
 	var err error
-	buffer, err = appendConverted(buffer, timeVal, t.cachedTime)
+	buffer, err = appendConverted(buffer, timeVal, t.cachedTime, lineNumber)
 	if err != nil {
 		return buffer, CsvColumnError{
 			t.cachedTime.Label,
diff --git a/pkg/csv2lp/data_conversion.go b/pkg/csv2lp/data_conversion.go
index d11f8e4b56..74d3942052 100644
--- a/pkg/csv2lp/data_conversion.go
+++ b/pkg/csv2lp/data_conversion.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log"
 	"math"
 	"strconv"
 	"strings"
@@ -82,16 +83,17 @@ func escapeString(val string) string {
 	return val
 }
 
-// normalizeNumberString normalizes the supplied value with the help of the format supplied.
+// normalizeNumberString normalizes the supplied value according to DataForm of the supplied column.
 // This normalization is intended to convert number strings of different locales to a strconv-parseable value.
 //
 // The format's first character is a fraction delimiter character. Next characters in the format
 // are simply removed, they are typically used to visually separate groups in large numbers.
-// The removeFaction parameter controls whether the returned value can contain also the fraction part.
+// The removeFraction parameter controls whether the returned value can contain also the fraction part.
 //
 // For example, to get a strconv-parseable float from a Spanish value '3.494.826.157,123', use format ",." .
-func normalizeNumberString(value string, format string, removeFraction bool) string {
-	if len(format) == 0 {
+func normalizeNumberString(value string, column *CsvTableColumn, removeFraction bool, lineNumber int) string {
+	format := column.DataFormat
+	if format == "" {
 		format = ". 
\n\t\r_" } if strings.ContainsAny(value, format) { @@ -110,12 +112,16 @@ func normalizeNumberString(value string, format string, removeFraction bool) str } if c == fractionRune { if removeFraction { - break ForAllCharacters + // warn about lost precision + truncatedValue := retVal.String() + warning := fmt.Errorf("'%s' truncated to '%s' to fit into '%s' data type", value, truncatedValue, column.DataType) + log.Printf("WARNING: %v\n", CreateRowColumnError(lineNumber, column.Label, warning)) + return truncatedValue } retVal.WriteByte('.') - } else { - retVal.WriteRune(c) + continue } + retVal.WriteRune(c) } return retVal.String() @@ -123,7 +129,7 @@ func normalizeNumberString(value string, format string, removeFraction bool) str return value } -func toTypedValue(val string, column *CsvTableColumn) (interface{}, error) { +func toTypedValue(val string, column *CsvTableColumn, lineNumber int) (interface{}, error) { dataType := column.DataType dataFormat := column.DataFormat if column.ParseF != nil { @@ -159,7 +165,7 @@ func toTypedValue(val string, column *CsvTableColumn) (interface{}, error) { case durationDatatype: return time.ParseDuration(val) case doubleDatatype: - return strconv.ParseFloat(normalizeNumberString(val, dataFormat, false), 64) + return strconv.ParseFloat(normalizeNumberString(val, column, false, lineNumber), 64) case boolDatatype: switch { case len(val) == 0: @@ -172,9 +178,9 @@ func toTypedValue(val string, column *CsvTableColumn) (interface{}, error) { return nil, errors.New("Unsupported boolean value '" + val + "' , first character is expected to be 't','f','0','1','y','n'") } case longDatatype: - return strconv.ParseInt(normalizeNumberString(val, dataFormat, true), 10, 64) + return strconv.ParseInt(normalizeNumberString(val, column, true, lineNumber), 10, 64) case uLongDatatype: - return strconv.ParseUint(normalizeNumberString(val, dataFormat, true), 10, 64) + return strconv.ParseUint(normalizeNumberString(val, column, true, lineNumber), 10, 64) case base64BinaryDataType: return base64.StdEncoding.DecodeString(val) default: @@ -230,11 +236,11 @@ func appendProtocolValue(buffer []byte, value interface{}) ([]byte, error) { } } -func appendConverted(buffer []byte, val string, column *CsvTableColumn) ([]byte, error) { +func appendConverted(buffer []byte, val string, column *CsvTableColumn, lineNumber int) ([]byte, error) { if len(column.DataType) == 0 { // keep the value as it is return append(buffer, val...), nil } - typedVal, err := toTypedValue(val, column) + typedVal, err := toTypedValue(val, column, lineNumber) if err != nil { return buffer, err } diff --git a/pkg/csv2lp/data_conversion_test.go b/pkg/csv2lp/data_conversion_test.go index ec1d42d9b8..5f0a81e646 100644 --- a/pkg/csv2lp/data_conversion_test.go +++ b/pkg/csv2lp/data_conversion_test.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "log" "math" + "os" "strings" "testing" "time" @@ -112,9 +113,9 @@ func Test_ToTypedValue(t *testing.T) { for i, test := range tests { t.Run(fmt.Sprint(i)+" "+test.value, func(t *testing.T) { - column := &CsvTableColumn{} + column := &CsvTableColumn{Label: "test"} column.setupDataType(test.dataType) - val, err := toTypedValue(test.value, column) + val, err := toTypedValue(test.value, column, 1) if err != nil && test.expect != nil { require.Nil(t, err.Error()) } @@ -143,7 +144,7 @@ func Test_ToTypedValue_dateTimeCustomTimeZone(t *testing.T) { column := &CsvTableColumn{} column.TimeZone = tz column.setupDataType(test.dataType) - val, err := toTypedValue(test.value, column) + val, err := 
toTypedValue(test.value, column, 1) if err != nil && test.expect != nil { require.Nil(t, err.Error()) } @@ -210,9 +211,9 @@ func Test_AppendConverted(t *testing.T) { for i, test := range tests { t.Run(fmt.Sprint(i), func(t *testing.T) { - column := &CsvTableColumn{} + column := &CsvTableColumn{Label: "test"} column.setupDataType(test.dataType) - val, err := appendConverted(nil, test.value, column) + val, err := appendConverted(nil, test.value, column, 1) if err != nil && test.expect != "" { require.Nil(t, err.Error()) } @@ -246,18 +247,36 @@ func Test_NormalizeNumberString(t *testing.T) { format string removeFraction bool expect string + warning string }{ - {"123", "", true, "123"}, - {"123", ".", true, "123"}, - {"123.456", ".", true, "123"}, - {"123.456", ".", false, "123.456"}, - {"1 2.3,456", ",. ", false, "123.456"}, - {" 1 2\t3.456 \r\n", "", false, "123.456"}, + {"123", "", true, "123", ""}, + {"123", ".", true, "123", ""}, + {"123.456", ".", true, "123", "::PREFIX::WARNING: line 1: column 'test': '123.456' truncated to '123' to fit into 'tst' data type\n"}, + {"123.456", ".", false, "123.456", ""}, + {"1 2.3,456", ",. ", false, "123.456", ""}, + {" 1 2\t3.456 \r\n", "", false, "123.456", ""}, } for i, test := range tests { t.Run(fmt.Sprint(i), func(t *testing.T) { - require.Equal(t, test.expect, normalizeNumberString(test.value, test.format, test.removeFraction)) + // customize logging to check warnings + var buf bytes.Buffer + log.SetOutput(&buf) + oldFlags := log.Flags() + log.SetFlags(0) + oldPrefix := log.Prefix() + prefix := "::PREFIX::" + log.SetPrefix(prefix) + defer func() { + log.SetOutput(os.Stderr) + log.SetFlags(oldFlags) + log.SetPrefix(oldPrefix) + }() + + require.Equal(t, test.expect, + normalizeNumberString(test.value, + &CsvTableColumn{Label: "test", DataType: "tst", DataFormat: test.format}, test.removeFraction, 1)) + require.Equal(t, test.warning, buf.String()) }) } } From 14718c9dfc169166e1beb3ad13ab38a2f0678cb0 Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Mon, 29 Jun 2020 17:08:32 +0200 Subject: [PATCH 07/34] feat(pkg/csv2lp): pass a line number to custom parsing fn #18744 --- pkg/csv2lp/csv_table.go | 2 +- pkg/csv2lp/data_conversion.go | 6 +++--- pkg/csv2lp/data_conversion_test.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/csv2lp/csv_table.go b/pkg/csv2lp/csv_table.go index 4e791cdbbb..c2ae37bd37 100644 --- a/pkg/csv2lp/csv_table.go +++ b/pkg/csv2lp/csv_table.go @@ -46,7 +46,7 @@ type CsvTableColumn struct { // TimeZone of dateTime column, applied when parsing dateTime DataType TimeZone *time.Location // ParseF is an optional function used to convert column's string value to interface{} - ParseF func(string) (interface{}, error) + ParseF func(value string, lineNumber int) (interface{}, error) // escapedLabel contains escaped label that can be directly used in line protocol escapedLabel string diff --git a/pkg/csv2lp/data_conversion.go b/pkg/csv2lp/data_conversion.go index 74d3942052..5e519aaebc 100644 --- a/pkg/csv2lp/data_conversion.go +++ b/pkg/csv2lp/data_conversion.go @@ -133,7 +133,7 @@ func toTypedValue(val string, column *CsvTableColumn, lineNumber int) (interface dataType := column.DataType dataFormat := column.DataFormat if column.ParseF != nil { - return column.ParseF(val) + return column.ParseF(val, lineNumber) } switch dataType { case stringDatatype: @@ -267,7 +267,7 @@ func CreateDecoder(encoding string) (func(io.Reader) io.Reader, error) { } // createBoolParseFn returns a function that converts a string value to 
boolean according to format "true,yes,1:false,no,0" -func createBoolParseFn(format string) func(string) (interface{}, error) { +func createBoolParseFn(format string) func(string, int) (interface{}, error) { var err error = nil truthy := []string{} falsy := []string{} @@ -284,7 +284,7 @@ func createBoolParseFn(format string) func(string) (interface{}, error) { falsy = strings.Split(f, ",") } } - return func(val string) (interface{}, error) { + return func(val string, _lineNumber int) (interface{}, error) { if err != nil { return nil, err } diff --git a/pkg/csv2lp/data_conversion_test.go b/pkg/csv2lp/data_conversion_test.go index 5f0a81e646..93d01a6201 100644 --- a/pkg/csv2lp/data_conversion_test.go +++ b/pkg/csv2lp/data_conversion_test.go @@ -336,7 +336,7 @@ func Test_CreateBoolParseFn(t *testing.T) { fn := createBoolParseFn(test.format) for j, pair := range test.pair { t.Run(fmt.Sprint(i)+"_"+fmt.Sprint(j), func(t *testing.T) { - result, err := fn(pair.value) + result, err := fn(pair.value, 1) switch pair.expect { case "true": require.Equal(t, true, result) From c2643243f3a8a4f174a2add7d2d43eb2b5f6a1b6 Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Mon, 29 Jun 2020 18:42:49 +0200 Subject: [PATCH 08/34] feat(pkg/csv2lp): add possibility to parse long and unsignedLong values strictly #18744 --- pkg/csv2lp/csv_table.go | 13 +++++- pkg/csv2lp/csv_table_test.go | 54 +++++++++++++++++-------- pkg/csv2lp/data_conversion.go | 63 ++++++++++++++++++++++-------- pkg/csv2lp/data_conversion_test.go | 24 ++++++------ 4 files changed, 106 insertions(+), 48 deletions(-) diff --git a/pkg/csv2lp/csv_table.go b/pkg/csv2lp/csv_table.go index c2ae37bd37..b98ecd5c1b 100644 --- a/pkg/csv2lp/csv_table.go +++ b/pkg/csv2lp/csv_table.go @@ -46,7 +46,7 @@ type CsvTableColumn struct { // TimeZone of dateTime column, applied when parsing dateTime DataType TimeZone *time.Location // ParseF is an optional function used to convert column's string value to interface{} - ParseF func(value string, lineNumber int) (interface{}, error) + ParseF func(value string) (interface{}, error) // escapedLabel contains escaped label that can be directly used in line protocol escapedLabel string @@ -126,9 +126,18 @@ func (c *CsvTableColumn) setupDataType(columnValue string) { // setup column data type c.DataType = columnValue - // setup custom parsing of bool data type + // setup custom parsing if c.DataType == boolDatatype && c.DataFormat != "" { c.ParseF = createBoolParseFn(c.DataFormat) + return + } + if c.DataType == longDatatype && strings.HasPrefix(c.DataFormat, "strict") { + c.ParseF = createStrictLongParseFn(c.DataFormat[6:]) + return + } + if c.DataType == uLongDatatype && strings.HasPrefix(c.DataFormat, "strict") { + c.ParseF = createStrictUnsignedLongParseFn(c.DataFormat[6:]) + return } } diff --git a/pkg/csv2lp/csv_table_test.go b/pkg/csv2lp/csv_table_test.go index 70d93f47e1..f5e4e3fb1b 100644 --- a/pkg/csv2lp/csv_table_test.go +++ b/pkg/csv2lp/csv_table_test.go @@ -338,40 +338,56 @@ func Test_DataTypeInColumnName(t *testing.T) { csv string line string ignoreDataTypeInColumnName bool + error string }{ { - "m|measurement,b|boolean:x:,c|boolean:x:|x\n" + + csv: "m|measurement,b|boolean:x:,c|boolean:x:|x\n" + "cpu,,", - `cpu c=true`, - false, + line: `cpu c=true`, }, { - "m|measurement,a|boolean,b|boolean:0:1,c|boolean:x:,d|boolean:x:\n" + + csv: "m|measurement,a|boolean,b|boolean:0:1,c|boolean:x:,d|boolean:x:\n" + "cpu,1,1,x,y", - `cpu a=true,b=false,c=true,d=false`, - false, + line: `cpu a=true,b=false,c=true,d=false`, }, { 
- "#constant measurement,cpu\n" + + csv: "#constant measurement,cpu\n" + "a|long,b|string\n" + "1,1", - `cpu a=1i,b="1"`, - false, + line: `cpu a=1i,b="1"`, }, { - "#constant measurement,cpu\n" + + csv: "#constant measurement,cpu\n" + "a|long,b|string\n" + "1,1", - `cpu a|long=1,b|string=1`, - true, + line: `cpu a|long=1,b|string=1`, + ignoreDataTypeInColumnName: true, }, { - "#constant measurement,cpu\n" + + csv: "#constant measurement,cpu\n" + "#datatype long,string\n" + "a|long,b|string\n" + "1,1", - `cpu a|long=1i,b|string="1"`, - true, + line: `cpu a|long=1i,b|string="1"`, + ignoreDataTypeInColumnName: true, + }, + { + csv: "#constant measurement,cpu\n" + + "a|long:strict: ,b|unsignedLong:strict: \n" + + "1 2,1 2", + line: `cpu a=12i,b=12u`, + }, + { + csv: "#constant measurement,cpu\n" + + "a|long:strict\n" + + "1.1,1", + error: "column 'a': '1.1' cannot fit into long data type", + }, + { + csv: "#constant measurement,cpu\n" + + "a|unsignedLong:strict\n" + + "1.1,1", + error: "column 'a': '1.1' cannot fit into unsignedLong data type", }, } @@ -385,8 +401,12 @@ func Test_DataTypeInColumnName(t *testing.T) { rowProcessed := table.AddRow(row) if rowProcessed { line, err := table.CreateLine(row) - if err != nil && test.line != "" { - require.Nil(t, err.Error()) + if err != nil { + if test.error == "" { + require.Nil(t, err.Error()) + } else { + require.Equal(t, test.error, err.Error()) + } } lines = append(lines, line) } diff --git a/pkg/csv2lp/data_conversion.go b/pkg/csv2lp/data_conversion.go index 5e519aaebc..c28a4dcb50 100644 --- a/pkg/csv2lp/data_conversion.go +++ b/pkg/csv2lp/data_conversion.go @@ -83,16 +83,16 @@ func escapeString(val string) string { return val } -// normalizeNumberString normalizes the supplied value according to DataForm of the supplied column. +// normalizeNumberString normalizes the supplied value according to the supplied format. // This normalization is intended to convert number strings of different locales to a strconv-parseable value. // // The format's first character is a fraction delimiter character. Next characters in the format // are simply removed, they are typically used to visually separate groups in large numbers. // The removeFraction parameter controls whether the returned value can contain also the fraction part. +// An empty format means ". \n\t\r_" // // For example, to get a strconv-parseable float from a Spanish value '3.494.826.157,123', use format ",." . -func normalizeNumberString(value string, column *CsvTableColumn, removeFraction bool, lineNumber int) string { - format := column.DataFormat +func normalizeNumberString(value string, format string, removeFraction bool) (normalized string, truncated bool) { if format == "" { format = ". 
\n\t\r_" } @@ -112,11 +112,7 @@ func normalizeNumberString(value string, column *CsvTableColumn, removeFraction } if c == fractionRune { if removeFraction { - // warn about lost precision - truncatedValue := retVal.String() - warning := fmt.Errorf("'%s' truncated to '%s' to fit into '%s' data type", value, truncatedValue, column.DataType) - log.Printf("WARNING: %v\n", CreateRowColumnError(lineNumber, column.Label, warning)) - return truncatedValue + return retVal.String(), true } retVal.WriteByte('.') continue @@ -124,16 +120,16 @@ func normalizeNumberString(value string, column *CsvTableColumn, removeFraction retVal.WriteRune(c) } - return retVal.String() + return retVal.String(), false } - return value + return value, false } func toTypedValue(val string, column *CsvTableColumn, lineNumber int) (interface{}, error) { dataType := column.DataType dataFormat := column.DataFormat if column.ParseF != nil { - return column.ParseF(val, lineNumber) + return column.ParseF(val) } switch dataType { case stringDatatype: @@ -165,7 +161,8 @@ func toTypedValue(val string, column *CsvTableColumn, lineNumber int) (interface case durationDatatype: return time.ParseDuration(val) case doubleDatatype: - return strconv.ParseFloat(normalizeNumberString(val, column, false, lineNumber), 64) + normalized, _ := normalizeNumberString(val, dataFormat, false) + return strconv.ParseFloat(normalized, 64) case boolDatatype: switch { case len(val) == 0: @@ -178,9 +175,21 @@ func toTypedValue(val string, column *CsvTableColumn, lineNumber int) (interface return nil, errors.New("Unsupported boolean value '" + val + "' , first character is expected to be 't','f','0','1','y','n'") } case longDatatype: - return strconv.ParseInt(normalizeNumberString(val, column, true, lineNumber), 10, 64) + normalized, truncated := normalizeNumberString(val, dataFormat, true) + if truncated { + error := CreateRowColumnError(lineNumber, column.Label, + fmt.Errorf("'%s' truncated to '%s' to fit into long data type", val, normalized)) + log.Printf("WARNING: %v\n", error) + } + return strconv.ParseInt(normalized, 10, 64) case uLongDatatype: - return strconv.ParseUint(normalizeNumberString(val, column, true, lineNumber), 10, 64) + normalized, truncated := normalizeNumberString(val, dataFormat, true) + if truncated { + error := CreateRowColumnError(lineNumber, column.Label, + fmt.Errorf("'%s' truncated to '%s' to fit into unsignedLong data type", val, normalized)) + log.Printf("WARNING: %v\n", error) + } + return strconv.ParseUint(normalized, 10, 64) case base64BinaryDataType: return base64.StdEncoding.DecodeString(val) default: @@ -267,7 +276,7 @@ func CreateDecoder(encoding string) (func(io.Reader) io.Reader, error) { } // createBoolParseFn returns a function that converts a string value to boolean according to format "true,yes,1:false,no,0" -func createBoolParseFn(format string) func(string, int) (interface{}, error) { +func createBoolParseFn(format string) func(string) (interface{}, error) { var err error = nil truthy := []string{} falsy := []string{} @@ -284,7 +293,7 @@ func createBoolParseFn(format string) func(string, int) (interface{}, error) { falsy = strings.Split(f, ",") } } - return func(val string, _lineNumber int) (interface{}, error) { + return func(val string) (interface{}, error) { if err != nil { return nil, err } @@ -308,3 +317,25 @@ func createBoolParseFn(format string) func(string, int) (interface{}, error) { return nil, fmt.Errorf("unsupported boolean value: %s must one of %v or one of %v", val, truthy, falsy) } } + +// 
createStrictLongParseFn returns a function that converts a string value to long and also fails when a fraction digit is detected
+func createStrictLongParseFn(dataFormat string) func(string) (interface{}, error) {
+	return func(val string) (interface{}, error) {
+		normalized, truncated := normalizeNumberString(val, dataFormat, true)
+		if truncated {
+			return 0, fmt.Errorf("'%s' cannot fit into long data type", val)
+		}
+		return strconv.ParseInt(normalized, 10, 64)
+	}
+}
+
+// createStrictUnsignedLongParseFn returns a function that converts a string value to unsigned long and fails when a fraction digit is detected
+func createStrictUnsignedLongParseFn(dataFormat string) func(string) (interface{}, error) {
+	return func(val string) (interface{}, error) {
+		normalized, truncated := normalizeNumberString(val, dataFormat, true)
+		if truncated {
+			return 0, fmt.Errorf("'%s' cannot fit into unsignedLong data type", val)
+		}
+		return strconv.ParseUint(normalized, 10, 64)
+	}
+}
diff --git a/pkg/csv2lp/data_conversion_test.go b/pkg/csv2lp/data_conversion_test.go
index 93d01a6201..6bd0aa3d85 100644
--- a/pkg/csv2lp/data_conversion_test.go
+++ b/pkg/csv2lp/data_conversion_test.go
@@ -247,14 +247,14 @@ func Test_NormalizeNumberString(t *testing.T) {
 		format         string
 		removeFraction bool
 		expect         string
-		warning        string
+		truncated      bool
 	}{
-		{"123", "", true, "123", ""},
-		{"123", ".", true, "123", ""},
-		{"123.456", ".", true, "123", "::PREFIX::WARNING: line 1: column 'test': '123.456' truncated to '123' to fit into 'tst' data type\n"},
-		{"123.456", ".", false, "123.456", ""},
-		{"1 2.3,456", ",. ", false, "123.456", ""},
-		{" 1 2\t3.456 \r\n", "", false, "123.456", ""},
+		{"123", "", true, "123", false},
+		{"123", ".", true, "123", false},
+		{"123.456", ".", true, "123", true},
+		{"123.456", ".", false, "123.456", false},
+		{"1 2.3,456", ",. ", false, "123.456", false},
+		{" 1 2\t3.456 \r\n", "", false, "123.456", false},
 	}
 
 	for i, test := range tests {
 		t.Run(fmt.Sprint(i), func(t *testing.T) {
@@ -272,11 +272,9 @@ func Test_NormalizeNumberString(t *testing.T) {
 				log.SetFlags(oldFlags)
 				log.SetPrefix(oldPrefix)
 			}()
-
-			require.Equal(t, test.expect,
-				normalizeNumberString(test.value,
-					&CsvTableColumn{Label: "test", DataType: "tst", DataFormat: test.format}, test.removeFraction, 1))
-			require.Equal(t, test.warning, buf.String())
+			normalized, truncated := normalizeNumberString(test.value, test.format, test.removeFraction)
+			require.Equal(t, test.expect, normalized)
+			require.Equal(t, test.truncated, truncated)
 		})
 	}
 }
@@ -336,7 +334,7 @@ func Test_CreateBoolParseFn(t *testing.T) {
 		fn := createBoolParseFn(test.format)
 		for j, pair := range test.pair {
 			t.Run(fmt.Sprint(i)+"_"+fmt.Sprint(j), func(t *testing.T) {
-				result, err := fn(pair.value, 1)
+				result, err := fn(pair.value)
 				switch pair.expect {
 				case "true":
 					require.Equal(t, true, result)
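A minimal sketch of the new strict mode in use; the v2 module import path is an assumption, while the error text follows from the parse functions above:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"strings"

    	"github.com/influxdata/influxdb/v2/pkg/csv2lp"
    )

    func main() {
    	// Plain 'long' would truncate "1000.000" to 1000 with a warning;
    	// 'long:strict' rejects the value instead.
    	data := "m|measurement,a|long:strict\n" +
    		"cpu,1000.000\n"
    	_, err := ioutil.ReadAll(csv2lp.CsvToLineProtocol(strings.NewReader(data)))
    	if err != nil {
    		fmt.Println(err) // line 2: column 'a': '1000.000' cannot fit into long data type
    	}
    }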
From ff96ef04f9759d1307d31873b95f1629cdfb4cb1 Mon Sep 17 00:00:00 2001
From: Pavel Zavora
Date: Mon, 29 Jun 2020 18:56:52 +0200
Subject: [PATCH 09/34] feat(pkg/csv2lp): enhance documentation with strict parsing #18744

---
 pkg/csv2lp/README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pkg/csv2lp/README.md b/pkg/csv2lp/README.md
index fdee92859e..e14ff4d85e 100644
--- a/pkg/csv2lp/README.md
+++ b/pkg/csv2lp/README.md
@@ -158,6 +158,9 @@ All data types can include the format that is used to parse column data. It is t
   - note that you have to quote column delimiters whenever they appear in a CSV column value, for example:
     - `#constant,"double:,.",myColumn,"1.234,011"`
   - `long:format` and `unsignedLong:format` support the same format as `double`, but everything after and including a fraction character is ignored
+    - the format can be prepended with `strict` to fail when a fraction digit is present, for example:
+      - `1000.000` is `1000` when parsed as `long`, but fails when parsed as `long:strict`
+      - `1_000,000` is `1000` when parsed as `long:,_`, but fails when parsed as `long:strict,_`
   - `boolean:truthy:falsy`
     - `truthy` and `falsy` are comma-separated lists of values, they can be empty to assume all values as truthy/falsy; for example `boolean:sí,yes,ja,oui,ano,да:no,nein,non,ne,нет`
   - a `boolean` data type (without the format) parses column values that start with any of _tTyY1_ as `true` values, _fFnN0_ as `false` values and fails on other values
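The `double:,.` format mentioned in the README addition handles locale-specific number strings; a small sketch under the same assumptions as the examples above (the printed float formatting is an expectation, not verified here):

    package main

    import (
    	"io"
    	"os"
    	"strings"

    	"github.com/influxdata/influxdb/v2/pkg/csv2lp"
    )

    func main() {
    	// ',' is the fraction delimiter and '.' a group separator; the column
    	// name is quoted because it contains the CSV delimiter.
    	data := "#constant measurement,cpu\n" +
    		"\"a|double:,.\"\n" +
    		"\"1.234,011\"\n"
    	io.Copy(os.Stdout, csv2lp.CsvToLineProtocol(strings.NewReader(data))) // should print: cpu a=1234.011
    }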
From b51866e7be6b35e31a24bfbc02bfb88f054e582e Mon Sep 17 00:00:00 2001
From: Pavel Zavora
Date: Mon, 29 Jun 2020 19:42:17 +0200
Subject: [PATCH 10/34] chore(pkg/csv2lp): improve doc

---
 pkg/csv2lp/csv2lp.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/csv2lp/csv2lp.go b/pkg/csv2lp/csv2lp.go
index 12030d53ac..6d5886aab5 100644
--- a/pkg/csv2lp/csv2lp.go
+++ b/pkg/csv2lp/csv2lp.go
@@ -23,7 +23,7 @@ func (e CsvLineError) Error() string {
 	return fmt.Sprintf("%v", e.Err)
 }
 
-// CreateRowColumnError creates adds row number and column name to the error supplied
+// CreateRowColumnError wraps an existing error to add line and column coordinates
 func CreateRowColumnError(line int, columnLabel string, err error) CsvLineError {

From 05c8a00b8df6e66c82fef1a7b141d829caa895c2 Mon Sep 17 00:00:00 2001
From: Pavel Zavora
Date: Tue, 30 Jun 2020 08:03:15 +0200
Subject: [PATCH 11/34] feat(pkg/csv2lp): add RowSkippedListener to inform about rejected records #18742

---
 pkg/csv2lp/csv2lp.go      | 11 +++++++++++
 pkg/csv2lp/csv2lp_test.go | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+)

diff --git a/pkg/csv2lp/csv2lp.go b/pkg/csv2lp/csv2lp.go
index 6d5886aab5..4299b577ec 100644
--- a/pkg/csv2lp/csv2lp.go
+++ b/pkg/csv2lp/csv2lp.go
@@ -48,6 +48,8 @@ type CsvToLineReader struct {
 	dataRowAdded bool
 	// log CSV data errors to stderr and continue with CSV processing
 	skipRowOnError bool
+	// RowSkipped is called when a row is skipped because of data parsing error
+	RowSkipped func(source *CsvToLineReader, lineError error, row []string)
 
 	// reader results
 	buffer []byte
@@ -68,6 +70,11 @@ func (state *CsvToLineReader) SkipRowOnError(val bool) *CsvToLineReader {
 	return state
 }
 
+// Comma returns a field delimiter used in an input CSV file
+func (state *CsvToLineReader) Comma() rune {
+	return state.csv.Comma
+}
+
 // Read implements io.Reader that returns protocol lines
 func (state *CsvToLineReader) Read(p []byte) (n int, err error) {
 	// state1: finished
@@ -119,6 +126,10 @@ func (state *CsvToLineReader) Read(p []byte) (n int, err error) {
 		state.dataRowAdded = true
 		if err != nil {
 			lineError := CsvLineError{state.LineNumber, err}
+			if state.RowSkipped != nil {
+				state.RowSkipped(state, lineError, row)
+				continue
+			}
 			if state.skipRowOnError {
 				log.Println(lineError)
 				continue
diff --git a/pkg/csv2lp/csv2lp_test.go b/pkg/csv2lp/csv2lp_test.go
index cf7b3f9d65..dfbc960f0c 100644
--- a/pkg/csv2lp/csv2lp_test.go
+++ b/pkg/csv2lp/csv2lp_test.go
@@ -204,6 +204,40 @@ func Test_CsvToLineProtocol_SkipRowOnError(t *testing.T) {
 	require.Equal(t, messages, 2)
 }
 
+// Test_CsvToLineProtocol_RowSkipped tests that error rows are reported to configured RowSkippedListener
+func Test_CsvToLineProtocol_RowSkipped(t *testing.T) {
+	var buf bytes.Buffer
+	log.SetOutput(&buf)
+	oldFlags := log.Flags()
+	log.SetFlags(0)
+	oldPrefix := log.Prefix()
+	prefix := "::PREFIX::"
+	log.SetPrefix(prefix)
+	defer func() {
+		log.SetOutput(os.Stderr)
+		log.SetFlags(oldFlags)
+		log.SetPrefix(oldPrefix)
+	}()
+
+	csv := "sep=;\n_measurement;a|long:strict\n;1\ncpu;2.1\ncpu;3a\n"
+
+	reader := CsvToLineProtocol(strings.NewReader(csv)).SkipRowOnError(true)
+	reader.RowSkipped = func(src *CsvToLineReader, err error, _row []string) {
+		log.Println(err, string(src.Comma()))
+	}
+	// read all the data
+	ioutil.ReadAll(reader)
+
+	out := buf.String()
+	// fmt.Println(out, string(';'))
+	// ::PREFIX::line 3: column '_measurement': no measurement supplied
+	// ::PREFIX::line 4: column 'a': '2.1' cannot fit into long data type
+	// ::PREFIX::line 5: column 'a': strconv.ParseInt: parsing "3a": invalid syntax
+	messages := strings.Count(out, prefix)
+	require.Equal(t, 3, messages)
+	require.Equal(t, 3, strings.Count(out, ";"))
+}
+
 // Test_CsvLineError tests CsvLineError error format
 func Test_CsvLineError(t *testing.T) {
 	var tests = []struct {
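Beyond the test above, the listener gives callers a hook to collect rejected rows for later reprocessing. A minimal sketch (v2 module import path assumed):

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"strings"

    	"github.com/influxdata/influxdb/v2/pkg/csv2lp"
    )

    func main() {
    	data := "m|measurement,a|long\n" +
    		"cpu,1\n" +
    		"cpu,not-a-number\n"
    	reader := csv2lp.CsvToLineProtocol(strings.NewReader(data))
    	// With RowSkipped set, a rejected row is handed to the listener together
    	// with its parsing error and the conversion continues with the next row.
    	reader.RowSkipped = func(src *csv2lp.CsvToLineReader, err error, row []string) {
    		fmt.Printf("rejected %v: %v\n", row, err)
    	}
    	lines, _ := ioutil.ReadAll(reader)
    	fmt.Print(string(lines)) // cpu a=1i
    }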
From 90a3a7c8d8030931cb2a2725b36b9f0103ef52fe Mon Sep 17 00:00:00 2001
From: Pavel Zavora
Date: Tue, 30 Jun 2020 08:11:21 +0200
Subject: [PATCH 12/34] feat(cmd/influx/write): add --errors-file option #18742

---
 cmd/influx/write.go      | 25 +++++++++++++++++++++++++
 cmd/influx/write_test.go | 17 +++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/cmd/influx/write.go b/cmd/influx/write.go
index 3abbdb0fa9..47b47ccc0e 100644
--- a/cmd/influx/write.go
+++ b/cmd/influx/write.go
@@ -2,6 +2,7 @@ package main
 
 import (
 	"context"
+	"encoding/csv"
 	"fmt"
 	"io"
 	"log"
@@ -38,6 +39,7 @@ type writeFlagsType struct {
 	SkipHeader                 int
 	IgnoreDataTypeInColumnName bool
 	Encoding                   string
+	ErrorsFile                 string
 }
 
 var writeFlags writeFlagsType
@@ -86,6 +88,7 @@ func cmdWrite(f *globalFlags, opt genericCLIOpts) *cobra.Command {
 	cmd.PersistentFlags().BoolVar(&writeFlags.IgnoreDataTypeInColumnName, "xIgnoreDataTypeInColumnName", false, "Ignores dataType which could be specified after ':' in column name")
 	cmd.PersistentFlags().MarkHidden("xIgnoreDataTypeInColumnName") // should be used only upon explicit advice
 	cmd.PersistentFlags().StringVar(&writeFlags.Encoding, "encoding", "UTF-8", "Character encoding of input files or stdin")
+	cmd.PersistentFlags().StringVar(&writeFlags.ErrorsFile, "errors-file", "", "The path to the file to write rejected rows")
 
 	cmdDryRun := opt.newCmd("dryrun", fluxWriteDryrunF, false)
 	cmdDryRun.Args = cobra.MaximumNArgs(1)
@@ -204,6 +207,27 @@ func (writeFlags *writeFlagsType) createLineReader(ctx context.Context, cmd *cob
 		}
 	}
 
+	// create writer for errors-file, if supplied
+	var errorsFile *csv.Writer
+	var rowSkippedListener func(*csv2lp.CsvToLineReader, error, []string)
+	if writeFlags.ErrorsFile != "" {
+		writer, err := os.Create(writeFlags.ErrorsFile)
+		if err != nil {
+			return nil, csv2lp.MultiCloser(closers...), fmt.Errorf("failed to create %q: %v", writeFlags.ErrorsFile, err)
+		}
+		closers = append(closers, writer)
+		errorsFile = csv.NewWriter(writer)
+		rowSkippedListener = func(source *csv2lp.CsvToLineReader, lineError error, row []string) {
+			log.Println(lineError)
+			errorsFile.Comma = source.Comma()
+			errorsFile.Write([]string{fmt.Sprintf("# error : %v", lineError)})
+			if err := errorsFile.Write(row); err != nil {
+				log.Printf("Unable to write to error-file: %v\n", err)
+			}
+			errorsFile.Flush() // flush is required
+		}
+	}
+
 	// concatenate readers
 	r := io.MultiReader(readers...)
 	if writeFlags.Format == inputFormatCsv {
@@ -213,6 +237,7 @@ func (writeFlags *writeFlagsType) createLineReader(ctx context.Context, cmd *cob
 		csvReader.Table.IgnoreDataTypeInColumnName(writeFlags.IgnoreDataTypeInColumnName)
 		// change LineNumber to report file/stdin line numbers properly
 		csvReader.LineNumber = writeFlags.SkipHeader - len(writeFlags.Headers)
+		csvReader.RowSkipped = rowSkippedListener
 		r = csvReader
 	}
 	return r, csv2lp.MultiCloser(closers...), nil
diff --git a/cmd/influx/write_test.go b/cmd/influx/write_test.go
index e56198f172..36dee249d0 100644
--- a/cmd/influx/write_test.go
+++ b/cmd/influx/write_test.go
@@ -56,6 +56,7 @@ func readLines(reader io.Reader) []string {
 
 func createTempFile(suffix string, contents []byte) string {
 	file, err := ioutil.TempFile("", "influx_writeTest*."+suffix)
 	if err != nil {
 		log.Fatal(err)
 		return "unknown.file"
 	}
+	file.Close() // close immediately, since we only need the file name
@@ -545,3 +546,19 @@ func Test_fluxWriteF(t *testing.T) {
 		require.Equal(t, "stdin3 i=stdin1,j=stdin2,k=stdin4", strings.Trim(string(lineData), "\n"))
 	})
 }
+
+// Test_writeFlags_errorsFile tests that rejected rows are written to errors file
+func Test_writeFlags_errorsFile(t *testing.T) {
+	defer removeTempFiles()
+	errorsFile := createTempFile("errors", []byte{})
+	stdInContents := "_measurement,a|long:strict\nm,1\nm,1.1"
+	out := bytes.Buffer{}
+	command := cmdWrite(&globalFlags{}, genericCLIOpts{in: strings.NewReader(stdInContents), w: bufio.NewWriter(&out)})
+	command.SetArgs([]string{"dryrun", "--format", "csv", "--errors-file", errorsFile})
+	err := command.Execute()
+	require.Nil(t, err)
+	require.Equal(t, "m a=1i", strings.Trim(out.String(), "\n"))
+	errorLines, err := ioutil.ReadFile(errorsFile)
+	require.Nil(t, err)
+	require.Equal(t, "# error : line 3: column 'a': '1.1' cannot fit into long data type\nm,1.1", strings.Trim(string(errorLines), "\n"))
+}
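With the pieces above in place, rejected rows can be captured during a dry run. A hypothetical invocation using only the subcommand and flags added or exercised by the test above (file names are placeholders):

    influx write dryrun --format csv --errors-file rejected.csv < data.csv

Valid rows are printed as line protocol, while each rejected row lands in rejected.csv preceded by a `# error :` comment line.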
From 0718c9a53fd45260578fb9b0b923af6f9b6f0ed4 Mon Sep 17 00:00:00 2001
From: Pavel Zavora
Date: Wed, 1 Jul 2020 05:12:55 +0200
Subject: [PATCH 13/34] chore(cmd/influx/write): update option description

---
 cmd/influx/write.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/influx/write.go b/cmd/influx/write.go
index 47b47ccc0e..888bd7a2e8 100644
--- a/cmd/influx/write.go
+++ b/cmd/influx/write.go
@@ -88,7 +88,7 @@ func cmdWrite(f *globalFlags, opt genericCLIOpts) *cobra.Command {
 	cmd.PersistentFlags().BoolVar(&writeFlags.IgnoreDataTypeInColumnName, "xIgnoreDataTypeInColumnName", false, "Ignores dataType which could be specified after ':' in column name")
 	cmd.PersistentFlags().MarkHidden("xIgnoreDataTypeInColumnName") // should be used only upon explicit advice
 	cmd.PersistentFlags().StringVar(&writeFlags.Encoding, "encoding", "UTF-8", "Character encoding of input files or stdin")
-	cmd.PersistentFlags().StringVar(&writeFlags.ErrorsFile, "errors-file", "", "The path to the file to write rejected rows")
+	cmd.PersistentFlags().StringVar(&writeFlags.ErrorsFile, "errors-file", "", "The path to the file to write rejected rows to")

From c5d841efdf0d90e1292564f8c523f5f323e0eff1 Mon Sep 17 00:00:00 2001
From: Pavel Zavora
Date: Wed, 1 Jul 2020 16:53:17 +0200
Subject: [PATCH 14/34] feat(pkg/csv2lp): add concat annotation

---
 pkg/csv2lp/csv_annotations.go      | 60 +++++++++++++++++++-
 pkg/csv2lp/csv_annotations_test.go | 90 ++++++++++++++++++++++++++++++
 pkg/csv2lp/csv_table.go            | 22 ++++++++
 pkg/csv2lp/csv_table_test.go       | 51 +++++++++++++++++
 4 files changed, 220 insertions(+), 3 deletions(-)

diff --git a/pkg/csv2lp/csv_annotations.go b/pkg/csv2lp/csv_annotations.go
index 8b6fa12f8d..81fb75ea84 100644
--- a/pkg/csv2lp/csv_annotations.go
+++ b/pkg/csv2lp/csv_annotations.go
@@ -2,6 +2,7 @@ package csv2lp
 
 import (
 	"fmt"
+	"log"
 	"regexp"
 	"strconv"
 	"strings"
@@ -33,9 +34,8 @@ func (a annotationComment) matches(comment string) bool {
 	return strings.HasPrefix(strings.ToLower(comment), a.prefix)
 }
 
-// constantSetupTable setups the supplied CSV table from #constant annotation
-func constantSetupTable(table *CsvTable, row []string) error {
-	// adds a virtual column with contsant value to all data rows
+func createConstantOrConcatColumn(table *CsvTable, row []string) CsvTableColumn {
+	// adds a virtual column with constant value to all data rows
 	// supported types of constant annotation rows are:
 	// 1. "#constant,datatype,label,defaultValue"
 	// 2. "#constant,measurement,value"
@@ -79,10 +79,60 @@ func createConstantOrConcatColumn(table *CsvTable, row []string) CsvTableColumn
 		}
 	}
 	// add a virtual column to the table
+	return col
+}
+
+// constantSetupTable sets up the supplied CSV table from #constant annotation
+func constantSetupTable(table *CsvTable, row []string) error {
+	col := createConstantOrConcatColumn(table, row)
+	// add a virtual column to the table
 	table.extraColumns = append(table.extraColumns, &col)
 	return nil
 }
+
+// computedReplacer is used to replace value in computed columns
+var computedReplacer *regexp.Regexp = regexp.MustCompile(`\$\{[^}]+\}`)
+
+// concatSetupTable sets up the supplied CSV table from #concat annotation
+func concatSetupTable(table *CsvTable, row []string) error {
+	col := createConstantOrConcatColumn(table, row)
+	template := col.DefaultValue
+	col.ComputeValue = func(row []string) string {
+		return computedReplacer.ReplaceAllStringFunc(template, func(text string) string {
+			columnLabel := text[2 : len(text)-1] // ${columnLabel}
+			if columnLabel == "$" {
+				return "$" // ${$} is a way to print $, if it would require escaping
+			}
+			if placeholderColumn := table.Column(columnLabel); placeholderColumn != nil {
+				return placeholderColumn.Value(row)
+			}
+			log.Printf("WARNING: column %s: column '%s' cannot be replaced, no such column available", col.Label, columnLabel)
+			return ""
+		})
+	}
+	// add a virtual column to the table
+	table.extraColumns = append(table.extraColumns, &col)
+	// add a validator to report an error when a referenced column is not available
+	table.validators = append(table.validators, func(table *CsvTable) error {
+		placeholders := computedReplacer.FindAllString(template, len(template))
+		for _, placeholder := range placeholders {
+			columnLabel := placeholder[2 : len(placeholder)-1] // ${columnLabel}
+			if columnLabel == "$" {
+				return nil // ${$} is a way to print $
+			}
+			if placeholderColumn := table.Column(columnLabel); placeholderColumn == nil {
+				return CsvColumnError{
+					Column: col.Label,
+					Err: fmt.Errorf("'%s' references an unknown column '%s', available columns are: %v",
+						template, columnLabel, strings.Join(table.ColumnLabels(), ",")),
+				}
+			}
+		}
+		return nil
+	})
+	return nil
+}
+
 // supportedAnnotations contains all supported CSV annotations comments
 var supportedAnnotations = []annotationComment{
 	{
@@ -131,6 +181,10 @@ var supportedAnnotations = []annotationComment{
 			return nil
 		},
 	},
+	{
+		prefix:     "#concat",
+		setupTable: concatSetupTable,
+	},
 }
 
 // ignoreLeadingComment returns a value without '#anyComment ' prefix
diff --git a/pkg/csv2lp/csv_annotations_test.go b/pkg/csv2lp/csv_annotations_test.go
index 0b4e9e1a29..6f47648816 100644
--- a/pkg/csv2lp/csv_annotations_test.go
+++ b/pkg/csv2lp/csv_annotations_test.go
@@ -1,7 +1,10 @@
 package csv2lp
 
 import (
+	"bytes"
 	"fmt"
+	"log"
+	"os"
 	"strconv"
 	"strings"
 	"testing"
@@ -140,6 +143,93 @@ func Test_ConstantAnnotation(t *testing.T) {
 	}
 }
 
+// Test_ConcatAnnotation tests #concat annotation
+func Test_ConcatAnnotation(t *testing.T) {
+	subject := annotation("#concat")
+	require.True(t, subject.matches("#Concat"))
+	require.True(t, subject.isTableAnnotation())
+	var tests = []struct {
+		value          []string
+		expectLabel    string
+		expectValue    string
+		expectLinePart int
+	}{
+		// all possible specifications
+		{[]string{"#concat "}, "", "", 0}, // means literally nothing
+		{[]string{"#concat measurement", "a"}, "_", "a", linePartMeasurement},
+		{[]string{"#concat measurement", "a", "b"}, "_", "b", linePartMeasurement},
+		{[]string{"#concat measurement", "a", ""}, "_", "a", linePartMeasurement},
+		{[]string{"#concat tag", "tgName", "tgValue"}, "tgName", "tgValue", linePartTag},
+		{[]string{"#concat", "tag", "tgName", "tgValue"}, "tgName", "tgValue", linePartTag},
+		{[]string{"#concat field", "fName", "fVal"}, "fName", "fVal", linePartField},
+		{[]string{"#concat", "field", "fName", "fVal"}, "fName", "fVal", linePartField},
+		{[]string{"dateTime", "1"}, "_", "1", linePartTime},
+		{[]string{"dateTime", "1", "2"}, "_", "2", linePartTime},
+		{[]string{"dateTime", "", "2"}, "_", "2", linePartTime},
+		{[]string{"dateTime", "3", ""}, "_", "3", linePartTime},
+		{[]string{"long", "fN", "fV"}, "fN", "fV", 0},
+		// concat values
+		{[]string{"string", "fN", "${$}-${b}-${a}"}, "fN", "$-2-1", 0},
+	}
+	exampleRow := []string{"1", "2"}
+	for i, test := range tests {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
+			table := &CsvTable{columns: []*CsvTableColumn{
+				{Label: "a", Index: 0},
+				{Label: "b", Index: 1},
+			}}
+			subject.setupTable(table, test.value)
+			// validator
+			require.Equal(t, 1, len(table.validators))
+			require.Equal(t, table.validators[0](table), nil)
+			// columns
+			require.Equal(t, 1, len(table.extraColumns))
+			col := table.extraColumns[0]
+			require.Equal(t, test.expectLinePart, col.LinePart)
+			require.Greater(t, 0, col.Index)
+			if test.expectLabel != "_" {
+				require.Equal(t, test.expectLabel, col.Label)
+			} else {
+				require.NotEqual(t, "", col.Label)
+			}
+			require.Equal(t, test.expectValue, col.Value(exampleRow))
+		})
+	}
+	t.Run("concat template references unknown column", func(t *testing.T) {
+		var buf bytes.Buffer
+		log.SetOutput(&buf)
+		oldFlags := log.Flags()
+		log.SetFlags(0)
+		oldPrefix := log.Prefix()
+		prefix := "::PREFIX::"
+		log.SetPrefix(prefix)
+		defer func() {
+			log.SetOutput(os.Stderr)
+			log.SetFlags(oldFlags)
+			log.SetPrefix(oldPrefix)
+		}()
+
+		table := &CsvTable{columns: []*CsvTableColumn{
+			{Label: "x", Index: 0},
+		}}
+		subject.setupTable(table, []string{"string", "fN", "a${y}-${x}z"})
+		require.Equal(t, 1, len(table.validators))
+		require.NotNil(t, table.validators[0](table))
+		require.Equal(t,
+			"column 'fN': 'a${y}-${x}z' references an unknown column 'y', available columns are: x",
+			table.validators[0](table).Error())
+		// columns
+		require.Equal(t, 1, len(table.extraColumns))
+		col := table.extraColumns[0]
+		require.Greater(t, 0, col.Index)
+		require.Equal(t, "a-1z", col.Value(exampleRow))
+		// a warning is printed to console
printed to console + require.Equal(t, + "::PREFIX::WARNING: column fN: column 'y' cannot be replaced, no such column available", + strings.TrimSpace(buf.String())) + }) +} + // Test_TimeZoneAnnotation tests #timezone annotation func Test_TimeZoneAnnotation(t *testing.T) { subject := annotation("#timezone") diff --git a/pkg/csv2lp/csv_table.go b/pkg/csv2lp/csv_table.go index b98ecd5c1b..af720ed7cd 100644 --- a/pkg/csv2lp/csv_table.go +++ b/pkg/csv2lp/csv_table.go @@ -47,6 +47,8 @@ type CsvTableColumn struct { TimeZone *time.Location // ParseF is an optional function used to convert column's string value to interface{} ParseF func(value string) (interface{}, error) + // ComputeValue is an optional function used to compute column value out of row data + ComputeValue func(row []string) string // escapedLabel contains escaped label that can be directly used in line protocol escapedLabel string @@ -63,6 +65,9 @@ func (c *CsvTableColumn) LineLabel() string { // Value returns the value of the column for the supplied row func (c *CsvTableColumn) Value(row []string) string { if c.Index < 0 || c.Index >= len(row) { + if c.ComputeValue != nil { + return c.ComputeValue(row) + } return c.DefaultValue } val := row[c.Index] @@ -172,6 +177,8 @@ type CsvTable struct { ignoreDataTypeInColumnName bool // timeZone of dateTime column(s), applied when parsing dateTime value without a time zone specified timeZone *time.Location + // validators validate table structure right before processing data rows + validators []func(*CsvTable) error /* cached columns are initialized before reading the data rows using the computeLineProtocolColumns fn */ // cachedMeasurement is a required column that read (line protocol) measurement @@ -202,6 +209,7 @@ func (t *CsvTable) DataColumnsInfo() string { return "" } var builder = strings.Builder{} + t.computeLineProtocolColumns() // censure that ached columns are initialized builder.WriteString(fmt.Sprintf("CsvTable{ dataColumns: %d constantColumns: %d\n", len(t.columns), len(t.extraColumns))) builder.WriteString(fmt.Sprintf(" measurement: %+v\n", t.cachedMeasurement)) for _, col := range t.cachedTags { @@ -425,6 +433,11 @@ func (t *CsvTable) AppendLine(buffer []byte, row []string, lineNumber int) ([]by } } } + for _, v := range t.validators { + if err := v(t); err != nil { + return buffer, err + } + } } if t.cachedMeasurement == nil { @@ -527,6 +540,15 @@ func (t *CsvTable) Columns() []*CsvTableColumn { return t.columns } +// ColumnLabels returns available columns labels +func (t *CsvTable) ColumnLabels() []string { + labels := make([]string, len(t.columns)) + for i, col := range t.columns { + labels[i] = col.Label + } + return labels +} + // Measurement returns measurement column or nil func (t *CsvTable) Measurement() *CsvTableColumn { t.computeLineProtocolColumns() diff --git a/pkg/csv2lp/csv_table_test.go b/pkg/csv2lp/csv_table_test.go index f5e4e3fb1b..45585fecf2 100644 --- a/pkg/csv2lp/csv_table_test.go +++ b/pkg/csv2lp/csv_table_test.go @@ -104,6 +104,7 @@ func Test_CsvTable_FluxQueryResult(t *testing.T) { require.Equal(t, table.Tags()[0].Label, "cpu") require.Equal(t, table.Tags()[1].Label, "host") require.Equal(t, len(table.Fields()), 0) + require.Contains(t, table.ColumnLabels(), "_measurement") } } } @@ -332,6 +333,52 @@ func Test_ConstantAnnotations(t *testing.T) { } } +// Test_ConcatAnnotations tests processing of concat annotations +func Test_ConcatAnnotations(t *testing.T) { + var tests = []struct { + name string + csv string + line string + }{ + { + "measurement_1", 
+ "#concat measurement,cpu\n" + + "a,b\n" + + "1,1", + "cpu a=1,b=1", + }, + { + "measurement_2", + "#concat,measurement,${a}${b}\n" + + "#constant,tag,cpu,cpu1\n" + + "#constant,long,of,0\n" + + "#constant,dateTime,,2\n" + + "a,b\n" + + "1,1", + "11,cpu=cpu1 a=1,b=1,of=0i 2", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + rows := readCsv(t, test.csv) + table := CsvTable{} + var lines []string + for _, row := range rows { + rowProcessed := table.AddRow(row) + if rowProcessed { + line, err := table.CreateLine(row) + if err != nil && test.line != "" { + require.Nil(t, err.Error()) + } + lines = append(lines, line) + } + } + require.Equal(t, []string{test.line}, lines) + }) + } +} + // Test_DataTypeInColumnName tests specification of column data type in the header row func Test_DataTypeInColumnName(t *testing.T) { var tests = []struct { @@ -454,6 +501,10 @@ func Test_CsvTable_dataErrors(t *testing.T) { "error_no_measurement_data", "_measurement,col1\n,2", }, + { + "error_derived_column_missing reference", + "#concat string,d,${col1}${col2}\n_measurement,col1\nm,2", + }, } for _, test := range tests { From b21b4014e6ca1d4a0fa3313cae9e345cd7638f32 Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Thu, 2 Jul 2020 08:47:03 +0200 Subject: [PATCH 15/34] feat(pkg/csv2lp): document concat annotation --- pkg/csv2lp/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/csv2lp/README.md b/pkg/csv2lp/README.md index e14ff4d85e..42a07940a5 100644 --- a/pkg/csv2lp/README.md +++ b/pkg/csv2lp/README.md @@ -139,6 +139,11 @@ Existing [data types](https://v2.docs.influxdata.com/v2.0/reference/syntax/annot - `#constant` annotation adds a constant column to the data, so you can set measurement, time, field or tag of every row you import - the format of a constant annotation row is `#constant,datatype,name,value`', it contains supported datatype, a column name, and a constant value - _column name_ can be omitted for _dateTime_ or _measurement_ columns, so the annotation can be simply `#constant,measurement,cpu` +- `#concat` annotation adds a new column that is concatenated from existing columns according to a template + - the format of a concat annotation row is `#concat,datatype,name,template`', it contains supported datatype, a column name, and a template value + - the `template` is a string with `${columnName}` placeholders, in which the placeholders are replaced by values of existing columns + - for example: `#concat,string,fullName,${firstName} ${lastName}` + - _column name_ can be omitted for _dateTime_ or _measurement_ columns - `#timezone` annotation specifies the time zone of the data using an offset, which is either `+hhmm` or `-hhmm` or `Local` to use the local/computer time zone. 
Examples: _#timezone,+0100_ _#timezone -0500_ _#timezone Local_ #### Data type with data format From fe2c7dfae0c74ed46db6d3ddfee728b9713b074a Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Thu, 9 Jul 2020 06:13:00 +0200 Subject: [PATCH 16/34] chore(pkg/csv2lp): improve Test_CsvToLineProtocol_RowSkipped --- pkg/csv2lp/csv2lp_test.go | 54 +++++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 13 deletions(-) diff --git a/pkg/csv2lp/csv2lp_test.go b/pkg/csv2lp/csv2lp_test.go index dfbc960f0c..4f091efb51 100644 --- a/pkg/csv2lp/csv2lp_test.go +++ b/pkg/csv2lp/csv2lp_test.go @@ -204,38 +204,66 @@ func Test_CsvToLineProtocol_SkipRowOnError(t *testing.T) { require.Equal(t, messages, 2) } -// Test_CsvToLineProtocol_RowSkipped tests that error rows are reported to configured RowSkippedListener +// Test_CsvToLineProtocol_RowSkipped tests that error rows are reported to configured RowSkipped listener func Test_CsvToLineProtocol_RowSkipped(t *testing.T) { var buf bytes.Buffer log.SetOutput(&buf) oldFlags := log.Flags() log.SetFlags(0) - oldPrefix := log.Prefix() - prefix := "::PREFIX::" - log.SetPrefix(prefix) defer func() { log.SetOutput(os.Stderr) log.SetFlags(oldFlags) - log.SetPrefix(oldPrefix) }() + type ActualArguments = struct { + src *CsvToLineReader + err error + row []string + } + type ExpectedArguments = struct { + errorString string + row []string + } + csv := "sep=;\n_measurement;a|long:strict\n;1\ncpu;2.1\ncpu;3a\n" + calledArgs := []ActualArguments{} + expectedArgs := []ExpectedArguments{ + { + "line 3: column '_measurement': no measurement supplied", + []string{"", "1"}, + }, + { + "line 4: column 'a': '2.1' cannot fit into long data type", + []string{"cpu", "2.1"}, + }, + { + "line 5: column 'a': strconv.ParseInt:", + []string{"cpu", "3a"}, + }, + } reader := CsvToLineProtocol(strings.NewReader(csv)).SkipRowOnError(true) reader.RowSkipped = func(src *CsvToLineReader, err error, _row []string) { - log.Println(err, string(src.Comma())) + // make a copy of _row + row := make([]string, len(_row)) + copy(row, _row) + // remember for comparison + calledArgs = append(calledArgs, ActualArguments{ + src, err, row, + }) } // read all the data ioutil.ReadAll(reader) out := buf.String() - // fmt.Println(out, string(';')) - // ::PREFIX::line 3: column '_measurement': no measurement supplied - // ::PREFIX::line 4: column 'a': '2.1' cannot fit into long data type - // ::PREFIX::line 5: column 'a': strconv.ParseInt: parsing "3a": invalid syntax - messages := strings.Count(out, prefix) - require.Equal(t, 3, messages) - require.Equal(t, 3, strings.Count(out, ";")) + require.Empty(t, out, "No log messages expected because RowSkipped handler is set") + + require.Len(t, calledArgs, 3) + for i, expected := range expectedArgs { + require.Equal(t, reader, calledArgs[i].src) + require.Contains(t, calledArgs[i].err.Error(), expected.errorString) + require.Equal(t, expected.row, calledArgs[i].row) + } } // Test_CsvLineError tests CsvLineError error format From 7fc590fb6ffcd8fdebde8f616211d75418a94467 Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Fri, 31 Jul 2020 12:43:28 +0200 Subject: [PATCH 17/34] chore: doc only --- pkg/csv2lp/csv_annotations.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/csv2lp/csv_annotations.go b/pkg/csv2lp/csv_annotations.go index 81fb75ea84..eab27a1c7f 100644 --- a/pkg/csv2lp/csv_annotations.go +++ b/pkg/csv2lp/csv_annotations.go @@ -112,7 +112,7 @@ func concatSetupTable(table *CsvTable, row []string) error { } // add a virtual column 
to the table table.extraColumns = append(table.extraColumns, &col) - // add validator to report error when no placeholder column is not available + // add validator to report error when no placeholder column is available table.validators = append(table.validators, func(table *CsvTable) error { placeholders := computedReplacer.FindAllString(template, len(template)) for _, placeholder := range placeholders { From 60c4984f518bf39080ca5bd7c15b0772623be89b Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Sat, 12 Sep 2020 11:11:19 +0200 Subject: [PATCH 18/34] chore: apply review comments --- pkg/csv2lp/csv_annotations.go | 17 +++++++---------- pkg/csv2lp/csv_annotations_test.go | 2 +- pkg/csv2lp/data_conversion.go | 2 +- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/pkg/csv2lp/csv_annotations.go b/pkg/csv2lp/csv_annotations.go index eab27a1c7f..7416842748 100644 --- a/pkg/csv2lp/csv_annotations.go +++ b/pkg/csv2lp/csv_annotations.go @@ -34,7 +34,7 @@ func (a annotationComment) matches(comment string) bool { return strings.HasPrefix(strings.ToLower(comment), a.prefix) } -func createConstantOrConcatColumn(table *CsvTable, row []string) CsvTableColumn { +func createConstantOrConcatColumn(table *CsvTable, row []string, annotationName string) CsvTableColumn { // adds a virtual column with constant value to all data rows // supported types of constant annotation rows are: // 1. "#constant,datatype,label,defaultValue" @@ -72,10 +72,10 @@ func createConstantOrConcatColumn(table *CsvTable, row []string) CsvTableColumn if col.DefaultValue == "" && col.Label != "" { // type 2,3,5,6 col.DefaultValue = col.Label - col.Label = "#constant " + col.DataType + col.Label = annotationName + " " + col.DataType } else if col.Label == "" { - // setup a label if no label is supplied fo focused error messages - col.Label = "#constant " + col.DataType + // setup a label if no label is supplied for focused error messages + col.Label = annotationName + " " + col.DataType } } // add a virtual column to the table @@ -84,7 +84,7 @@ func createConstantOrConcatColumn(table *CsvTable, row []string) CsvTableColumn // constantSetupTable setups the supplied CSV table from #constant annotation func constantSetupTable(table *CsvTable, row []string) error { - col := createConstantOrConcatColumn(table, row) + col := createConstantOrConcatColumn(table, row, "#constant") // add a virtual column to the table table.extraColumns = append(table.extraColumns, &col) return nil @@ -95,14 +95,11 @@ var computedReplacer *regexp.Regexp = regexp.MustCompile(`\$\{[^}]+\}`) // concatSetupTable setups the supplied CSV table from #concat annotation func concatSetupTable(table *CsvTable, row []string) error { - col := createConstantOrConcatColumn(table, row) + col := createConstantOrConcatColumn(table, row, "#concat") template := col.DefaultValue col.ComputeValue = func(row []string) string { return computedReplacer.ReplaceAllStringFunc(template, func(text string) string { columnLabel := text[2 : len(text)-1] // ${columnLabel} - if columnLabel == "$" { - return "$" // ${$} is a way to print $, if it would require escaping - } if placeholderColumn := table.Column(columnLabel); placeholderColumn != nil { return placeholderColumn.Value(row) } @@ -118,7 +115,7 @@ func concatSetupTable(table *CsvTable, row []string) error { for _, placeholder := range placeholders { columnLabel := placeholder[2 : len(placeholder)-1] // ${columnLabel} if columnLabel == "$" { - return nil // ${$} is a way to print $ + return nil } if placeholderColumn := 
table.Column(columnLabel); placeholderColumn == nil { return CsvColumnError{ diff --git a/pkg/csv2lp/csv_annotations_test.go b/pkg/csv2lp/csv_annotations_test.go index 6f47648816..2313f856ba 100644 --- a/pkg/csv2lp/csv_annotations_test.go +++ b/pkg/csv2lp/csv_annotations_test.go @@ -169,7 +169,7 @@ func Test_ConcatAnnotation(t *testing.T) { {[]string{"dateTime", "3", ""}, "_", "3", linePartTime}, {[]string{"long", "fN", "fV"}, "fN", "fV", 0}, // concat values - {[]string{"string", "fN", "${$}-${b}-${a}"}, "fN", "$-2-1", 0}, + {[]string{"string", "fN", "$-${b}-${a}"}, "fN", "$-2-1", 0}, } exampleRow := []string{"1", "2"} for i, test := range tests { diff --git a/pkg/csv2lp/data_conversion.go b/pkg/csv2lp/data_conversion.go index c28a4dcb50..12a036afb3 100644 --- a/pkg/csv2lp/data_conversion.go +++ b/pkg/csv2lp/data_conversion.go @@ -93,7 +93,7 @@ func escapeString(val string) string { // // For example, to get a strconv-parseable float from a Spanish value '3.494.826.157,123', use format ",." . func normalizeNumberString(value string, format string, removeFraction bool) (normalized string, truncated bool) { - if format == "" { + if len(format) == 0 { format = ". \n\t\r_" } if strings.ContainsAny(value, format) { From 78fe5c61f9b5c357d5ac2b63a94bc21d95db8126 Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Sat, 12 Sep 2020 11:15:58 +0200 Subject: [PATCH 19/34] chore: update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ae7c66e08..c75d36ffca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ need to update any InfluxDB CLI config profiles with the new port number. ### Features +1. [18779](https://github.com/influxdata/influxdb/pull/18779): Add new processing options and enhancements to influx write. 1. [19246](https://github.com/influxdata/influxdb/pull/19246): Redesign load data page to increase discovery and ease of use 1. [19334](https://github.com/influxdata/influxdb/pull/19334): Add --active-config flag to influx to set config for single command 1. [19219](https://github.com/influxdata/influxdb/pull/19219): List buckets via the API now supports after (ID) parameter as an alternative to offset. 
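Taken together, the csv2lp patches above let the converter be driven end to end. A sketch, assuming the exported csv2lp API exactly as exercised by the tests: the input and expected output are lifted from Test_ConcatAnnotations, and the skipped-row wiring follows Test_CsvToLineProtocol_RowSkipped:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"strings"

    	"github.com/influxdata/influxdb/v2/pkg/csv2lp"
    )

    func main() {
    	// "#concat measurement,cpu" computes the measurement for every data row;
    	// per Test_ConcatAnnotations this input converts to "cpu a=1,b=1".
    	csv := "#concat measurement,cpu\n" +
    		"a,b\n" +
    		"1,1\n"

    	reader := csv2lp.CsvToLineProtocol(strings.NewReader(csv)).SkipRowOnError(true)
    	// Rows that fail conversion are handed to the listener instead of being logged.
    	reader.RowSkipped = func(src *csv2lp.CsvToLineReader, err error, row []string) {
    		fmt.Printf("skipped row %v: %v\n", row, err)
    	}

    	lines, err := ioutil.ReadAll(reader)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(lines)) // cpu a=1,b=1
    }
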
From 2c25044a8cd52938315c3e13f50487c233bb41b0 Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Sat, 12 Sep 2020 11:29:05 +0200 Subject: [PATCH 20/34] chore: apply review comments --- pkg/csv2lp/csv_annotations.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/csv2lp/csv_annotations.go b/pkg/csv2lp/csv_annotations.go index 7416842748..43ae923f6d 100644 --- a/pkg/csv2lp/csv_annotations.go +++ b/pkg/csv2lp/csv_annotations.go @@ -114,9 +114,6 @@ func concatSetupTable(table *CsvTable, row []string) error { placeholders := computedReplacer.FindAllString(template, len(template)) for _, placeholder := range placeholders { columnLabel := placeholder[2 : len(placeholder)-1] // ${columnLabel} - if columnLabel == "$" { - return nil - } if placeholderColumn := table.Column(columnLabel); placeholderColumn == nil { return CsvColumnError{ Column: col.Label, From 13a801b830539fc1addf793b71ba41ee3bc76aa7 Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Mon, 31 Aug 2020 08:11:26 +0200 Subject: [PATCH 21/34] fix(pkg/csv2lp): do not override existing line part in group annotation #19452 --- pkg/csv2lp/csv2lp_test.go | 66 +++++++++++++++++++++++++++++++++-- pkg/csv2lp/csv_annotations.go | 5 ++- pkg/csv2lp/csv_table.go | 2 +- 3 files changed, 69 insertions(+), 4 deletions(-) diff --git a/pkg/csv2lp/csv2lp_test.go b/pkg/csv2lp/csv2lp_test.go index 01d5885367..084b5731b9 100644 --- a/pkg/csv2lp/csv2lp_test.go +++ b/pkg/csv2lp/csv2lp_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/require" ) -// Test_CsvToLineProtocol tests conversion of annotated CSV data to line protocol data -func Test_CsvToLineProtocol(t *testing.T) { +// Test_CsvToLineProtocol_variousBufferSize tests conversion of annotated CSV data to line protocol data on various buffer sizes +func Test_CsvToLineProtocol_variousBufferSize(t *testing.T) { var tests = []struct { name string csv string @@ -117,6 +117,68 @@ func Test_CsvToLineProtocol(t *testing.T) { } } +// Test_CsvToLineProtocol_samples tests conversion of annotated CSV data to line protocol data +func Test_CsvToLineProtocol_samples(t *testing.T) { + var tests = []struct { + name string + csv string + lines string + err string + }{ + { + "queryResult_19452", // https://github.com/influxdata/influxdb/issues/19452 + "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string\n" + + "#group,false,false,true,true,false,false,true,true,true\n" + + "#default,_result,,,,,,,,\n" + + ",result,table,_start,_stop,_time,_value,_field,_measurement,host\n" + + ",,0,2020-08-26T22:59:23.598653Z,2020-08-26T23:00:23.598653Z,2020-08-26T22:59:30Z,15075651584,active,mem,ip-192-168-86-25.ec2.internal\n", + "mem,host=ip-192-168-86-25.ec2.internal active=15075651584i 1598482770000000000\n", + "", // no error + }, + { + "queryResult_19452_group_first", // issue 19452, but with group annotation first + "#group,false,false,true,true,false,false,true,true,true\n" + + "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string\n" + + "#default,_result,,,,,,,,\n" + + ",result,table,_start,_stop,_time,_value,_field,_measurement,host\n" + + ",,0,2020-08-26T22:59:23.598653Z,2020-08-26T23:00:23.598653Z,2020-08-26T22:59:30Z,15075651584,active,mem,ip-192-168-86-25.ec2.internal\n", + "mem,host=ip-192-168-86-25.ec2.internal active=15075651584i 1598482770000000000\n", + "", // no error + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + reader := CsvToLineProtocol(strings.NewReader(test.csv)) + 
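+			// Drain the converted output through a deliberately small buffer so
+			// table boundaries cross read boundaries; a read error is matched
+			// against the expected error text, otherwise the produced line
+			// protocol is accumulated and compared below.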
buffer := make([]byte, 100) + lines := make([]byte, 0, 100) + for { + n, err := reader.Read(buffer) + if err != nil { + if err == io.EOF { + break + } + if test.err != "" { + // fmt.Println(err) + if err := err.Error(); !strings.Contains(err, test.err) { + require.Equal(t, err, test.err) + } + return + } + require.Nil(t, err.Error()) + break + } + lines = append(lines, buffer[:n]...) + } + if test.err == "" { + require.Equal(t, test.lines, string(lines)) + } else { + require.Fail(t, "error message with '"+test.err+"' expected") + } + }) + } +} + // Test_CsvToLineProtocol_LogTableColumns checks correct logging of table columns func Test_CsvToLineProtocol_LogTableColumns(t *testing.T) { var buf bytes.Buffer diff --git a/pkg/csv2lp/csv_annotations.go b/pkg/csv2lp/csv_annotations.go index 8b6fa12f8d..5d9a2dff0a 100644 --- a/pkg/csv2lp/csv_annotations.go +++ b/pkg/csv2lp/csv_annotations.go @@ -91,7 +91,10 @@ var supportedAnnotations = []annotationComment{ setupColumn: func(column *CsvTableColumn, value string) { // standard flux query result annotation if strings.HasSuffix(value, "true") { - column.LinePart = linePartTag + // setup column's line part unless it is already set (#19452) + if column.LinePart == 0 { + column.LinePart = linePartTag + } } }, }, diff --git a/pkg/csv2lp/csv_table.go b/pkg/csv2lp/csv_table.go index 9c98fd2574..6f6b454a97 100644 --- a/pkg/csv2lp/csv_table.go +++ b/pkg/csv2lp/csv_table.go @@ -232,7 +232,7 @@ func (t *CsvTable) AddRow(row []string) bool { // detect data row or table header row if len(row[0]) == 0 || row[0][0] != '#' { if !t.readTableData { - // row must a header row now + // expect a header row t.lpColumnsValid = false // line protocol columns change if t.partBits == 0 { // create columns since no column anotations were processed From 1629655a5556d01296b1573e1db3bc69255ee594 Mon Sep 17 00:00:00 2001 From: Pavel Zavora Date: Sat, 12 Sep 2020 11:43:50 +0200 Subject: [PATCH 22/34] chore: update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ae7c66e08..2bbae3f27e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ need to update any InfluxDB CLI config profiles with the new port number. 1. [19331](https://github.com/influxdata/influxdb/pull/19331): Add description to auth influx command outputs. 1. [19392](https://github.com/influxdata/influxdb/pull/19392): Include the edge of the boundary we are observing. 1. [19453](https://github.com/influxdata/influxdb/pull/19453): Warn about duplicate tag names during influx write csv. +1. [19466](https://github.com/influxdata/influxdb/pull/19466): Do not override existing line part in group annotation. 
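The 19466 entry corresponds to the one-line guard in the patch above: a `#group,...,true` annotation may only classify a column as a tag when nothing has classified it yet, so the result no longer depends on whether #datatype or #group arrives first. Isolated, the idea looks like this (a sketch; the linePart constants are placeholders for the package's internal values):

    package main

    import (
    	"fmt"
    	"strings"
    )

    const (
    	linePartFree = 0 // not yet assigned to any line-protocol part
    	linePartTag  = 2 // placeholder value
    	linePartTime = 4 // placeholder value
    )

    type column struct{ LinePart int }

    // applyGroup mirrors the fixed setupColumn: a "...,true" group entry marks
    // the column as a tag only while the column is still unassigned.
    func applyGroup(c *column, value string) {
    	if strings.HasSuffix(value, "true") && c.LinePart == linePartFree {
    		c.LinePart = linePartTag
    	}
    }

    func main() {
    	c := column{LinePart: linePartTime}     // #datatype dateTime ran first
    	applyGroup(&c, "true")                  // #group,true must not demote it to a tag
    	fmt.Println(c.LinePart == linePartTime) // prints: true
    }
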
## v2.0.0-beta.16 [2020-08-07] From 7c76efd6ea6add9246dd4128a1a934fda1015cc5 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Mon, 14 Sep 2020 10:18:29 -0700 Subject: [PATCH 23/34] fix(cli): update annotation order to match UI (#19503) --- cmd/influx/query.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/influx/query.go b/cmd/influx/query.go index 6e3b6f4bdd..79851ab4bb 100644 --- a/cmd/influx/query.go +++ b/cmd/influx/query.go @@ -105,7 +105,7 @@ func fluxQueryF(cmd *cobra.Command, args []string) error { "query": q, "type": "flux", "dialect": map[string]interface{}{ - "annotations": []string{"datatype", "group", "default"}, + "annotations": []string{"group", "datatype", "default"}, "delimiter": ",", "header": true, }, From ca2055c16c4d69f7155413aebb6d694f65bfaca6 Mon Sep 17 00:00:00 2001 From: Ayan George Date: Wed, 16 Sep 2020 12:20:09 -0400 Subject: [PATCH 24/34] refactor: Replace ctx.Done() with ctx.Err() (#19546) * refactor: Replace ctx.Done() with ctx.Err() Prior to this commit we checked for context cancellation with a select block and context.Context.Done() without multiplexing over any other channel like: select { case <-ctx.Done(): // handle cancellation default: // fallthrough } This commit replaces those type of blocks with a simple check of ctx.Err(). This has the following benefits: * Calling ctx.Err() is much faster than entering a select block. * ctx.Done() allocates a channel when called for the first time. * Testing the result of ctx.Err() is a reliable way of determininging if a context.Context value has been canceled. * fix: Fix data race in execDeleteTagValueEntry() --- influxql/query/executor.go | 10 +--------- kv/store.go | 6 ++---- telemetry/push.go | 8 ++++---- tsdb/index/tsi1/log_file.go | 3 +++ v1/coordinator/statement_executor.go | 11 +++-------- 5 files changed, 13 insertions(+), 25 deletions(-) diff --git a/influxql/query/executor.go b/influxql/query/executor.go index 55f7bf6e24..83e6d36369 100644 --- a/influxql/query/executor.go +++ b/influxql/query/executor.go @@ -311,15 +311,7 @@ LOOP: e.Metrics.Requests.WithLabelValues(statusLabel).Inc() // Check if the query was interrupted during an uninterruptible statement. - interrupted := false - select { - case <-ctx.Done(): - interrupted = true - default: - // Query has not been interrupted. - } - - if interrupted { + if err := ctx.Err(); err != nil { statusLabel = control.LabelInterruptedErr e.Metrics.Requests.WithLabelValues(statusLabel).Inc() break diff --git a/kv/store.go b/kv/store.go index 32e4544292..32792e2bd4 100644 --- a/kv/store.go +++ b/kv/store.go @@ -249,10 +249,8 @@ func WalkCursor(ctx context.Context, cursor ForwardCursor, visit VisitFunc) (err return err } - select { - case <-ctx.Done(): - return ctx.Err() - default: + if err := ctx.Err(); err != nil { + return err } } diff --git a/telemetry/push.go b/telemetry/push.go index 1c89dca09f..71873063ff 100644 --- a/telemetry/push.go +++ b/telemetry/push.go @@ -78,10 +78,10 @@ func (p *Pusher) push(ctx context.Context) error { req.Header.Set("Content-Type", string(p.PushFormat)) res, err := p.Client.Do(req) - select { - case <-ctx.Done(): - return ctx.Err() - default: + + // FIXME: consider why we're checking for cancellation here. 
+ if err := ctx.Err(); err != nil { + return err } if err != nil { diff --git a/tsdb/index/tsi1/log_file.go b/tsdb/index/tsi1/log_file.go index a9f32e0963..acf75457eb 100644 --- a/tsdb/index/tsi1/log_file.go +++ b/tsdb/index/tsi1/log_file.go @@ -108,6 +108,8 @@ func (f *LogFile) bytes() int { // Open reads the log from a file and validates all the checksums. func (f *LogFile) Open() error { + f.mu.Lock() + defer f.mu.Unlock() if err := f.open(); err != nil { f.Close() return err @@ -717,6 +719,7 @@ func (f *LogFile) execSeriesEntry(e *LogEntry) { } ts.tagValues[string(v)] = tv + mm.tagSet[string(k)] = ts } diff --git a/v1/coordinator/statement_executor.go b/v1/coordinator/statement_executor.go index 2f6ddd5da7..fdaebb606a 100644 --- a/v1/coordinator/statement_executor.go +++ b/v1/coordinator/statement_executor.go @@ -203,11 +203,8 @@ func (e *StatementExecutor) executeExplainAnalyzeStatement(ctx context.Context, goto CLEANUP } else if row == nil { // Check if the query was interrupted while emitting. - select { - case <-ctx.Done(): - err = ctx.Err() + if err = ctx.Err(); err != nil { goto CLEANUP - default: } break } @@ -266,10 +263,8 @@ func (e *StatementExecutor) executeSelectStatement(ctx context.Context, stmt *in return err } else if row == nil { // Check if the query was interrupted while emitting. - select { - case <-ctx.Done(): - return ctx.Err() - default: + if err := ctx.Err(); err != nil { + return err } break } From 14c664daa45d6bba494ba7d58e8c4786e890b4cd Mon Sep 17 00:00:00 2001 From: Stuart Carnie Date: Wed, 16 Sep 2020 11:32:59 -0700 Subject: [PATCH 25/34] chore: Format of url.Error message has changed Provide a function to generate the error programmatically, which is resilient to changes in Go versions. --- notification/endpoint/endpoint_test.go | 31 ++++++++++++++++++++------ 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/notification/endpoint/endpoint_test.go b/notification/endpoint/endpoint_test.go index 278d398676..db16e9130f 100644 --- a/notification/endpoint/endpoint_test.go +++ b/notification/endpoint/endpoint_test.go @@ -2,12 +2,15 @@ package endpoint_test import ( "encoding/json" + "fmt" "net/http" + "net/url" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/kit/errors" "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/notification/endpoint" influxTesting "github.com/influxdata/influxdb/v2/testing" @@ -28,9 +31,10 @@ var goodBase = endpoint.Base{ func TestValidEndpoint(t *testing.T) { cases := []struct { - name string - src influxdb.NotificationEndpoint - err error + name string + src influxdb.NotificationEndpoint + err error + errFn func(*testing.T) error }{ { name: "invalid endpoint id", @@ -102,9 +106,16 @@ func TestValidEndpoint(t *testing.T) { Base: goodBase, URL: "posts://er:{DEf1=ghi@:5432/db?ssl", }, - err: &influxdb.Error{ - Code: influxdb.EInvalid, - Msg: "slack endpoint URL is invalid: parse posts://er:{DEf1=ghi@:5432/db?ssl: net/url: invalid userinfo", + errFn: func(t *testing.T) error { + err := url.Error{ + Op: "parse", + URL: "posts://er:{DEf1=ghi@:5432/db?ssl", + Err: errors.New("net/url: invalid userinfo"), + } + return &influxdb.Error{ + Code: influxdb.EInvalid, + Msg: fmt.Sprintf("slack endpoint URL is invalid: %s", err.Error()), + } }, }, { @@ -186,7 +197,13 @@ func TestValidEndpoint(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { got := c.src.Valid() - influxTesting.ErrorsEqual(t, got, c.err) + var exp 
error + if c.errFn != nil { + exp = c.errFn(t) + } else { + exp = c.err + } + influxTesting.ErrorsEqual(t, got, exp) }) } } From 8753a7fd08e4e1069756026b6b25bb3671352a29 Mon Sep 17 00:00:00 2001 From: Stuart Carnie Date: Wed, 16 Sep 2020 11:33:39 -0700 Subject: [PATCH 26/34] chore: Fix invalid string casts from integers Newer Go versions generate a compile time error --- authorization/http_server_test.go | 2 +- gather/scraper_test.go | 2 +- tsdb/index/inmem/meta_test.go | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/authorization/http_server_test.go b/authorization/http_server_test.go index 9301e30bac..83c0ab0cce 100644 --- a/authorization/http_server_test.go +++ b/authorization/http_server_test.go @@ -191,7 +191,7 @@ func TestService_handlePostAuthorization(t *testing.T) { httprouter.Params{ { Key: "userID", - Value: string(tt.args.session.UserID), + Value: fmt.Sprintf("%d", tt.args.session.UserID), }, })) diff --git a/gather/scraper_test.go b/gather/scraper_test.go index 10c21633d4..566cbb0a0f 100644 --- a/gather/scraper_test.go +++ b/gather/scraper_test.go @@ -266,7 +266,7 @@ func (s *mockStorage) UpdateTarget(ctx context.Context, update *influxdb.Scraper defer s.Unlock() for k, v := range s.Targets { - if v.ID.String() == string(update.ID) { + if v.ID.String() == update.ID.String() { s.Targets[k] = *update break } diff --git a/tsdb/index/inmem/meta_test.go b/tsdb/index/inmem/meta_test.go index 3704974df9..be8f3d1ca3 100644 --- a/tsdb/index/inmem/meta_test.go +++ b/tsdb/index/inmem/meta_test.go @@ -175,11 +175,11 @@ func TestTagKeyValue_Concurrent(t *testing.T) { case 1: v.Cardinality() case 2: - v.Contains(string(rand.Intn(52) + 65)) + v.Contains(fmt.Sprintf("%d", rand.Intn(52)+65)) case 3: - v.InsertSeriesIDByte([]byte(string(rand.Intn(52)+65)), rand.Uint64()%1000) + v.InsertSeriesIDByte([]byte(fmt.Sprintf("%d", rand.Intn(52)+65)), rand.Uint64()%1000) case 4: - v.Load(string(rand.Intn(52) + 65)) + v.Load(fmt.Sprintf("%d", rand.Intn(52)+65)) case 5: v.Range(func(tagValue string, a seriesIDs) bool { return rand.Intn(10) == 0 From af0c328095016d4641beeb78b4cc56007f697c03 Mon Sep 17 00:00:00 2001 From: Gershon Shif Date: Wed, 16 Sep 2020 12:36:44 -0700 Subject: [PATCH 27/34] chore(packaging): build rpm and deb packages (#19567) - Update CIrcleCI configuration to start release process on an RC build - Update .goreleaser.yml: - Start building armel and armhf binaries and rpm and debian packages. - Generate sha256 checksum file. 
- launcher.go: do not use `max` module to escape integeroverflow problem for armel and armhf builds - Start using `v0.142.0` of goreleaser - Added pre and post install/uninstall scripts for rpm amd deb packages --- .circleci/config.yml | 12 +- .goreleaser.yml | 48 ++++++- Makefile | 2 +- cmd/influxd/launcher/launcher.go | 6 +- scripts/influxdb.service | 19 +++ scripts/init.sh | 223 +++++++++++++++++++++++++++++++ scripts/logrotate | 8 ++ scripts/post-install.sh | 74 ++++++++++ scripts/post-uninstall.sh | 58 ++++++++ scripts/pre-install.sh | 37 +++++ 10 files changed, 476 insertions(+), 11 deletions(-) create mode 100644 scripts/influxdb.service create mode 100644 scripts/init.sh create mode 100644 scripts/logrotate create mode 100644 scripts/post-install.sh create mode 100644 scripts/post-uninstall.sh create mode 100644 scripts/pre-install.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 8a0fd032b6..2fd6dd5a84 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -553,31 +553,31 @@ workflows: branches: ignore: /.*/ tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/ + only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/ - golint: filters: branches: ignore: /.*/ tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/ + only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/ - lint-feature-flags: filters: branches: ignore: /.*/ tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/ + only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/ - jstest: filters: branches: ignore: /.*/ tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/ + only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/ - jslint: filters: branches: ignore: /.*/ tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/ + only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/ - release: requires: - gotest @@ -589,4 +589,4 @@ workflows: branches: ignore: /.*/ tags: - only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/ + only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/ diff --git a/.goreleaser.yml b/.goreleaser.yml index 9d7880e9e0..a2419dea32 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -7,6 +7,11 @@ builds: goarch: - amd64 - arm64 + - arm + goarm: + - 5 + - 6 + main: ./cmd/influx/ flags: - -tags={{if and (eq .Os "linux") (eq .Arch "amd64")}}osusergo,netgo,static_build{{end}} @@ -19,6 +24,7 @@ builds: ldflags: - -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} {{if and (eq .Os "linux") (eq .Arch "amd64")}}-extldflags "-fno-PIC -static -Wl,-z,stack-size=8388608"{{end}} binary: influx + - id: influxd goos: - linux @@ -26,6 +32,11 @@ builds: goarch: - amd64 - arm64 + - arm + goarm: + - 5 + - 6 + main: ./cmd/influxd/ flags: - -tags=assets{{if and (eq .Os "linux") (eq .Arch "amd64")}},osusergo,netgo,static_build{{end}} @@ -41,6 +52,35 @@ builds: hooks: pre: make generate +nfpms: + - + id: "influxdata" + builds: ["influx", "influxd"] + formats: + - deb + - rpm + bindir: /usr/bin + files: + "scripts/init.sh": "/usr/lib/influxdb/scripts/init.sh" + "scripts/influxdb.service": "/usr/lib/influxdb/scripts/influxdb.service" + "scripts/logrotate": "/etc/logrotate.d/influxdb" + scripts: + preinstall: "scripts/pre-install.sh" + postinstall: "scripts/post-install.sh" + postremove: "scripts/post-uninstall.sh" + overrides: + rpm: + replacements: + amd64: x86_64 + file_name_template: "influxdb-{{ .Version }}.{{ .Arch }}{{if .Arm}}{{ if eq .Arm \"5\" }}el{{end}}{{ end }}{{if .Arm}}{{ if eq .Arm \"6\" 
}}hf{{end}}{{ end }}" + deb: + file_name_template: "influxdb_{{ .Version }}_{{ .Arch }}{{if .Arm}}{{ if eq .Arm \"5\" }}el{{end}}{{ end }}{{if .Arm}}{{ if eq .Arm \"6\" }}hf{{end}}{{ end }}" + vendor: InfluxData + homepage: https://influxdata.com + maintainer: support@influxdb.com + description: Distributed time-series database. + license: Proprietary + archives: - id: influxdb_client builds: ["influx"] @@ -49,7 +89,7 @@ archives: format_overrides: - goos: windows format: zip - name_template: "influxdb_client_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + name_template: "influxdb_client_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{if .Arm}}{{ if eq .Arm \"5\" }}el{{end}}{{ end }}{{if .Arm}}{{ if eq .Arm \"6\" }}hf{{end}}{{ end }}" files: - LICENSE - README.md @@ -59,7 +99,7 @@ archives: format_overrides: - goos: windows format: zip - name_template: "influxdb_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + name_template: "influxdb-{{ .Version }}_{{ .Os }}_{{ .Arch }}{{if .Arm}}{{ if eq .Arm \"5\" }}el{{end}}{{ end }}{{if .Arm}}{{ if eq .Arm \"6\" }}hf{{end}}{{ end }}" files: - LICENSE - README.md @@ -70,6 +110,10 @@ blobs: region: "us-east-1" folder: "influxdb/releases/" +checksum: + name_template: "influxdb_{{ .Version }}.sha256" + algorithm: sha256 + dockers: - goos: linux goarch: amd64 diff --git a/Makefile b/Makefile index af422bca12..0910f1e81f 100644 --- a/Makefile +++ b/Makefile @@ -162,7 +162,7 @@ build: all goreleaser: curl -sfL -o goreleaser-install https://install.goreleaser.com/github.com/goreleaser/goreleaser.sh - sh goreleaser-install v0.135.0 + sh goreleaser-install v0.142.0 go build -o $(GOPATH)/bin/pkg-config github.com/influxdata/pkg-config install xcc.sh $(GOPATH)/bin/xcc diff --git a/cmd/influxd/launcher/launcher.go b/cmd/influxd/launcher/launcher.go index 5559e9497c..5bfaff7205 100644 --- a/cmd/influxd/launcher/launcher.go +++ b/cmd/influxd/launcher/launcher.go @@ -6,12 +6,12 @@ import ( "errors" "fmt" "io" - "math" "net" nethttp "net/http" _ "net/http/pprof" // needed to add pprof to our binary. "os" "path/filepath" + "strconv" "sync" "time" @@ -91,6 +91,8 @@ const ( LogTracing = "log" // JaegerTracing enables tracing via the Jaeger client library JaegerTracing = "jaeger" + // Max Integer + MaxInt = 1</dev/null; then + # Process is already up + log_success_msg "$NAME process is already running" + return 0 + fi + else + su -s /bin/sh -c "touch $PIDFILE" $USER &>/dev/null + if [ $? -ne 0 ]; then + log_failure_msg "$PIDFILE not writable, check permissions" + exit 5 + fi + fi + + # Bump the file limits, before launching the daemon. These will + # carry over to launched processes. + ulimit -n $OPEN_FILE_LIMIT + if [ $? -ne 0 ]; then + log_failure_msg "Unable to set ulimit to $OPEN_FILE_LIMIT" + exit 1 + fi + + # Launch process + echo "Starting $NAME..." 
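+    # Prefer start-stop-daemon when available: it launches influxd under the
+    # influxdb user/group and records the pidfile; otherwise fall back to su.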
+ if command -v start-stop-daemon &>/dev/null; then + start-stop-daemon \ + --chuid $USER:$GROUP \ + --start \ + --quiet \ + --pidfile $PIDFILE \ + --exec $DAEMON \ + -- \ + $INFLUXD_OPTS >>$STDOUT 2>>$STDERR & + else + local CMD="$DAEMON $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &" + su -s /bin/sh -c "$CMD" $USER + fi + + # Sleep to verify process is still up + sleep 1 + echo $(pgrep -u $USER -f influxd) > $PIDFILE + if [ -f $PIDFILE ]; then + # PIDFILE exists + if kill -0 $(cat $PIDFILE) &>/dev/null; then + # PID up, service running + log_success_msg "$NAME process was started" + return 0 + fi + fi + log_failure_msg "$NAME process was unable to start" + exit 1 +} + +function stop() { + # Stop the daemon. + if [ -f $PIDFILE ]; then + local PID="$(cat $PIDFILE)" + if kill -0 $PID &>/dev/null; then + echo "Stopping $NAME..." + # Process still up, send SIGTERM and remove PIDFILE + kill -s TERM $PID &>/dev/null && rm -f "$PIDFILE" &>/dev/null + n=0 + while true; do + # Enter loop to ensure process is stopped + kill -0 $PID &>/dev/null + if [ "$?" != "0" ]; then + # Process stopped, break from loop + log_success_msg "$NAME process was stopped" + return 0 + fi + + # Process still up after signal, sleep and wait + sleep 1 + n=$(expr $n + 1) + if [ $n -eq 30 ]; then + # After 30 seconds, send SIGKILL + echo "Timeout exceeded, sending SIGKILL..." + kill -s KILL $PID &>/dev/null + elif [ $? -eq 40 ]; then + # After 40 seconds, error out + log_failure_msg "could not stop $NAME process" + exit 1 + fi + done + fi + fi + log_success_msg "$NAME process already stopped" +} + +function restart() { + # Restart the daemon. + stop + start +} + +function status() { + # Check the status of the process. + if [ -f $PIDFILE ]; then + PID="$(cat $PIDFILE)" + if kill -0 $PID &>/dev/null; then + log_success_msg "$NAME process is running" + exit 0 + fi + fi + log_failure_msg "$NAME process is not running" + exit 1 +} + +case $1 in + start) + start + ;; + + stop) + stop + ;; + + restart) + restart + ;; + + status) + status + ;; + + version) + $DAEMON version + ;; + + *) + # For invalid arguments, print the usage message. + echo "Usage: $0 {start|stop|restart|status|version}" + exit 2 + ;; +esac diff --git a/scripts/logrotate b/scripts/logrotate new file mode 100644 index 0000000000..de410d48c6 --- /dev/null +++ b/scripts/logrotate @@ -0,0 +1,8 @@ +/var/log/influxdb/influxd.log { + daily + rotate 7 + missingok + dateext + copytruncate + compress +} diff --git a/scripts/post-install.sh b/scripts/post-install.sh new file mode 100644 index 0000000000..9a090dd936 --- /dev/null +++ b/scripts/post-install.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +BIN_DIR=/usr/bin +DATA_DIR=/var/lib/influxdb +LOG_DIR=/var/log/influxdb +SCRIPT_DIR=/usr/lib/influxdb/scripts +LOGROTATE_DIR=/etc/logrotate.d + +function install_init { + cp -f $SCRIPT_DIR/init.sh /etc/init.d/influxdb + chmod +x /etc/init.d/influxdb +} + +function install_systemd { + cp -f $SCRIPT_DIR/influxdb.service /lib/systemd/system/influxdb.service + systemctl enable influxdb +} + +function install_update_rcd { + update-rc.d influxdb defaults +} + +function install_chkconfig { + chkconfig --add influxdb +} + +# Add defaults file, if it doesn't exist +if [[ ! 
-f /etc/default/influxdb ]]; then + touch /etc/default/influxdb +fi + +# Remove legacy symlink, if it exists +if [[ -L /etc/init.d/influxdb ]]; then + rm -f /etc/init.d/influxdb +fi + +# Distribution-specific logic +if [[ -f /etc/redhat-release ]]; then + # RHEL-variant logic + if command -v systemctl &>/dev/null; then + install_systemd + else + # Assuming sysv + install_init + install_chkconfig + fi +elif [[ -f /etc/debian_version ]]; then + # Ownership for RH-based platforms is set in build.py via the `rmp-attr` option. + # We perform ownership change only for Debian-based systems. + # Moving these lines out of this if statement would make `rmp -V` fail after installation. + chown -R -L influxdb:influxdb $LOG_DIR + chown -R -L influxdb:influxdb $DATA_DIR + chmod 755 $LOG_DIR + chmod 755 $DATA_DIR + + # Debian/Ubuntu logic + if command -v systemctl &>/dev/null; then + install_systemd + else + # Assuming sysv + install_init + install_update_rcd + fi +elif [[ -f /etc/os-release ]]; then + source /etc/os-release + if [[ "$NAME" = "Amazon Linux" ]]; then + # Amazon Linux 2+ logic + install_systemd + elif [[ "$NAME" = "Amazon Linux AMI" ]]; then + # Amazon Linux logic + install_init + install_chkconfig + fi +fi diff --git a/scripts/post-uninstall.sh b/scripts/post-uninstall.sh new file mode 100644 index 0000000000..fdfd6644ee --- /dev/null +++ b/scripts/post-uninstall.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +function disable_systemd { + systemctl disable influxdb + rm -f /lib/systemd/system/influxdb.service +} + +function disable_update_rcd { + update-rc.d -f influxdb remove + rm -f /etc/init.d/influxdb +} + +function disable_chkconfig { + chkconfig --del influxdb + rm -f /etc/init.d/influxdb +} + +if [[ -f /etc/redhat-release ]]; then + # RHEL-variant logic + if [[ "$1" = "0" ]]; then + # InfluxDB is no longer installed, remove from init system + rm -f /etc/default/influxdb + + if command -v systemctl &>/dev/null; then + disable_systemd + else + # Assuming sysv + disable_chkconfig + fi + fi +elif [[ -f /etc/lsb-release ]]; then + # Debian/Ubuntu logic + if [[ "$1" != "upgrade" ]]; then + # Remove/purge + rm -f /etc/default/influxdb + + if command -v systemctl &>/dev/null; then + disable_systemd + else + # Assuming sysv + disable_update_rcd + fi + fi +elif [[ -f /etc/os-release ]]; then + source /etc/os-release + if [[ "$ID" = "amzn" ]] && [[ "$1" = "0" ]]; then + # InfluxDB is no longer installed, remove from init system + rm -f /etc/default/influxdb + + if [[ "$NAME" = "Amazon Linux" ]]; then + # Amazon Linux 2+ logic + disable_systemd + elif [[ "$NAME" = "Amazon Linux AMI" ]]; then + # Amazon Linux logic + disable_chkconfig + fi + fi +fi diff --git a/scripts/pre-install.sh b/scripts/pre-install.sh new file mode 100644 index 0000000000..2eee668151 --- /dev/null +++ b/scripts/pre-install.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +DATA_DIR=/var/lib/influxdb +USER=influxdb +GROUP=influxdb +LOG_DIR=/var/log/influxdb + +if ! id influxdb &>/dev/null; then + useradd --system -U -M influxdb -s /bin/false -d $DATA_DIR +fi + +# check if DATA_DIR exists +if [ ! -d "$DATA_DIR" ]; then + mkdir -p $DATA_DIR + chown $USER:$GROUP $DATA_DIR +fi + +# check if LOG_DIR exists +if [ ! -d "$LOG_DIR" ]; then + mkdir -p $LOG_DIR + chown $USER:$GROUP $DATA_DIR +fi + +if [[ -d /etc/opt/influxdb ]]; then + # Legacy configuration found + if [[ ! 
-d /etc/influxdb ]]; then + # New configuration does not exist, move legacy configuration to new location + echo -e "Please note, InfluxDB's configuration is now located at '/etc/influxdb' (previously '/etc/opt/influxdb')." + mv -vn /etc/opt/influxdb /etc/influxdb + + if [[ -f /etc/influxdb/influxdb.conf ]]; then + backup_name="influxdb.conf.$(date +%s).backup" + echo "A backup of your current configuration can be found at: /etc/influxdb/$backup_name" + cp -a /etc/influxdb/influxdb.conf /etc/influxdb/$backup_name + fi + fi +fi From e7cbbaa722303f3e806656233a1c5cbe9b47f320 Mon Sep 17 00:00:00 2001 From: "Christopher M. Wolff" Date: Wed, 16 Sep 2020 17:59:15 -0700 Subject: [PATCH 28/34] feat: upgrade Flux to v0.83.2 (#19569) --- cmd/influxd/launcher/launcher.go | 3 - cmd/influxd/launcher/query_test.go | 193 +- cmd/influxd/main.go | 7 +- flags.yml | 13 + go.mod | 16 +- go.sum | 83 +- http/query.go | 24 +- http/query_handler_test.go | 2 +- http/query_test.go | 177 +- kit/feature/list.go | 32 + pkg/flux/README.md | 5 - pkg/flux/ast/edit/option_editor.go | 130 - pkg/flux/ast/edit/task_editor.go | 109 - pkg/flux/ast/helpers.go | 52 - pkg/flux/execute/table/diff.go | 138 - pkg/flux/execute/table/iterator.go | 14 - pkg/flux/execute/table/sort.go | 32 - pkg/flux/execute/table/static/static.go | 703 ----- pkg/flux/execute/table/stringify.go | 151 - pkg/flux/internal/errors/errors.go | 92 - pkg/flux/internal/execute/table/buffered.go | 87 - pkg/flux/internal/execute/table/iterator.go | 5 - pkger/parser.go | 11 +- pkger/parser_models.go | 17 +- query/bridges.go | 9 + query/bridges_test.go | 3 +- query/builtin/builtin.go | 4 +- query/builtinlazy/builtin.go | 20 - query/control/controller.go | 26 +- query/control/controller_test.go | 100 + query/encode.go | 2 - query/fluxlang/service.go | 6 +- query/influxql/compiler.go | 13 +- query/influxql/end_to_end_test.go | 2 + query/logging.go | 48 +- query/logging_test.go | 130 +- query/mock/service.go | 5 +- query/promql/internal/promqltests/engine.go | 2 +- query/promql/internal/promqltests/go.mod | 21 +- query/promql/internal/promqltests/go.sum | 40 +- query/promql/query_test.go | 212 +- query/promql/types.go | 14 +- query/querytest/compile.go | 62 - query/querytest/compiler.go | 6 +- query/service_test.go | 2 +- query/spec.go | 49 - query/stdlib/experimental/to.go | 46 +- query/stdlib/experimental/to_test.go | 55 +- query/stdlib/influxdata/influxdb/buckets.go | 43 +- .../influxdata/influxdb/dependencies.go | 5 +- query/stdlib/influxdata/influxdb/from.go | 129 +- query/stdlib/influxdata/influxdb/from_test.go | 162 +- query/stdlib/influxdata/influxdb/operators.go | 30 +- query/stdlib/influxdata/influxdb/rules.go | 660 ++++- .../stdlib/influxdata/influxdb/rules_test.go | 2440 +++++++++++++++-- query/stdlib/influxdata/influxdb/source.go | 166 +- .../influxdb/source_internal_test.go | 10 + .../stdlib/influxdata/influxdb/source_test.go | 173 +- query/stdlib/influxdata/influxdb/storage.go | 91 +- .../influxdata/influxdb/storage_predicate.go | 45 +- query/stdlib/influxdata/influxdb/to.go | 118 +- query/stdlib/influxdata/influxdb/to_test.go | 217 +- .../influxdata/influxdb/v1/databases.go | 87 +- query/stdlib/testing/end_to_end_test.go | 109 +- query/stdlib/testing/testing.go | 34 +- storage/flux/reader.go | 72 +- task.go | 2 +- task/backend/executor/executor.go | 82 +- task/backend/executor/support_test.go | 12 +- task/options/options.go | 11 +- ui/cypress/e2e/tasks.test.ts | 2 +- 71 files changed, 4443 insertions(+), 3230 deletions(-) delete mode 100644 
pkg/flux/README.md delete mode 100644 pkg/flux/ast/edit/option_editor.go delete mode 100644 pkg/flux/ast/edit/task_editor.go delete mode 100644 pkg/flux/ast/helpers.go delete mode 100644 pkg/flux/execute/table/diff.go delete mode 100644 pkg/flux/execute/table/iterator.go delete mode 100644 pkg/flux/execute/table/sort.go delete mode 100644 pkg/flux/execute/table/static/static.go delete mode 100644 pkg/flux/execute/table/stringify.go delete mode 100644 pkg/flux/internal/errors/errors.go delete mode 100644 pkg/flux/internal/execute/table/buffered.go delete mode 100644 pkg/flux/internal/execute/table/iterator.go delete mode 100644 query/builtinlazy/builtin.go delete mode 100644 query/querytest/compile.go delete mode 100644 query/spec.go create mode 100644 query/stdlib/influxdata/influxdb/source_internal_test.go rename storage/flux/predicate.go => query/stdlib/influxdata/influxdb/storage_predicate.go (80%) diff --git a/cmd/influxd/launcher/launcher.go b/cmd/influxd/launcher/launcher.go index 5bfaff7205..e5cb5aaeae 100644 --- a/cmd/influxd/launcher/launcher.go +++ b/cmd/influxd/launcher/launcher.go @@ -48,7 +48,6 @@ import ( "github.com/influxdata/influxdb/v2/pkger" infprom "github.com/influxdata/influxdb/v2/prometheus" "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/builtinlazy" "github.com/influxdata/influxdb/v2/query/control" "github.com/influxdata/influxdb/v2/query/fluxlang" "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" @@ -137,8 +136,6 @@ func cmdRunE(ctx context.Context, l *Launcher) func() error { // exit with SIGINT and SIGTERM ctx = signals.WithStandardSignals(ctx) - builtinlazy.Initialize() - if err := l.run(ctx); err != nil { return err } else if !l.Running() { diff --git a/cmd/influxd/launcher/query_test.go b/cmd/influxd/launcher/query_test.go index 8ee924a426..bc9653225e 100644 --- a/cmd/influxd/launcher/query_test.go +++ b/cmd/influxd/launcher/query_test.go @@ -19,7 +19,10 @@ import ( "github.com/influxdata/flux/csv" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/execute/executetest" + "github.com/influxdata/flux/execute/table" "github.com/influxdata/flux/lang" + "github.com/influxdata/flux/memory" + "github.com/influxdata/flux/runtime" "github.com/influxdata/flux/values" "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" @@ -27,7 +30,6 @@ import ( "github.com/influxdata/influxdb/v2/kit/feature" "github.com/influxdata/influxdb/v2/kit/prom" "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/pkg/flux/execute/table" "github.com/influxdata/influxdb/v2/query" ) @@ -221,7 +223,7 @@ func queryPoints(ctx context.Context, t *testing.T, l *launcher.TestLauncher, op if d.verbose { t.Logf("query:\n%s", qs) } - pkg, err := flux.Parse(qs) + pkg, err := runtime.ParseToJSON(qs) if err != nil { t.Fatal(err) } @@ -751,6 +753,193 @@ from(bucket: "%s") } } +type TestQueryProfiler struct{ + start int64 +} + +func (s TestQueryProfiler) Name() string { + return fmt.Sprintf("query%d", s.start) +} + +func (s TestQueryProfiler) GetResult(q flux.Query, alloc *memory.Allocator) (flux.Table, error) { + groupKey := execute.NewGroupKey( + []flux.ColMeta{ + { + Label: "_measurement", + Type: flux.TString, + }, + }, + []values.Value{ + values.NewString(fmt.Sprintf("profiler/query%d", s.start)), + }, + ) + b := execute.NewColListTableBuilder(groupKey, alloc) + colMeta := []flux.ColMeta{ + { + Label: "_measurement", + Type: flux.TString, + }, + { + Label: "TotalDuration", 
+ Type: flux.TInt, + }, + { + Label: "CompileDuration", + Type: flux.TInt, + }, + { + Label: "QueueDuration", + Type: flux.TInt, + }, + { + Label: "PlanDuration", + Type: flux.TInt, + }, + { + Label: "RequeueDuration", + Type: flux.TInt, + }, + { + Label: "ExecuteDuration", + Type: flux.TInt, + }, + { + Label: "Concurrency", + Type: flux.TInt, + }, + { + Label: "MaxAllocated", + Type: flux.TInt, + }, + { + Label: "TotalAllocated", + Type: flux.TInt, + }, + { + Label: "RuntimeErrors", + Type: flux.TString, + }, + { + Label: "influxdb/scanned-bytes", + Type: flux.TInt, + }, + { + Label: "influxdb/scanned-values", + Type: flux.TInt, + }, + { + Label: "flux/query-plan", + Type: flux.TString, + }, + } + colData := []interface{} { + fmt.Sprintf("profiler/query%d", s.start), + s.start, + s.start + 1, + s.start + 2, + s.start + 3, + s.start + 4, + s.start + 5, + s.start + 6, + s.start + 7, + s.start + 8, + "error1\nerror2", + s.start + 9, + s.start + 10, + "query plan", + } + for _, col := range colMeta { + if _, err := b.AddCol(col); err != nil { + return nil, err + } + } + for i := 0; i < len(colData); i++ { + if intValue, ok := colData[i].(int64); ok { + b.AppendInt(i, intValue) + } else { + b.AppendString(i, colData[i].(string)) + } + } + tbl, err := b.Table() + if err != nil { + return nil, err + } + return tbl, nil +} + +func TestFluxProfiler(t *testing.T) { + testcases := []struct { + name string + data []string + query string + want string + }{ + { + name: "range last single point start time", + data: []string{ + "m,tag=a f=1i 1", + }, + query: ` +option profiler.enabledProfilers = ["query0", "query100", "query100", "NonExistentProfiler"] +from(bucket: v.bucket) + |> range(start: 1970-01-01T00:00:00.000000001Z, stop: 1970-01-01T01:00:00Z) + |> last() +`, + want: ` +#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string +#group,false,false,true,true,false,false,true,true,true +#default,_result,,,,,,,, +,result,table,_start,_stop,_time,_value,_field,_measurement,tag +,,0,1970-01-01T00:00:00.000000001Z,1970-01-01T01:00:00Z,1970-01-01T00:00:00.000000001Z,1,f,m,a + +#datatype,string,long,string,long,long,long,long,long,long,long,long,long,string,string,long,long +#group,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false +#default,_profiler,,,,,,,,,,,,,,, +,result,table,_measurement,TotalDuration,CompileDuration,QueueDuration,PlanDuration,RequeueDuration,ExecuteDuration,Concurrency,MaxAllocated,TotalAllocated,RuntimeErrors,flux/query-plan,influxdb/scanned-bytes,influxdb/scanned-values +,,0,profiler/query0,0,1,2,3,4,5,6,7,8,"error1 +error2","query plan",9,10 +,,1,profiler/query100,100,101,102,103,104,105,106,107,108,"error1 +error2","query plan",109,110 +`, + }, + } + execute.RegisterProfilers(&TestQueryProfiler{}, &TestQueryProfiler{start: 100}) + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + l := launcher.RunTestLauncherOrFail(t, ctx, nil) + + l.SetupOrFail(t) + defer l.ShutdownOrFail(t, ctx) + + l.WritePointsOrFail(t, strings.Join(tc.data, "\n")) + + queryStr := "import \"profiler\"\nv = {bucket: " + "\"" + l.Bucket.Name + "\"" + "}\n" + tc.query + req := &query.Request{ + Authorization: l.Auth, + OrganizationID: l.Org.ID, + Compiler: lang.FluxCompiler{ + Query: queryStr, + }, + } + if got, err := l.FluxQueryService().Query(ctx, req); err != nil { + t.Error(err) + } else { + dec := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{}) + want, err := 
dec.Decode(ioutil.NopCloser(strings.NewReader(tc.want))) + if err != nil { + t.Fatal(err) + } + defer want.Release() + + if err := executetest.EqualResultIterators(want, got); err != nil { + t.Fatal(err) + } + } + }) + } +} + func TestQueryPushDowns(t *testing.T) { t.Skip("Not supported yet") testcases := []struct { diff --git a/cmd/influxd/main.go b/cmd/influxd/main.go index b9f3ff8b26..8b9819382a 100644 --- a/cmd/influxd/main.go +++ b/cmd/influxd/main.go @@ -7,10 +7,10 @@ import ( "os" "time" - "github.com/influxdata/flux" "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" "github.com/influxdata/influxdb/v2/cmd/influxd/upgrade" + _ "github.com/influxdata/influxdb/v2/query/builtin" _ "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" _ "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" "github.com/spf13/cobra" @@ -43,11 +43,6 @@ func main() { }, ) - // TODO: this should be removed in the future: https://github.com/influxdata/influxdb/issues/16220 - if os.Getenv("QUERY_TRACING") == "1" { - flux.EnableExperimentalTracing() - } - if err := rootCmd.Execute(); err != nil { os.Exit(1) } diff --git a/flags.yml b/flags.yml index 859b6991f7..f14edbdadd 100644 --- a/flags.yml +++ b/flags.yml @@ -83,6 +83,13 @@ contact: Query Team lifetime: temporary +- name: Query Tracing + description: Turn on query tracing for queries that are sampled + key: queryTracing + default: false + contact: Query Team + lifetime: permanent + - name: Simple Task Options Extraction description: Simplified task options extraction to avoid undefined functions when saving tasks key: simpleTaskOptionsExtraction @@ -133,6 +140,12 @@ default: false contact: Compute Team +- name: Inject Latest Success Time + description: Inject the latest successful task run timestamp into a Task query extern when executing. 
+ key: injectLatestSuccessTime + default: false + contact: Compute Team + - name: Enforce Organization Dashboard Limits description: Enforces the default limit params for the dashboards api when orgs are set key: enforceOrgDashboardLimits diff --git a/go.mod b/go.mod index bc06235750..b2cfa5dbdc 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,7 @@ module github.com/influxdata/influxdb/v2 go 1.13 require ( - cloud.google.com/go/bigtable v1.3.0 // indirect github.com/BurntSushi/toml v0.3.1 - github.com/DATA-DOG/go-sqlmock v1.4.1 // indirect github.com/NYTimes/gziphandler v1.0.1 github.com/RoaringBitmap/roaring v0.4.16 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 @@ -31,7 +29,6 @@ require ( github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 // indirect github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 // indirect github.com/go-chi/chi v4.1.0+incompatible - github.com/go-sql-driver/mysql v1.5.0 // indirect github.com/go-stack/stack v1.8.0 github.com/gogo/protobuf v1.3.1 github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021 @@ -51,7 +48,7 @@ require ( github.com/hashicorp/vault/api v1.0.2 github.com/imdario/mergo v0.3.9 // indirect github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6 - github.com/influxdata/flux v0.66.1 + github.com/influxdata/flux v0.83.1 github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6 github.com/influxdata/pkg-config v0.2.3 @@ -102,15 +99,14 @@ require ( github.com/yudai/pp v2.0.1+incompatible // indirect go.uber.org/multierr v1.5.0 go.uber.org/zap v1.14.1 - golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20200226121028-0de0cce0169b + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + golang.org/x/net v0.0.0-20200625001655-4c5254603344 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 + golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 + golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd golang.org/x/text v0.3.2 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 - golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f + golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a google.golang.org/api v0.17.0 google.golang.org/grpc v1.27.1 gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect diff --git a/go.sum b/go.sum index 77ec39fae5..0c2d65c3f2 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,6 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= @@ -29,10 +27,34 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.10.1 h1:uaB8A32IZU9YKs9v50+/LWIWTDHJk2vlGzbfd7FfESI= +github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= @@ -68,6 +90,8 @@ github.com/armon/consul-api 
v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.29.16 h1:Gbtod7Y4W/Ai7wPtesdvgGVTkFN8JxAaGouRLlcQfQs= +github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3 h1:wOysYcIdqv3WnvwqFFzrYCFALPED7qkUGaLXu359GSc= github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3/go.mod h1:UMqtWQTnOe4byzwe7Zhwh8f8s+36uszN51sJrSIZlTE= github.com/benbjohnson/tmpl v1.0.0 h1:T5QPGJD0W6JJxyEEAlVnX3co/IkUrfHen1/42nlgAHo= @@ -81,12 +105,15 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM= +github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5 h1:kS0dw4K730x7cxT+bVyTyYJZHuSoH7ofSr/Ijit56Qw= github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5/go.mod h1:CDReaxg1cmLrtcasZy43l4EYPAknXLiQSrb7tLw5zXM= github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE= github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e/go.mod h1:errmMKH8tTB49UR2A8C8DPYkyudelsYJwJFaZHQ6ik8= github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -116,11 +143,15 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= 
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20180815000130-e05b657120a6/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -164,6 +195,7 @@ github.com/go-chi/chi v4.1.0+incompatible h1:ETj3cggsVIY2Xao5ExCu6YhEh5MD6JTfcBz github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -171,12 +203,13 @@ github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -185,6 +218,8 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021 h1:HYV500jCgk+IC68L5sWrLFIWMpaUFfXXpJSAb7XOoBk= github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021/go.mod 
h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= @@ -318,8 +353,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6 h1:OtjKkeWDjUbyMi82C7XXy7Tvm2LXMwiBBXyFIGNPaGA= github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og= -github.com/influxdata/flux v0.66.1 h1:d98L5k9mmP7bU7d2zAx6C3dCe5B8/PEa1wkWzZAE+Ok= -github.com/influxdata/flux v0.66.1/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= +github.com/influxdata/flux v0.83.1 h1:KdJ19S2bj0jZvhICdS8d54BHYCJNuq9h3A/HkIKOD6o= +github.com/influxdata/flux v0.83.1/go.mod h1:+6FzHdZdwYjEIa2iuQEJ92x+C2A8X1jI0qdpVT0DJfM= github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU= github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6 h1:CFx+pP90q/qg3spoiZjf8donE4WpAdjeJfPOcoNqkWo= @@ -336,6 +371,8 @@ github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaF github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -464,6 +501,8 @@ github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= @@ -537,6 +576,8 @@ github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbd github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.3.4 h1:Gyoi6g4lMHsilEwW9+KV+bgYkJTgf5pVfvL7Utus920= 
+github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -585,6 +626,10 @@ github.com/tylerb/graceful v1.2.15 h1:B0x01Y8fsJpogzZTkDg6BDi6eMf03s01lEKGdrv83o github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II= github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo= github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg= +github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= +github.com/uber/athenadriver v1.1.4 h1:k6k0RBeXjR7oZ8NO557MsRw3eX1cc/9B0GNx+W9eHiQ= +github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E= github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY= github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= @@ -603,6 +648,7 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3Ifn github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -624,6 +670,7 @@ go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= @@ -634,6 +681,7 @@ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -642,13 +690,15 @@ golang.org/x/crypto 
v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -662,6 +712,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -683,8 +735,11 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 
h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -712,6 +767,8 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjut golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -729,6 +786,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -758,6 +817,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -766,6 +826,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= golang.org/x/sys 
v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -820,6 +882,8 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 h1:DFtSed2q3HtNuVazwVDZ4nS golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f h1:haxFuLhmPh0vRpVv5MeXoGyfCB39/Ohsq7A68h65qAg= golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a h1:kVMPw4f6EVqYdfGQTedjrpw1dbE2PEMfw4jwXsNdn9s= +golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -858,8 +922,6 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -934,6 +996,7 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= istio.io/api v0.0.0-20190515205759-982e5c3888c6/go.mod h1:hhLFQmpHia8zgaM37vb2ml9iS5NfNfqZGRt1pS9aVEo= diff --git a/http/query.go b/http/query.go index 9b02a3a43b..466c6960b6 100644 --- a/http/query.go +++ b/http/query.go @@ -18,7 +18,6 @@ import ( "github.com/influxdata/flux/ast" "github.com/influxdata/flux/csv" 
"github.com/influxdata/flux/lang" - "github.com/influxdata/flux/repl" "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/jsonweb" "github.com/influxdata/influxdb/v2/query" @@ -32,11 +31,10 @@ type QueryRequest struct { Query string `json:"query"` // Flux fields - Extern *ast.File `json:"extern,omitempty"` - Spec *flux.Spec `json:"spec,omitempty"` - AST *ast.Package `json:"ast,omitempty"` - Dialect QueryDialect `json:"dialect"` - Now time.Time `json:"now"` + Extern json.RawMessage `json:"extern,omitempty"` + AST json.RawMessage `json:"ast,omitempty"` + Dialect QueryDialect `json:"dialect"` + Now time.Time `json:"now"` // InfluxQL fields Bucket string `json:"bucket,omitempty"` @@ -271,19 +269,13 @@ func (r QueryRequest) proxyRequest(now func() time.Time) (*query.ProxyRequest, e Query: r.Query, } } - } else if r.AST != nil { + } else if len(r.AST) > 0 { c := lang.ASTCompiler{ - AST: r.AST, - Now: n, - } - if r.Extern != nil { - c.PrependFile(r.Extern) + Extern: r.Extern, + AST: r.AST, + Now: n, } compiler = c - } else if r.Spec != nil { - compiler = repl.Compiler{ - Spec: r.Spec, - } } delimiter, _ := utf8.DecodeRuneInString(r.Dialect.Delimiter) diff --git a/http/query_handler_test.go b/http/query_handler_test.go index c3bcb14dd4..9060d05e71 100644 --- a/http/query_handler_test.go +++ b/http/query_handler_test.go @@ -245,7 +245,7 @@ func TestFluxHandler_postFluxAST(t *testing.T) { name: "get ast from()", w: httptest.NewRecorder(), r: httptest.NewRequest("POST", "/api/v2/query/ast", bytes.NewBufferString(`{"query": "from()"}`)), - want: `{"ast":{"type":"Package","package":"main","files":[{"type":"File","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"metadata":"parser-type=go","package":null,"imports":null,"body":[{"type":"ExpressionStatement","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"expression":{"type":"CallExpression","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"callee":{"type":"Identifier","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":5},"source":"from"},"name":"from"}}}]}]}} + want: `{"ast":{"type":"Package","package":"main","files":[{"type":"File","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"metadata":"parser-type=rust","package":null,"imports":null,"body":[{"type":"ExpressionStatement","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"expression":{"type":"CallExpression","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"callee":{"type":"Identifier","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":5},"source":"from"},"name":"from"}}}]}]}} `, status: http.StatusOK, }, diff --git a/http/query_test.go b/http/query_test.go index f2fb14a0fd..24cd70eed8 100644 --- a/http/query_test.go +++ b/http/query_test.go @@ -3,6 +3,7 @@ package http import ( "bytes" "context" + "encoding/json" "net/http" "net/http/httptest" "reflect" @@ -33,7 +34,7 @@ var cmpOptions = cmp.Options{ func TestQueryRequest_WithDefaults(t *testing.T) { type fields struct { Spec *flux.Spec - AST *ast.Package + AST json.RawMessage Query string Type string Dialect QueryDialect @@ -59,7 +60,6 @@ func TestQueryRequest_WithDefaults(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := QueryRequest{ - Spec: tt.fields.Spec, AST: tt.fields.AST, Query: tt.fields.Query, Type: tt.fields.Type, @@ 
-75,9 +75,8 @@ func TestQueryRequest_WithDefaults(t *testing.T) { func TestQueryRequest_Validate(t *testing.T) { type fields struct { - Extern *ast.File - Spec *flux.Spec - AST *ast.Package + Extern json.RawMessage + AST json.RawMessage Query string Type string Dialect QueryDialect @@ -95,19 +94,6 @@ func TestQueryRequest_Validate(t *testing.T) { }, wantErr: true, }, - { - name: "query cannot have both extern and spec", - fields: fields{ - Extern: &ast.File{}, - Spec: &flux.Spec{}, - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "RFC3339", - }, - }, - wantErr: true, - }, { name: "requires flux type", fields: fields{ @@ -189,7 +175,6 @@ func TestQueryRequest_Validate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { r := QueryRequest{ Extern: tt.fields.Extern, - Spec: tt.fields.Spec, AST: tt.fields.AST, Query: tt.fields.Query, Type: tt.fields.Type, @@ -205,9 +190,9 @@ func TestQueryRequest_Validate(t *testing.T) { func TestQueryRequest_proxyRequest(t *testing.T) { type fields struct { - Extern *ast.File + Extern json.RawMessage Spec *flux.Spec - AST *ast.Package + AST json.RawMessage Query string Type string Dialect QueryDialect @@ -258,7 +243,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) { { name: "valid AST", fields: fields{ - AST: &ast.Package{}, + AST: mustMarshal(&ast.Package{}), Type: "flux", Dialect: QueryDialect{ Delimiter: ",", @@ -271,7 +256,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) { want: &query.ProxyRequest{ Request: query.Request{ Compiler: lang.ASTCompiler{ - AST: &ast.Package{}, + AST: mustMarshal(&ast.Package{}), Now: time.Unix(1, 1), }, }, @@ -286,7 +271,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) { { name: "valid AST with calculated now", fields: fields{ - AST: &ast.Package{}, + AST: mustMarshal(&ast.Package{}), Type: "flux", Dialect: QueryDialect{ Delimiter: ",", @@ -298,7 +283,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) { want: &query.ProxyRequest{ Request: query.Request{ Compiler: lang.ASTCompiler{ - AST: &ast.Package{}, + AST: mustMarshal(&ast.Package{}), Now: time.Unix(2, 2), }, }, @@ -313,7 +298,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) { { name: "valid AST with extern", fields: fields{ - Extern: &ast.File{ + Extern: mustMarshal(&ast.File{ Body: []ast.Statement{ &ast.OptionStatement{ Assignment: &ast.VariableAssignment{ @@ -322,8 +307,8 @@ func TestQueryRequest_proxyRequest(t *testing.T) { }, }, }, - }, - AST: &ast.Package{}, + }), + AST: mustMarshal(&ast.Package{}), Type: "flux", Dialect: QueryDialect{ Delimiter: ",", @@ -335,20 +320,17 @@ func TestQueryRequest_proxyRequest(t *testing.T) { want: &query.ProxyRequest{ Request: query.Request{ Compiler: lang.ASTCompiler{ - AST: &ast.Package{ - Files: []*ast.File{ - { - Body: []ast.Statement{ - &ast.OptionStatement{ - Assignment: &ast.VariableAssignment{ - ID: &ast.Identifier{Name: "x"}, - Init: &ast.IntegerLiteral{Value: 0}, - }, - }, + Extern: mustMarshal(&ast.File{ + Body: []ast.Statement{ + &ast.OptionStatement{ + Assignment: &ast.VariableAssignment{ + ID: &ast.Identifier{Name: "x"}, + Init: &ast.IntegerLiteral{Value: 0}, }, }, }, - }, + }), + AST: mustMarshal(&ast.Package{}), Now: time.Unix(1, 1), }, }, @@ -365,7 +347,6 @@ func TestQueryRequest_proxyRequest(t *testing.T) { t.Run(tt.name, func(t *testing.T) { r := QueryRequest{ Extern: tt.fields.Extern, - Spec: tt.fields.Spec, AST: tt.fields.AST, Query: tt.fields.Query, Type: tt.fields.Type, @@ -385,6 +366,14 @@ func TestQueryRequest_proxyRequest(t *testing.T) { } } +func 
mustMarshal(p ast.Node) []byte { + bs, err := json.Marshal(p) + if err != nil { + panic(err) + } + return bs +} + func Test_decodeQueryRequest(t *testing.T) { type args struct { ctx context.Context @@ -481,6 +470,25 @@ func Test_decodeQueryRequest(t *testing.T) { } func Test_decodeProxyQueryRequest(t *testing.T) { + externJSON := `{ + "type": "File", + "body": [ + { + "type": "OptionStatement", + "assignment": { + "type": "VariableAssignment", + "id": { + "type": "Identifier", + "name": "x" + }, + "init": { + "type": "IntegerLiteral", + "value": "0" + } + } + } + ] + }` type args struct { ctx context.Context r *http.Request @@ -525,25 +533,7 @@ func Test_decodeProxyQueryRequest(t *testing.T) { args: args{ r: httptest.NewRequest("POST", "/", bytes.NewBufferString(` { - "extern": { - "type": "File", - "body": [ - { - "type": "OptionStatement", - "assignment": { - "type": "VariableAssignment", - "id": { - "type": "Identifier", - "name": "x" - }, - "init": { - "type": "IntegerLiteral", - "value": "0" - } - } - } - ] - }, + "extern": `+externJSON+`, "query": "from(bucket: \"mybucket\")" } `)), @@ -559,17 +549,8 @@ func Test_decodeProxyQueryRequest(t *testing.T) { Request: query.Request{ OrganizationID: func() platform.ID { s, _ := platform.IDFromString("deadbeefdeadbeef"); return *s }(), Compiler: lang.FluxCompiler{ - Extern: &ast.File{ - Body: []ast.Statement{ - &ast.OptionStatement{ - Assignment: &ast.VariableAssignment{ - ID: &ast.Identifier{Name: "x"}, - Init: &ast.IntegerLiteral{Value: 0}, - }, - }, - }, - }, - Query: `from(bucket: "mybucket")`, + Extern: []byte(externJSON), + Query: `from(bucket: "mybucket")`, }, }, Dialect: &csv.Dialect{ @@ -629,3 +610,59 @@ func Test_decodeProxyQueryRequest(t *testing.T) { }) } } + +func TestProxyRequestToQueryRequest_Compilers(t *testing.T) { + tests := []struct { + name string + pr query.ProxyRequest + want QueryRequest + }{ + { + name: "flux compiler copied", + pr: query.ProxyRequest{ + Dialect: &query.NoContentDialect{}, + Request: query.Request{ + Compiler: lang.FluxCompiler{ + Query: `howdy`, + Now: time.Unix(45, 45), + }, + }, + }, + want: QueryRequest{ + Type: "flux", + Query: `howdy`, + PreferNoContent: true, + Now: time.Unix(45, 45), + }, + }, + { + name: "AST compiler copied", + pr: query.ProxyRequest{ + Dialect: &query.NoContentDialect{}, + Request: query.Request{ + Compiler: lang.ASTCompiler{ + Now: time.Unix(45, 45), + AST: mustMarshal(&ast.Package{}), + }, + }, + }, + want: QueryRequest{ + Type: "flux", + PreferNoContent: true, + AST: mustMarshal(&ast.Package{}), + Now: time.Unix(45, 45), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + got, err := QueryRequestFromProxyRequest(&tt.pr) + if err != nil { + t.Error(err) + } else if !reflect.DeepEqual(*got, tt.want) { + t.Errorf("QueryRequestFromProxyRequest = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/kit/feature/list.go b/kit/feature/list.go index 5ed755c25a..d381bd5775 100644 --- a/kit/feature/list.go +++ b/kit/feature/list.go @@ -142,6 +142,20 @@ func MemoryOptimizedSchemaMutation() BoolFlag { return memoryOptimizedSchemaMutation } +var queryTracing = MakeBoolFlag( + "Query Tracing", + "queryTracing", + "Query Team", + false, + Permanent, + false, +) + +// QueryTracing - Turn on query tracing for queries that are sampled +func QueryTracing() BoolFlag { + return queryTracing +} + var simpleTaskOptionsExtraction = MakeBoolFlag( "Simple Task Options Extraction", "simpleTaskOptionsExtraction", @@ -240,6 +254,20 @@ func OrgOnlyMemberList() 
BoolFlag { return orgOnlyMemberList } +var injectLatestSuccessTime = MakeBoolFlag( + "Inject Latest Success Time", + "injectLatestSuccessTime", + "Compute Team", + false, + Temporary, + false, +) + +// InjectLatestSuccessTime - Inject the latest successful task run timestamp into a Task query extern when executing. +func InjectLatestSuccessTime() BoolFlag { + return injectLatestSuccessTime +} + var enforceOrgDashboardLimits = MakeBoolFlag( "Enforce Organization Dashboard Limits", "enforceOrgDashboardLimits", @@ -265,6 +293,7 @@ var all = []Flag{ newLabels, memoryOptimizedFill, memoryOptimizedSchemaMutation, + queryTracing, simpleTaskOptionsExtraction, mergeFiltersRule, bandPlotType, @@ -272,6 +301,7 @@ var all = []Flag{ notebooks, pushDownGroupAggregateMinMax, orgOnlyMemberList, + injectLatestSuccessTime, enforceOrgDashboardLimits, } @@ -286,6 +316,7 @@ var byKey = map[string]Flag{ "newLabels": newLabels, "memoryOptimizedFill": memoryOptimizedFill, "memoryOptimizedSchemaMutation": memoryOptimizedSchemaMutation, + "queryTracing": queryTracing, "simpleTaskOptionsExtraction": simpleTaskOptionsExtraction, "mergeFiltersRule": mergeFiltersRule, "bandPlotType": bandPlotType, @@ -293,5 +324,6 @@ var byKey = map[string]Flag{ "notebooks": notebooks, "pushDownGroupAggregateMinMax": pushDownGroupAggregateMinMax, "orgOnlyMemberList": orgOnlyMemberList, + "injectLatestSuccessTime": injectLatestSuccessTime, "enforceOrgDashboardLimits": enforceOrgDashboardLimits, } diff --git a/pkg/flux/README.md b/pkg/flux/README.md deleted file mode 100644 index daf1c1d9d7..0000000000 --- a/pkg/flux/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Flux master packages - -This package tree is temporary copied from Flux master to keep unit tests which depend on newer -versions of Flux. Once Flux has been updated, this package should be removed and any clients of -this package referred to the official Flux package. \ No newline at end of file diff --git a/pkg/flux/ast/edit/option_editor.go b/pkg/flux/ast/edit/option_editor.go deleted file mode 100644 index fe1fb77e38..0000000000 --- a/pkg/flux/ast/edit/option_editor.go +++ /dev/null @@ -1,130 +0,0 @@ -package edit - -import ( - "fmt" - - "github.com/influxdata/flux/ast" -) - -// `OptionFn` is a function that, provided with an `OptionStatement`, returns -// an `Expression` or an error. It is used by `Option` functions to edit -// AST's options statements. -type OptionFn func(opt *ast.OptionStatement) (ast.Expression, error) - -// `Option` passes the `OptionStatement` in the AST rooted at `node` that has the -// specified identifier to `fn`. -// The function can have side effects on the option statement -// and/or return a non-nil `Expression` that is set as value for the option. -// If the value returned by the edit function is `nil` (or an error is returned) no new value is set -// for the option statement (but any, maybe partial, side effect is applied). -// `Option` returns whether it could find and edit the option (possibly with errors) or not. -func Option(node ast.Node, optionIdentifier string, fn OptionFn) (bool, error) { - oe := &optionEditor{identifier: optionIdentifier, optionFn: fn, err: nil} - ast.Walk(oe, node) - - if oe.err != nil { - return oe.found, oe.err - } - - return oe.found, nil -} - -// Creates an `OptionFn` for setting the value of an `OptionStatement`. 
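As context for this removal: per the deleted README above, the same editing API survives in upstream Flux, so former clients of this copy can simply switch imports. A minimal usage sketch, assuming `github.com/influxdata/flux/ast/edit` in flux v0.83.1 keeps the `Option`/`OptionValueFn` signatures mirrored here; the option name `every` and the `5m` replacement literal are illustrative only:

    package main

    import (
        "fmt"

        "github.com/influxdata/flux/ast"
        "github.com/influxdata/flux/ast/edit"
        "github.com/influxdata/flux/parser"
    )

    func main() {
        // Parse a script containing an option statement.
        pkg := parser.ParseSource(`option every = 1m`)

        // OptionValueFn builds an OptionFn that replaces the option's value;
        // Option walks the AST and applies it to the named option.
        found, err := edit.Option(pkg, "every", edit.OptionValueFn(
            &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: 5, Unit: "m"}}},
        ))
        fmt.Println(found, err) // expected: true <nil>
    }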
-func OptionValueFn(expr ast.Expression) OptionFn { - return func(opt *ast.OptionStatement) (ast.Expression, error) { - return expr, nil - } -} - -// Creates an `OptionFn` for updating the values of an `OptionStatement` that has an -// `ObjectExpression` as value. Returns error if the child of the option statement is not -// an object expression. If some key is not a property of the object it is added. -func OptionObjectFn(keyMap map[string]ast.Expression) OptionFn { - return func(opt *ast.OptionStatement) (ast.Expression, error) { - a, ok := opt.Assignment.(*ast.VariableAssignment) - if !ok { - return nil, fmt.Errorf("option assignment must be variable assignment") - } - obj, ok := a.Init.(*ast.ObjectExpression) - if !ok { - return nil, fmt.Errorf("value is %s, not an object expression", a.Init.Type()) - } - - // check that every specified property exists in the object - found := make(map[string]bool, len(obj.Properties)) - for _, p := range obj.Properties { - found[p.Key.Key()] = true - } - - for k := range keyMap { - if !found[k] { - obj.Properties = append(obj.Properties, &ast.Property{ - Key: &ast.Identifier{Name: k}, - Value: keyMap[k], - }) - } - } - - for _, p := range obj.Properties { - exp, found := keyMap[p.Key.Key()] - if found { - p.Value = exp - } - } - - return nil, nil - } -} - -//Finds the `OptionStatement` with the specified `identifier` and updates its value. -//There shouldn't be more then one option statement with the same identifier -//in a valid query. -type optionEditor struct { - identifier string - optionFn OptionFn - err error - found bool -} - -func (v *optionEditor) Visit(node ast.Node) ast.Visitor { - if os, ok := node.(*ast.OptionStatement); ok { - switch a := os.Assignment.(type) { - case *ast.VariableAssignment: - if a.ID.Name == v.identifier { - v.found = true - - newInit, err := v.optionFn(os) - - if err != nil { - v.err = err - } else if newInit != nil { - a.Init = newInit - } - - return nil - } - case *ast.MemberAssignment: - id, ok := a.Member.Object.(*ast.Identifier) - if ok { - name := id.Name + "." + a.Member.Property.Key() - if name == v.identifier { - v.found = true - - newInit, err := v.optionFn(os) - - if err != nil { - v.err = err - } else if newInit != nil { - a.Init = newInit - } - - return nil - } - } - } - } - - return v -} - -func (v *optionEditor) Done(node ast.Node) {} diff --git a/pkg/flux/ast/edit/task_editor.go b/pkg/flux/ast/edit/task_editor.go deleted file mode 100644 index 5e9f2ae656..0000000000 --- a/pkg/flux/ast/edit/task_editor.go +++ /dev/null @@ -1,109 +0,0 @@ -package edit - -import ( - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/codes" -) - -// GetOption finds and returns the init for the option's variable assignment -func GetOption(file *ast.File, name string) (ast.Expression, error) { - for _, st := range file.Body { - if val, ok := st.(*ast.OptionStatement); ok { - assign := val.Assignment - if va, ok := assign.(*ast.VariableAssignment); ok { - if va.ID.Name == name { - if ok { - return va.Init, nil - } - } - } - } - } - - return nil, &flux.Error{ - Code: codes.Internal, - Msg: "Option not found", - } -} - -// SetOption replaces an existing option's init with the provided init or adds -// the option if it doesn't exist. The file AST is mutated in place. 
-func SetOption(file *ast.File, name string, expr ast.Expression) { - // check for the correct file - for _, st := range file.Body { - if val, ok := st.(*ast.OptionStatement); ok { - assign := val.Assignment - if va, ok := assign.(*ast.VariableAssignment); ok { - if va.ID.Name == name { - // replace the variable assignment's init - va.Init = expr - return - } - } - } - } - // option was not found. prepend new option to body - file.Body = append([]ast.Statement{&ast.OptionStatement{ - Assignment: &ast.VariableAssignment{ - ID: &ast.Identifier{Name: name}, - Init: expr, - }, - }}, file.Body...) -} - -// DeleteOption removes an option if it exists. The file AST is mutated in place. -func DeleteOption(file *ast.File, name string) { - for i, st := range file.Body { - if val, ok := st.(*ast.OptionStatement); ok { - assign := val.Assignment - if va, ok := assign.(*ast.VariableAssignment); ok { - if va.ID.Name == name { - file.Body = append(file.Body[:i], file.Body[i+1:]...) - return - } - } - } - } -} - -// GetProperty finds and returns the AST node for the property value. -func GetProperty(obj *ast.ObjectExpression, key string) (ast.Expression, error) { - for _, prop := range obj.Properties { - if key == prop.Key.Key() { - return prop.Value, nil - } - } - return nil, &flux.Error{ - Code: codes.Internal, - Msg: "Property not found", - } -} - -// SetProperty replaces an existing property definition with the provided object expression or adds -// the property if it doesn't exist. The object expression AST is mutated in place. -func SetProperty(obj *ast.ObjectExpression, key string, value ast.Expression) { - for _, prop := range obj.Properties { - if key == prop.Key.Key() { - prop.Value = value - return - } - } - - obj.Properties = append(obj.Properties, &ast.Property{ - BaseNode: obj.BaseNode, - Key: &ast.Identifier{Name: key}, - Value: value, - }) -} - -// DeleteProperty removes a property from the object expression if it exists. -// The object expression AST is mutated in place. -func DeleteProperty(obj *ast.ObjectExpression, key string) { - for i, prop := range obj.Properties { - if key == prop.Key.Key() { - obj.Properties = append(obj.Properties[:i], obj.Properties[i+1:]...) 
- return - } - } -} diff --git a/pkg/flux/ast/helpers.go b/pkg/flux/ast/helpers.go deleted file mode 100644 index b211c8cd18..0000000000 --- a/pkg/flux/ast/helpers.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import ( - "regexp" - "time" - - "github.com/influxdata/flux/ast" -) - -func IntegerLiteralFromValue(v int64) *ast.IntegerLiteral { - return &ast.IntegerLiteral{Value: v} -} -func UnsignedIntegerLiteralFromValue(v uint64) *ast.UnsignedIntegerLiteral { - return &ast.UnsignedIntegerLiteral{Value: v} -} -func FloatLiteralFromValue(v float64) *ast.FloatLiteral { - return &ast.FloatLiteral{Value: v} -} -func StringLiteralFromValue(v string) *ast.StringLiteral { - return &ast.StringLiteral{Value: v} -} -func BooleanLiteralFromValue(v bool) *ast.BooleanLiteral { - return &ast.BooleanLiteral{Value: v} -} -func DateTimeLiteralFromValue(v time.Time) *ast.DateTimeLiteral { - return &ast.DateTimeLiteral{Value: v} -} -func RegexpLiteralFromValue(v *regexp.Regexp) *ast.RegexpLiteral { - return &ast.RegexpLiteral{Value: v} -} - -func IntegerFromLiteral(lit *ast.IntegerLiteral) int64 { - return lit.Value -} -func UnsignedIntegerFromLiteral(lit *ast.UnsignedIntegerLiteral) uint64 { - return lit.Value -} -func FloatFromLiteral(lit *ast.FloatLiteral) float64 { - return lit.Value -} -func StringFromLiteral(lit *ast.StringLiteral) string { - return lit.Value -} -func BooleanFromLiteral(lit *ast.BooleanLiteral) bool { - return lit.Value -} -func DateTimeFromLiteral(lit *ast.DateTimeLiteral) time.Time { - return lit.Value -} -func RegexpFromLiteral(lit *ast.RegexpLiteral) *regexp.Regexp { - return lit.Value -} diff --git a/pkg/flux/execute/table/diff.go b/pkg/flux/execute/table/diff.go deleted file mode 100644 index 40d535e520..0000000000 --- a/pkg/flux/execute/table/diff.go +++ /dev/null @@ -1,138 +0,0 @@ -package table - -import ( - "fmt" - "strings" - - "github.com/andreyvit/diff" - "github.com/influxdata/flux" -) - -// Diff will perform a diff between two table iterators. -// This will sort the tables within the table iterators and produce -// a diff of the full output. -func Diff(want, got flux.TableIterator, opts ...DiffOption) string { - if want == nil { - want = Iterator{} - } - - var wantS string - if wantT, err := Sort(want); err != nil { - wantS = fmt.Sprintf("table error: %s\n", err) - } else { - var sb strings.Builder - if err := wantT.Do(func(table flux.Table) error { - sb.WriteString(Stringify(table)) - return nil - }); err != nil { - _, _ = fmt.Fprintf(&sb, "table error: %s\n", err) - } - wantS = sb.String() - } - - if got == nil { - got = Iterator{} - } - - var gotS string - if gotT, err := Sort(got); err != nil { - gotS = fmt.Sprintf("table error: %s\n", err) - } else { - var sb strings.Builder - if err := gotT.Do(func(table flux.Table) error { - sb.WriteString(Stringify(table)) - return nil - }); err != nil { - _, _ = fmt.Fprintf(&sb, "table error: %s\n", err) - } - gotS = sb.String() - } - - differ := newDiffer(opts...) 
- return differ.diff(wantS, gotS) -} - -type differ struct { - ctx *[2]int -} - -func newDiffer(opts ...DiffOption) (d differ) { - for _, opt := range diffDefaultOptions { - opt.apply(&d) - } - for _, opt := range opts { - opt.apply(&d) - } - return d -} - -func (d differ) diff(want, got string) string { - lines := diff.LineDiffAsLines(want, got) - if d.ctx == nil { - return strings.Join(lines, "\n") - } - - difflines := make([]string, 0, len(lines)) -OUTER: - for { - for i := 0; i < len(lines); i++ { - if lines[i][0] == ' ' { - continue - } - - // This is the start of a diff section. Store this location. - start := i - (*d.ctx)[0] - if start < 0 { - start = 0 - } - - // Find the end of this section. - for ; i < len(lines); i++ { - if lines[i][0] == ' ' { - break - } - } - - // Look n points in the future and, if they are - // not part of a diff or don't overrun the number - // of lines, include them. - stop := i - - for n := (*d.ctx)[1]; n > 0; n-- { - if stop+1 >= len(lines) || lines[stop+1][0] != ' ' { - break - } - stop++ - } - - difflines = append(difflines, lines[start:stop]...) - lines = lines[stop:] - continue OUTER - } - return strings.Join(difflines, "\n") - } -} - -type DiffOption interface { - apply(*differ) -} - -type diffOptionFn func(d *differ) - -func (opt diffOptionFn) apply(d *differ) { - opt(d) -} - -var diffDefaultOptions = []DiffOption{ - DiffContext(3), -} - -func DiffContext(n int) DiffOption { - return diffOptionFn(func(d *differ) { - if n < 0 { - d.ctx = nil - } - ctx := [2]int{n, n} - d.ctx = &ctx - }) -} diff --git a/pkg/flux/execute/table/iterator.go b/pkg/flux/execute/table/iterator.go deleted file mode 100644 index 2011c4c6d5..0000000000 --- a/pkg/flux/execute/table/iterator.go +++ /dev/null @@ -1,14 +0,0 @@ -package table - -import "github.com/influxdata/flux" - -type Iterator []flux.Table - -func (t Iterator) Do(f func(flux.Table) error) error { - for _, tbl := range t { - if err := f(tbl); err != nil { - return err - } - } - return nil -} diff --git a/pkg/flux/execute/table/sort.go b/pkg/flux/execute/table/sort.go deleted file mode 100644 index 3b250267a2..0000000000 --- a/pkg/flux/execute/table/sort.go +++ /dev/null @@ -1,32 +0,0 @@ -package table - -import ( - "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" -) - -// Sort will read a TableIterator and produce another TableIterator -// where the keys are sorted. -// -// This method will buffer all of the data since it needs to ensure -// all of the tables are read to avoid any deadlocks. Be careful -// using this method in performance sensitive areas. -func Sort(tables flux.TableIterator) (flux.TableIterator, error) { - groups := execute.NewGroupLookup() - if err := tables.Do(func(table flux.Table) error { - buffered, err := execute.CopyTable(table) - if err != nil { - return err - } - groups.Set(buffered.Key(), buffered) - return nil - }); err != nil { - return nil, err - } - - var buffered []flux.Table - groups.Range(func(_ flux.GroupKey, value interface{}) { - buffered = append(buffered, value.(flux.Table)) - }) - return Iterator(buffered), nil -} diff --git a/pkg/flux/execute/table/static/static.go b/pkg/flux/execute/table/static/static.go deleted file mode 100644 index 3388b00fd3..0000000000 --- a/pkg/flux/execute/table/static/static.go +++ /dev/null @@ -1,703 +0,0 @@ -// Package static provides utilities for easily constructing static -// tables that are meant for tests. -// -// The primary type is Table which will be a mapping of columns to their data. 
-// The data is defined in a columnar format instead of a row-based one. -// -// The implementations in this package are not performant and are not meant -// to be used in production code. They are good enough for small datasets that -// are present in tests to ensure code correctness. -package static - -import ( - "fmt" - "time" - - stdarrow "github.com/apache/arrow/go/arrow" - "github.com/apache/arrow/go/arrow/array" - "github.com/influxdata/flux" - "github.com/influxdata/flux/arrow" - "github.com/influxdata/flux/codes" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/pkg/flux/internal/errors" - "github.com/influxdata/influxdb/v2/pkg/flux/internal/execute/table" -) - -// Table is a statically constructed table. -// It is a mapping between column names and the column. -// -// This is not a performant section of code and it is primarily -// meant to make writing unit tests easily. Do not use in -// production code. -// -// The Table struct implements the TableIterator interface -// and not the Table interface. To retrieve a flux.Table compatible -// implementation, the Table() method can be used. -type Table []Column - -// Do will produce the Table and then invoke the function -// on that flux.Table. -// -// If the produced Table is invalid, then this method -// will panic. -func (s Table) Do(f func(flux.Table) error) error { - return f(s.Table()) -} - -func (s Table) Build(template *[]Column) []flux.Table { - t := make(Table, 0, len(*template)+len(s)) - t = append(t, *template...) - t = append(t, s...) - return []flux.Table{t.Table()} -} - -// Table will produce a flux.Table using the Column values -// that are part of this Table. -// -// If the Table produces an invalid buffer, then this method -// will panic. -func (s Table) Table() flux.Table { - if len(s) == 0 { - panic(errors.New(codes.Internal, "static table has no columns")) - } - - key, cols := s.buildSchema() - buffer := &arrow.TableBuffer{ - GroupKey: key, - Columns: cols, - } - - // Determine the size by looking at the first non-key column. - n := 0 - for _, c := range s { - if c.IsKey() { - continue - } - n = c.Len() - break - } - - // Construct each of the buffers. - buffer.Values = make([]array.Interface, len(buffer.Columns)) - for i, c := range s { - buffer.Values[i] = c.Make(n) - } - - if err := buffer.Validate(); err != nil { - panic(err) - } - return table.FromBuffer(buffer) -} - -// buildSchema will construct the schema from the columns. -func (s Table) buildSchema() (flux.GroupKey, []flux.ColMeta) { - var ( - keyCols []flux.ColMeta - keyVals []values.Value - cols []flux.ColMeta - ) - for _, c := range s { - col := flux.ColMeta{Label: c.Label(), Type: c.Type()} - if c.IsKey() { - keyCols = append(keyCols, col) - keyVals = append(keyVals, c.KeyValue()) - } - cols = append(cols, col) - } - return execute.NewGroupKey(keyCols, keyVals), cols -} - -// Column is the definition for how to construct a column for the table. -type Column interface { - // Label returns the label associated with this column. - Label() string - - // Type returns the column type for this column. - Type() flux.ColType - - // Make will construct an array with the given length - // if it is possible. - Make(n int) array.Interface - - // Len will return the length of this column. - // If no length is known, this will return -1. - Len() int - - // IsKey will return true if this is part of the group key. 
- IsKey() bool - - // KeyValue will return the key value if this column is part - // of the group key. - KeyValue() values.Value - - // TableBuilder allows this column to add itself to a template. - TableBuilder -} - -// IntKey will construct a group key with the integer type. -// The value can be an int, int64, or nil. -func IntKey(k string, v interface{}) KeyColumn { - if iv, ok := mustIntValue(v); ok { - return KeyColumn{k: k, v: iv, t: flux.TInt} - } - return KeyColumn{k: k, t: flux.TInt} -} - -// UintKey will construct a group key with the unsigned type. -// The value can be a uint, uint64, int, int64, or nil. -func UintKey(k string, v interface{}) KeyColumn { - if iv, ok := mustUintValue(v); ok { - return KeyColumn{k: k, v: iv, t: flux.TUInt} - } - return KeyColumn{k: k, t: flux.TUInt} -} - -// FloatKey will construct a group key with the float type. -// The value can be a float64, int, int64, or nil. -func FloatKey(k string, v interface{}) KeyColumn { - if iv, ok := mustFloatValue(v); ok { - return KeyColumn{k: k, v: iv, t: flux.TFloat} - } - return KeyColumn{k: k, t: flux.TFloat} -} - -// StringKey will construct a group key with the string type. -// The value can be a string or nil. -func StringKey(k string, v interface{}) KeyColumn { - if iv, ok := mustStringValue(v); ok { - return KeyColumn{k: k, v: iv, t: flux.TString} - } - return KeyColumn{k: k, t: flux.TString} -} - -// BooleanKey will construct a group key with the boolean type. -// The value can be a bool or nil. -func BooleanKey(k string, v interface{}) KeyColumn { - if iv, ok := mustBooleanValue(v); ok { - return KeyColumn{k: k, v: iv, t: flux.TBool} - } - return KeyColumn{k: k, t: flux.TBool} -} - -// TimeKey will construct a group key with the given time using either a -// string or an integer. If an integer is used, then it is in seconds. -func TimeKey(k string, v interface{}) KeyColumn { - if iv, _, ok := mustTimeValue(v, 0, time.Second); ok { - return KeyColumn{k: k, v: execute.Time(iv), t: flux.TTime} - } - return KeyColumn{k: k, t: flux.TTime} -} - -type KeyColumn struct { - k string - v interface{} - t flux.ColType -} - -func (s KeyColumn) Make(n int) array.Interface { - return arrow.Repeat(s.KeyValue(), n, memory.DefaultAllocator) -} - -func (s KeyColumn) Label() string { return s.k } -func (s KeyColumn) Type() flux.ColType { return s.t } -func (s KeyColumn) Len() int { return -1 } -func (s KeyColumn) IsKey() bool { return true } -func (s KeyColumn) KeyValue() values.Value { return values.New(s.v) } - -func (s KeyColumn) Build(template *[]Column) []flux.Table { - *template = append(*template, s) - return nil -} - -// Ints will construct an array of integers. -// Each value can be an int, int64, or nil. 
-func Ints(k string, v ...interface{}) Column { - c := intColumn{ - column: column{k: k}, - v: make([]int64, len(v)), - } - for i, iv := range v { - val, ok := mustIntValue(iv) - if !ok { - if c.valid == nil { - c.valid = make([]bool, len(v)) - for i := range c.valid { - c.valid[i] = true - } - } - c.valid[i] = false - } - c.v[i] = val - } - return c -} - -type column struct { - k string - valid []bool -} - -func (s column) Label() string { return s.k } -func (s column) IsKey() bool { return false } - -type intColumn struct { - column - v []int64 -} - -func (s intColumn) Make(n int) array.Interface { - b := array.NewInt64Builder(memory.DefaultAllocator) - b.Resize(len(s.v)) - b.AppendValues(s.v, s.valid) - return b.NewArray() -} - -func (s intColumn) Type() flux.ColType { return flux.TInt } -func (s intColumn) Len() int { return len(s.v) } -func (s intColumn) KeyValue() values.Value { return values.InvalidValue } - -func (s intColumn) Build(template *[]Column) []flux.Table { - *template = append(*template, s) - return nil -} - -func mustIntValue(v interface{}) (int64, bool) { - if v == nil { - return 0, false - } - - switch v := v.(type) { - case int: - return int64(v), true - case int64: - return v, true - default: - panic(fmt.Sprintf("unable to convert type %T to an int value", v)) - } -} - -// Uints will construct an array of unsigned integers. -// Each value can be a uint, uint64, int, int64, or nil. -func Uints(k string, v ...interface{}) Column { - c := uintColumn{ - column: column{k: k}, - v: make([]uint64, len(v)), - } - for i, iv := range v { - val, ok := mustUintValue(iv) - if !ok { - if c.valid == nil { - c.valid = make([]bool, len(v)) - for i := range c.valid { - c.valid[i] = true - } - } - c.valid[i] = false - } - c.v[i] = val - } - return c -} - -type uintColumn struct { - column - v []uint64 -} - -func (s uintColumn) Make(n int) array.Interface { - b := array.NewUint64Builder(memory.DefaultAllocator) - b.Resize(len(s.v)) - b.AppendValues(s.v, s.valid) - return b.NewArray() -} - -func (s uintColumn) Type() flux.ColType { return flux.TUInt } -func (s uintColumn) Len() int { return len(s.v) } -func (s uintColumn) KeyValue() values.Value { return values.InvalidValue } - -func (s uintColumn) Build(template *[]Column) []flux.Table { - *template = append(*template, s) - return nil -} - -func mustUintValue(v interface{}) (uint64, bool) { - if v == nil { - return 0, false - } - - switch v := v.(type) { - case int: - return uint64(v), true - case int64: - return uint64(v), true - case uint: - return uint64(v), true - case uint64: - return v, true - default: - panic(fmt.Sprintf("unable to convert type %T to a uint value", v)) - } -} - -// Floats will construct an array of floats. -// Each value can be a float64, int, int64, or nil. 
-func Floats(k string, v ...interface{}) Column { - c := floatColumn{ - column: column{k: k}, - v: make([]float64, len(v)), - } - for i, iv := range v { - val, ok := mustFloatValue(iv) - if !ok { - if c.valid == nil { - c.valid = make([]bool, len(v)) - for i := range c.valid { - c.valid[i] = true - } - } - c.valid[i] = false - } - c.v[i] = val - } - return c -} - -type floatColumn struct { - column - v []float64 -} - -func (s floatColumn) Make(n int) array.Interface { - b := array.NewFloat64Builder(memory.DefaultAllocator) - b.Resize(len(s.v)) - b.AppendValues(s.v, s.valid) - return b.NewArray() -} - -func (s floatColumn) Type() flux.ColType { return flux.TFloat } -func (s floatColumn) Len() int { return len(s.v) } -func (s floatColumn) KeyValue() values.Value { return values.InvalidValue } - -func (s floatColumn) Build(template *[]Column) []flux.Table { - *template = append(*template, s) - return nil -} - -func mustFloatValue(v interface{}) (float64, bool) { - if v == nil { - return 0, false - } - - switch v := v.(type) { - case int: - return float64(v), true - case int64: - return float64(v), true - case float64: - return v, true - default: - panic(fmt.Sprintf("unable to convert type %T to a float value", v)) - } -} - -// Strings will construct an array of strings. -// Each value can be a string or nil. -func Strings(k string, v ...interface{}) Column { - c := stringColumn{ - column: column{k: k}, - v: make([]string, len(v)), - } - for i, iv := range v { - val, ok := mustStringValue(iv) - if !ok { - if c.valid == nil { - c.valid = make([]bool, len(v)) - for i := range c.valid { - c.valid[i] = true - } - } - c.valid[i] = false - } - c.v[i] = val - } - return c -} - -type stringColumn struct { - column - v []string -} - -func (s stringColumn) Make(n int) array.Interface { - b := array.NewBinaryBuilder(memory.DefaultAllocator, stdarrow.BinaryTypes.String) - b.Resize(len(s.v)) - b.AppendStringValues(s.v, s.valid) - return b.NewArray() -} - -func (s stringColumn) Type() flux.ColType { return flux.TString } -func (s stringColumn) Len() int { return len(s.v) } -func (s stringColumn) KeyValue() values.Value { return values.InvalidValue } - -func (s stringColumn) Build(template *[]Column) []flux.Table { - *template = append(*template, s) - return nil -} - -func mustStringValue(v interface{}) (string, bool) { - if v == nil { - return "", false - } - - switch v := v.(type) { - case string: - return v, true - default: - panic(fmt.Sprintf("unable to convert type %T to a string value", v)) - } -} - -// Booleans will construct an array of booleans. -// Each value can be a bool or nil. 
-func Booleans(k string, v ...interface{}) Column {
- c := booleanColumn{
- column: column{k: k},
- v: make([]bool, len(v)),
- }
- for i, iv := range v {
- val, ok := mustBooleanValue(iv)
- if !ok {
- if c.valid == nil {
- c.valid = make([]bool, len(v))
- for i := range c.valid {
- c.valid[i] = true
- }
- }
- c.valid[i] = false
- }
- c.v[i] = val
- }
- return c
-}
-
-type booleanColumn struct {
- column
- v []bool
-}
-
-func (s booleanColumn) Make(n int) array.Interface {
- b := array.NewBooleanBuilder(memory.DefaultAllocator)
- b.Resize(len(s.v))
- b.AppendValues(s.v, s.valid)
- return b.NewArray()
-}
-
-func (s booleanColumn) Type() flux.ColType { return flux.TBool }
-func (s booleanColumn) Len() int { return len(s.v) }
-func (s booleanColumn) KeyValue() values.Value { return values.InvalidValue }
-
-func (s booleanColumn) Build(template *[]Column) []flux.Table {
- *template = append(*template, s)
- return nil
-}
-
-func mustBooleanValue(v interface{}) (bool, bool) {
- if v == nil {
- return false, false
- }
-
- switch v := v.(type) {
- case bool:
- return v, true
- default:
- panic(fmt.Sprintf("unable to convert type %T to a boolean value", v))
- }
-}
-
-// Times will construct an array of times, each value given either as a
-// string or an integer. If an integer is used, then it is in seconds.
-//
-// If strings and integers are mixed, the integers will be treated as offsets
-// from the last string time that was used.
-func Times(k string, v ...interface{}) Column {
- var offset int64
- c := timeColumn{
- column: column{k: k},
- v: make([]int64, len(v)),
- }
- for i, iv := range v {
- val, abs, ok := mustTimeValue(iv, offset, time.Second)
- if !ok {
- if c.valid == nil {
- c.valid = make([]bool, len(v))
- for i := range c.valid {
- c.valid[i] = true
- }
- }
- c.valid[i] = false
- }
- if abs {
- offset = val
- }
- c.v[i] = val
- }
- return c
-}
-
-type timeColumn struct {
- column
- v []int64
-}
-
-func (s timeColumn) Make(n int) array.Interface {
- b := array.NewInt64Builder(memory.DefaultAllocator)
- b.Resize(len(s.v))
- b.AppendValues(s.v, s.valid)
- return b.NewArray()
-}
-
-func (s timeColumn) Type() flux.ColType { return flux.TTime }
-func (s timeColumn) Len() int { return len(s.v) }
-func (s timeColumn) KeyValue() values.Value { return values.InvalidValue }
-
-func (s timeColumn) Build(template *[]Column) []flux.Table {
- *template = append(*template, s)
- return nil
-}
-
-// mustTimeValue will convert the interface into a time value.
-// This must either be an int-like value or a string that can be
-// parsed as a time in RFC3339 format.
-//
-// This will panic otherwise.
-func mustTimeValue(v interface{}, offset int64, unit time.Duration) (t int64, abs, ok bool) {
- if v == nil {
- return 0, false, false
- }
-
- switch v := v.(type) {
- case int:
- return offset + int64(v)*int64(unit), false, true
- case int64:
- return offset + v*int64(unit), false, true
- case string:
- t, err := time.Parse(time.RFC3339, v)
- if err != nil {
- if t, err = time.Parse(time.RFC3339Nano, v); err != nil {
- panic(err)
- }
- }
- return t.UnixNano(), true, true
- default:
- panic(fmt.Sprintf("unable to convert type %T to a time value", v))
- }
-}
-
-// TableBuilder is used to construct a set of Tables.
-type TableBuilder interface {
- // Build will construct a set of tables using the
- // template as input.
- //
- // The template is a pointer as a builder is allowed
- // to modify the template. For implementors, the
- // template pointer must be non-nil.
- Build(template *[]Column) []flux.Table -} - -// TableGroup will construct a group of Tables -// that have common values. It includes any TableBuilder -// values. -type TableGroup []TableBuilder - -func (t TableGroup) Do(f func(flux.Table) error) error { - // Use an empty template. - var template []Column - tables := t.Build(&template) - return table.Iterator(tables).Do(f) -} - -// Build will construct Tables using the given template. -func (t TableGroup) Build(template *[]Column) []flux.Table { - // Copy over the template. - gtemplate := make([]Column, len(*template)) - copy(gtemplate, *template) - - var tables []flux.Table - for _, tb := range t { - tables = append(tables, tb.Build(>emplate)...) - } - return tables -} - -// TableList will produce a Table using the template and -// each of the table builders. -// -// Changes to the template are not shared between each of the -// entries. If the TableBuilder does not produce tables, -// this will force a single Table to be created. -type TableList []TableBuilder - -func (t TableList) Build(template *[]Column) []flux.Table { - var tables []flux.Table - for _, tb := range t { - // Copy over the group template for each of these. - gtemplate := make([]Column, len(*template), len(*template)+1) - copy(gtemplate, *template) - - if ntables := tb.Build(>emplate); len(ntables) > 0 { - tables = append(tables, ntables...) - } else { - tables = append(tables, Table(gtemplate).Table()) - } - } - return tables -} - -// StringKeys creates a TableList with the given key values. -func StringKeys(k string, v ...interface{}) TableList { - list := make(TableList, len(v)) - for i := range v { - list[i] = StringKey(k, v[i]) - } - return list -} - -// TableMatrix will produce a set of Tables by producing the -// cross product of each of the TableBuilders with each other. -type TableMatrix []TableList - -func (t TableMatrix) Build(template *[]Column) []flux.Table { - if len(t) == 0 { - return nil - } else if len(t) == 1 { - return t[0].Build(template) - } - - // Split the TableList into their own distinct TableGroups - // so we can produce a cross product of groups. - builders := make([]TableGroup, len(t[0])) - for i, b := range t[0] { - builders[i] = append(builders[i], b) - } - - for i := 1; i < len(t); i++ { - product := make([]TableGroup, 0, len(builders)*len(t[i])) - for _, bs := range t[i] { - a := make([]TableGroup, len(builders)) - copy(a, builders) - for j := range a { - a[j] = append(a[j], bs) - } - product = append(product, a...) - } - builders = product - } - - var tables []flux.Table - for _, b := range builders { - tables = append(tables, b.Build(template)...) - } - return tables -} diff --git a/pkg/flux/execute/table/stringify.go b/pkg/flux/execute/table/stringify.go deleted file mode 100644 index e5b0afbf22..0000000000 --- a/pkg/flux/execute/table/stringify.go +++ /dev/null @@ -1,151 +0,0 @@ -package table - -import ( - "fmt" - "sort" - "strings" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/semantic" - "github.com/influxdata/flux/values" -) - -// Stringify will read a table and turn it into a human-readable string. 
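For illustration, the static helpers above and Stringify below compose naturally in tests. A sketch, assuming the two packages as they existed before this deletion; the example values are hypothetical:

package static_test

import (
	"fmt"

	"github.com/influxdata/flux"
	"github.com/influxdata/influxdb/v2/pkg/flux/execute/table"
	"github.com/influxdata/influxdb/v2/pkg/flux/execute/table/static"
)

func ExampleStringify() {
	// One group key column and two data columns; the row count is taken
	// from the first non-key column (two rows here). The integer 10 is a
	// ten-second offset from the preceding string time.
	tbl := static.Table{
		static.StringKey("host", "server01"),
		static.Times("_time", "2020-09-10T00:00:00Z", 10),
		static.Floats("_value", 1.5, 2.5),
	}
	_ = tbl.Do(func(t flux.Table) error {
		// Prints a "# key=value col=type" header line, then one line per row.
		fmt.Println(table.Stringify(t))
		return nil
	})
}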
-func Stringify(table flux.Table) string { - var sb strings.Builder - stringifyKey(&sb, table) - if err := table.Do(func(cr flux.ColReader) error { - stringifyRows(&sb, cr) - return nil - }); err != nil { - _, _ = fmt.Fprintf(&sb, "table error: %s\n", err) - } - return sb.String() -} - -func getSortedIndices(key flux.GroupKey, cols []flux.ColMeta) ([]flux.ColMeta, []int) { - indices := make([]int, len(cols)) - for i := range indices { - indices[i] = i - } - sort.Slice(indices, func(i, j int) bool { - ci, cj := cols[indices[i]], cols[indices[j]] - if key.HasCol(ci.Label) && !key.HasCol(cj.Label) { - return true - } else if !key.HasCol(ci.Label) && key.HasCol(cj.Label) { - return false - } - return ci.Label < cj.Label - }) - return cols, indices -} - -func stringifyKey(sb *strings.Builder, table flux.Table) { - key := table.Key() - cols, indices := getSortedIndices(table.Key(), table.Cols()) - - sb.WriteString("# ") - if len(cols) == 0 { - sb.WriteString("(none)") - } else { - nkeys := 0 - for _, idx := range indices { - c := cols[idx] - kidx := execute.ColIdx(c.Label, key.Cols()) - if kidx < 0 { - continue - } - - if nkeys > 0 { - sb.WriteString(",") - } - sb.WriteString(cols[idx].Label) - sb.WriteString("=") - - v := key.Value(kidx) - stringifyValue(sb, v) - nkeys++ - } - } - sb.WriteString(" ") - - ncols := 0 - for _, idx := range indices { - c := cols[idx] - if key.HasCol(c.Label) { - continue - } - - if ncols > 0 { - sb.WriteString(",") - } - sb.WriteString(cols[idx].Label) - sb.WriteString("=") - sb.WriteString(cols[idx].Type.String()) - ncols++ - } - sb.WriteString("\n") -} - -func stringifyRows(sb *strings.Builder, cr flux.ColReader) { - key := cr.Key() - cols, indices := getSortedIndices(cr.Key(), cr.Cols()) - - for i, sz := 0, cr.Len(); i < sz; i++ { - inKey := true - for j, idx := range indices { - c := cols[idx] - if j > 0 { - if inKey && !key.HasCol(c.Label) { - sb.WriteString(" ") - inKey = false - } else { - sb.WriteString(",") - } - } else if !key.HasCol(c.Label) { - inKey = false - } - sb.WriteString(cols[idx].Label) - sb.WriteString("=") - - v := execute.ValueForRow(cr, i, idx) - stringifyValue(sb, v) - } - sb.WriteString("\n") - } -} - -func stringifyValue(sb *strings.Builder, v values.Value) { - if v.IsNull() { - sb.WriteString("!(nil)") - return - } - - switch v.Type().Nature() { - case semantic.Int: - _, _ = fmt.Fprintf(sb, "%di", v.Int()) - case semantic.UInt: - _, _ = fmt.Fprintf(sb, "%du", v.UInt()) - case semantic.Float: - _, _ = fmt.Fprintf(sb, "%.3f", v.Float()) - case semantic.String: - sb.WriteString(v.Str()) - case semantic.Bool: - if v.Bool() { - sb.WriteString("true") - } else { - sb.WriteString("false") - } - case semantic.Time: - ts := v.Time().Time() - if ts.Nanosecond() > 0 { - sb.WriteString(ts.Format(time.RFC3339Nano)) - } else { - sb.WriteString(ts.Format(time.RFC3339)) - } - default: - sb.WriteString("!(invalid)") - } -} diff --git a/pkg/flux/internal/errors/errors.go b/pkg/flux/internal/errors/errors.go deleted file mode 100644 index c4b1f45342..0000000000 --- a/pkg/flux/internal/errors/errors.go +++ /dev/null @@ -1,92 +0,0 @@ -package errors - -import ( - "fmt" - "strings" - - "github.com/influxdata/flux/codes" -) - -// Error is the error struct of flux. -type Error struct { - // Code is the code of the error as defined in the codes package. - // This describes the type and category of the error. It is required. - Code codes.Code - - // Msg contains a human-readable description and additional information - // about the error itself. 
This is optional.
- Msg string
-
- // Err contains the error that was the cause of this error.
- // This is optional.
- Err error
-}
-
-// Error implements the error interface by outputting the Code and Err.
-func (e *Error) Error() string {
- if e.Msg != "" && e.Err != nil {
- var b strings.Builder
- b.WriteString(e.Msg)
- b.WriteString(": ")
- b.WriteString(e.Err.Error())
- return b.String()
- } else if e.Msg != "" {
- return e.Msg
- } else if e.Err != nil {
- return e.Err.Error()
- }
- return e.Code.String()
-}
-
-// Unwrap will return the wrapped error.
-func (e *Error) Unwrap() error {
- return e.Err
-}
-
-func New(code codes.Code, msg ...interface{}) error {
- return Wrap(nil, code, msg...)
-}
-
-func Newf(code codes.Code, fmtStr string, args ...interface{}) error {
- return Wrapf(nil, code, fmtStr, args...)
-}
-
-func Wrap(err error, code codes.Code, msg ...interface{}) error {
- var s string
- if len(msg) > 0 {
- s = fmt.Sprint(msg...)
- }
- return &Error{
- Code: code,
- Msg: s,
- Err: err,
- }
-}
-
-func Wrapf(err error, code codes.Code, format string, a ...interface{}) error {
- return &Error{
- Code: code,
- Msg: fmt.Sprintf(format, a...),
- Err: err,
- }
-}
-
-// Code returns the error code for the given error.
-// If the error is not a flux.Error, this will return
-// Unknown for the code. If the error is a flux.Error
-// and its code is Inherit, then this will return the
-// wrapped error's code.
-func Code(err error) codes.Code {
- for {
- if ferr, ok := err.(*Error); ok {
- if ferr.Code != codes.Inherit {
- return ferr.Code
- } else if ferr.Err == nil {
- return codes.Unknown
- }
- err = ferr.Err
- } else {
- return codes.Unknown
- }
- }
-}
diff --git a/pkg/flux/internal/execute/table/buffered.go b/pkg/flux/internal/execute/table/buffered.go
deleted file mode 100644
index b5fb7dcd49..0000000000
--- a/pkg/flux/internal/execute/table/buffered.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package table
-
-import (
- "sync/atomic"
-
- "github.com/influxdata/flux"
- "github.com/influxdata/flux/codes"
- "github.com/influxdata/influxdb/v2/pkg/flux/internal/errors"
-)
-
-// BufferedTable represents a table of buffered column readers.
-type BufferedTable struct {
- used int32
- empty bool
- GroupKey flux.GroupKey
- Columns []flux.ColMeta
- Buffers []flux.ColReader
-}
-
-// FromBuffer constructs a flux.Table from a single flux.ColReader.
-func FromBuffer(cr flux.ColReader) flux.Table { - return &BufferedTable{ - GroupKey: cr.Key(), - Columns: cr.Cols(), - Buffers: []flux.ColReader{cr}, - } -} - -func (b *BufferedTable) Key() flux.GroupKey { - return b.GroupKey -} - -func (b *BufferedTable) Cols() []flux.ColMeta { - return b.Columns -} - -func (b *BufferedTable) Do(f func(flux.ColReader) error) error { - if !atomic.CompareAndSwapInt32(&b.used, 0, 1) { - return errors.New(codes.Internal, "table already read") - } - - i := 0 - defer func() { - for ; i < len(b.Buffers); i++ { - b.Buffers[i].Release() - } - }() - - b.empty = true - for ; i < len(b.Buffers); i++ { - cr := b.Buffers[i] - if cr.Len() > 0 { - b.empty = false - } - if err := f(cr); err != nil { - return err - } - cr.Release() - } - return nil -} - -func (b *BufferedTable) Done() { - if atomic.CompareAndSwapInt32(&b.used, 0, 1) { - b.empty = b.isEmpty() - for _, buf := range b.Buffers { - buf.Release() - } - b.Buffers = nil - } -} - -func (b *BufferedTable) Empty() bool { - if atomic.LoadInt32(&b.used) != 0 { - return b.empty - } - return b.isEmpty() -} - -func (b *BufferedTable) isEmpty() bool { - for _, buf := range b.Buffers { - if buf.Len() > 0 { - return false - } - } - return true -} diff --git a/pkg/flux/internal/execute/table/iterator.go b/pkg/flux/internal/execute/table/iterator.go deleted file mode 100644 index c3d8e41f80..0000000000 --- a/pkg/flux/internal/execute/table/iterator.go +++ /dev/null @@ -1,5 +0,0 @@ -package table - -import "github.com/influxdata/influxdb/v2/pkg/flux/execute/table" - -type Iterator = table.Iterator diff --git a/pkger/parser.go b/pkger/parser.go index f4d9a6c1da..0119764a09 100644 --- a/pkger/parser.go +++ b/pkger/parser.go @@ -17,10 +17,9 @@ import ( "time" "github.com/influxdata/flux/ast" + "github.com/influxdata/flux/ast/edit" "github.com/influxdata/flux/parser" "github.com/influxdata/influxdb/v2" - ast2 "github.com/influxdata/influxdb/v2/pkg/flux/ast" - "github.com/influxdata/influxdb/v2/pkg/flux/ast/edit" "github.com/influxdata/influxdb/v2/pkg/jsonnet" "gopkg.in/yaml.v3" ) @@ -1728,16 +1727,16 @@ func valFromExpr(p ast.Expression) interface{} { } return nil case *ast.DateTimeLiteral: - return ast2.DateTimeFromLiteral(literal) + return ast.DateTimeFromLiteral(literal) case *ast.FloatLiteral: - return ast2.FloatFromLiteral(literal) + return ast.FloatFromLiteral(literal) case *ast.IntegerLiteral: - return ast2.IntegerFromLiteral(literal) + return ast.IntegerFromLiteral(literal) case *ast.DurationLiteral: dur, _ := ast.DurationFrom(literal, time.Time{}) return dur case *ast.StringLiteral: - return ast2.StringFromLiteral(literal) + return ast.StringFromLiteral(literal) case *ast.UnaryExpression: // a signed duration is represented by a UnaryExpression. // it is the only unary expression allowed. 
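The import swap above replaces the local ast2 shims with upstream flux ast helpers of the same names. A sketch of what the call sites in valFromExpr rely on; the return types are inferred from how the results are used, not confirmed against the flux API:

package pkger

import "github.com/influxdata/flux/ast"

// Each helper unwraps a literal node into its plain Go value.
var (
	i = ast.IntegerFromLiteral(&ast.IntegerLiteral{Value: 7})  // int64(7)
	f = ast.FloatFromLiteral(&ast.FloatLiteral{Value: 1.5})    // float64(1.5)
	s = ast.StringFromLiteral(&ast.StringLiteral{Value: "ok"}) // "ok"
)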
diff --git a/pkger/parser_models.go b/pkger/parser_models.go index 05642eca74..dfdb67a262 100644 --- a/pkger/parser_models.go +++ b/pkger/parser_models.go @@ -10,14 +10,13 @@ import ( "time" "github.com/influxdata/flux/ast" + "github.com/influxdata/flux/ast/edit" "github.com/influxdata/flux/parser" "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/notification" icheck "github.com/influxdata/influxdb/v2/notification/check" "github.com/influxdata/influxdb/v2/notification/endpoint" "github.com/influxdata/influxdb/v2/notification/rule" - ast2 "github.com/influxdata/influxdb/v2/pkg/flux/ast" - "github.com/influxdata/influxdb/v2/pkg/flux/ast/edit" ) type identity struct { @@ -2314,7 +2313,7 @@ func convertRefToRefSummary(field string, ref *references) SummaryReference { func astBoolFromIface(v interface{}) *ast.BooleanLiteral { b, _ := v.(bool) - return ast2.BooleanLiteralFromValue(b) + return ast.BooleanLiteralFromValue(b) } func astDurationFromIface(v interface{}) *ast.DurationLiteral { @@ -2332,18 +2331,18 @@ func astDurationFromIface(v interface{}) *ast.DurationLiteral { func astFloatFromIface(v interface{}) *ast.FloatLiteral { if i, ok := v.(int); ok { - return ast2.FloatLiteralFromValue(float64(i)) + return ast.FloatLiteralFromValue(float64(i)) } f, _ := v.(float64) - return ast2.FloatLiteralFromValue(f) + return ast.FloatLiteralFromValue(f) } func astIntegerFromIface(v interface{}) *ast.IntegerLiteral { if f, ok := v.(float64); ok { - return ast2.IntegerLiteralFromValue(int64(f)) + return ast.IntegerLiteralFromValue(int64(f)) } i, _ := v.(int64) - return ast2.IntegerLiteralFromValue(i) + return ast.IntegerLiteralFromValue(i) } func astNow() *ast.CallExpression { @@ -2354,12 +2353,12 @@ func astNow() *ast.CallExpression { func astStringFromIface(v interface{}) *ast.StringLiteral { s, _ := v.(string) - return ast2.StringLiteralFromValue(s) + return ast.StringLiteralFromValue(s) } func astTimeFromIface(v interface{}) *ast.DateTimeLiteral { if t, ok := v.(time.Time); ok { - return ast2.DateTimeLiteralFromValue(t) + return ast.DateTimeLiteralFromValue(t) } s, ok := v.(string) diff --git a/query/bridges.go b/query/bridges.go index 9d3c429b75..564f5546fc 100644 --- a/query/bridges.go +++ b/query/bridges.go @@ -149,6 +149,15 @@ func (b ProxyQueryServiceAsyncBridge) Query(ctx context.Context, w io.Writer, re if err != nil { return stats, tracing.LogError(span, err) } + + if results, err := q.ProfilerResults(); err != nil { + return stats, tracing.LogError(span, err) + } else if results != nil { + _, err = encoder.Encode(w, results) + if err != nil { + return stats, tracing.LogError(span, err) + } + } return stats, nil } diff --git a/query/bridges_test.go b/query/bridges_test.go index 3ccd15c70a..94e02c0e3c 100644 --- a/query/bridges_test.go +++ b/query/bridges_test.go @@ -10,6 +10,7 @@ import ( "github.com/influxdata/flux" "github.com/influxdata/flux/csv" "github.com/influxdata/flux/execute/executetest" + "github.com/influxdata/flux/metadata" "github.com/influxdata/influxdb/v2/query" "github.com/influxdata/influxdb/v2/query/mock" ) @@ -26,7 +27,7 @@ func (w failWriter) Write(p []byte) (int, error) { func TestProxyQueryServiceAsyncBridge_StatsOnClientDisconnect(t *testing.T) { q := mock.NewQuery() - q.Metadata = flux.Metadata{ + q.Metadata = metadata.Metadata{ "foo": []interface{}{"bar"}, } r := executetest.NewResult([]*executetest.Table{ diff --git a/query/builtin/builtin.go b/query/builtin/builtin.go index 61be85b0bd..bd221b7f61 100644 --- a/query/builtin/builtin.go +++ 
b/query/builtin/builtin.go @@ -4,12 +4,12 @@ package builtin import ( - "github.com/influxdata/flux" + "github.com/influxdata/flux/runtime" _ "github.com/influxdata/flux/stdlib" // Import the stdlib _ "github.com/influxdata/influxdb/v2/query/stdlib" // Import the stdlib ) func init() { - flux.FinalizeBuiltIns() + runtime.FinalizeBuiltIns() } diff --git a/query/builtinlazy/builtin.go b/query/builtinlazy/builtin.go deleted file mode 100644 index 36cc682914..0000000000 --- a/query/builtinlazy/builtin.go +++ /dev/null @@ -1,20 +0,0 @@ -package builtinlazy - -import ( - "sync" - - "github.com/influxdata/flux" - _ "github.com/influxdata/flux/stdlib" // Import the stdlib - _ "github.com/influxdata/influxdb/v2/query/stdlib" // Import the stdlib -) - -var once sync.Once - -// Initialize ensures all Flux builtins are configured and should be called -// prior to using the Flux runtime. Initialize is safe to call concurrently -// and is idempotent. -func Initialize() { - once.Do(func() { - flux.FinalizeBuiltIns() - }) -} diff --git a/query/control/controller.go b/query/control/controller.go index f266056997..0ec36252f2 100644 --- a/query/control/controller.go +++ b/query/control/controller.go @@ -26,10 +26,13 @@ import ( "github.com/influxdata/flux" "github.com/influxdata/flux/codes" + "github.com/influxdata/flux/execute/table" "github.com/influxdata/flux/lang" "github.com/influxdata/flux/memory" + "github.com/influxdata/flux/runtime" "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/kit/errors" + "github.com/influxdata/influxdb/v2/kit/feature" "github.com/influxdata/influxdb/v2/kit/prom" "github.com/influxdata/influxdb/v2/kit/tracing" influxlogger "github.com/influxdata/influxdb/v2/logger" @@ -206,6 +209,10 @@ func (c *Controller) Query(ctx context.Context, req *query.Request) (flux.Query, for _, dep := range c.dependencies { ctx = dep.Inject(ctx) } + // Add per-transformation spans if the feature flag is set. + if feature.QueryTracing().Enabled(ctx) { + ctx = flux.WithExperimentalTracingEnabled(ctx) + } q, err := c.query(ctx, req.Compiler) if err != nil { return q, err @@ -338,7 +345,7 @@ func (c *Controller) compileQuery(q *Query, compiler flux.Compiler) (err error) } } - prog, err := compiler.Compile(ctx) + prog, err := compiler.Compile(ctx, runtime.Default) if err != nil { return &flux.Error{ Msg: "compilation failed", @@ -547,6 +554,23 @@ type Query struct { alloc *memory.Allocator } +func (q *Query) ProfilerResults() (flux.ResultIterator, error) { + p := q.program.(*lang.AstProgram) + if len(p.Profilers) == 0 { + return nil, nil + } + tables := make([]flux.Table, 0) + for _, profiler := range p.Profilers { + if result, err := profiler.GetResult(q, q.alloc); err != nil { + return nil, err + } else { + tables = append(tables, result) + } + } + res := table.NewProfilerResult(tables...) + return flux.NewSliceResultIterator([]flux.Result{&res}), nil +} + // ID reports an ephemeral unique ID for the query. 
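Callers that want the profiler tables after the regular results can drain the iterator the same way the proxy bridge change earlier in this patch does. A sketch; the helper name is hypothetical and error handling is trimmed:

func drainProfilerResults(q *Query) error {
	results, err := q.ProfilerResults()
	if err != nil || results == nil {
		return err
	}
	defer results.Release()
	for results.More() {
		// Discard the profiler rows; a real caller would encode them.
		if err := results.Next().Tables().Do(func(tbl flux.Table) error {
			return tbl.Do(func(flux.ColReader) error { return nil })
		}); err != nil {
			return err
		}
	}
	return results.Err()
}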
func (q *Query) ID() QueryID {
 return q.id
diff --git a/query/control/controller_test.go b/query/control/controller_test.go
index a9357c0f9d..47c94b1ea3 100644
--- a/query/control/controller_test.go
+++ b/query/control/controller_test.go
@@ -20,10 +20,14 @@ import (
 "github.com/influxdata/flux/plan"
 "github.com/influxdata/flux/plan/plantest"
 "github.com/influxdata/flux/stdlib/universe"
+ "github.com/influxdata/influxdb/v2/kit/feature"
+ pmock "github.com/influxdata/influxdb/v2/mock"
 "github.com/influxdata/influxdb/v2/query"
 _ "github.com/influxdata/influxdb/v2/query/builtin"
 "github.com/influxdata/influxdb/v2/query/control"
 "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/mocktracer"
 "github.com/prometheus/client_golang/prometheus"
 dto "github.com/prometheus/client_model/go"
 "go.uber.org/zap/zaptest"
@@ -1289,6 +1293,102 @@ func TestController_ReserveMemoryWithoutExceedingMax(t *testing.T) {
 validateUnusedMemory(t, reg, config)
 }
 
+func TestController_QueryTracing(t *testing.T) {
+ // temporarily install a mock tracer to see which spans are created.
+ oldTracer := opentracing.GlobalTracer()
+ defer opentracing.SetGlobalTracer(oldTracer)
+ mockTracer := mocktracer.New()
+ opentracing.SetGlobalTracer(mockTracer)
+
+ const memoryBytesQuotaPerQuery = 64
+ config := config
+ config.MemoryBytesQuotaPerQuery = memoryBytesQuotaPerQuery
+ ctrl, err := control.New(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer shutdown(t, ctrl)
+
+ flagger := pmock.NewFlagger(map[feature.Flag]interface{}{
+ feature.QueryTracing(): true,
+ })
+ plainCtx := context.Background()
+ withFlagger, err := feature.Annotate(plainCtx, flagger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tcs := []struct {
+ name string
+ ctx context.Context
+ doNotWantSpan string
+ wantSpan string
+ }{
+ {
+ name: "feature flag off",
+ ctx: plainCtx,
+ doNotWantSpan: "*executetest.AllocatingFromProcedureSpec",
+ },
+ {
+ name: "feature flag on",
+ ctx: withFlagger,
+ wantSpan: "*executetest.AllocatingFromProcedureSpec",
+ },
+ }
+ for _, tc := range tcs {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ mockTracer.Reset()
+
+ compiler := &mock.Compiler{
+ CompileFn: func(ctx context.Context) (flux.Program, error) {
+ // Return a program that allocates a small, within-quota amount of memory
+ // from the test procedure spec.
+ pts := plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("allocating-from-test", &executetest.AllocatingFromProcedureSpec{
+ ByteCount: 16,
+ }),
+ plan.CreatePhysicalNode("yield", &universe.YieldProcedureSpec{Name: "_result"}),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ },
+ Resources: flux.ResourceManagement{
+ ConcurrencyQuota: 1,
+ },
+ }
+
+ ps := plantest.CreatePlanSpec(&pts)
+ prog := &lang.Program{
+ Logger: zaptest.NewLogger(t),
+ PlanSpec: ps,
+ }
+
+ return prog, nil
+ },
+ }
+
+ // Depending on how the feature flag is set in the context,
+ // we may or may not do query tracing here.
+ q, err := ctrl.Query(tc.ctx, makeRequest(compiler)) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + consumeResults(t, q) + gotSpans := make(map[string]struct{}) + for _, span := range mockTracer.FinishedSpans() { + gotSpans[span.OperationName] = struct{}{} + } + if _, found := gotSpans[tc.doNotWantSpan]; tc.doNotWantSpan != "" && found { + t.Fatalf("did not want to find span %q but it was there", tc.doNotWantSpan) + } + if _, found := gotSpans[tc.wantSpan]; tc.wantSpan != "" && !found { + t.Fatalf("wanted to find span %q but it was not there", tc.wantSpan) + } + }) + } +} + func consumeResults(tb testing.TB, q flux.Query) { tb.Helper() for res := range q.Results() { diff --git a/query/encode.go b/query/encode.go index 09eb08d40e..d8af463183 100644 --- a/query/encode.go +++ b/query/encode.go @@ -55,7 +55,6 @@ func (e *NoContentEncoder) Encode(w io.Writer, results flux.ResultIterator) (int for results.More() { if err := results.Next().Tables().Do(func(tbl flux.Table) error { return tbl.Do(func(cr flux.ColReader) error { - cr.Release() return nil }) }); err != nil { @@ -114,7 +113,6 @@ func (e *NoContentWithErrorEncoder) Encode(w io.Writer, results flux.ResultItera for results.More() { if err := results.Next().Tables().Do(func(tbl flux.Table) error { return tbl.Do(func(cr flux.ColReader) error { - cr.Release() return nil }) }); err != nil { diff --git a/query/fluxlang/service.go b/query/fluxlang/service.go index ab88279e2e..94a8a8f4c4 100644 --- a/query/fluxlang/service.go +++ b/query/fluxlang/service.go @@ -4,11 +4,11 @@ package fluxlang import ( "context" - "github.com/influxdata/flux" "github.com/influxdata/flux/ast" "github.com/influxdata/flux/complete" "github.com/influxdata/flux/interpreter" "github.com/influxdata/flux/parser" + "github.com/influxdata/flux/runtime" "github.com/influxdata/flux/values" "github.com/influxdata/influxdb/v2" ) @@ -27,9 +27,9 @@ func (d defaultService) Parse(source string) (pkg *ast.Package, err error) { } func (d defaultService) EvalAST(ctx context.Context, astPkg *ast.Package) ([]interpreter.SideEffect, values.Scope, error) { - return flux.EvalAST(ctx, astPkg) + return runtime.EvalAST(ctx, astPkg) } func (d defaultService) Completer() complete.Completer { - return complete.NewCompleter(flux.Prelude()) + return complete.NewCompleter(runtime.Prelude()) } diff --git a/query/influxql/compiler.go b/query/influxql/compiler.go index 4397c6f687..1031e87365 100644 --- a/query/influxql/compiler.go +++ b/query/influxql/compiler.go @@ -2,6 +2,7 @@ package influxql import ( "context" + "encoding/json" "time" "github.com/influxdata/flux" @@ -42,7 +43,7 @@ func NewCompiler(dbrpMappingSvc platform.DBRPMappingServiceV2) *Compiler { } // Compile transpiles the query into a Program. -func (c *Compiler) Compile(ctx context.Context) (flux.Program, error) { +func (c *Compiler) Compile(ctx context.Context, runtime flux.Runtime) (flux.Program, error) { var now time.Time if c.Now != nil { now = *c.Now @@ -64,7 +65,15 @@ func (c *Compiler) Compile(ctx context.Context) (flux.Program, error) { return nil, err } compileOptions := lang.WithLogPlanOpts(c.logicalPlannerOptions...) 
- return lang.CompileAST(astPkg, now, compileOptions), nil + bs, err := json.Marshal(astPkg) + if err != nil { + return nil, err + } + hdl, err := runtime.JSONToHandle(bs) + if err != nil { + return nil, err + } + return lang.CompileAST(hdl, runtime, now, compileOptions), nil } func (c *Compiler) CompilerType() flux.CompilerType { diff --git a/query/influxql/end_to_end_test.go b/query/influxql/end_to_end_test.go index aa00b05665..2194469cd9 100644 --- a/query/influxql/end_to_end_test.go +++ b/query/influxql/end_to_end_test.go @@ -131,6 +131,8 @@ var skipTests = map[string]string{ "SelectorMath_29": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738", "SelectorMath_30": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738", "SelectorMath_31": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738", + "ands": "algo-w: https://github.com/influxdata/influxdb/issues/16811", + "ors": "algo-w: https://github.com/influxdata/influxdb/issues/16811", } var querier = fluxquerytest.NewQuerier() diff --git a/query/logging.go b/query/logging.go index b6ec55f623..7c803d514e 100644 --- a/query/logging.go +++ b/query/logging.go @@ -21,15 +21,45 @@ type LoggingProxyQueryService struct { queryLogger Logger nowFunction func() time.Time log *zap.Logger + cond func(ctx context.Context) bool + + // If this is set then logging happens only if this key is present in the + // metadata. + requireMetadataKey string } -func NewLoggingProxyQueryService(log *zap.Logger, queryLogger Logger, proxyQueryService ProxyQueryService) *LoggingProxyQueryService { - return &LoggingProxyQueryService{ +// LoggingProxyQueryServiceOption provides a way to modify the +// behavior of LoggingProxyQueryService. +type LoggingProxyQueryServiceOption func(lpqs *LoggingProxyQueryService) + +// ConditionalLogging returns a LoggingProxyQueryServiceOption +// that only logs if the passed in function returns true. +// Thus logging can be controlled by a request-scoped attribute, e.g., a feature flag. +func ConditionalLogging(cond func(context.Context) bool) LoggingProxyQueryServiceOption { + return func(lpqs *LoggingProxyQueryService) { + lpqs.cond = cond + } +} + +func RequireMetadataKey(metadataKey string) LoggingProxyQueryServiceOption { + return func(lpqs *LoggingProxyQueryService) { + lpqs.requireMetadataKey = metadataKey + } +} + +func NewLoggingProxyQueryService(log *zap.Logger, queryLogger Logger, proxyQueryService ProxyQueryService, opts ...LoggingProxyQueryServiceOption) *LoggingProxyQueryService { + lpqs := &LoggingProxyQueryService{ proxyQueryService: proxyQueryService, queryLogger: queryLogger, nowFunction: time.Now, log: log, } + + for _, o := range opts { + o(lpqs) + } + + return lpqs } func (s *LoggingProxyQueryService) SetNowFunctionForTesting(nowFunction func() time.Time) { @@ -38,6 +68,12 @@ func (s *LoggingProxyQueryService) SetNowFunctionForTesting(nowFunction func() t // Query executes and logs the query. func (s *LoggingProxyQueryService) Query(ctx context.Context, w io.Writer, req *ProxyRequest) (stats flux.Statistics, err error) { + if s.cond != nil && !s.cond(ctx) { + // Logging is conditional, and we are not logging this request. + // Just invoke the wrapped service directly. 
+ return s.proxyQueryService.Query(ctx, w, req) + } + span, ctx := tracing.StartSpanFromContext(ctx) defer span.Finish() @@ -50,6 +86,14 @@ func (s *LoggingProxyQueryService) Query(ctx context.Context, w io.Writer, req * entry.Write(zap.Error(err)) } } + + // Enforce requireMetadataKey, if set. + if s.requireMetadataKey != "" { + if _, ok := stats.Metadata[s.requireMetadataKey]; !ok { + return + } + } + traceID, sampled, _ := tracing.InfoFromContext(ctx) log := Log{ OrganizationID: req.Request.OrganizationID, diff --git a/query/logging_test.go b/query/logging_test.go index eb2ad9364d..823660f2ff 100644 --- a/query/logging_test.go +++ b/query/logging_test.go @@ -4,12 +4,14 @@ import ( "bytes" "context" "io" + "io/ioutil" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/influxdata/flux" + "github.com/influxdata/flux/metadata" platform "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/query" "github.com/influxdata/influxdb/v2/query/mock" @@ -34,6 +36,10 @@ var opts = []cmp.Option{ cmpopts.IgnoreUnexported(query.Request{}), } +type contextKey string + +const loggingCtxKey contextKey = "do-logging" + func TestLoggingProxyQueryService(t *testing.T) { // Set a Jaeger in-memory tracer to get span information in the query log. oldTracer := opentracing.GlobalTracer() @@ -53,7 +59,9 @@ func TestLoggingProxyQueryService(t *testing.T) { ExecuteDuration: time.Second, Concurrency: 2, MaxAllocated: 2048, + Metadata: make(metadata.Metadata), } + wantStats.Metadata.Add("some-mock-metadata", 42) wantBytes := 10 pqs := &mock.ProxyQueryService{ QueryF: func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { @@ -69,13 +77,6 @@ func TestLoggingProxyQueryService(t *testing.T) { }, } - wantTime := time.Now() - lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs) - lpqs.SetNowFunctionForTesting(func() time.Time { - return wantTime - }) - - var buf bytes.Buffer req := &query.ProxyRequest{ Request: query.Request{ Authorization: nil, @@ -84,25 +85,98 @@ func TestLoggingProxyQueryService(t *testing.T) { }, Dialect: nil, } - stats, err := lpqs.Query(context.Background(), &buf, req) - if err != nil { - t.Fatal(err) - } - if !cmp.Equal(wantStats, stats, opts...) { - t.Errorf("unexpected query stats: -want/+got\n%s", cmp.Diff(wantStats, stats, opts...)) - } - traceID := reporter.GetSpans()[0].Context().(jaeger.SpanContext).TraceID().String() - wantLogs := []query.Log{{ - Time: wantTime, - OrganizationID: orgID, - TraceID: traceID, - Sampled: true, - Error: nil, - ProxyRequest: req, - ResponseSize: int64(wantBytes), - Statistics: wantStats, - }} - if !cmp.Equal(wantLogs, logs, opts...) { - t.Errorf("unexpected query logs: -want/+got\n%s", cmp.Diff(wantLogs, logs, opts...)) - } + + t.Run("log", func(t *testing.T) { + defer func() { + logs = nil + }() + wantTime := time.Now() + lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs) + lpqs.SetNowFunctionForTesting(func() time.Time { + return wantTime + }) + + var buf bytes.Buffer + stats, err := lpqs.Query(context.Background(), &buf, req) + if err != nil { + t.Fatal(err) + } + if !cmp.Equal(wantStats, stats, opts...) 
{ + t.Errorf("unexpected query stats: -want/+got\n%s", cmp.Diff(wantStats, stats, opts...)) + } + traceID := reporter.GetSpans()[0].Context().(jaeger.SpanContext).TraceID().String() + wantLogs := []query.Log{{ + Time: wantTime, + OrganizationID: orgID, + TraceID: traceID, + Sampled: true, + Error: nil, + ProxyRequest: req, + ResponseSize: int64(wantBytes), + Statistics: wantStats, + }} + if !cmp.Equal(wantLogs, logs, opts...) { + t.Errorf("unexpected query logs: -want/+got\n%s", cmp.Diff(wantLogs, logs, opts...)) + } + }) + + t.Run("conditional logging", func(t *testing.T) { + defer func() { + logs = nil + }() + + condLog := query.ConditionalLogging(func(ctx context.Context) bool { + return ctx.Value(loggingCtxKey) != nil + }) + + lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, condLog) + _, err := lpqs.Query(context.Background(), ioutil.Discard, req) + if err != nil { + t.Fatal(err) + } + + if len(logs) != 0 { + t.Fatal("expected query service not to log") + } + + ctx := context.WithValue(context.Background(), loggingCtxKey, true) + _, err = lpqs.Query(ctx, ioutil.Discard, req) + if err != nil { + t.Fatal(err) + } + + if len(logs) != 1 { + t.Fatal("expected query service to log") + } + }) + + t.Run("require metadata key", func(t *testing.T) { + defer func() { + logs = nil + }() + + reqMeta1 := query.RequireMetadataKey("this-metadata-wont-be-found") + lpqs1 := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, reqMeta1) + + _, err := lpqs1.Query(context.Background(), ioutil.Discard, req) + if err != nil { + t.Fatal(err) + } + + if len(logs) != 0 { + t.Fatal("expected query service not to log") + } + + reqMeta2 := query.RequireMetadataKey("some-mock-metadata") + lpqs2 := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, reqMeta2) + + _, err = lpqs2.Query(context.Background(), ioutil.Discard, req) + if err != nil { + t.Fatal(err) + } + + if len(logs) != 1 { + t.Fatal("expected query service to log") + } + }) } diff --git a/query/mock/service.go b/query/mock/service.go index 71447ca787..9dea08940a 100644 --- a/query/mock/service.go +++ b/query/mock/service.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/influxdata/flux" + "github.com/influxdata/flux/metadata" "github.com/influxdata/influxdb/v2/kit/check" "github.com/influxdata/influxdb/v2/query" ) @@ -52,7 +53,7 @@ func (s *AsyncQueryService) Query(ctx context.Context, req *query.Request) (flux // It contains controls to ensure that the flux.Query object is used correctly. // Note: Query will only return one result, specified by calling the SetResults method. type Query struct { - Metadata flux.Metadata + Metadata metadata.Metadata results chan flux.Result once sync.Once @@ -66,7 +67,7 @@ var _ flux.Query = (*Query)(nil) // NewQuery constructs a new asynchronous query. 
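With the move to the metadata package, tests attach metadata to the mock query just as before. A sketch using the Add helper exercised in the logging tests above:

q := mock.NewQuery()
q.Metadata.Add("foo", "bar") // appends to the []interface{} stored under "foo"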
func NewQuery() *Query { return &Query{ - Metadata: make(flux.Metadata), + Metadata: make(metadata.Metadata), results: make(chan flux.Result, 1), } } diff --git a/query/promql/internal/promqltests/engine.go b/query/promql/internal/promqltests/engine.go index 1dccf8bf8e..3262816eef 100644 --- a/query/promql/internal/promqltests/engine.go +++ b/query/promql/internal/promqltests/engine.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" "github.com/influxdata/influxdb/v2/models" "github.com/influxdata/influxdb/v2/query" - itsdb "github.com/influxdata/influxdb/v2/v1/tsdb" + itsdb "github.com/influxdata/influxdb/v2/tsdb" ipromql "github.com/influxdata/promql/v2" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" diff --git a/query/promql/internal/promqltests/go.mod b/query/promql/internal/promqltests/go.mod index ba4d4b0a9c..9cbeb557a2 100644 --- a/query/promql/internal/promqltests/go.mod +++ b/query/promql/internal/promqltests/go.mod @@ -1,43 +1,28 @@ module github.com/influxdata/promqltests -go 1.13 +go 1.12 require ( - github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect github.com/aws/aws-sdk-go v1.29.18 // indirect github.com/docker/go-units v0.4.0 // indirect - github.com/fatih/color v1.9.0 // indirect github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a // indirect github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect github.com/go-kit/kit v0.10.0 // indirect - github.com/gogo/protobuf v1.3.1 // indirect - github.com/google/go-cmp v0.4.0 - github.com/google/uuid v1.1.1 // indirect + github.com/google/go-cmp v0.5.0 github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/influxdata/flux v0.66.1 + github.com/influxdata/flux v0.83.1 github.com/influxdata/influxdb/v2 v2.0.0-00010101000000-000000000000 github.com/influxdata/influxql v1.0.1 // indirect github.com/influxdata/promql/v2 v2.12.0 github.com/kr/pretty v0.2.0 // indirect github.com/mattn/go-isatty v0.0.12 // indirect - github.com/onsi/ginkgo v1.10.1 // indirect - github.com/onsi/gomega v1.7.0 // indirect - github.com/prometheus/client_golang v1.5.1 // indirect github.com/prometheus/common v0.9.1 github.com/prometheus/prometheus v2.5.0+incompatible github.com/prometheus/tsdb v0.10.0 github.com/spf13/afero v1.2.2 // indirect - github.com/spf13/pflag v1.0.5 // indirect github.com/willf/bitset v1.1.10 // indirect - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect - golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect - golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 // indirect - golang.org/x/tools v0.0.0-20200305205014-bc073721adb6 // indirect google.golang.org/api v0.20.0 // indirect google.golang.org/genproto v0.0.0-20200305110556-506484158171 // indirect - google.golang.org/grpc v1.27.1 // indirect - gopkg.in/yaml.v2 v2.2.8 // indirect ) replace github.com/influxdata/influxdb/v2 => ../../../../ diff --git a/query/promql/internal/promqltests/go.sum b/query/promql/internal/promqltests/go.sum index 44bdc4f931..b92445a0f6 100644 --- a/query/promql/internal/promqltests/go.sum +++ b/query/promql/internal/promqltests/go.sum @@ -1,8 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= 
-cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= @@ -113,6 +111,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM= +github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5 h1:kS0dw4K730x7cxT+bVyTyYJZHuSoH7ofSr/Ijit56Qw= github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5/go.mod h1:CDReaxg1cmLrtcasZy43l4EYPAknXLiQSrb7tLw5zXM= github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE= @@ -218,6 +218,7 @@ github.com/go-chi/chi v4.1.0+incompatible h1:ETj3cggsVIY2Xao5ExCu6YhEh5MD6JTfcBz github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -235,6 +236,8 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -389,8 +392,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6 h1:OtjKkeWDjUbyMi82C7XXy7Tvm2LXMwiBBXyFIGNPaGA= github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og= -github.com/influxdata/flux v0.82.2 h1:VtoF8pbyoS+3QLQQmihSmV0Ly6g/A73x+3VBUp9t15g= -github.com/influxdata/flux v0.82.2/go.mod h1:sAAIEgQTlTpsXCUQ49ymoRsKqraPzIb7F3paT72/lE0= +github.com/influxdata/flux 
v0.83.1 h1:KdJ19S2bj0jZvhICdS8d54BHYCJNuq9h3A/HkIKOD6o= +github.com/influxdata/flux v0.83.1/go.mod h1:+6FzHdZdwYjEIa2iuQEJ92x+C2A8X1jI0qdpVT0DJfM= github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU= github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= @@ -729,6 +732,8 @@ github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v1.0.0 h1:J0TkWtiuYgtdlrkkrDLISYBQ92M+X5m4LrIIMKrbDTs= +github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= @@ -736,6 +741,7 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3Ifn github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -792,8 +798,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -806,6 +812,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd 
h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -826,8 +834,11 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -854,8 +865,8 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -872,6 +883,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -899,6 +912,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -909,8 +923,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -962,8 +976,8 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200305205014-bc073721adb6 h1:V/kH9fbTtfqZLJU7djyPh+n4yWxBZVU6H5npu6UeY54= -golang.org/x/tools v0.0.0-20200305205014-bc073721adb6/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a h1:kVMPw4f6EVqYdfGQTedjrpw1dbE2PEMfw4jwXsNdn9s= +golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ 
-1000,8 +1014,6 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= diff --git a/query/promql/query_test.go b/query/promql/query_test.go index 38f1d7b53f..1408ccb1f8 100644 --- a/query/promql/query_test.go +++ b/query/promql/query_test.go @@ -364,8 +364,10 @@ func TestBuild(t *testing.T) { want: &flux.Spec{ Operations: []*flux.Operation{ { - ID: flux.OperationID("from"), - Spec: &influxdb.FromOpSpec{Bucket: "prometheus"}, + ID: flux.OperationID("from"), + Spec: &influxdb.FromOpSpec{ + Bucket: influxdb.NameOrID{Name: "prometheus"}, + }, }, { ID: "where", @@ -373,51 +375,55 @@ func TestBuild(t *testing.T) { Fn: interpreter.ResolvedFunction{ Scope: nil, Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}}, - }, - Body: &semantic.LogicalExpression{ - Operator: ast.AndOperator, - Left: &semantic.LogicalExpression{ - Operator: ast.AndOperator, - Left: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: "r", + Parameters: &semantic.FunctionParameters{ + List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}}, + }, + Block: &semantic.Block{ + Body: []semantic.Statement{ + &semantic.ReturnStatement{ + Argument: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_metric", + }, + Right: &semantic.StringLiteral{ + Value: "node_cpu", + }, }, - Property: "_metric", - }, - Right: &semantic.StringLiteral{ - Value: "node_cpu", - }, - }, - Right: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: "r", + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "mode", + }, + Right: &semantic.StringLiteral{ + Value: "user", + }, }, - Property: "mode", }, - Right: &semantic.StringLiteral{ - Value: "user", + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "cpu", + }, + Right: &semantic.StringLiteral{ + Value: "cpu2", + }, }, }, }, - Right: 
&semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: "r", - }, - Property: "cpu", - }, - Right: &semantic.StringLiteral{ - Value: "cpu2", - }, - }, }, }, }, @@ -446,8 +452,10 @@ func TestBuild(t *testing.T) { want: &flux.Spec{ Operations: []*flux.Operation{ { - ID: flux.OperationID("from"), - Spec: &influxdb.FromOpSpec{Bucket: "prometheus"}, + ID: flux.OperationID("from"), + Spec: &influxdb.FromOpSpec{ + Bucket: influxdb.NameOrID{Name: "prometheus"}, + }, }, { ID: flux.OperationID("range"), @@ -461,34 +469,38 @@ func TestBuild(t *testing.T) { Fn: interpreter.ResolvedFunction{ Scope: nil, Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}}, - }, - Body: &semantic.LogicalExpression{ - Operator: ast.AndOperator, - Left: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: "r", + Parameters: &semantic.FunctionParameters{ + List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}}, + }, + Block: &semantic.Block{ + Body: []semantic.Statement{ + &semantic.ReturnStatement{ + Argument: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_metric", + }, + Right: &semantic.StringLiteral{ + Value: "node_cpu", + }, }, - Property: "_metric", - }, - Right: &semantic.StringLiteral{ - Value: "node_cpu", - }, - }, - Right: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: "r", + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "mode", + }, + Right: &semantic.StringLiteral{ + Value: "user", + }, }, - Property: "mode", - }, - Right: &semantic.StringLiteral{ - Value: "user", }, }, }, @@ -517,8 +529,10 @@ func TestBuild(t *testing.T) { want: &flux.Spec{ Operations: []*flux.Operation{ { - ID: flux.OperationID("from"), - Spec: &influxdb.FromOpSpec{Bucket: "prometheus"}, + ID: flux.OperationID("from"), + Spec: &influxdb.FromOpSpec{ + Bucket: influxdb.NameOrID{Name: "prometheus"}, + }, }, { ID: flux.OperationID("range"), @@ -532,34 +546,38 @@ func TestBuild(t *testing.T) { Fn: interpreter.ResolvedFunction{ Scope: nil, Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}}, - }, - Body: &semantic.LogicalExpression{ - Operator: ast.AndOperator, - Left: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: "r", + Parameters: &semantic.FunctionParameters{ + List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}}, + }, + Block: &semantic.Block{ + Body: []semantic.Statement{ + &semantic.ReturnStatement{ + Argument: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: 
"_metric", + }, + Right: &semantic.StringLiteral{ + Value: "node_cpu", + }, }, - Property: "_metric", - }, - Right: &semantic.StringLiteral{ - Value: "node_cpu", - }, - }, - Right: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: "r", + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{ + Value: "m0", + }, }, - Property: "_measurement", - }, - Right: &semantic.StringLiteral{ - Value: "m0", }, }, }, diff --git a/query/promql/types.go b/query/promql/types.go index 69ff4dbdba..4b8a8695cc 100644 --- a/query/promql/types.go +++ b/query/promql/types.go @@ -148,7 +148,7 @@ func (s *Selector) QuerySpec() (*flux.Spec, error) { { ID: "from", // TODO: Change this to a UUID Spec: &influxdb.FromOpSpec{ - Bucket: "prometheus", + Bucket: influxdb.NameOrID{Name: "prometheus"}, }, }, } @@ -260,11 +260,15 @@ func NewWhereOperation(metricName string, labels []*LabelMatcher) (*flux.Operati Fn: interpreter.ResolvedFunction{ Scope: nil, Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}}, + Parameters: &semantic.FunctionParameters{ + List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}}, + }, + Block: &semantic.Block{ + Body: []semantic.Statement{ + &semantic.ReturnStatement{ + Argument: node, + }, }, - Body: node, }, }, }, diff --git a/query/querytest/compile.go b/query/querytest/compile.go deleted file mode 100644 index ea0f27cc13..0000000000 --- a/query/querytest/compile.go +++ /dev/null @@ -1,62 +0,0 @@ -package querytest - -import ( - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/flux" - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/query" -) - -type BucketsAccessedTestCase struct { - Name string - Raw string - WantErr bool - WantReadBuckets *[]platform.BucketFilter - WantWriteBuckets *[]platform.BucketFilter -} - -func BucketsAccessedTestHelper(t *testing.T, tc BucketsAccessedTestCase) { - t.Helper() - - ast, err := flux.Parse(tc.Raw) - if err != nil { - t.Fatalf("could not parse flux: %v", err) - } - - var gotReadBuckets, gotWriteBuckets []platform.BucketFilter - if tc.WantReadBuckets != nil || tc.WantWriteBuckets != nil { - gotReadBuckets, gotWriteBuckets, err = query.BucketsAccessed(ast, nil) - if err != nil { - t.Fatal(err) - } - } - - if tc.WantReadBuckets != nil { - if diagnostic := verifyBuckets(*tc.WantReadBuckets, gotReadBuckets); diagnostic != "" { - t.Errorf("Could not verify read buckets: %v", diagnostic) - } - } - - if tc.WantWriteBuckets != nil { - if diagnostic := verifyBuckets(*tc.WantWriteBuckets, gotWriteBuckets); diagnostic != "" { - t.Errorf("Could not verify write buckets: %v", diagnostic) - } - } -} - -func verifyBuckets(wantBuckets, gotBuckets []platform.BucketFilter) string { - if len(wantBuckets) != len(gotBuckets) { - return fmt.Sprintf("Expected %v buckets but got %v", len(wantBuckets), len(gotBuckets)) - } - - for i, wantBucket := range wantBuckets { - if diagnostic := cmp.Diff(wantBucket, gotBuckets[i]); diagnostic != "" { - return fmt.Sprintf("Bucket mismatch: -want/+got:\n%v", diagnostic) - } - } - - return "" -} diff --git a/query/querytest/compiler.go b/query/querytest/compiler.go 
index 26c3a318b4..c5237b7b1c 100644 --- a/query/querytest/compiler.go +++ b/query/querytest/compiler.go @@ -1,10 +1,12 @@ package querytest import ( + "context" + "github.com/influxdata/flux/plan" + "github.com/influxdata/flux/stdlib/influxdata/influxdb" v1 "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1" "github.com/influxdata/influxdb/v2/query/influxql" - "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" ) // MakeFromInfluxJSONCompiler returns a compiler that replaces all From operations with FromJSON. @@ -24,7 +26,7 @@ func (ReplaceFromRule) Pattern() plan.Pattern { return plan.Pat(influxdb.FromKind) } -func (r ReplaceFromRule) Rewrite(n plan.Node) (plan.Node, bool, error) { +func (r ReplaceFromRule) Rewrite(ctx context.Context, n plan.Node) (plan.Node, bool, error) { if err := n.ReplaceSpec(&v1.FromInfluxJSONProcedureSpec{ File: r.Filename, }); err != nil { diff --git a/query/service_test.go b/query/service_test.go index 860b22d3bb..ee7470ad07 100644 --- a/query/service_test.go +++ b/query/service_test.go @@ -21,7 +21,7 @@ type compilerA struct { A string `json:"a"` } -func (c compilerA) Compile(ctx context.Context) (flux.Program, error) { +func (c compilerA) Compile(ctx context.Context, runtime flux.Runtime) (flux.Program, error) { panic("not implemented") } diff --git a/query/spec.go b/query/spec.go deleted file mode 100644 index 1261bf79dc..0000000000 --- a/query/spec.go +++ /dev/null @@ -1,49 +0,0 @@ -package query - -import ( - "context" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/lang" - platform "github.com/influxdata/influxdb/v2" -) - -// BucketAwareOperationSpec specifies an operation that reads or writes buckets -type BucketAwareOperationSpec interface { - BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter) -} - -type constantSecretService struct{} - -func (s constantSecretService) LoadSecret(ctx context.Context, k string) (string, error) { - return "", nil -} - -func newDeps() flux.Dependencies { - deps := flux.NewDefaultDependencies() - deps.Deps.HTTPClient = nil - deps.Deps.URLValidator = nil - deps.Deps.SecretService = constantSecretService{} - return deps -} - -// BucketsAccessed returns the set of buckets read and written by a query spec -func BucketsAccessed(ast *ast.Package, orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter, err error) { - ctx := newDeps().Inject(context.Background()) - err = lang.WalkIR(ctx, ast, func(o *flux.Operation) error { - bucketAwareOpSpec, ok := o.Spec.(BucketAwareOperationSpec) - if ok { - opBucketsRead, opBucketsWritten := bucketAwareOpSpec.BucketsAccessed(orgID) - readBuckets = append(readBuckets, opBucketsRead...) - writeBuckets = append(writeBuckets, opBucketsWritten...) 
- } - return nil - }) - - if err != nil { - return nil, nil, err - } - - return readBuckets, writeBuckets, nil -} diff --git a/query/stdlib/experimental/to.go b/query/stdlib/experimental/to.go index 8486c15ea7..3fa5a198aa 100644 --- a/query/stdlib/experimental/to.go +++ b/query/stdlib/experimental/to.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/flux/codes" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/plan" + "github.com/influxdata/flux/runtime" "github.com/influxdata/flux/semantic" "github.com/influxdata/flux/stdlib/experimental" platform "github.com/influxdata/influxdb/v2" @@ -32,19 +33,8 @@ type ToOpSpec struct { } func init() { - toSignature := flux.FunctionSignature( - map[string]semantic.PolyType{ - "bucket": semantic.String, - "bucketID": semantic.String, - "org": semantic.String, - "orgID": semantic.String, - "host": semantic.String, - "token": semantic.String, - }, - []string{}, - ) - - flux.ReplacePackageValue("experimental", "to", flux.FunctionValueWithSideEffect("to", createToOpSpec, toSignature)) + toSignature := runtime.MustLookupBuiltinType("experimental", "to") + runtime.ReplacePackageValue("experimental", "to", flux.MustValue(flux.FunctionValueWithSideEffect("to", createToOpSpec, toSignature))) flux.RegisterOpSpec(ExperimentalToKind, func() flux.OperationSpec { return &ToOpSpec{} }) plan.RegisterProcedureSpecWithSideEffect(ExperimentalToKind, newToProcedure, ExperimentalToKind) execute.RegisterTransformation(ExperimentalToKind, createToTransformation) @@ -185,9 +175,7 @@ func createToTransformation(id execute.DatasetID, mode execute.AccumulationMode, // ToTransformation is the transformation for the `to` flux function. type ToTransformation struct { ctx context.Context - bucket string bucketID platform.ID - org string orgID platform.ID d execute.Dataset cache execute.TableBuilderCache @@ -206,7 +194,6 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T var err error var orgID platform.ID - var org string // Get organization name and ID if spec.Spec.Org != "" { oID, ok := deps.OrganizationLookup.Lookup(ctx, spec.Spec.Org) @@ -214,7 +201,6 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T return nil, fmt.Errorf("failed to look up organization %q", spec.Spec.Org) } orgID = oID - org = spec.Spec.Org } else if spec.Spec.OrgID != "" { if oid, err := platform.IDFromString(spec.Spec.OrgID); err != nil { return nil, err @@ -229,15 +215,8 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T } orgID = req.OrganizationID } - if org == "" { - org = deps.OrganizationLookup.LookupName(ctx, orgID) - if org == "" { - return nil, fmt.Errorf("failed to look up organization name for ID %q", orgID.String()) - } - } var bucketID *platform.ID - var bucket string // Get bucket name and ID // User will have specified exactly one in the ToOpSpec. 
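 	// (Exactly one of Bucket and BucketID is assumed to be set at this
 	// point; the ToOpSpec construction presumably enforces the same
 	// one-of validation that from() applies to bucket/bucketID.)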
if spec.Spec.Bucket != "" { @@ -246,21 +225,14 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T return nil, fmt.Errorf("failed to look up bucket %q in org %q", spec.Spec.Bucket, spec.Spec.Org) } bucketID = &bID - bucket = spec.Spec.Bucket } else { if bucketID, err = platform.IDFromString(spec.Spec.BucketID); err != nil { return nil, err } - bucket = deps.BucketLookup.LookupName(ctx, orgID, *bucketID) - if bucket == "" { - return nil, fmt.Errorf("failed to look up bucket with ID %q in org %q", bucketID, org) - } } return &ToTransformation{ ctx: ctx, - bucket: bucket, bucketID: *bucketID, - org: org, orgID: orgID, d: d, cache: cache, @@ -313,6 +285,8 @@ type TablePointsMetadata struct { MeasurementName string // The tags in the table (final element is left as nil, to be replaced by field name) Tags [][]byte + // The offset in tags where to store the field name + FieldKeyTagValueOffset int // The column offset in the input table where the _time column is stored TimestampOffset int // The labels and offsets of all the fields in the table @@ -428,15 +402,15 @@ func (t *ToTransformation) writeTable(ctx context.Context, tbl flux.Table) error } switch fieldVal.Type() { - case semantic.Float: + case semantic.BasicFloat: fields[lao.Label] = fieldVal.Float() - case semantic.Int: + case semantic.BasicInt: fields[lao.Label] = fieldVal.Int() - case semantic.UInt: + case semantic.BasicUint: fields[lao.Label] = fieldVal.UInt() - case semantic.String: + case semantic.BasicString: fields[lao.Label] = fieldVal.Str() - case semantic.Bool: + case semantic.BasicBool: fields[lao.Label] = fieldVal.Bool() default: return fmt.Errorf("unsupported field type %v", fieldVal.Type()) diff --git a/query/stdlib/experimental/to_test.go b/query/stdlib/experimental/to_test.go index d0a89d3ae3..90fe39594c 100644 --- a/query/stdlib/experimental/to_test.go +++ b/query/stdlib/experimental/to_test.go @@ -3,7 +3,6 @@ package experimental_test import ( "context" "errors" - "fmt" "testing" "time" @@ -17,7 +16,6 @@ import ( "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/models" _ "github.com/influxdata/influxdb/v2/query/builtin" - pquerytest "github.com/influxdata/influxdb/v2/query/querytest" "github.com/influxdata/influxdb/v2/query/stdlib/experimental" "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" ) @@ -35,9 +33,9 @@ from(bucket:"mydb") Want: &flux.Spec{ Operations: []*flux.Operation{ { - ID: "influxDBFrom0", + ID: "from0", Spec: &influxdb.FromOpSpec{ - Bucket: "mydb", + Bucket: influxdb.NameOrID{Name: "mydb"}, }, }, { @@ -68,7 +66,7 @@ from(bucket:"mydb") }, }, Edges: []flux.Edge{ - {Parent: "influxDBFrom0", Child: "range1"}, + {Parent: "from0", Child: "range1"}, {Parent: "range1", Child: "pivot2"}, {Parent: "pivot2", Child: "experimental-to3"}, }, @@ -84,53 +82,6 @@ from(bucket:"mydb") } } -func TestToOpSpec_BucketsAccessed(t *testing.T) { - bucketName := "my_bucket" - bucketIDString := "ddddccccbbbbaaaa" - bucketID, err := platform.IDFromString(bucketIDString) - if err != nil { - t.Fatal(err) - } - orgName := "my_org" - orgIDString := "aaaabbbbccccdddd" - orgID, err := platform.IDFromString(orgIDString) - if err != nil { - t.Fatal(err) - } - tests := []pquerytest.BucketsAccessedTestCase{ - { - Name: "from() with bucket and to with org and bucket", - Raw: fmt.Sprintf(`import "experimental" -from(bucket:"%s") - |> experimental.to(bucket:"%s", org:"%s")`, bucketName, bucketName, orgName), - WantReadBuckets: &[]platform.BucketFilter{{Name: 
&bucketName}}, - WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, Org: &orgName}}, - }, - { - Name: "from() with bucket and to with orgID and bucket", - Raw: fmt.Sprintf(`import "experimental" -from(bucket:"%s") |> experimental.to(bucket:"%s", orgID:"%s")`, bucketName, bucketName, orgIDString), - WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, - WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, OrganizationID: orgID}}, - }, - { - Name: "from() with bucket and to with orgID and bucketID", - Raw: fmt.Sprintf(`import "experimental" -from(bucket:"%s") |> experimental.to(bucketID:"%s", orgID:"%s")`, bucketName, bucketIDString, orgIDString), - WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, - WantWriteBuckets: &[]platform.BucketFilter{{ID: bucketID, OrganizationID: orgID}}, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - pquerytest.BucketsAccessedTestHelper(t, tc) - }) - } -} - func TestTo_Process(t *testing.T) { oid, _ := mock.OrganizationLookup{}.Lookup(context.Background(), "my-org") bid, _ := mock.BucketLookup{}.Lookup(context.Background(), oid, "my-bucket") diff --git a/query/stdlib/influxdata/influxdb/buckets.go b/query/stdlib/influxdata/influxdb/buckets.go index 0a41282908..a5de0e7623 100644 --- a/query/stdlib/influxdata/influxdb/buckets.go +++ b/query/stdlib/influxdata/influxdb/buckets.go @@ -15,8 +15,23 @@ import ( "github.com/influxdata/influxdb/v2/query" ) +const BucketsKind = "influxdata/influxdb.localBuckets" + func init() { - execute.RegisterSource(influxdb.BucketsKind, createBucketsSource) + execute.RegisterSource(BucketsKind, createBucketsSource) + plan.RegisterPhysicalRules(LocalBucketsRule{}) +} + +type LocalBucketsProcedureSpec struct { + plan.DefaultCost +} + +func (s *LocalBucketsProcedureSpec) Kind() plan.ProcedureKind { + return BucketsKind +} + +func (s *LocalBucketsProcedureSpec) Copy() plan.ProcedureSpec { + return new(LocalBucketsProcedureSpec) } type BucketsDecoder struct { @@ -99,7 +114,7 @@ func (bd *BucketsDecoder) Close() error { } func createBucketsSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) { - _, ok := prSpec.(*influxdb.BucketsProcedureSpec) + _, ok := prSpec.(*LocalBucketsProcedureSpec) if !ok { return nil, &flux.Error{ Code: codes.Internal, @@ -128,3 +143,27 @@ type AllBucketLookup interface { FindAllBuckets(ctx context.Context, orgID platform.ID) ([]*platform.Bucket, int) } type BucketDependencies AllBucketLookup + +type LocalBucketsRule struct{} + +func (rule LocalBucketsRule) Name() string { + return "influxdata/influxdb.LocalBucketsRule" +} + +func (rule LocalBucketsRule) Pattern() plan.Pattern { + return plan.Pat(influxdb.BucketsKind) +} + +func (rule LocalBucketsRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { + fromSpec := node.ProcedureSpec().(*influxdb.BucketsProcedureSpec) + if fromSpec.Host != nil { + return node, false, nil + } else if fromSpec.Org != nil { + return node, false, &flux.Error{ + Code: codes.Unimplemented, + Msg: "buckets cannot list from a separate organization; please specify a host or remove the organization", + } + } + + return plan.CreateLogicalNode("localBuckets", &LocalBucketsProcedureSpec{}), true, nil +} diff --git a/query/stdlib/influxdata/influxdb/dependencies.go b/query/stdlib/influxdata/influxdb/dependencies.go index 4a1691bed1..5e182009e7 100644 --- a/query/stdlib/influxdata/influxdb/dependencies.go +++ 
b/query/stdlib/influxdata/influxdb/dependencies.go @@ -26,6 +26,9 @@ func (d StorageDependencies) Inject(ctx context.Context) context.Context { } func GetStorageDependencies(ctx context.Context) StorageDependencies { + if ctx.Value(dependenciesKey) == nil { + return StorageDependencies{} + } return ctx.Value(dependenciesKey).(StorageDependencies) } @@ -65,7 +68,7 @@ func (d Dependencies) PrometheusCollectors() []prometheus.Collector { } func NewDependencies( - reader Reader, + reader query.StorageReader, writer storage.PointsWriter, bucketSvc influxdb.BucketService, orgSvc influxdb.OrganizationService, diff --git a/query/stdlib/influxdata/influxdb/from.go b/query/stdlib/influxdata/influxdb/from.go index 5c8f5f6079..4e64dc879d 100644 --- a/query/stdlib/influxdata/influxdb/from.go +++ b/query/stdlib/influxdata/influxdb/from.go @@ -6,131 +6,32 @@ import ( "github.com/influxdata/flux" "github.com/influxdata/flux/codes" "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/semantic" "github.com/influxdata/flux/stdlib/influxdata/influxdb" - platform "github.com/influxdata/influxdb/v2" ) const FromKind = "influxDBFrom" -type FromOpSpec struct { - Bucket string `json:"bucket,omitempty"` - BucketID string `json:"bucketID,omitempty"` +type ( + NameOrID = influxdb.NameOrID + FromOpSpec = influxdb.FromOpSpec +) + +type FromStorageProcedureSpec struct { + Bucket influxdb.NameOrID } -func init() { - fromSignature := semantic.FunctionPolySignature{ - Parameters: map[string]semantic.PolyType{ - "bucket": semantic.String, - "bucketID": semantic.String, - }, - Required: nil, - Return: flux.TableObjectType, - } - - flux.ReplacePackageValue("influxdata/influxdb", influxdb.FromKind, flux.FunctionValue(FromKind, createFromOpSpec, fromSignature)) - flux.RegisterOpSpec(FromKind, newFromOp) - plan.RegisterProcedureSpec(FromKind, newFromProcedure, FromKind) -} - -func createFromOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) { - spec := new(FromOpSpec) - - if bucket, ok, err := args.GetString("bucket"); err != nil { - return nil, err - } else if ok { - spec.Bucket = bucket - } - - if bucketID, ok, err := args.GetString("bucketID"); err != nil { - return nil, err - } else if ok { - spec.BucketID = bucketID - } - - if spec.Bucket == "" && spec.BucketID == "" { - return nil, &flux.Error{ - Code: codes.Invalid, - Msg: "must specify one of bucket or bucketID", - } - } - if spec.Bucket != "" && spec.BucketID != "" { - return nil, &flux.Error{ - Code: codes.Invalid, - Msg: "must specify only one of bucket or bucketID", - } - } - return spec, nil -} - -func newFromOp() flux.OperationSpec { - return new(FromOpSpec) -} - -func (s *FromOpSpec) Kind() flux.OperationKind { +func (s *FromStorageProcedureSpec) Kind() plan.ProcedureKind { return FromKind } -// BucketsAccessed makes FromOpSpec a query.BucketAwareOperationSpec -func (s *FromOpSpec) BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter) { - bf := platform.BucketFilter{} - if s.Bucket != "" { - bf.Name = &s.Bucket - } - if orgID != nil { - bf.OrganizationID = orgID - } - - if len(s.BucketID) > 0 { - if id, err := platform.IDFromString(s.BucketID); err != nil { - invalidID := platform.InvalidID() - bf.ID = &invalidID - } else { - bf.ID = id - } - } - - if bf.ID != nil || bf.Name != nil { - readBuckets = append(readBuckets, bf) - } - return readBuckets, writeBuckets -} - -type FromProcedureSpec struct { - Bucket string - BucketID string -} - -func newFromProcedure(qs flux.OperationSpec, pa 
plan.Administration) (plan.ProcedureSpec, error) { - spec, ok := qs.(*FromOpSpec) - if !ok { - return nil, &flux.Error{ - Code: codes.Internal, - Msg: fmt.Sprintf("invalid spec type %T", qs), - } - } - - return &FromProcedureSpec{ - Bucket: spec.Bucket, - BucketID: spec.BucketID, - }, nil -} - -func (s *FromProcedureSpec) Kind() plan.ProcedureKind { - return FromKind -} - -func (s *FromProcedureSpec) Copy() plan.ProcedureSpec { - ns := new(FromProcedureSpec) - +func (s *FromStorageProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(FromStorageProcedureSpec) ns.Bucket = s.Bucket - ns.BucketID = s.BucketID - return ns } -func (s *FromProcedureSpec) PostPhysicalValidate(id plan.NodeID) error { - // FromProcedureSpec is a logical operation representing any read +func (s *FromStorageProcedureSpec) PostPhysicalValidate(id plan.NodeID) error { + // FromStorageProcedureSpec is a logical operation representing any read // from storage. However as a logical operation, it doesn't specify // how data is to be read from storage. It is the query planner's // job to determine the optimal read strategy and to convert this @@ -142,10 +43,10 @@ func (s *FromProcedureSpec) PostPhysicalValidate(id plan.NodeID) error { // not support unbounded reads, and so this query must not be // validated. var bucket string - if len(s.Bucket) > 0 { - bucket = s.Bucket + if s.Bucket.Name != "" { + bucket = s.Bucket.Name } else { - bucket = s.BucketID + bucket = s.Bucket.ID } return &flux.Error{ Code: codes.Invalid, diff --git a/query/stdlib/influxdata/influxdb/from_test.go b/query/stdlib/influxdata/influxdb/from_test.go index a77ddd6a88..da0a31db89 100644 --- a/query/stdlib/influxdata/influxdb/from_test.go +++ b/query/stdlib/influxdata/influxdb/from_test.go @@ -1,168 +1,23 @@ package influxdb_test import ( - "fmt" + "context" "testing" - "time" "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" "github.com/influxdata/flux/plan" "github.com/influxdata/flux/plan/plantest" - "github.com/influxdata/flux/querytest" + "github.com/influxdata/flux/stdlib/influxdata/influxdb" "github.com/influxdata/flux/stdlib/universe" - platform "github.com/influxdata/influxdb/v2" - pquerytest "github.com/influxdata/influxdb/v2/query/querytest" - "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" + qinfluxdb "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" ) -func TestFrom_NewQuery(t *testing.T) { - t.Skip() - tests := []querytest.NewQueryTestCase{ - { - Name: "from no args", - Raw: `from()`, - WantErr: true, - }, - { - Name: "from conflicting args", - Raw: `from(bucket:"d", bucket:"b")`, - WantErr: true, - }, - { - Name: "from repeat arg", - Raw: `from(bucket:"telegraf", bucket:"oops")`, - WantErr: true, - }, - { - Name: "from", - Raw: `from(bucket:"telegraf", chicken:"what is this?")`, - WantErr: true, - }, - { - Name: "from bucket invalid ID", - Raw: `from(bucketID:"invalid")`, - WantErr: true, - }, - { - Name: "from bucket ID", - Raw: `from(bucketID:"aaaabbbbccccdddd")`, - Want: &flux.Spec{ - Operations: []*flux.Operation{ - { - ID: "from0", - Spec: &influxdb.FromOpSpec{ - BucketID: "aaaabbbbccccdddd", - }, - }, - }, - }, - }, - { - Name: "from with database", - Raw: `from(bucket:"mybucket") |> range(start:-4h, stop:-2h) |> sum()`, - Want: &flux.Spec{ - Operations: []*flux.Operation{ - { - ID: "from0", - Spec: &influxdb.FromOpSpec{ - Bucket: "mybucket", - }, - }, - { - ID: "range1", - Spec: &universe.RangeOpSpec{ - Start: flux.Time{ - Relative: -4 * time.Hour, - IsRelative: true, - }, - 
Stop: flux.Time{ - Relative: -2 * time.Hour, - IsRelative: true, - }, - TimeColumn: "_time", - StartColumn: "_start", - StopColumn: "_stop", - }, - }, - { - ID: "sum2", - Spec: &universe.SumOpSpec{ - AggregateConfig: execute.DefaultAggregateConfig, - }, - }, - }, - Edges: []flux.Edge{ - {Parent: "from0", Child: "range1"}, - {Parent: "range1", Child: "sum2"}, - }, - }, - }, - } - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - querytest.NewQueryTestHelper(t, tc) - }) - } -} - -func TestFromOperation_Marshaling(t *testing.T) { - t.Skip() - data := []byte(`{"id":"from","kind":"from","spec":{"bucket":"mybucket"}}`) - op := &flux.Operation{ - ID: "from", - Spec: &influxdb.FromOpSpec{ - Bucket: "mybucket", - }, - } - querytest.OperationMarshalingTestHelper(t, data, op) -} - -func TestFromOpSpec_BucketsAccessed(t *testing.T) { - bucketName := "my_bucket" - bucketIDString := "aaaabbbbccccdddd" - bucketID, err := platform.IDFromString(bucketIDString) - if err != nil { - t.Fatal(err) - } - invalidID := platform.InvalidID() - tests := []pquerytest.BucketsAccessedTestCase{ - { - Name: "From with bucket", - Raw: fmt.Sprintf(`from(bucket:"%s")`, bucketName), - WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, - WantWriteBuckets: &[]platform.BucketFilter{}, - }, - { - Name: "From with bucketID", - Raw: fmt.Sprintf(`from(bucketID:"%s")`, bucketID), - WantReadBuckets: &[]platform.BucketFilter{{ID: bucketID}}, - WantWriteBuckets: &[]platform.BucketFilter{}, - }, - { - Name: "From invalid bucketID", - Raw: `from(bucketID:"invalid")`, - WantReadBuckets: &[]platform.BucketFilter{{ID: &invalidID}}, - WantWriteBuckets: &[]platform.BucketFilter{}, - }, - } - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - pquerytest.BucketsAccessedTestHelper(t, tc) - }) - } -} - func TestFromValidation(t *testing.T) { spec := plantest.PlanSpec{ // from |> group (cannot query an infinite time range) Nodes: []plan.Node{ plan.CreateLogicalNode("from", &influxdb.FromProcedureSpec{ - Bucket: "my-bucket", + Bucket: influxdb.NameOrID{Name: "my-bucket"}, }), plan.CreatePhysicalNode("group", &universe.GroupProcedureSpec{ GroupMode: flux.GroupModeBy, @@ -176,11 +31,12 @@ func TestFromValidation(t *testing.T) { ps := plantest.CreatePlanSpec(&spec) pp := plan.NewPhysicalPlanner(plan.OnlyPhysicalRules( - influxdb.PushDownRangeRule{}, - influxdb.PushDownFilterRule{}, - influxdb.PushDownGroupRule{}, + qinfluxdb.FromStorageRule{}, + qinfluxdb.PushDownRangeRule{}, + qinfluxdb.PushDownFilterRule{}, + qinfluxdb.PushDownGroupRule{}, )) - _, err := pp.Plan(ps) + _, err := pp.Plan(context.Background(), ps) if err == nil { t.Error("Expected query with no call to range to fail physical planning") } diff --git a/query/stdlib/influxdata/influxdb/operators.go b/query/stdlib/influxdata/influxdb/operators.go index 01b09c4779..23c62e7554 100644 --- a/query/stdlib/influxdata/influxdb/operators.go +++ b/query/stdlib/influxdata/influxdb/operators.go @@ -7,9 +7,9 @@ import ( "github.com/influxdata/flux" "github.com/influxdata/flux/codes" "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/semantic" "github.com/influxdata/flux/values" "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/storage/reads/datatypes" ) const ( @@ -55,12 +55,10 @@ type ReadRangePhysSpec struct { Bucket string BucketID string - // FilterSet is set to true if there is a filter. - FilterSet bool // Filter is the filter to use when calling into // storage. 
It must be possible to push down this
 	// filter.
-	Filter *semantic.FunctionExpression
+	Filter *datatypes.Predicate

 	Bounds flux.Bounds
 }
@@ -69,19 +67,8 @@ func (s *ReadRangePhysSpec) Kind() plan.ProcedureKind {
 	return ReadRangePhysKind
 }
 func (s *ReadRangePhysSpec) Copy() plan.ProcedureSpec {
-	ns := new(ReadRangePhysSpec)
-
-	ns.Bucket = s.Bucket
-	ns.BucketID = s.BucketID
-
-	ns.FilterSet = s.FilterSet
-	if ns.FilterSet {
-		ns.Filter = s.Filter.Copy().(*semantic.FunctionExpression)
-	}
-
-	ns.Bounds = s.Bounds
-
-	return ns
+	ns := *s
+	return &ns
 }

 func (s *ReadRangePhysSpec) LookupBucketID(ctx context.Context, orgID influxdb.ID, buckets BucketLookup) (influxdb.ID, error) {
@@ -127,22 +114,29 @@ type ReadWindowAggregatePhysSpec struct {
 	ReadRangePhysSpec

 	WindowEvery int64
+	Offset      int64
 	Aggregates  []plan.ProcedureKind
+	CreateEmpty bool
+	TimeColumn  string
 }

 func (s *ReadWindowAggregatePhysSpec) PlanDetails() string {
-	return fmt.Sprintf("every = %d, aggregates = %v", s.WindowEvery, s.Aggregates)
+	return fmt.Sprintf("every = %d, aggregates = %v, createEmpty = %v, timeColumn = \"%s\"", s.WindowEvery, s.Aggregates, s.CreateEmpty, s.TimeColumn)
 }

 func (s *ReadWindowAggregatePhysSpec) Kind() plan.ProcedureKind {
 	return ReadWindowAggregatePhysKind
 }
+
 func (s *ReadWindowAggregatePhysSpec) Copy() plan.ProcedureSpec {
 	ns := new(ReadWindowAggregatePhysSpec)
 	ns.ReadRangePhysSpec = *s.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec)
 	ns.WindowEvery = s.WindowEvery
+	ns.Offset = s.Offset
 	ns.Aggregates = s.Aggregates
+	ns.CreateEmpty = s.CreateEmpty
+	ns.TimeColumn = s.TimeColumn
 	return ns
 }
diff --git a/query/stdlib/influxdata/influxdb/rules.go b/query/stdlib/influxdata/influxdb/rules.go
index 4102a2a73e..69d9c46469 100644
--- a/query/stdlib/influxdata/influxdb/rules.go
+++ b/query/stdlib/influxdata/influxdb/rules.go
@@ -1,23 +1,69 @@
 package influxdb

 import (
+	"context"
+	"math"
+
+	"github.com/influxdata/flux"
 	"github.com/influxdata/flux/ast"
+	"github.com/influxdata/flux/codes"
 	"github.com/influxdata/flux/execute"
 	"github.com/influxdata/flux/plan"
 	"github.com/influxdata/flux/semantic"
+	"github.com/influxdata/flux/stdlib/influxdata/influxdb"
 	"github.com/influxdata/flux/stdlib/universe"
+	"github.com/influxdata/flux/values"
+	"github.com/influxdata/influxdb/v2/kit/feature"
+	"github.com/influxdata/influxdb/v2/query"
 )

 func init() {
 	plan.RegisterPhysicalRules(
+		FromStorageRule{},
 		PushDownRangeRule{},
 		PushDownFilterRule{},
 		PushDownGroupRule{},
-		PushDownReadTagKeysRule{},
-		PushDownReadTagValuesRule{},
+		// These rules can be re-enabled once https://github.com/influxdata/influxdb/issues/19561 is fixed
+		// PushDownReadTagKeysRule{},
+		// PushDownReadTagValuesRule{},
 		SortedPivotRule{},
+		PushDownWindowAggregateRule{},
+		PushDownWindowAggregateByTimeRule{},
+		PushDownBareAggregateRule{},
+		GroupWindowAggregateTransposeRule{},
+		PushDownGroupAggregateRule{},
+		SwitchFillImplRule{},
+		SwitchSchemaMutationImplRule{},
 	)
+	plan.RegisterLogicalRules(
+		MergeFiltersRule{},
+	)
+}
+
+type FromStorageRule struct{}
+
+func (rule FromStorageRule) Name() string {
+	return "influxdata/influxdb.FromStorageRule"
+}
+
+func (rule FromStorageRule) Pattern() plan.Pattern {
+	return plan.Pat(influxdb.FromKind)
+}
+
+func (rule FromStorageRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
+	fromSpec := node.ProcedureSpec().(*influxdb.FromProcedureSpec)
+	if fromSpec.Host != nil {
+		return node, false, nil
+	} else if fromSpec.Org != nil {
+		return node, false, &flux.Error{
+			Code: codes.Unimplemented,
+			Msg:  "reads from the storage engine cannot read from a separate organization; please specify a host or remove the organization",
+		}
+	}
+
+	return plan.CreateLogicalNode("fromStorage", &FromStorageProcedureSpec{
+		Bucket: fromSpec.Bucket,
+	}), true, nil
+}

 // PushDownGroupRule pushes down a group operation to storage
@@ -31,7 +77,7 @@ func (rule PushDownGroupRule) Pattern() plan.Pattern {
 	return plan.Pat(universe.GroupKind, plan.Pat(ReadRangePhysKind))
 }

-func (rule PushDownGroupRule) Rewrite(node plan.Node) (plan.Node, bool, error) {
+func (rule PushDownGroupRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
 	src := node.Predecessors()[0].ProcedureSpec().(*ReadRangePhysSpec)
 	grp := node.ProcedureSpec().(*universe.GroupProcedureSpec)
@@ -71,14 +117,13 @@ func (rule PushDownRangeRule) Pattern() plan.Pattern {
 }

 // Rewrite converts 'from |> range' into 'ReadRange'
-func (rule PushDownRangeRule) Rewrite(node plan.Node) (plan.Node, bool, error) {
+func (rule PushDownRangeRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
 	fromNode := node.Predecessors()[0]
-	fromSpec := fromNode.ProcedureSpec().(*FromProcedureSpec)
-
+	fromSpec := fromNode.ProcedureSpec().(*FromStorageProcedureSpec)
 	rangeSpec := node.ProcedureSpec().(*universe.RangeProcedureSpec)
 	return plan.CreatePhysicalNode("ReadRange", &ReadRangePhysSpec{
-		Bucket:   fromSpec.Bucket,
-		BucketID: fromSpec.BucketID,
+		Bucket:   fromSpec.Bucket.Name,
+		BucketID: fromSpec.Bucket.ID,
 		Bounds:   rangeSpec.Bounds,
 	}), true, nil
 }
@@ -96,7 +141,7 @@ func (PushDownFilterRule) Pattern() plan.Pattern {
 	return plan.Pat(universe.FilterKind, plan.Pat(ReadRangePhysKind))
 }

-func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
+func (PushDownFilterRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
 	filterSpec := pn.ProcedureSpec().(*universe.FilterProcedureSpec)
 	fromNode := pn.Predecessors()[0]
 	fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)
@@ -106,17 +151,17 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
 		return pn, false, nil
 	}

-	bodyExpr, ok := filterSpec.Fn.Fn.Block.Body.(semantic.Expression)
+	bodyExpr, ok := filterSpec.Fn.Fn.GetFunctionBodyExpression()
 	if !ok {
 		return pn, false, nil
 	}

-	if len(filterSpec.Fn.Fn.Block.Parameters.List) != 1 {
+	if len(filterSpec.Fn.Fn.Parameters.List) != 1 {
 		// I would expect that type checking would catch this, but just to be safe...
 		return pn, false, nil
 	}

-	paramName := filterSpec.Fn.Fn.Block.Parameters.List[0].Key.Name
+	paramName := filterSpec.Fn.Fn.Parameters.List[0].Key.Name

 	pushable, notPushable, err := semantic.PartitionPredicates(bodyExpr, func(e semantic.Expression) (bool, error) {
 		return isPushableExpr(paramName, e)
@@ -131,17 +176,26 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
 	}
 	pushable, _ = rewritePushableExpr(pushable)

-	newFromSpec := fromSpec.Copy().(*ReadRangePhysSpec)
-	if newFromSpec.FilterSet {
-		newBody := semantic.ExprsToConjunction(newFromSpec.Filter.Block.Body.(semantic.Expression), pushable)
-		newFromSpec.Filter.Block.Body = newBody
-	} else {
-		newFromSpec.FilterSet = true
-		// NOTE: We loose the scope here, but that is ok because we can't push down the scope to storage.
-		newFromSpec.Filter = filterSpec.Fn.Fn.Copy().(*semantic.FunctionExpression)
-		newFromSpec.Filter.Block.Body = pushable
+	// Convert the pushable expression to a storage predicate.
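+	// (ToStoragePredicate is expected to lower the pushable semantic
+	// expression into the datatypes.Predicate consumed by the storage
+	// RPCs, matching the new *datatypes.Predicate type of
+	// ReadRangePhysSpec.Filter above.)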
+ predicate, err := ToStoragePredicate(pushable, paramName) + if err != nil { + return nil, false, err } + // If the filter has already been set, then combine the existing predicate + // with the new one. + if fromSpec.Filter != nil { + mergedPredicate, err := mergePredicates(ast.AndOperator, fromSpec.Filter, predicate) + if err != nil { + return nil, false, err + } + predicate = mergedPredicate + } + + // Copy the specification and set the predicate. + newFromSpec := fromSpec.Copy().(*ReadRangePhysSpec) + newFromSpec.Filter = predicate + if notPushable == nil { // All predicates could be pushed down, so eliminate the filter mergedNode, err := plan.MergeToPhysicalNode(pn, fromNode, newFromSpec) @@ -157,7 +211,11 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) { } newFilterSpec := filterSpec.Copy().(*universe.FilterProcedureSpec) - newFilterSpec.Fn.Fn.Block.Body = notPushable + newFilterSpec.Fn.Fn.Block = &semantic.Block{ + Body: []semantic.Statement{ + &semantic.ReturnStatement{Argument: notPushable}, + }, + } if err := pn.ReplaceSpec(newFilterSpec); err != nil { return nil, false, err } @@ -183,11 +241,11 @@ func (rule PushDownReadTagKeysRule) Pattern() plan.Pattern { plan.Pat(ReadRangePhysKind)))) } -func (rule PushDownReadTagKeysRule) Rewrite(pn plan.Node) (plan.Node, bool, error) { +func (rule PushDownReadTagKeysRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { // Retrieve the nodes and specs for all of the predecessors. distinctSpec := pn.ProcedureSpec().(*universe.DistinctProcedureSpec) keepNode := pn.Predecessors()[0] - keepSpec := keepNode.ProcedureSpec().(*universe.SchemaMutationProcedureSpec) + keepSpec := asSchemaMutationProcedureSpec(keepNode.ProcedureSpec()) keysNode := keepNode.Predecessors()[0] keysSpec := keysNode.ProcedureSpec().(*universe.KeysProcedureSpec) fromNode := keysNode.Predecessors()[0] @@ -245,14 +303,14 @@ func (rule PushDownReadTagValuesRule) Pattern() plan.Pattern { plan.Pat(ReadRangePhysKind)))) } -func (rule PushDownReadTagValuesRule) Rewrite(pn plan.Node) (plan.Node, bool, error) { +func (rule PushDownReadTagValuesRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { // Retrieve the nodes and specs for all of the predecessors. distinctNode := pn distinctSpec := distinctNode.ProcedureSpec().(*universe.DistinctProcedureSpec) groupNode := distinctNode.Predecessors()[0] groupSpec := groupNode.ProcedureSpec().(*universe.GroupProcedureSpec) keepNode := groupNode.Predecessors()[0] - keepSpec := keepNode.ProcedureSpec().(*universe.SchemaMutationProcedureSpec) + keepSpec := asSchemaMutationProcedureSpec(keepNode.ProcedureSpec()) fromNode := keepNode.Predecessors()[0] fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec) @@ -556,7 +614,7 @@ func (SortedPivotRule) Pattern() plan.Pattern { return plan.Pat(universe.PivotKind, plan.Pat(ReadRangePhysKind)) } -func (SortedPivotRule) Rewrite(pn plan.Node) (plan.Node, bool, error) { +func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { pivotSpec := pn.ProcedureSpec().Copy().(*universe.PivotProcedureSpec) pivotSpec.IsSortedByFunc = func(cols []string, desc bool) bool { if desc { @@ -595,3 +653,551 @@ func (SortedPivotRule) Rewrite(pn plan.Node) (plan.Node, bool, error) { } return pn, false, nil } + +// +// Push Down of window aggregates. 
+// ReadRangePhys |> window |> { min, max, mean, count, sum } +// +type PushDownWindowAggregateRule struct{} + +func (PushDownWindowAggregateRule) Name() string { + return "PushDownWindowAggregateRule" +} + +var windowPushableAggs = []plan.ProcedureKind{ + universe.CountKind, + universe.SumKind, + universe.MinKind, + universe.MaxKind, + universe.MeanKind, + universe.FirstKind, + universe.LastKind, +} + +func (rule PushDownWindowAggregateRule) Pattern() plan.Pattern { + return plan.OneOf(windowPushableAggs, + plan.Pat(universe.WindowKind, plan.Pat(ReadRangePhysKind))) +} + +func canPushWindowedAggregate(ctx context.Context, fnNode plan.Node) bool { + caps, ok := capabilities(ctx) + if !ok { + return false + } + // Check the aggregate function spec. Require the operation on _value + // and check the feature flag associated with the aggregate function. + switch fnNode.Kind() { + case universe.MinKind: + if !caps.HaveMin() { + return false + } + minSpec := fnNode.ProcedureSpec().(*universe.MinProcedureSpec) + if minSpec.Column != execute.DefaultValueColLabel { + return false + } + case universe.MaxKind: + if !caps.HaveMax() { + return false + } + maxSpec := fnNode.ProcedureSpec().(*universe.MaxProcedureSpec) + if maxSpec.Column != execute.DefaultValueColLabel { + return false + } + case universe.MeanKind: + if !feature.PushDownWindowAggregateMean().Enabled(ctx) || !caps.HaveMean() { + return false + } + meanSpec := fnNode.ProcedureSpec().(*universe.MeanProcedureSpec) + if len(meanSpec.Columns) != 1 || meanSpec.Columns[0] != execute.DefaultValueColLabel { + return false + } + case universe.CountKind: + if !caps.HaveCount() { + return false + } + countSpec := fnNode.ProcedureSpec().(*universe.CountProcedureSpec) + if len(countSpec.Columns) != 1 || countSpec.Columns[0] != execute.DefaultValueColLabel { + return false + } + case universe.SumKind: + if !caps.HaveSum() { + return false + } + sumSpec := fnNode.ProcedureSpec().(*universe.SumProcedureSpec) + if len(sumSpec.Columns) != 1 || sumSpec.Columns[0] != execute.DefaultValueColLabel { + return false + } + case universe.FirstKind: + if !caps.HaveFirst() { + return false + } + firstSpec := fnNode.ProcedureSpec().(*universe.FirstProcedureSpec) + if firstSpec.Column != execute.DefaultValueColLabel { + return false + } + case universe.LastKind: + if !caps.HaveLast() { + return false + } + lastSpec := fnNode.ProcedureSpec().(*universe.LastProcedureSpec) + if lastSpec.Column != execute.DefaultValueColLabel { + return false + } + } + return true +} + +func isPushableWindow(windowSpec *universe.WindowProcedureSpec) bool { + // every and period must be equal + // every.months must be zero + // every.isNegative must be false + // offset.months must be zero + // offset.isNegative must be false + // timeColumn: must be "_time" + // startColumn: must be "_start" + // stopColumn: must be "_stop" + // createEmpty: must be false + window := windowSpec.Window + return window.Every.Equal(window.Period) && + window.Every.Months() == 0 && + !window.Every.IsNegative() && + !window.Every.IsZero() && + window.Offset.Months() == 0 && + !window.Offset.IsNegative() && + windowSpec.TimeColumn == "_time" && + windowSpec.StartColumn == "_start" && + windowSpec.StopColumn == "_stop" +} + +func capabilities(ctx context.Context) (query.WindowAggregateCapability, bool) { + reader := GetStorageDependencies(ctx).FromDeps.Reader + windowAggregateReader, ok := reader.(query.WindowAggregateReader) + if !ok { + return nil, false + } + caps := 
windowAggregateReader.GetWindowAggregateCapability(ctx)
+	return caps, caps != nil
+}
+
+func (PushDownWindowAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
+	fnNode := pn
+	if !canPushWindowedAggregate(ctx, fnNode) {
+		return pn, false, nil
+	}
+
+	windowNode := fnNode.Predecessors()[0]
+	windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec)
+	fromNode := windowNode.Predecessors()[0]
+	fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)
+
+	if !isPushableWindow(windowSpec) {
+		return pn, false, nil
+	}
+
+	if caps, ok := capabilities(ctx); !ok || windowSpec.Window.Offset.IsPositive() && !caps.HaveOffset() {
+		return pn, false, nil
+	}
+
+	// Rule passes.
+	return plan.CreatePhysicalNode("ReadWindowAggregate", &ReadWindowAggregatePhysSpec{
+		ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec),
+		Aggregates:        []plan.ProcedureKind{fnNode.Kind()},
+		WindowEvery:       windowSpec.Window.Every.Nanoseconds(),
+		Offset:            windowSpec.Window.Offset.Nanoseconds(),
+		CreateEmpty:       windowSpec.CreateEmpty,
+	}), true, nil
+}
+
+// PushDownWindowAggregateByTimeRule will match the given pattern.
+// ReadWindowAggregatePhys |> duplicate |> window(every: inf)
+//
+// If this pattern matches and the arguments to duplicate are
+// matching time column names, it will set the time column on
+// the spec.
+type PushDownWindowAggregateByTimeRule struct{}
+
+func (PushDownWindowAggregateByTimeRule) Name() string {
+	return "PushDownWindowAggregateByTimeRule"
+}
+
+func (rule PushDownWindowAggregateByTimeRule) Pattern() plan.Pattern {
+	return plan.Pat(universe.WindowKind,
+		plan.Pat(universe.SchemaMutationKind,
+			plan.Pat(ReadWindowAggregatePhysKind)))
+}
+
+func (PushDownWindowAggregateByTimeRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
+	windowNode := pn
+	windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec)
+
+	duplicateNode := windowNode.Predecessors()[0]
+	duplicateSpec, duplicateSpecOk := func() (*universe.DuplicateOpSpec, bool) {
+		s := asSchemaMutationProcedureSpec(duplicateNode.ProcedureSpec())
+		if len(s.Mutations) != 1 {
+			return nil, false
+		}
+		mutator, ok := s.Mutations[0].(*universe.DuplicateOpSpec)
+		return mutator, ok
+	}()
+	if !duplicateSpecOk {
+		return pn, false, nil
+	}
+
+	// The As field must be the default time value
+	// and the column must be start or stop.
+	if duplicateSpec.As != execute.DefaultTimeColLabel ||
+		(duplicateSpec.Column != execute.DefaultStartColLabel && duplicateSpec.Column != execute.DefaultStopColLabel) {
+		return pn, false, nil
+	}
+
+	// window(every: inf)
+	if windowSpec.Window.Every != values.ConvertDuration(math.MaxInt64) ||
+		windowSpec.Window.Every != windowSpec.Window.Period ||
+		windowSpec.TimeColumn != execute.DefaultTimeColLabel ||
+		windowSpec.StartColumn != execute.DefaultStartColLabel ||
+		windowSpec.StopColumn != execute.DefaultStopColLabel ||
+		windowSpec.CreateEmpty {
+		return pn, false, nil
+	}
+
+	// Cannot rewrite if it was already rewritten.
+	windowAggregateNode := duplicateNode.Predecessors()[0]
+	windowAggregateSpec := windowAggregateNode.ProcedureSpec().(*ReadWindowAggregatePhysSpec)
+	if windowAggregateSpec.TimeColumn != "" {
+		return pn, false, nil
+	}
+
+	// Rule passes.
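+	// (Sketch: this is the tail that aggregateWindow() produces, e.g.
+	//   from(bucket: "b") |> range(start: -1h)
+	//       |> window(every: 1m) |> mean()
+	//       |> duplicate(column: "_stop", as: "_time") |> window(every: inf)
+	// so the duplicate/window(every: inf) pair is folded into the
+	// pushed-down window aggregate below.)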
+ windowAggregateSpec.TimeColumn = duplicateSpec.Column + return plan.CreatePhysicalNode("ReadWindowAggregateByTime", windowAggregateSpec), true, nil +} + +// PushDownBareAggregateRule is a rule that allows pushing down of aggregates +// that are directly over a ReadRange source. +type PushDownBareAggregateRule struct{} + +func (p PushDownBareAggregateRule) Name() string { + return "PushDownBareAggregateRule" +} + +func (p PushDownBareAggregateRule) Pattern() plan.Pattern { + return plan.OneOf(windowPushableAggs, + plan.Pat(ReadRangePhysKind)) +} + +func (p PushDownBareAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { + fnNode := pn + if !canPushWindowedAggregate(ctx, fnNode) { + return pn, false, nil + } + + fromNode := fnNode.Predecessors()[0] + fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec) + + return plan.CreatePhysicalNode("ReadWindowAggregate", &ReadWindowAggregatePhysSpec{ + ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec), + Aggregates: []plan.ProcedureKind{fnNode.Kind()}, + WindowEvery: math.MaxInt64, + }), true, nil +} + +// GroupWindowAggregateTransposeRule will match the given pattern. +// ReadGroupPhys |> window |> { min, max, count, sum } +// +// This pattern will use the PushDownWindowAggregateRule to determine +// if the ReadWindowAggregatePhys operation is available before it will +// rewrite the above. This rewrites the above to: +// +// ReadWindowAggregatePhys |> group(columns: ["_start", "_stop", ...]) |> { min, max, sum } +// +// The count aggregate uses sum to merge the results. +type GroupWindowAggregateTransposeRule struct{} + +func (p GroupWindowAggregateTransposeRule) Name() string { + return "GroupWindowAggregateTransposeRule" +} + +var windowMergeablePushAggs = []plan.ProcedureKind{ + universe.MinKind, + universe.MaxKind, + universe.CountKind, + universe.SumKind, +} + +func (p GroupWindowAggregateTransposeRule) Pattern() plan.Pattern { + return plan.OneOf(windowMergeablePushAggs, + plan.Pat(universe.WindowKind, plan.Pat(ReadGroupPhysKind))) +} + +func (p GroupWindowAggregateTransposeRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { + if !feature.GroupWindowAggregateTranspose().Enabled(ctx) { + return pn, false, nil + } + + fnNode := pn + if !canPushWindowedAggregate(ctx, fnNode) { + return pn, false, nil + } + + windowNode := fnNode.Predecessors()[0] + windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec) + + if !isPushableWindow(windowSpec) { + return pn, false, nil + } + + if caps, ok := capabilities(ctx); !ok || windowSpec.Window.Offset.IsPositive() && !caps.HaveOffset() { + return pn, false, nil + } + + fromNode := windowNode.Predecessors()[0] + fromSpec := fromNode.ProcedureSpec().(*ReadGroupPhysSpec) + + // This only works with GroupModeBy. It is the case + // that ReadGroup, which we depend on as a predecessor, + // only works with GroupModeBy so it should be impossible + // to fail this condition, but we add this here for extra + // protection. + if fromSpec.GroupMode != flux.GroupModeBy { + return pn, false, nil + } + + // Perform the rewrite by replacing each of the nodes. 
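+	// (Sketch of the rewritten plan, per the rule comment above:
+	//   ReadGroup |> window(every: e) |> count
+	// becomes
+	//   ReadWindowAggregate(count, every: e)
+	//       |> group(columns: groupKeys + ["_start", "_stop"])
+	//       |> sum()
+	// with count merged via sum; the other aggregates merge with themselves.)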
+	newFromNode := plan.CreatePhysicalNode("ReadWindowAggregate", &ReadWindowAggregatePhysSpec{
+		ReadRangePhysSpec: *fromSpec.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec),
+		Aggregates: []plan.ProcedureKind{fnNode.Kind()},
+		WindowEvery: windowSpec.Window.Every.Nanoseconds(),
+		Offset: windowSpec.Window.Offset.Nanoseconds(),
+		CreateEmpty: windowSpec.CreateEmpty,
+	})
+
+	// Replace the window node with a group node first.
+	groupKeys := make([]string, len(fromSpec.GroupKeys), len(fromSpec.GroupKeys)+2)
+	copy(groupKeys, fromSpec.GroupKeys)
+	if !execute.ContainsStr(groupKeys, execute.DefaultStartColLabel) {
+		groupKeys = append(groupKeys, execute.DefaultStartColLabel)
+	}
+	if !execute.ContainsStr(groupKeys, execute.DefaultStopColLabel) {
+		groupKeys = append(groupKeys, execute.DefaultStopColLabel)
+	}
+	newGroupNode := plan.CreatePhysicalNode("group", &universe.GroupProcedureSpec{
+		GroupMode: flux.GroupModeBy,
+		GroupKeys: groupKeys,
+	})
+	newFromNode.AddSuccessors(newGroupNode)
+	newGroupNode.AddPredecessors(newFromNode)
+
+	// Attach the existing function node to the new group node.
+	fnNode.ClearPredecessors()
+	newGroupNode.AddSuccessors(fnNode)
+	fnNode.AddPredecessors(newGroupNode)
+
+	// Replace the spec for the function if needed.
+	switch spec := fnNode.ProcedureSpec().(type) {
+	case *universe.CountProcedureSpec:
+		newFnNode := plan.CreatePhysicalNode("sum", &universe.SumProcedureSpec{
+			AggregateConfig: spec.AggregateConfig,
+		})
+		plan.ReplaceNode(fnNode, newFnNode)
+		fnNode = newFnNode
+	default:
+		// No replacement required. The procedure is idempotent so
+		// we can use it over and over again and get the same result.
+	}
+	return fnNode, true, nil
+}
+
+// PushDownGroupAggregateRule pushes down a grouped aggregate:
+// ReadGroupPhys |> { count, sum, first, last, min, max }
+type PushDownGroupAggregateRule struct{}
+
+func (PushDownGroupAggregateRule) Name() string {
+	return "PushDownGroupAggregateRule"
+}
+
+func (rule PushDownGroupAggregateRule) Pattern() plan.Pattern {
+	return plan.OneOf(
+		[]plan.ProcedureKind{
+			universe.CountKind,
+			universe.SumKind,
+			universe.FirstKind,
+			universe.LastKind,
+			universe.MinKind,
+			universe.MaxKind,
+		},
+		plan.Pat(ReadGroupPhysKind))
+}
+
+func (PushDownGroupAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
+	group := pn.Predecessors()[0].ProcedureSpec().(*ReadGroupPhysSpec)
+	// Cannot push down multiple aggregates
+	if len(group.AggregateMethod) > 0 {
+		return pn, false, nil
+	}
+
+	if !canPushGroupedAggregate(ctx, pn) {
+		return pn, false, nil
+	}
+
+	switch pn.Kind() {
+	case universe.CountKind:
+		// ReadGroup() -> count => ReadGroup(count)
+		node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
+			ReadRangePhysSpec: group.ReadRangePhysSpec,
+			GroupMode: group.GroupMode,
+			GroupKeys: group.GroupKeys,
+			AggregateMethod: universe.CountKind,
+		})
+		return node, true, nil
+	case universe.SumKind:
+		// ReadGroup() -> sum => ReadGroup(sum)
+		node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
+			ReadRangePhysSpec: group.ReadRangePhysSpec,
+			GroupMode: group.GroupMode,
+			GroupKeys: group.GroupKeys,
+			AggregateMethod: universe.SumKind,
+		})
+		return node, true, nil
+	case universe.FirstKind:
+		// ReadGroup() -> first => ReadGroup(first)
+		node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
+			ReadRangePhysSpec: group.ReadRangePhysSpec,
+			GroupMode: group.GroupMode,
+			GroupKeys: group.GroupKeys,
+			AggregateMethod: universe.FirstKind,
+		})
+		return node, true, nil
+	case universe.LastKind:
+ // ReadGroup() -> last => ReadGroup(last) + node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{ + ReadRangePhysSpec: group.ReadRangePhysSpec, + GroupMode: group.GroupMode, + GroupKeys: group.GroupKeys, + AggregateMethod: universe.LastKind, + }) + return node, true, nil + case universe.MinKind: + // ReadGroup() -> min => ReadGroup(min) + if feature.PushDownGroupAggregateMinMax().Enabled(ctx) { + node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{ + ReadRangePhysSpec: group.ReadRangePhysSpec, + GroupMode: group.GroupMode, + GroupKeys: group.GroupKeys, + AggregateMethod: universe.MinKind, + }) + return node, true, nil + } + case universe.MaxKind: + // ReadGroup() -> max => ReadGroup(max) + if feature.PushDownGroupAggregateMinMax().Enabled(ctx) { + node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{ + ReadRangePhysSpec: group.ReadRangePhysSpec, + GroupMode: group.GroupMode, + GroupKeys: group.GroupKeys, + AggregateMethod: universe.MaxKind, + }) + return node, true, nil + } + } + return pn, false, nil +} + +func canPushGroupedAggregate(ctx context.Context, pn plan.Node) bool { + reader := GetStorageDependencies(ctx).FromDeps.Reader + aggregator, ok := reader.(query.GroupAggregator) + if !ok { + return false + } + caps := aggregator.GetGroupCapability(ctx) + if caps == nil { + return false + } + switch pn.Kind() { + case universe.CountKind: + agg := pn.ProcedureSpec().(*universe.CountProcedureSpec) + return caps.HaveCount() && len(agg.Columns) == 1 && agg.Columns[0] == execute.DefaultValueColLabel + case universe.SumKind: + agg := pn.ProcedureSpec().(*universe.SumProcedureSpec) + return caps.HaveSum() && len(agg.Columns) == 1 && agg.Columns[0] == execute.DefaultValueColLabel + case universe.FirstKind: + agg := pn.ProcedureSpec().(*universe.FirstProcedureSpec) + return caps.HaveFirst() && agg.Column == execute.DefaultValueColLabel + case universe.LastKind: + agg := pn.ProcedureSpec().(*universe.LastProcedureSpec) + return caps.HaveLast() && agg.Column == execute.DefaultValueColLabel + case universe.MaxKind: + agg := pn.ProcedureSpec().(*universe.MaxProcedureSpec) + return caps.HaveMax() && agg.Column == execute.DefaultValueColLabel + case universe.MinKind: + agg := pn.ProcedureSpec().(*universe.MinProcedureSpec) + return caps.HaveMin() && agg.Column == execute.DefaultValueColLabel + } + return false +} + +type SwitchFillImplRule struct{} + +func (SwitchFillImplRule) Name() string { + return "SwitchFillImplRule" +} + +func (SwitchFillImplRule) Pattern() plan.Pattern { + return plan.Pat(universe.FillKind, plan.Any()) +} + +func (r SwitchFillImplRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { + if !feature.MemoryOptimizedFill().Enabled(ctx) { + spec := pn.ProcedureSpec().Copy() + universe.UseDeprecatedImpl(spec) + if err := pn.ReplaceSpec(spec); err != nil { + return nil, false, err + } + } + return pn, false, nil +} + +type SwitchSchemaMutationImplRule struct{} + +func (SwitchSchemaMutationImplRule) Name() string { + return "SwitchSchemaMutationImplRule" +} + +func (SwitchSchemaMutationImplRule) Pattern() plan.Pattern { + return plan.Pat(universe.SchemaMutationKind, plan.Any()) +} + +func (r SwitchSchemaMutationImplRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { + spec, ok := pn.ProcedureSpec().(*universe.DualImplProcedureSpec) + if !ok || spec.UseDeprecated { + return pn, false, nil + } + + spec.UseDeprecated = !feature.MemoryOptimizedSchemaMutation().Enabled(ctx) + return 
pn, spec.UseDeprecated, nil +} + +func asSchemaMutationProcedureSpec(spec plan.ProcedureSpec) *universe.SchemaMutationProcedureSpec { + if s, ok := spec.(*universe.DualImplProcedureSpec); ok { + spec = s.ProcedureSpec + } + return spec.(*universe.SchemaMutationProcedureSpec) +} + +type MergeFiltersRule struct{} + +func (MergeFiltersRule) Name() string { + return universe.MergeFiltersRule{}.Name() +} + +func (MergeFiltersRule) Pattern() plan.Pattern { + return universe.MergeFiltersRule{}.Pattern() +} + +func (r MergeFiltersRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { + if feature.MergedFiltersRule().Enabled(ctx) { + return universe.MergeFiltersRule{}.Rewrite(ctx, pn) + } + return pn, false, nil +} diff --git a/query/stdlib/influxdata/influxdb/rules_test.go b/query/stdlib/influxdata/influxdb/rules_test.go index 903c1bc6b0..1398666ed5 100644 --- a/query/stdlib/influxdata/influxdb/rules_test.go +++ b/query/stdlib/influxdata/influxdb/rules_test.go @@ -1,20 +1,75 @@ package influxdb_test import ( + "context" + "math" "testing" "time" "github.com/influxdata/flux" "github.com/influxdata/flux/ast" "github.com/influxdata/flux/execute" + "github.com/influxdata/flux/execute/executetest" "github.com/influxdata/flux/interpreter" + "github.com/influxdata/flux/memory" "github.com/influxdata/flux/plan" "github.com/influxdata/flux/plan/plantest" "github.com/influxdata/flux/semantic" + fluxinfluxdb "github.com/influxdata/flux/stdlib/influxdata/influxdb" "github.com/influxdata/flux/stdlib/universe" + "github.com/influxdata/flux/values" + "github.com/influxdata/influxdb/v2/kit/feature" + "github.com/influxdata/influxdb/v2/mock" + "github.com/influxdata/influxdb/v2/query" "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" + "github.com/influxdata/influxdb/v2/storage/reads/datatypes" ) +// A small mock reader so we can indicate if rule-related capabilities are +// present +type mockReaderCaps struct { + query.StorageReader + Have bool + GroupCapabilities query.GroupCapability +} + +func (caps mockReaderCaps) GetGroupCapability(ctx context.Context) query.GroupCapability { + return caps.GroupCapabilities +} + +func (caps mockReaderCaps) GetWindowAggregateCapability(ctx context.Context) query.WindowAggregateCapability { + return mockWAC{Have: caps.Have} +} + +func (caps mockReaderCaps) ReadWindowAggregate(ctx context.Context, spec query.ReadWindowAggregateSpec, alloc *memory.Allocator) (query.TableIterator, error) { + return nil, nil +} + +type mockGroupCapability struct { + count, sum, first, last, min, max bool +} + +func (c mockGroupCapability) HaveCount() bool { return c.count } +func (c mockGroupCapability) HaveSum() bool { return c.sum } +func (c mockGroupCapability) HaveFirst() bool { return c.first } +func (c mockGroupCapability) HaveLast() bool { return c.last } +func (c mockGroupCapability) HaveMin() bool { return c.min } +func (c mockGroupCapability) HaveMax() bool { return c.max } + +// Mock Window Aggregate Capability +type mockWAC struct { + Have bool +} + +func (m mockWAC) HaveMin() bool { return m.Have } +func (m mockWAC) HaveMax() bool { return m.Have } +func (m mockWAC) HaveMean() bool { return m.Have } +func (m mockWAC) HaveCount() bool { return m.Have } +func (m mockWAC) HaveSum() bool { return m.Have } +func (m mockWAC) HaveFirst() bool { return m.Have } +func (m mockWAC) HaveLast() bool { return m.Have } +func (m mockWAC) HaveOffset() bool { return m.Have } + func fluxTime(t int64) flux.Time { return flux.Time{ Absolute: time.Unix(0, t).UTC(), 
@@ -22,8 +77,8 @@ func fluxTime(t int64) flux.Time { } func TestPushDownRangeRule(t *testing.T) { - fromSpec := influxdb.FromProcedureSpec{ - Bucket: "my-bucket", + fromSpec := influxdb.FromStorageProcedureSpec{ + Bucket: influxdb.NameOrID{Name: "my-bucket"}, } rangeSpec := universe.RangeProcedureSpec{ Bounds: flux.Bounds{ @@ -44,6 +99,7 @@ func TestPushDownRangeRule(t *testing.T) { Name: "simple", // from -> range => ReadRange Rules: []plan.Rule{ + influxdb.FromStorageRule{}, influxdb.PushDownRangeRule{}, }, Before: &plantest.PlanSpec{ @@ -63,6 +119,7 @@ func TestPushDownRangeRule(t *testing.T) { Name: "with successor", // from -> range -> count => ReadRange -> count Rules: []plan.Rule{ + influxdb.FromStorageRule{}, influxdb.PushDownRangeRule{}, }, Before: &plantest.PlanSpec{ @@ -92,6 +149,7 @@ func TestPushDownRangeRule(t *testing.T) { // | ReadRange // from Rules: []plan.Rule{ + influxdb.FromStorageRule{}, influxdb.PushDownRangeRule{}, }, Before: &plantest.PlanSpec{ @@ -137,65 +195,30 @@ func TestPushDownFilterRule(t *testing.T) { Stop: fluxTime(10), } - pushableExpr1 = &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "_measurement", - }, - Right: &semantic.StringLiteral{Value: "cpu"}} - - pushableExpr2 = &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "_field", - }, - Right: &semantic.StringLiteral{Value: "cpu"}} - - unpushableExpr = &semantic.BinaryExpression{ - Operator: ast.LessThanOperator, - Left: &semantic.FloatLiteral{Value: 0.5}, - Right: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "_value"}, - } - - statementFn = interpreter.ResolvedFunction{ - Scope: nil, - Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{ - {Key: &semantic.Identifier{Name: "r"}}, - }, - }, - Body: &semantic.ReturnStatement{ - Argument: &semantic.BooleanLiteral{Value: true}, - }, - }, - }, - } + pushableFn1 = executetest.FunctionExpression(t, `(r) => r._measurement == "cpu"`) + pushableFn2 = executetest.FunctionExpression(t, `(r) => r._field == "cpu"`) + pushableFn1and2 = executetest.FunctionExpression(t, `(r) => r._measurement == "cpu" and r._field == "cpu"`) + unpushableFn = executetest.FunctionExpression(t, `(r) => 0.5 < r._value`) + pushableAndUnpushableFn = executetest.FunctionExpression(t, `(r) => r._measurement == "cpu" and 0.5 < r._value`) ) - makeFilterFn := func(exprs ...semantic.Expression) *semantic.FunctionExpression { - body := semantic.ExprsToConjunction(exprs...) 
- return &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{ - {Key: &semantic.Identifier{Name: "r"}}, - }, - }, - Body: body, - }, + makeResolvedFilterFn := func(expr *semantic.FunctionExpression) interpreter.ResolvedFunction { + return interpreter.ResolvedFunction{ + Fn: expr, } } - makeResolvedFilterFn := func(exprs ...semantic.Expression) interpreter.ResolvedFunction { - return interpreter.ResolvedFunction{ - Scope: nil, - Fn: makeFilterFn(exprs...), + + toStoragePredicate := func(fn *semantic.FunctionExpression) *datatypes.Predicate { + body, ok := fn.GetFunctionBodyExpression() + if !ok { + panic("more than one statement in function body") } + + predicate, err := influxdb.ToStoragePredicate(body, "r") + if err != nil { + panic(err) + } + return predicate } tests := []plantest.RuleTestCase{ @@ -209,7 +232,7 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableExpr1), + Fn: makeResolvedFilterFn(pushableFn1), }), }, Edges: [][2]int{ @@ -219,9 +242,8 @@ func TestPushDownFilterRule(t *testing.T) { After: &plantest.PlanSpec{ Nodes: []plan.Node{ plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - FilterSet: true, - Filter: makeFilterFn(pushableExpr1), + Bounds: bounds, + Filter: toStoragePredicate(pushableFn1), }), }, }, @@ -236,10 +258,10 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter1", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableExpr1), + Fn: makeResolvedFilterFn(pushableFn1), }), plan.CreatePhysicalNode("filter2", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableExpr2), + Fn: makeResolvedFilterFn(pushableFn2), }), }, Edges: [][2]int{ @@ -250,9 +272,8 @@ func TestPushDownFilterRule(t *testing.T) { After: &plantest.PlanSpec{ Nodes: []plan.Node{ plan.CreatePhysicalNode("merged_ReadRange_filter1_filter2", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - FilterSet: true, - Filter: makeFilterFn(pushableExpr1, pushableExpr2), + Bounds: bounds, + Filter: toStoragePredicate(pushableFn1and2), }), }, }, @@ -267,7 +288,7 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableExpr1, unpushableExpr), + Fn: makeResolvedFilterFn(pushableAndUnpushableFn), }), }, Edges: [][2]int{ @@ -277,12 +298,11 @@ func TestPushDownFilterRule(t *testing.T) { After: &plantest.PlanSpec{ Nodes: []plan.Node{ plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - FilterSet: true, - Filter: makeFilterFn(pushableExpr1), + Bounds: bounds, + Filter: toStoragePredicate(pushableFn1), }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(unpushableExpr), + Fn: makeResolvedFilterFn(unpushableFn), }), }, Edges: [][2]int{ @@ -294,17 +314,18 @@ func TestPushDownFilterRule(t *testing.T) { Name: "from range filter", // from -> range -> filter => ReadRange Rules: []plan.Rule{ + influxdb.FromStorageRule{}, influxdb.PushDownRangeRule{}, influxdb.PushDownFilterRule{}, }, Before: &plantest.PlanSpec{ Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &influxdb.FromProcedureSpec{}), + plan.CreateLogicalNode("from", &influxdb.FromStorageProcedureSpec{}), plan.CreatePhysicalNode("range", &universe.RangeProcedureSpec{ 
Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableExpr1)}, + Fn: makeResolvedFilterFn(pushableFn1)}, ), }, Edges: [][2]int{ @@ -315,9 +336,8 @@ func TestPushDownFilterRule(t *testing.T) { After: &plantest.PlanSpec{ Nodes: []plan.Node{ plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - FilterSet: true, - Filter: makeFilterFn(pushableExpr1), + Bounds: bounds, + Filter: toStoragePredicate(pushableFn1), }), }, }, @@ -332,26 +352,7 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(unpushableExpr), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - NoChange: true, - }, - { - Name: "statement filter", - // ReadRange -> filter(with statement function) => ReadRange -> filter(with statement function) (no change) - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: statementFn, + Fn: makeResolvedFilterFn(unpushableFn), }), }, Edges: [][2]int{ @@ -369,13 +370,7 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(&semantic.UnaryExpression{ - Operator: ast.ExistsOperator, - Argument: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - }), + Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => exists r.host`)), }), }, Edges: [][2]int{ @@ -385,18 +380,8 @@ func TestPushDownFilterRule(t *testing.T) { After: &plantest.PlanSpec{ Nodes: []plan.Node{ plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - FilterSet: true, - Filter: makeFilterFn(&semantic.BinaryExpression{ - Operator: ast.NotEqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - Right: &semantic.StringLiteral{ - Value: "", - }, - }), + Bounds: bounds, + Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r.host != ""`)), }), }, }, @@ -410,16 +395,7 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(&semantic.UnaryExpression{ - Operator: ast.NotOperator, - Argument: &semantic.UnaryExpression{ - Operator: ast.ExistsOperator, - Argument: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - }, - }), + Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => not exists r.host`)), }), }, Edges: [][2]int{ @@ -429,18 +405,8 @@ func TestPushDownFilterRule(t *testing.T) { After: &plantest.PlanSpec{ Nodes: []plan.Node{ plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - FilterSet: true, - Filter: makeFilterFn(&semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - Right: &semantic.StringLiteral{ - Value: "", - }, - }), + Bounds: bounds, + Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r.host == ""`)), }), }, }, @@ -454,14 +420,7 @@ func 
TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(&semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - Right: &semantic.StringLiteral{Value: ""}, - }), + Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => r.host == ""`)), }), }, Edges: [][2]int{ @@ -479,14 +438,7 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(&semantic.BinaryExpression{ - Operator: ast.NotEqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - Right: &semantic.StringLiteral{Value: ""}, - }), + Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => r.host != ""`)), }), }, Edges: [][2]int{ @@ -496,18 +448,8 @@ func TestPushDownFilterRule(t *testing.T) { After: &plantest.PlanSpec{ Nodes: []plan.Node{ plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - FilterSet: true, - Filter: makeFilterFn(&semantic.BinaryExpression{ - Operator: ast.NotEqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - Right: &semantic.StringLiteral{ - Value: "", - }, - }), + Bounds: bounds, + Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r.host != ""`)), }), }, }, @@ -521,14 +463,7 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(&semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "_value", - }, - Right: &semantic.StringLiteral{Value: ""}, - }), + Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => r._value == ""`)), }), }, Edges: [][2]int{ @@ -538,16 +473,8 @@ func TestPushDownFilterRule(t *testing.T) { After: &plantest.PlanSpec{ Nodes: []plan.Node{ plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - FilterSet: true, - Filter: makeFilterFn(&semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "_value", - }, - Right: &semantic.StringLiteral{Value: ""}, - }), + Bounds: bounds, + Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r._value == ""`)), }), }, }, @@ -562,17 +489,7 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(&semantic.UnaryExpression{ - Operator: ast.NotOperator, - Argument: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - Right: &semantic.StringLiteral{Value: "server01"}, - }, - }), + Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => not r.host == "server01"`)), }), }, Edges: [][2]int{ @@ -590,26 +507,7 @@ func TestPushDownFilterRule(t *testing.T) { Bounds: bounds, }), plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(&semantic.LogicalExpression{ - Operator: ast.AndOperator, - 
Left: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - Right: &semantic.StringLiteral{ - Value: "cpu", - }, - }, - Right: &semantic.UnaryExpression{ - Operator: ast.ExistsOperator, - Argument: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - }, - }), + Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => r.host == "cpu" and exists r.host`)), }), }, Edges: [][2]int{ @@ -619,31 +517,8 @@ func TestPushDownFilterRule(t *testing.T) { After: &plantest.PlanSpec{ Nodes: []plan.Node{ plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - FilterSet: true, - Filter: makeFilterFn(&semantic.LogicalExpression{ - Operator: ast.AndOperator, - Left: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - Right: &semantic.StringLiteral{ - Value: "cpu", - }, - }, - Right: &semantic.BinaryExpression{ - Operator: ast.NotEqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "host", - }, - Right: &semantic.StringLiteral{ - Value: "", - }, - }, - }), + Bounds: bounds, + Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r.host == "cpu" and r.host != ""`)), }), }, }, @@ -653,7 +528,6 @@ func TestPushDownFilterRule(t *testing.T) { for _, tc := range tests { tc := tc t.Run(tc.Name, func(t *testing.T) { - t.Parallel() plantest.PhysicalRuleTestHelper(t, &tc) }) } @@ -854,8 +728,8 @@ func TestPushDownGroupRule(t *testing.T) { } func TestReadTagKeysRule(t *testing.T) { - fromSpec := influxdb.FromProcedureSpec{ - Bucket: "my-bucket", + fromSpec := influxdb.FromStorageProcedureSpec{ + Bucket: influxdb.NameOrID{Name: "my-bucket"}, } rangeSpec := universe.RangeProcedureSpec{ Bounds: flux.Bounds{ @@ -867,24 +741,28 @@ func TestReadTagKeysRule(t *testing.T) { Fn: interpreter.ResolvedFunction{ Scope: nil, Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{{ - Key: &semantic.Identifier{ - Name: "r", - }, - }}, - }, - Body: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: "r", - }, - Property: "_measurement", + Parameters: &semantic.FunctionParameters{ + List: []*semantic.FunctionParameter{{ + Key: &semantic.Identifier{ + Name: "r", }, - Right: &semantic.StringLiteral{ - Value: "cpu", + }}, + }, + Block: &semantic.Block{ + Body: []semantic.Statement{ + &semantic.ReturnStatement{ + Argument: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{ + Value: "cpu", + }, + }, }, }, }, @@ -917,8 +795,8 @@ func TestReadTagKeysRule(t *testing.T) { }, } if filter { - s.FilterSet = true - s.Filter = filterSpec.Fn.Fn + bodyExpr, _ := filterSpec.Fn.Fn.GetFunctionBodyExpression() + s.Filter, _ = influxdb.ToStoragePredicate(bodyExpr, "r") } return &s } @@ -1069,8 +947,8 @@ func TestReadTagKeysRule(t *testing.T) { } func TestReadTagValuesRule(t *testing.T) { - fromSpec := influxdb.FromProcedureSpec{ - Bucket: "my-bucket", + fromSpec := 
influxdb.FromStorageProcedureSpec{ + Bucket: influxdb.NameOrID{Name: "my-bucket"}, } rangeSpec := universe.RangeProcedureSpec{ Bounds: flux.Bounds{ @@ -1082,24 +960,28 @@ func TestReadTagValuesRule(t *testing.T) { Fn: interpreter.ResolvedFunction{ Scope: nil, Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{{ - Key: &semantic.Identifier{ - Name: "r", - }, - }}, - }, - Body: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: "r", - }, - Property: "_measurement", + Parameters: &semantic.FunctionParameters{ + List: []*semantic.FunctionParameter{{ + Key: &semantic.Identifier{ + Name: "r", }, - Right: &semantic.StringLiteral{ - Value: "cpu", + }}, + }, + Block: &semantic.Block{ + Body: []semantic.Statement{ + &semantic.ReturnStatement{ + Argument: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{ + Value: "cpu", + }, + }, }, }, }, @@ -1134,8 +1016,8 @@ func TestReadTagValuesRule(t *testing.T) { TagKey: "host", } if filter { - s.FilterSet = true - s.Filter = filterSpec.Fn.Fn + bodyExpr, _ := filterSpec.Fn.Fn.GetFunctionBodyExpression() + s.Filter, _ = influxdb.ToStoragePredicate(bodyExpr, "r") } return &s } @@ -1284,3 +1166,1999 @@ func TestReadTagValuesRule(t *testing.T) { }) } } + +func minProcedureSpec() *universe.MinProcedureSpec { + return &universe.MinProcedureSpec{ + SelectorConfig: execute.SelectorConfig{Column: execute.DefaultValueColLabel}, + } +} +func maxProcedureSpec() *universe.MaxProcedureSpec { + return &universe.MaxProcedureSpec{ + SelectorConfig: execute.SelectorConfig{Column: execute.DefaultValueColLabel}, + } +} +func countProcedureSpec() *universe.CountProcedureSpec { + return &universe.CountProcedureSpec{ + AggregateConfig: execute.AggregateConfig{Columns: []string{execute.DefaultValueColLabel}}, + } +} +func sumProcedureSpec() *universe.SumProcedureSpec { + return &universe.SumProcedureSpec{ + AggregateConfig: execute.AggregateConfig{Columns: []string{execute.DefaultValueColLabel}}, + } +} +func firstProcedureSpec() *universe.FirstProcedureSpec { + return &universe.FirstProcedureSpec{ + SelectorConfig: execute.SelectorConfig{Column: execute.DefaultValueColLabel}, + } +} +func lastProcedureSpec() *universe.LastProcedureSpec { + return &universe.LastProcedureSpec{ + SelectorConfig: execute.SelectorConfig{Column: execute.DefaultValueColLabel}, + } +} +func meanProcedureSpec() *universe.MeanProcedureSpec { + return &universe.MeanProcedureSpec{ + AggregateConfig: execute.AggregateConfig{Columns: []string{execute.DefaultValueColLabel}}, + } +} + +// +// Window Aggregate Testing +// +func TestPushDownWindowAggregateRule(t *testing.T) { + // Turn on all variants. + flagger := mock.NewFlagger(map[feature.Flag]interface{}{ + feature.PushDownWindowAggregateMean(): true, + }) + + withFlagger, _ := feature.Annotate(context.Background(), flagger) + + // Construct dependencies either with or without aggregate window caps. 
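+	// mockReaderCaps (defined above) answers every window aggregate capability
+	// query according to `have`, so the two contexts below exercise both sides
+	// of the capability check.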
+ deps := func(have bool) influxdb.StorageDependencies { + return influxdb.StorageDependencies{ + FromDeps: influxdb.FromDependencies{ + Reader: mockReaderCaps{Have: have}, + Metrics: influxdb.NewMetrics(nil), + }, + } + } + + haveCaps := deps(true).Inject(withFlagger) + noCaps := deps(false).Inject(withFlagger) + + readRange := influxdb.ReadRangePhysSpec{ + Bucket: "my-bucket", + Bounds: flux.Bounds{ + Start: fluxTime(5), + Stop: fluxTime(10), + }, + } + + dur1m := values.ConvertDuration(60 * time.Second) + dur2m := values.ConvertDuration(120 * time.Second) + dur0 := values.ConvertDuration(0) + durNeg, _ := values.ParseDuration("-60s") + dur1mo, _ := values.ParseDuration("1mo") + dur1y, _ := values.ParseDuration("1y") + durInf := values.ConvertDuration(math.MaxInt64) + + window := func(dur values.Duration) universe.WindowProcedureSpec { + return universe.WindowProcedureSpec{ + Window: plan.WindowSpec{ + Every: dur, + Period: dur, + Offset: dur0, + }, + TimeColumn: "_time", + StartColumn: "_start", + StopColumn: "_stop", + CreateEmpty: false, + } + } + + window1m := window(dur1m) + window2m := window(dur2m) + windowNeg := window(durNeg) + window1y := window(dur1y) + windowInf := window(durInf) + windowInfCreateEmpty := windowInf + windowInfCreateEmpty.CreateEmpty = true + + tests := make([]plantest.RuleTestCase, 0) + + // construct a simple plan with a specific window and aggregate function + simplePlanWithWindowAgg := func(window universe.WindowProcedureSpec, agg plan.NodeID, spec plan.ProcedureSpec) *plantest.PlanSpec { + return &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreateLogicalNode("window", &window), + plan.CreateLogicalNode(agg, spec), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + }, + } + } + + // construct a simple result + simpleResult := func(proc plan.ProcedureKind, createEmpty bool, successors ...plan.Node) *plantest.PlanSpec { + spec := &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ + ReadRangePhysSpec: readRange, + Aggregates: []plan.ProcedureKind{proc}, + WindowEvery: 60000000000, + CreateEmpty: createEmpty, + }), + }, + } + for i, successor := range successors { + spec.Nodes = append(spec.Nodes, successor) + spec.Edges = append(spec.Edges, [2]int{i, i + 1}) + } + return spec + } + + // ReadRange -> window -> min => ReadWindowAggregate + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "SimplePassMin", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, universe.MinKind, minProcedureSpec()), + After: simpleResult(universe.MinKind, false), + }) + + // ReadRange -> window -> max => ReadWindowAggregate + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "SimplePassMax", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, universe.MaxKind, maxProcedureSpec()), + After: simpleResult(universe.MaxKind, false), + }) + + // ReadRange -> window -> mean => ReadWindowAggregate + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "SimplePassMean", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, universe.MeanKind, meanProcedureSpec()), + After: simpleResult(universe.MeanKind, false), + }) + + // ReadRange -> window -> count => ReadWindowAggregate + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, 
+ Name: "SimplePassCount", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, universe.CountKind, countProcedureSpec()), + After: simpleResult(universe.CountKind, false), + }) + + // ReadRange -> window -> sum => ReadWindowAggregate + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "SimplePassSum", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, universe.SumKind, sumProcedureSpec()), + After: simpleResult(universe.SumKind, false), + }) + + // ReadRange -> window -> first => ReadWindowAggregate + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "SimplePassFirst", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, universe.FirstKind, firstProcedureSpec()), + After: simpleResult(universe.FirstKind, false), + }) + + // ReadRange -> window -> last => ReadWindowAggregate + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "SimplePassLast", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, universe.LastKind, lastProcedureSpec()), + After: simpleResult(universe.LastKind, false), + }) + + // Rewrite with successors + // ReadRange -> window -> min -> count {2} => ReadWindowAggregate -> count {2} + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "WithSuccessor", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreateLogicalNode("window", &window1m), + plan.CreateLogicalNode("min", minProcedureSpec()), + plan.CreateLogicalNode("count", countProcedureSpec()), + plan.CreateLogicalNode("count", countProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {2, 3}, + {2, 4}, + }, + }, + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ + ReadRangePhysSpec: readRange, + Aggregates: []plan.ProcedureKind{"min"}, + WindowEvery: 60000000000, + }), + plan.CreateLogicalNode("count", countProcedureSpec()), + plan.CreateLogicalNode("count", countProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {0, 2}, + }, + }, + }) + + // ReadRange -> window(offset: ...) -> last => ReadWindowAggregate + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "WindowPositiveOffset", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(universe.WindowProcedureSpec{ + Window: plan.WindowSpec{ + Every: dur2m, + Period: dur2m, + Offset: dur1m, + }, + TimeColumn: "_time", + StartColumn: "_start", + StopColumn: "_stop", + }, universe.LastKind, lastProcedureSpec()), + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ + ReadRangePhysSpec: readRange, + Aggregates: []plan.ProcedureKind{universe.LastKind}, + WindowEvery: 120000000000, + Offset: 60000000000, + }), + }, + }, + }) + + // Helper that adds a test with a simple plan that does not pass due to a + // specified bad window + simpleMinUnchanged := func(name string, window universe.WindowProcedureSpec) { + // Note: NoChange is not working correctly for these tests. It is + // expecting empty time, start, and stop column fields. 
+ tests = append(tests, plantest.RuleTestCase{ + Name: name, + Context: haveCaps, + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window, "min", countProcedureSpec()), + NoChange: true, + }) + } + + // Condition not met: period not equal to every + badWindow1 := window1m + badWindow1.Window.Period = dur2m + simpleMinUnchanged("BadPeriod", badWindow1) + + // Condition not met: negative offset + badWindow2 := window1m + badWindow2.Window.Offset = durNeg + simpleMinUnchanged("NegOffset", badWindow2) + + // Condition not met: non-standard _time column + badWindow3 := window1m + badWindow3.TimeColumn = "_timmy" + simpleMinUnchanged("BadTime", badWindow3) + + // Condition not met: non-standard start column + badWindow4 := window1m + badWindow4.StartColumn = "_stooort" + simpleMinUnchanged("BadStart", badWindow4) + + // Condition not met: non-standard stop column + badWindow5 := window1m + badWindow5.StopColumn = "_stappp" + simpleMinUnchanged("BadStop", badWindow5) + + // Condition not met: monthly offset + badWindow6 := window1m + badWindow6.Window.Offset = dur1mo + simpleMinUnchanged("MonthOffset", badWindow6) + + // Condition met: createEmpty is true. + windowCreateEmpty1m := window1m + windowCreateEmpty1m.CreateEmpty = true + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "CreateEmptyPassMin", + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(windowCreateEmpty1m, "min", minProcedureSpec()), + After: simpleResult("min", true), + }) + + // Condition not met: duration too long. + simpleMinUnchanged("WindowTooLarge", window1y) + + // Condition not met: neg duration. + simpleMinUnchanged("WindowNeg", windowNeg) + + // Bad min column + // ReadRange -> window -> min => NO-CHANGE + tests = append(tests, plantest.RuleTestCase{ + Name: "BadMinCol", + Context: haveCaps, + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, "min", &universe.MinProcedureSpec{ + SelectorConfig: execute.SelectorConfig{Column: "_valmoo"}, + }), + NoChange: true, + }) + + // Bad max column + // ReadRange -> window -> max => NO-CHANGE + tests = append(tests, plantest.RuleTestCase{ + Name: "BadMaxCol", + Context: haveCaps, + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, "max", &universe.MaxProcedureSpec{ + SelectorConfig: execute.SelectorConfig{Column: "_valmoo"}, + }), + NoChange: true, + }) + + // Bad mean columns + // ReadRange -> window -> mean => NO-CHANGE + tests = append(tests, plantest.RuleTestCase{ + Name: "BadMeanCol1", + Context: haveCaps, + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, "mean", &universe.MeanProcedureSpec{ + AggregateConfig: execute.AggregateConfig{Columns: []string{"_valmoo"}}, + }), + NoChange: true, + }) + tests = append(tests, plantest.RuleTestCase{ + Name: "BadMeanCol2", + Context: haveCaps, + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: simplePlanWithWindowAgg(window1m, "mean", &universe.MeanProcedureSpec{ + AggregateConfig: execute.AggregateConfig{Columns: []string{"_value", "_valmoo"}}, + }), + NoChange: true, + }) + + // No match due to a collapsed node having a successor + // ReadRange -> window -> min + // \-> min + tests = append(tests, plantest.RuleTestCase{ + Name: "CollapsedWithSuccessor1", + Context: haveCaps, + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + 
Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreateLogicalNode("window", &window1m), + plan.CreateLogicalNode("min", minProcedureSpec()), + plan.CreateLogicalNode("min", minProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {1, 3}, + }, + }, + NoChange: true, + }) + + // No match due to a collapsed node having a successor + // ReadRange -> window -> min + // \-> window + tests = append(tests, plantest.RuleTestCase{ + Name: "CollapsedWithSuccessor2", + Context: haveCaps, + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreateLogicalNode("window", &window1m), + plan.CreateLogicalNode("min", minProcedureSpec()), + plan.CreateLogicalNode("window", &window2m), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {0, 3}, + }, + }, + NoChange: true, + }) + + // No pattern match + // ReadRange -> filter -> window -> min -> NO-CHANGE + pushableFn1 := executetest.FunctionExpression(t, `(r) => true`) + + makeResolvedFilterFn := func(expr *semantic.FunctionExpression) interpreter.ResolvedFunction { + return interpreter.ResolvedFunction{ + Scope: nil, + Fn: expr, + } + } + noPatternMatch1 := func() *plantest.PlanSpec { + return &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ + Fn: makeResolvedFilterFn(pushableFn1), + }), + plan.CreateLogicalNode("window", &window1m), + plan.CreateLogicalNode("min", minProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {2, 3}, + }, + } + } + tests = append(tests, plantest.RuleTestCase{ + Name: "NoPatternMatch1", + Context: haveCaps, + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: noPatternMatch1(), + NoChange: true, + }) + + // No pattern match 2 + // ReadRange -> window -> filter -> min -> NO-CHANGE + noPatternMatch2 := func() *plantest.PlanSpec { + return &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreateLogicalNode("window", &window1m), + plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ + Fn: makeResolvedFilterFn(pushableFn1), + }), + plan.CreateLogicalNode("min", minProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {2, 3}, + }, + } + } + tests = append(tests, plantest.RuleTestCase{ + Name: "NoPatternMatch2", + Context: haveCaps, + Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, + Before: noPatternMatch2(), + NoChange: true, + }) + + // Fail due to no capabilities present. 
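+	// With noCaps, mockWAC reports every capability as absent, so
+	// canPushWindowedAggregate rejects the plan before any rewrite happens.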
+	tests = append(tests, plantest.RuleTestCase{
+		Context: noCaps,
+		Name: "FailNoCaps",
+		Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}},
+		Before: simplePlanWithWindowAgg(window1m, "count", countProcedureSpec()),
+		After: simpleResult("count", false),
+		NoChange: true,
+	})
+
+	duplicate := func(column, as string) *universe.SchemaMutationProcedureSpec {
+		return &universe.SchemaMutationProcedureSpec{
+			Mutations: []universe.SchemaMutation{
+				&universe.DuplicateOpSpec{
+					Column: column,
+					As: as,
+				},
+			},
+		}
+	}
+
+	aggregateWindowPlan := func(window universe.WindowProcedureSpec, agg plan.NodeID, spec plan.ProcedureSpec, timeColumn string) *plantest.PlanSpec {
+		return &plantest.PlanSpec{
+			Nodes: []plan.Node{
+				plan.CreateLogicalNode("ReadRange", &readRange),
+				plan.CreateLogicalNode("window1", &window),
+				plan.CreateLogicalNode(agg, spec),
+				plan.CreateLogicalNode("duplicate", duplicate(timeColumn, "_time")),
+				plan.CreateLogicalNode("window2", &windowInf),
+			},
+			Edges: [][2]int{
+				{0, 1},
+				{1, 2},
+				{2, 3},
+				{3, 4},
+			},
+		}
+	}
+
+	aggregateWindowResult := func(proc plan.ProcedureKind, createEmpty bool, timeColumn string, successors ...plan.Node) *plantest.PlanSpec {
+		spec := &plantest.PlanSpec{
+			Nodes: []plan.Node{
+				plan.CreatePhysicalNode("ReadWindowAggregateByTime", &influxdb.ReadWindowAggregatePhysSpec{
+					ReadRangePhysSpec: readRange,
+					Aggregates: []plan.ProcedureKind{proc},
+					WindowEvery: 60000000000,
+					CreateEmpty: createEmpty,
+					TimeColumn: timeColumn,
+				}),
+			},
+		}
+		for i, successor := range successors {
+			spec.Nodes = append(spec.Nodes, successor)
+			spec.Edges = append(spec.Edges, [2]int{i, i + 1})
+		}
+		return spec
+	}
+
+	// Push down the duplicate |> window(every: inf)
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "AggregateWindowCount",
+		Rules: []plan.Rule{
+			influxdb.PushDownWindowAggregateRule{},
+			influxdb.PushDownWindowAggregateByTimeRule{},
+		},
+		Before: aggregateWindowPlan(window1m, "count", countProcedureSpec(), "_stop"),
+		After: aggregateWindowResult("count", false, "_stop"),
+	})
+
+	// Push down the duplicate |> window(every: inf) using _start column
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "AggregateWindowCountByStart",
+		Rules: []plan.Rule{
+			influxdb.PushDownWindowAggregateRule{},
+			influxdb.PushDownWindowAggregateByTimeRule{},
+		},
+		Before: aggregateWindowPlan(window1m, "count", countProcedureSpec(), "_start"),
+		After: aggregateWindowResult("count", false, "_start"),
+	})
+
+	// Push down duplicate |> window(every: inf) with create empty.
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "AggregateWindowCountCreateEmpty",
+		Rules: []plan.Rule{
+			influxdb.PushDownWindowAggregateRule{},
+			influxdb.PushDownWindowAggregateByTimeRule{},
+		},
+		Before: aggregateWindowPlan(windowCreateEmpty1m, "count", countProcedureSpec(), "_stop"),
+		After: aggregateWindowResult("count", true, "_stop"),
+	})
+
+	// Invalid duplicate column.
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "AggregateWindowCountInvalidDuplicateColumn",
+		Rules: []plan.Rule{
+			influxdb.PushDownWindowAggregateRule{},
+			influxdb.PushDownWindowAggregateByTimeRule{},
+		},
+		Before: aggregateWindowPlan(window1m, "count", countProcedureSpec(), "_value"),
+		After: simpleResult("count", false,
+			plan.CreatePhysicalNode("duplicate", duplicate("_value", "_time")),
+			plan.CreatePhysicalNode("window2", &windowInf),
+		),
+	})
+
+	// Invalid duplicate as.
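+	// duplicate(column: "_stop", as: "time") misses the leading underscore on
+	// "time", so only the bare aggregate is pushed down and the duplicate and
+	// window nodes are left in place.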
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "AggregateWindowCountInvalidDuplicateAs",
+		Rules: []plan.Rule{
+			influxdb.PushDownWindowAggregateRule{},
+			influxdb.PushDownWindowAggregateByTimeRule{},
+		},
+		Before: &plantest.PlanSpec{
+			Nodes: []plan.Node{
+				plan.CreateLogicalNode("ReadRange", &readRange),
+				plan.CreateLogicalNode("window1", &window1m),
+				plan.CreateLogicalNode("count", countProcedureSpec()),
+				plan.CreateLogicalNode("duplicate", duplicate("_stop", "time")),
+				plan.CreateLogicalNode("window2", &windowInf),
+			},
+			Edges: [][2]int{
+				{0, 1},
+				{1, 2},
+				{2, 3},
+				{3, 4},
+			},
+		},
+		After: simpleResult("count", false,
+			plan.CreatePhysicalNode("duplicate", duplicate("_stop", "time")),
+			plan.CreatePhysicalNode("window2", &windowInf),
+		),
+	})
+
+	// Invalid closing window.
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "AggregateWindowCountInvalidClosingWindow",
+		Rules: []plan.Rule{
+			influxdb.PushDownWindowAggregateRule{},
+			influxdb.PushDownWindowAggregateByTimeRule{},
+		},
+		Before: &plantest.PlanSpec{
+			Nodes: []plan.Node{
+				plan.CreateLogicalNode("ReadRange", &readRange),
+				plan.CreateLogicalNode("window1", &window1m),
+				plan.CreateLogicalNode("count", countProcedureSpec()),
+				plan.CreateLogicalNode("duplicate", duplicate("_stop", "_time")),
+				plan.CreateLogicalNode("window2", &window1m),
+			},
+			Edges: [][2]int{
+				{0, 1},
+				{1, 2},
+				{2, 3},
+				{3, 4},
+			},
+		},
+		After: simpleResult("count", false,
+			plan.CreatePhysicalNode("duplicate", duplicate("_stop", "_time")),
+			plan.CreatePhysicalNode("window2", &window1m),
+		),
+	})
+
+	// Invalid closing window with multiple problems.
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "AggregateWindowCountInvalidClosingWindowMultiple",
+		Rules: []plan.Rule{
+			influxdb.PushDownWindowAggregateRule{},
+			influxdb.PushDownWindowAggregateByTimeRule{},
+		},
+		Before: &plantest.PlanSpec{
+			Nodes: []plan.Node{
+				plan.CreateLogicalNode("ReadRange", &readRange),
+				plan.CreateLogicalNode("window1", &window1m),
+				plan.CreateLogicalNode("count", countProcedureSpec()),
+				plan.CreateLogicalNode("duplicate", duplicate("_stop", "_time")),
+				plan.CreateLogicalNode("window2", &badWindow3),
+			},
+			Edges: [][2]int{
+				{0, 1},
+				{1, 2},
+				{2, 3},
+				{3, 4},
+			},
+		},
+		After: simpleResult("count", false,
+			plan.CreatePhysicalNode("duplicate", duplicate("_stop", "_time")),
+			plan.CreatePhysicalNode("window2", &badWindow3),
+		),
+	})
+
+	// Invalid closing window with createEmpty set.
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "AggregateWindowCountInvalidClosingWindowCreateEmpty",
+		Rules: []plan.Rule{
+			influxdb.PushDownWindowAggregateRule{},
+			influxdb.PushDownWindowAggregateByTimeRule{},
+		},
+		Before: &plantest.PlanSpec{
+			Nodes: []plan.Node{
+				plan.CreateLogicalNode("ReadRange", &readRange),
+				plan.CreateLogicalNode("window1", &window1m),
+				plan.CreateLogicalNode("count", countProcedureSpec()),
+				plan.CreateLogicalNode("duplicate", duplicate("_stop", "_time")),
+				plan.CreateLogicalNode("window2", &windowInfCreateEmpty),
+			},
+			Edges: [][2]int{
+				{0, 1},
+				{1, 2},
+				{2, 3},
+				{3, 4},
+			},
+		},
+		After: simpleResult("count", false,
+			plan.CreatePhysicalNode("duplicate", duplicate("_stop", "_time")),
+			plan.CreatePhysicalNode("window2", &windowInfCreateEmpty),
+		),
+	})
+
+	// Multiple matching patterns.
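+	// Only the first duplicate |> window(every: inf) pair is absorbed: once
+	// TimeColumn is set on the spec, the ByTime rule refuses a second rewrite,
+	// so duplicate2 and window3 survive.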
+ tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "AggregateWindowCountMultipleMatches", + Rules: []plan.Rule{ + influxdb.PushDownWindowAggregateRule{}, + influxdb.PushDownWindowAggregateByTimeRule{}, + }, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreateLogicalNode("window1", &window1m), + plan.CreateLogicalNode("count", countProcedureSpec()), + plan.CreateLogicalNode("duplicate", duplicate("_stop", "_time")), + plan.CreateLogicalNode("window2", &windowInf), + plan.CreateLogicalNode("duplicate2", duplicate("_stop", "_time")), + plan.CreateLogicalNode("window3", &windowInf), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {2, 3}, + {3, 4}, + {4, 5}, + {5, 6}, + }, + }, + After: aggregateWindowResult("count", false, "_stop", + plan.CreatePhysicalNode("duplicate2", duplicate("_stop", "_time")), + plan.CreatePhysicalNode("window3", &windowInf), + ), + }) + + rename := universe.SchemaMutationProcedureSpec{ + Mutations: []universe.SchemaMutation{ + &universe.RenameOpSpec{ + Columns: map[string]string{"_time": "time"}, + }, + }, + } + + // Wrong schema mutator. + tests = append(tests, plantest.RuleTestCase{ + Context: haveCaps, + Name: "AggregateWindowCountWrongSchemaMutator", + Rules: []plan.Rule{ + influxdb.PushDownWindowAggregateRule{}, + influxdb.PushDownWindowAggregateByTimeRule{}, + }, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreateLogicalNode("window1", &window1m), + plan.CreateLogicalNode("count", countProcedureSpec()), + plan.CreateLogicalNode("rename", &rename), + plan.CreateLogicalNode("window2", &windowInf), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {2, 3}, + {3, 4}, + }, + }, + After: simpleResult("count", false, + plan.CreatePhysicalNode("rename", &rename), + plan.CreatePhysicalNode("window2", &windowInf), + ), + }) + + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + plantest.PhysicalRuleTestHelper(t, &tc) + }) + } +} + +func TestTransposeGroupToWindowAggregateRule(t *testing.T) { + // Turn on all variants. + flagger := mock.NewFlagger(map[feature.Flag]interface{}{ + feature.GroupWindowAggregateTranspose(): true, + feature.PushDownWindowAggregateMean(): true, + }) + + rules := []plan.Rule{ + influxdb.PushDownGroupRule{}, + influxdb.PushDownWindowAggregateRule{}, + influxdb.PushDownWindowAggregateByTimeRule{}, + influxdb.GroupWindowAggregateTransposeRule{}, + } + + withFlagger, _ := feature.Annotate(context.Background(), flagger) + + // Construct dependencies either with or without aggregate window caps. + deps := func(have bool) influxdb.StorageDependencies { + return influxdb.StorageDependencies{ + FromDeps: influxdb.FromDependencies{ + Reader: mockReaderCaps{Have: have}, + Metrics: influxdb.NewMetrics(nil), + }, + } + } + + haveCaps := deps(true).Inject(withFlagger) + noCaps := deps(false).Inject(withFlagger) + + readRange := influxdb.ReadRangePhysSpec{ + Bucket: "my-bucket", + Bounds: flux.Bounds{ + Start: fluxTime(5), + Stop: fluxTime(10), + }, + } + + group := func(mode flux.GroupMode, keys ...string) *universe.GroupProcedureSpec { + return &universe.GroupProcedureSpec{ + GroupMode: mode, + GroupKeys: keys, + } + } + + groupResult := func(keys ...string) *universe.GroupProcedureSpec { + keys = append(keys, execute.DefaultStartColLabel, execute.DefaultStopColLabel) + return group(flux.GroupModeBy, keys...) 
+	}
+
+	dur1m := values.ConvertDuration(60 * time.Second)
+	dur2m := values.ConvertDuration(120 * time.Second)
+	dur0 := values.ConvertDuration(0)
+	durNeg, _ := values.ParseDuration("-60s")
+	dur1y, _ := values.ParseDuration("1y")
+	durInf := values.ConvertDuration(math.MaxInt64)
+
+	window := func(dur values.Duration) universe.WindowProcedureSpec {
+		return universe.WindowProcedureSpec{
+			Window: plan.WindowSpec{
+				Every: dur,
+				Period: dur,
+				Offset: dur0,
+			},
+			TimeColumn: "_time",
+			StartColumn: "_start",
+			StopColumn: "_stop",
+			CreateEmpty: false,
+		}
+	}
+
+	window1m := window(dur1m)
+	window1mCreateEmpty := window1m
+	window1mCreateEmpty.CreateEmpty = true
+	window2m := window(dur2m)
+	windowNeg := window(durNeg)
+	window1y := window(dur1y)
+	windowInf := window(durInf)
+	windowInfCreateEmpty := windowInf
+	windowInfCreateEmpty.CreateEmpty = true
+
+	tests := make([]plantest.RuleTestCase, 0)
+
+	// construct a simple plan with a specific window and aggregate function
+	simplePlan := func(window universe.WindowProcedureSpec, agg plan.NodeID, spec plan.ProcedureSpec, successors ...plan.Node) *plantest.PlanSpec {
+		pspec := &plantest.PlanSpec{
+			Nodes: []plan.Node{
+				plan.CreateLogicalNode("ReadRange", &readRange),
+				plan.CreateLogicalNode("group", group(flux.GroupModeBy)),
+				plan.CreateLogicalNode("window", &window),
+				plan.CreateLogicalNode(agg, spec),
+			},
+			Edges: [][2]int{
+				{0, 1},
+				{1, 2},
+				{2, 3},
+			},
+		}
+		for i, successor := range successors {
+			pspec.Nodes = append(pspec.Nodes, successor)
+			pspec.Edges = append(pspec.Edges, [2]int{i + 3, i + 4})
+		}
+		return pspec
+	}
+
+	// construct a simple result
+	simpleResult := func(proc plan.ProcedureKind, every values.Duration, createEmpty bool, successors ...plan.Node) *plantest.PlanSpec {
+		spec := &plantest.PlanSpec{
+			Nodes: []plan.Node{
+				plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{
+					ReadRangePhysSpec: readRange,
+					Aggregates: []plan.ProcedureKind{proc},
+					WindowEvery: every.Nanoseconds(),
+					CreateEmpty: createEmpty,
+				}),
+			},
+		}
+		for i, successor := range successors {
+			spec.Nodes = append(spec.Nodes, successor)
+			spec.Edges = append(spec.Edges, [2]int{i, i + 1})
+		}
+		return spec
+	}
+
+	duplicateSpec := func(column, as string) *universe.SchemaMutationProcedureSpec {
+		return &universe.SchemaMutationProcedureSpec{
+			Mutations: []universe.SchemaMutation{
+				&universe.DuplicateOpSpec{
+					Column: column,
+					As: as,
+				},
+			},
+		}
+	}
+
+	// ReadRange -> group -> window -> min => ReadWindowAggregate -> group -> min
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "SimplePassMin",
+		Rules: rules,
+		Before: simplePlan(window1m, "min", minProcedureSpec()),
+		After: simpleResult("min", dur1m, false,
+			plan.CreatePhysicalNode("group", groupResult()),
+			plan.CreatePhysicalNode("min", minProcedureSpec()),
+		),
+	})
+
+	// ReadRange -> group -> window -> max => ReadWindowAggregate -> group -> max
+	tests = append(tests, plantest.RuleTestCase{
+		Context: haveCaps,
+		Name: "SimplePassMax",
+		Rules: rules,
+		Before: simplePlan(window1m, "max", maxProcedureSpec()),
+		After: simpleResult("max", dur1m, false,
+			plan.CreatePhysicalNode("group", groupResult()),
+			plan.CreatePhysicalNode("max", maxProcedureSpec()),
+		),
+	})
+
+	// ReadRange -> group -> window -> mean => ReadGroup -> mean
+	// TODO(jsternberg): When we begin pushing down mean calls,
+	// this test will need to be updated to the appropriate pattern.
+ // This test is included because a grouped mean cannot be
+ // rewritten to use ReadWindowAggregate with mean. The plan we
+ // would need for that case does not exist yet, so this verifies
+ // that this planner rule is not applied to mean.
+ tests = append(tests, plantest.RuleTestCase{
+ Context: haveCaps,
+ Name: "SimplePassMean",
+ Rules: rules,
+ Before: simplePlan(window1m, "mean", meanProcedureSpec()),
+ After: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{
+ ReadRangePhysSpec: readRange,
+ GroupMode: flux.GroupModeBy,
+ }),
+ plan.CreatePhysicalNode("window", &window1m),
+ plan.CreatePhysicalNode("mean", meanProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ {1, 2},
+ },
+ },
+ })
+
+ // ReadRange -> group -> window -> count => ReadWindowAggregate -> group -> sum
+ tests = append(tests, plantest.RuleTestCase{
+ Context: haveCaps,
+ Name: "SimplePassCount",
+ Rules: rules,
+ Before: simplePlan(window1m, "count", countProcedureSpec()),
+ After: simpleResult("count", dur1m, false,
+ plan.CreatePhysicalNode("group", groupResult()),
+ plan.CreatePhysicalNode("sum", sumProcedureSpec()),
+ ),
+ })
+
+ // ReadRange -> group -> window -> sum => ReadWindowAggregate -> group -> sum
+ tests = append(tests, plantest.RuleTestCase{
+ Context: haveCaps,
+ Name: "SimplePassSum",
+ Rules: rules,
+ Before: simplePlan(window1m, "sum", sumProcedureSpec()),
+ After: simpleResult("sum", dur1m, false,
+ plan.CreatePhysicalNode("group", groupResult()),
+ plan.CreatePhysicalNode("sum", sumProcedureSpec()),
+ ),
+ })
+
+ // Rewrite with aggregate window
+ // ReadRange -> group -> window -> min -> duplicate -> window
+ tests = append(tests, plantest.RuleTestCase{
+ Context: haveCaps,
+ Name: "WithSuccessor",
+ Rules: rules,
+ Before: simplePlan(window1mCreateEmpty, "min", minProcedureSpec(),
+ plan.CreateLogicalNode("duplicate", duplicateSpec("_stop", "_time")),
+ plan.CreateLogicalNode("window", &windowInf),
+ ),
+ After: simpleResult("min", dur1m, true,
+ plan.CreatePhysicalNode("group", groupResult()),
+ plan.CreatePhysicalNode("min", minProcedureSpec()),
+ plan.CreatePhysicalNode("duplicate", duplicateSpec("_stop", "_time")),
+ plan.CreatePhysicalNode("window", &windowInf),
+ ),
+ })
+
+ // ReadRange -> group(host) -> window -> min => ReadWindowAggregate -> group(host, _start, _stop) -> min
+ tests = append(tests, plantest.RuleTestCase{
+ Context: haveCaps,
+ Name: "GroupByHostPassMin",
+ Rules: rules,
+ Before: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreateLogicalNode("ReadRange", &readRange),
+ plan.CreateLogicalNode("group", group(flux.GroupModeBy, "host")),
+ plan.CreateLogicalNode("window", &window1m),
+ plan.CreateLogicalNode("min", minProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ {1, 2},
+ {2, 3},
+ },
+ },
+ After: simpleResult("min", dur1m, false,
+ plan.CreatePhysicalNode("group", groupResult("host")),
+ plan.CreatePhysicalNode("min", minProcedureSpec()),
+ ),
+ })
+
+ // ReadRange -> group(_start, host) -> window -> min => ReadWindowAggregate -> group(_start, host, _stop) -> min
+ tests = append(tests, plantest.RuleTestCase{
+ Context: haveCaps,
+ Name: "GroupByStartPassMin",
+ Rules: rules,
+ Before: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreateLogicalNode("ReadRange", &readRange),
+ plan.CreateLogicalNode("group", group(flux.GroupModeBy, "_start", "host")),
+ plan.CreateLogicalNode("window", &window1m),
+ plan.CreateLogicalNode("min", minProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ {1, 2},
+ {2, 3},
+ },
+ },
+ After: simpleResult("min", dur1m, false,
+ plan.CreatePhysicalNode("group", group(flux.GroupModeBy, "_start", "host", "_stop")),
+ plan.CreatePhysicalNode("min", minProcedureSpec()),
+ ),
+ })
+
+ // ReadRange -> group(host) -> window(offset: ...) -> min => ReadWindowAggregate -> group(host, _start, _stop) -> min
+ tests = append(tests, plantest.RuleTestCase{
+ Context: haveCaps,
+ Name: "PositiveOffset",
+ Rules: rules,
+ Before: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreateLogicalNode("ReadRange", &readRange),
+ plan.CreateLogicalNode("group", group(flux.GroupModeBy, "host")),
+ plan.CreateLogicalNode("window", &universe.WindowProcedureSpec{
+ Window: plan.WindowSpec{
+ Every: dur2m,
+ Period: dur2m,
+ Offset: dur1m,
+ },
+ TimeColumn: "_time",
+ StartColumn: "_start",
+ StopColumn: "_stop",
+ }),
+ plan.CreateLogicalNode("min", minProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ {1, 2},
+ {2, 3},
+ },
+ },
+ After: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{
+ ReadRangePhysSpec: readRange,
+ Aggregates: []plan.ProcedureKind{universe.MinKind},
+ WindowEvery: dur2m.Nanoseconds(),
+ Offset: dur1m.Nanoseconds(),
+ }),
+ plan.CreatePhysicalNode("group", group(flux.GroupModeBy, "host", "_start", "_stop")),
+ plan.CreatePhysicalNode("min", minProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ {1, 2},
+ },
+ },
+ })
+
+ // Helper that adds a test case whose simple plan is not rewritten because
+ // of the specified bad window
+ simpleMinUnchanged := func(name string, window universe.WindowProcedureSpec) {
+ tests = append(tests, plantest.RuleTestCase{
+ Context: haveCaps,
+ Name: name,
+ Rules: rules,
+ Before: simplePlan(window, "min", minProcedureSpec()),
+ After: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{
+ ReadRangePhysSpec: readRange,
+ GroupMode: flux.GroupModeBy,
+ }),
+ plan.CreatePhysicalNode("window", &window),
+ plan.CreatePhysicalNode("min", minProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ {1, 2},
+ },
+ },
+ })
+ }
+
+ // Condition not met: period not equal to every
+ badWindow1 := window1m
+ badWindow1.Window.Period = dur2m
+ simpleMinUnchanged("BadPeriod", badWindow1)
+
+ // Condition not met: non-standard _time column
+ badWindow3 := window1m
+ badWindow3.TimeColumn = "_timmy"
+ simpleMinUnchanged("BadTime", badWindow3)
+
+ // Condition not met: non-standard start column
+ badWindow4 := window1m
+ badWindow4.StartColumn = "_stooort"
+ simpleMinUnchanged("BadStart", badWindow4)
+
+ // Condition not met: non-standard stop column
+ badWindow5 := window1m
+ badWindow5.StopColumn = "_stappp"
+ simpleMinUnchanged("BadStop", badWindow5)
+
+ // Condition met: createEmpty is true.
+ tests = append(tests, plantest.RuleTestCase{
+ Context: haveCaps,
+ Name: "CreateEmptyPassMin",
+ Rules: rules,
+ Before: simplePlan(window1mCreateEmpty, "min", minProcedureSpec()),
+ After: simpleResult("min", dur1m, true,
+ plan.CreatePhysicalNode("group", groupResult()),
+ plan.CreatePhysicalNode("min", minProcedureSpec()),
+ ),
+ })
+
+ // Condition not met: duration too long.
+ simpleMinUnchanged("WindowTooLarge", window1y)
+
+ // Condition not met: negative duration.
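+ // The window aggregate rules should not rewrite a window with a negative
+ // every duration; only the group pushdown applies.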
+ simpleMinUnchanged("WindowNeg", windowNeg) + + // Bad min column + // ReadRange -> group -> window -> min => ReadGroup -> window -> min + badMinSpec := universe.MinProcedureSpec{ + SelectorConfig: execute.SelectorConfig{Column: "_valmoo"}, + } + tests = append(tests, plantest.RuleTestCase{ + Name: "BadMinCol", + Context: haveCaps, + Rules: rules, + Before: simplePlan(window1m, "min", &badMinSpec), + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ + ReadRangePhysSpec: readRange, + GroupMode: flux.GroupModeBy, + }), + plan.CreatePhysicalNode("window", &window1m), + plan.CreatePhysicalNode("min", &badMinSpec), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + }, + }, + }) + + // Bad max column + // ReadRange -> group -> window -> max => ReadGroup -> window -> max + badMaxSpec := universe.MaxProcedureSpec{ + SelectorConfig: execute.SelectorConfig{Column: "_valmoo"}, + } + tests = append(tests, plantest.RuleTestCase{ + Name: "BadMaxCol", + Context: haveCaps, + Rules: rules, + Before: simplePlan(window1m, "max", &badMaxSpec), + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ + ReadRangePhysSpec: readRange, + GroupMode: flux.GroupModeBy, + }), + plan.CreatePhysicalNode("window", &window1m), + plan.CreatePhysicalNode("max", &badMaxSpec), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + }, + }, + }) + + // No match due to a collapsed node having a successor + // ReadRange -> group -> window -> min + // \-> min + tests = append(tests, plantest.RuleTestCase{ + Name: "CollapsedWithSuccessor1", + Context: haveCaps, + Rules: rules, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreateLogicalNode("group", group(flux.GroupModeBy)), + plan.CreateLogicalNode("window", &window1m), + plan.CreateLogicalNode("min", minProcedureSpec()), + plan.CreateLogicalNode("min", minProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {2, 3}, + {2, 4}, + }, + }, + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ + ReadRangePhysSpec: readRange, + GroupMode: flux.GroupModeBy, + }), + plan.CreatePhysicalNode("window", &window1m), + plan.CreatePhysicalNode("min", minProcedureSpec()), + plan.CreatePhysicalNode("min", minProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {1, 3}, + }, + }, + }) + + // No match due to a collapsed node having a successor + // ReadRange -> group -> window -> min + // \-> window + tests = append(tests, plantest.RuleTestCase{ + Name: "CollapsedWithSuccessor2", + Context: haveCaps, + Rules: rules, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadRange", &readRange), + plan.CreateLogicalNode("group", group(flux.GroupModeBy)), + plan.CreateLogicalNode("window", &window1m), + plan.CreateLogicalNode("min", minProcedureSpec()), + plan.CreateLogicalNode("window", &window2m), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {2, 3}, + {1, 4}, + }, + }, + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ + ReadRangePhysSpec: readRange, + GroupMode: flux.GroupModeBy, + }), + plan.CreatePhysicalNode("window", &window1m), + plan.CreatePhysicalNode("min", minProcedureSpec()), + plan.CreatePhysicalNode("window", &window2m), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {0, 3}, + }, + }, + }) + + // Fail due to no capabilities present. 
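+ // The reader reports no window aggregate capability, so only the group
+ // pushdown applies and the window and count nodes stay in the plan.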
+ tests = append(tests, plantest.RuleTestCase{
+ Context: noCaps,
+ Name: "FailNoCaps",
+ Rules: rules,
+ Before: simplePlan(window1m, "count", countProcedureSpec()),
+ After: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{
+ ReadRangePhysSpec: readRange,
+ GroupMode: flux.GroupModeBy,
+ }),
+ plan.CreatePhysicalNode("window", &window1m),
+ plan.CreatePhysicalNode("count", countProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ {1, 2},
+ },
+ },
+ })
+
+ for _, tc := range tests {
+ tc := tc
+ t.Run(tc.Name, func(t *testing.T) {
+ t.Parallel()
+ plantest.PhysicalRuleTestHelper(t, &tc)
+ })
+ }
+}
+
+func TestPushDownBareAggregateRule(t *testing.T) {
+ // No feature flags need to be enabled for this rule.
+ flagger := mock.NewFlagger(map[feature.Flag]interface{}{})
+
+ withFlagger, _ := feature.Annotate(context.Background(), flagger)
+
+ // Construct dependencies either with or without aggregate window caps.
+ deps := func(have bool) influxdb.StorageDependencies {
+ return influxdb.StorageDependencies{
+ FromDeps: influxdb.FromDependencies{
+ Reader: mockReaderCaps{Have: have},
+ Metrics: influxdb.NewMetrics(nil),
+ },
+ }
+ }
+
+ haveCaps := deps(true).Inject(withFlagger)
+ noCaps := deps(false).Inject(withFlagger)
+
+ readRange := &influxdb.ReadRangePhysSpec{
+ Bucket: "my-bucket",
+ Bounds: flux.Bounds{
+ Start: fluxTime(5),
+ Stop: fluxTime(10),
+ },
+ }
+
+ readWindowAggregate := func(proc plan.ProcedureKind) *influxdb.ReadWindowAggregatePhysSpec {
+ return &influxdb.ReadWindowAggregatePhysSpec{
+ ReadRangePhysSpec: *(readRange.Copy().(*influxdb.ReadRangePhysSpec)),
+ WindowEvery: math.MaxInt64,
+ Aggregates: []plan.ProcedureKind{proc},
+ }
+ }
+
+ testcases := []plantest.RuleTestCase{
+ {
+ // ReadRange -> count => ReadWindowAggregate
+ Context: haveCaps,
+ Name: "push down count",
+ Rules: []plan.Rule{influxdb.PushDownBareAggregateRule{}},
+ Before: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadRange", readRange),
+ plan.CreatePhysicalNode("count", countProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ },
+ },
+ After: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadWindowAggregate", readWindowAggregate(universe.CountKind)),
+ },
+ },
+ },
+ {
+ // ReadRange -> sum => ReadWindowAggregate
+ Context: haveCaps,
+ Name: "push down sum",
+ Rules: []plan.Rule{influxdb.PushDownBareAggregateRule{}},
+ Before: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadRange", readRange),
+ plan.CreatePhysicalNode("sum", sumProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ },
+ },
+ After: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadWindowAggregate", readWindowAggregate(universe.SumKind)),
+ },
+ },
+ },
+ {
+ // ReadRange -> first => ReadWindowAggregate
+ Context: haveCaps,
+ Name: "push down first",
+ Rules: []plan.Rule{influxdb.PushDownBareAggregateRule{}},
+ Before: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadRange", readRange),
+ plan.CreatePhysicalNode("first", firstProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ },
+ },
+ After: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadWindowAggregate", readWindowAggregate(universe.FirstKind)),
+ },
+ },
+ },
+ {
+ // ReadRange -> last => ReadWindowAggregate
+ Context: haveCaps,
+ Name: "push down last",
+ Rules: []plan.Rule{influxdb.PushDownBareAggregateRule{}},
+ Before: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+
plan.CreatePhysicalNode("ReadRange", readRange), + plan.CreatePhysicalNode("last", lastProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + }, + }, + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadWindowAggregate", readWindowAggregate(universe.LastKind)), + }, + }, + }, + { + // capability not provided in storage layer + Context: noCaps, + Name: "no caps", + Rules: []plan.Rule{influxdb.PushDownBareAggregateRule{}}, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadRange", readRange), + plan.CreatePhysicalNode("count", countProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + }, + }, + NoChange: true, + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + plantest.PhysicalRuleTestHelper(t, &tc) + }) + } +} + +// +// Group Aggregate Testing +// +func TestPushDownGroupAggregateRule(t *testing.T) { + // Turn on all flags + ctx, _ := feature.Annotate(context.Background(), mock.NewFlagger(map[feature.Flag]interface{}{ + feature.PushDownGroupAggregateMinMax(): true, + })) + + caps := func(c query.GroupCapability) context.Context { + deps := influxdb.StorageDependencies{ + FromDeps: influxdb.FromDependencies{ + Reader: mockReaderCaps{ + GroupCapabilities: c, + }, + Metrics: influxdb.NewMetrics(nil), + }, + } + return deps.Inject(ctx) + } + + readGroupAgg := func(aggregateMethod string) *influxdb.ReadGroupPhysSpec { + return &influxdb.ReadGroupPhysSpec{ + ReadRangePhysSpec: influxdb.ReadRangePhysSpec{ + Bucket: "my-bucket", + Bounds: flux.Bounds{ + Start: fluxTime(5), + Stop: fluxTime(10), + }, + }, + GroupMode: flux.GroupModeBy, + GroupKeys: []string{"_measurement", "tag0", "tag1"}, + AggregateMethod: aggregateMethod, + } + } + readGroup := func() *influxdb.ReadGroupPhysSpec { + return readGroupAgg("") + } + + tests := make([]plantest.RuleTestCase, 0) + + // construct a simple plan with a specific aggregate + simplePlanWithAgg := func(agg plan.NodeID, spec plan.ProcedureSpec) *plantest.PlanSpec { + return &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroup", readGroup()), + plan.CreateLogicalNode(agg, spec), + }, + Edges: [][2]int{ + {0, 1}, + }, + } + } + + minProcedureSpec := func() *universe.MinProcedureSpec { + return &universe.MinProcedureSpec{ + SelectorConfig: execute.SelectorConfig{ + Column: execute.DefaultTimeColLabel, + }, + } + } + minProcedureSpecVal := func() *universe.MinProcedureSpec { + return &universe.MinProcedureSpec{ + SelectorConfig: execute.SelectorConfig{ + Column: execute.DefaultValueColLabel, + }, + } + } + maxProcedureSpecVal := func() *universe.MaxProcedureSpec { + return &universe.MaxProcedureSpec{ + SelectorConfig: execute.SelectorConfig{ + Column: execute.DefaultValueColLabel, + }, + } + } + countProcedureSpec := func() *universe.CountProcedureSpec { + return &universe.CountProcedureSpec{ + AggregateConfig: execute.DefaultAggregateConfig, + } + } + sumProcedureSpec := func() *universe.SumProcedureSpec { + return &universe.SumProcedureSpec{ + AggregateConfig: execute.DefaultAggregateConfig, + } + } + firstProcedureSpec := func() *universe.FirstProcedureSpec { + return &universe.FirstProcedureSpec{ + SelectorConfig: execute.DefaultSelectorConfig, + } + } + lastProcedureSpec := func() *universe.LastProcedureSpec { + return &universe.LastProcedureSpec{ + SelectorConfig: execute.DefaultSelectorConfig, + } + } + + // ReadGroup() -> count => ReadGroup(count) + tests = append(tests, plantest.RuleTestCase{ + Context: 
caps(mockGroupCapability{count: true}), + Name: "RewriteGroupCount", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("count", countProcedureSpec()), + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("count")), + }, + }, + }) + + // ReadGroup() -> count => ReadGroup() -> count + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{}), + Name: "NoCountCapability", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("count", countProcedureSpec()), + NoChange: true, + }) + + // ReadGroup() -> sum => ReadGroup(sum) + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{sum: true}), + Name: "RewriteGroupSum", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("sum", sumProcedureSpec()), + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("sum")), + }, + }, + }) + + // ReadGroup() -> sum => ReadGroup() -> sum + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{}), + Name: "NoSumCapability", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("sum", sumProcedureSpec()), + NoChange: true, + }) + + // ReadGroup() -> first => ReadGroup(first) + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{first: true}), + Name: "RewriteGroupFirst", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("first", firstProcedureSpec()), + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("first")), + }, + }, + }) + + // ReadGroup() -> first => ReadGroup() -> first + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{}), + Name: "NoFirstCapability", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("first", firstProcedureSpec()), + NoChange: true, + }) + + // ReadGroup() -> last => ReadGroup(last) + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{last: true}), + Name: "RewriteGroupLast", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("last", lastProcedureSpec()), + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("last")), + }, + }, + }) + + // ReadGroup() -> last => ReadGroup() -> last + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{}), + Name: "NoLastCapability", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("last", lastProcedureSpec()), + NoChange: true, + }) + + // ReadGroup() -> max => ReadGroup(max) + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{max: true}), + Name: "RewriteGroupMax", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("max", maxProcedureSpecVal()), + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("max")), + }, + }, + }) + + // ReadGroup() -> max => ReadGroup() -> max + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{}), + Name: "NoMaxCapability", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("max", maxProcedureSpecVal()), + NoChange: 
true, + }) + + // ReadGroup() -> min => ReadGroup(min) + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{min: true}), + Name: "RewriteGroupMin", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("min", minProcedureSpecVal()), + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("min")), + }, + }, + }) + + // ReadGroup() -> min => ReadGroup() -> min + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{}), + Name: "NoMinCapability", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("min", minProcedureSpecVal()), + NoChange: true, + }) + + // Rewrite with successors + // ReadGroup() -> count -> sum {2} => ReadGroup(count) -> sum {2} + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{count: true}), + Name: "WithSuccessor1", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroup", readGroup()), + plan.CreateLogicalNode("count", countProcedureSpec()), + plan.CreateLogicalNode("sum", sumProcedureSpec()), + plan.CreateLogicalNode("sum", sumProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + {1, 3}, + }, + }, + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadGroupAggregate", readGroupAgg("count")), + plan.CreateLogicalNode("sum", sumProcedureSpec()), + plan.CreateLogicalNode("sum", sumProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + }, + }, + }) + + // Cannot replace a ReadGroup that already has an aggregate. This exercises + // the check that ReadGroup aggregate is not set. + // ReadGroup() -> count -> count => ReadGroup(count) -> count + tests = append(tests, plantest.RuleTestCase{ + Context: caps(mockGroupCapability{count: true}), + Name: "WithSuccessor2", + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroup", readGroup()), + plan.CreateLogicalNode("count", countProcedureSpec()), + plan.CreateLogicalNode("count", countProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + }, + }, + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("ReadGroupAggregate", readGroupAgg("count")), + plan.CreateLogicalNode("count", countProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + }, + }, + }) + + // Bad count column + // ReadGroup -> count => NO-CHANGE + tests = append(tests, plantest.RuleTestCase{ + Name: "BadCountCol", + Context: caps(mockGroupCapability{count: true}), + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: simplePlanWithAgg("count", &universe.CountProcedureSpec{ + AggregateConfig: execute.AggregateConfig{Columns: []string{"_valmoo"}}, + }), + NoChange: true, + }) + + // No match due to a collapsed node having a successor + // ReadGroup -> count + // \-> min + tests = append(tests, plantest.RuleTestCase{ + Name: "CollapsedWithSuccessor", + Context: caps(mockGroupCapability{count: true}), + Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreateLogicalNode("ReadGroup", readGroup()), + plan.CreateLogicalNode("count", countProcedureSpec()), + plan.CreateLogicalNode("min", minProcedureSpec()), + }, + Edges: [][2]int{ + {0, 1}, + {0, 2}, + }, + }, + NoChange: true, + }) + + // No pattern match + // ReadGroup 
-> filter -> count => NO-CHANGE
+ pushableFn1 := executetest.FunctionExpression(t, `(r) => true`)
+
+ makeResolvedFilterFn := func(expr *semantic.FunctionExpression) interpreter.ResolvedFunction {
+ return interpreter.ResolvedFunction{
+ Scope: nil,
+ Fn: expr,
+ }
+ }
+ noPatternMatch1 := func() *plantest.PlanSpec {
+ return &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreateLogicalNode("ReadGroup", readGroup()),
+ plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{
+ Fn: makeResolvedFilterFn(pushableFn1),
+ }),
+ plan.CreateLogicalNode("count", countProcedureSpec()),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ {1, 2},
+ },
+ }
+ }
+ tests = append(tests, plantest.RuleTestCase{
+ Name: "NoPatternMatch",
+ Context: caps(mockGroupCapability{count: true}),
+ Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}},
+ Before: noPatternMatch1(),
+ NoChange: true,
+ })
+
+ for _, tc := range tests {
+ tc := tc
+ t.Run(tc.Name, func(t *testing.T) {
+ t.Parallel()
+ plantest.PhysicalRuleTestHelper(t, &tc)
+ })
+ }
+}
+
+func TestSwitchFillImplRule(t *testing.T) {
+ flagger := mock.NewFlagger(map[feature.Flag]interface{}{
+ feature.MemoryOptimizedFill(): true,
+ })
+ withFlagger, _ := feature.Annotate(context.Background(), flagger)
+ readRange := &influxdb.ReadRangePhysSpec{
+ Bucket: "my-bucket",
+ Bounds: flux.Bounds{
+ Start: fluxTime(5),
+ Stop: fluxTime(10),
+ },
+ }
+ sourceSpec := &universe.DualImplProcedureSpec{
+ ProcedureSpec: &universe.FillProcedureSpec{
+ DefaultCost: plan.DefaultCost{},
+ Column: "_value",
+ Value: values.NewFloat(0),
+ UsePrevious: false,
+ },
+ UseDeprecated: false,
+ }
+ targetSpec := sourceSpec.Copy().(*universe.DualImplProcedureSpec)
+ universe.UseDeprecatedImpl(targetSpec)
+
+ testcases := []plantest.RuleTestCase{
+ {
+ Context: withFlagger,
+ Name: "enable memory optimized fill",
+ Rules: []plan.Rule{influxdb.SwitchFillImplRule{}},
+ Before: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadRange", readRange),
+ plan.CreatePhysicalNode("fill", sourceSpec),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ },
+ },
+ NoChange: true,
+ },
+ {
+ Context: context.Background(),
+ Name: "disable memory optimized fill",
+ Rules: []plan.Rule{influxdb.SwitchFillImplRule{}},
+ Before: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadRange", readRange),
+ plan.CreatePhysicalNode("fill", sourceSpec),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ },
+ },
+ After: &plantest.PlanSpec{
+ Nodes: []plan.Node{
+ plan.CreatePhysicalNode("ReadRange", readRange),
+ plan.CreatePhysicalNode("fill", targetSpec),
+ },
+ Edges: [][2]int{
+ {0, 1},
+ },
+ },
+ },
+ }
+
+ for _, tc := range testcases {
+ tc := tc
+ t.Run(tc.Name, func(t *testing.T) {
+ t.Parallel()
+ plantest.PhysicalRuleTestHelper(t, &tc)
+ })
+ }
+}
+
+func TestMergeFilterRule(t *testing.T) {
+ flaggerOn := mock.NewFlagger(map[feature.Flag]interface{}{
+ feature.MergedFiltersRule(): true,
+ })
+ flaggerOff := mock.NewFlagger(map[feature.Flag]interface{}{
+ feature.MergedFiltersRule(): false,
+ })
+
+ withFlagger, _ := feature.Annotate(context.Background(), flaggerOn)
+ withOutFlagger, _ := feature.Annotate(context.Background(), flaggerOff)
+
+ from := &fluxinfluxdb.FromProcedureSpec{}
+ filter0 := func() *universe.FilterProcedureSpec {
+ return &universe.FilterProcedureSpec{
+ Fn: interpreter.ResolvedFunction{
+ Fn: executetest.FunctionExpression(t, `(r) => r._field == "usage_idle"`),
+ },
+ }
+ }
+ filter1 := func() *universe.FilterProcedureSpec {
+ return &universe.FilterProcedureSpec{
+
Fn: interpreter.ResolvedFunction{ + Fn: executetest.FunctionExpression(t, `(r) => r._measurement == "cpu"`), + }, + } + } + filterMerge := func() *universe.FilterProcedureSpec { + return &universe.FilterProcedureSpec{ + Fn: interpreter.ResolvedFunction{ + Fn: executetest.FunctionExpression(t, `(r) => r._measurement == "cpu" and r._field == "usage_idle"`), + }, + } + } + + testcases := []plantest.RuleTestCase{ + { + Context: withFlagger, + Name: "merge filter on", + Rules: []plan.Rule{influxdb.MergeFiltersRule{}}, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("from", from), + plan.CreatePhysicalNode("filter0", filter0()), + plan.CreatePhysicalNode("filter1", filter1()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + }, + }, + After: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("from", from), + plan.CreatePhysicalNode("filter0", filterMerge()), + }, + Edges: [][2]int{{0, 1}}, + }, + }, + { + Context: withOutFlagger, + Name: "merge filter off", + Rules: []plan.Rule{influxdb.MergeFiltersRule{}}, + Before: &plantest.PlanSpec{ + Nodes: []plan.Node{ + plan.CreatePhysicalNode("from", from), + plan.CreatePhysicalNode("filter0", filter0()), + plan.CreatePhysicalNode("filter1", filter1()), + }, + Edges: [][2]int{ + {0, 1}, + {1, 2}, + }, + }, + NoChange: true, + }, + } + for _, tc := range testcases { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + plantest.LogicalRuleTestHelper(t, &tc) + }) + } +} diff --git a/query/stdlib/influxdata/influxdb/source.go b/query/stdlib/influxdata/influxdb/source.go index ab1d501f12..87141898a1 100644 --- a/query/stdlib/influxdata/influxdb/source.go +++ b/query/stdlib/influxdata/influxdb/source.go @@ -9,8 +9,8 @@ import ( "github.com/influxdata/flux/codes" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/memory" + "github.com/influxdata/flux/metadata" "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/semantic" platform "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/kit/tracing" "github.com/influxdata/influxdb/v2/query" @@ -20,6 +20,7 @@ import ( func init() { execute.RegisterSource(ReadRangePhysKind, createReadFilterSource) execute.RegisterSource(ReadGroupPhysKind, createReadGroupSource) + execute.RegisterSource(ReadWindowAggregatePhysKind, createReadWindowAggregateSource) execute.RegisterSource(ReadTagKeysPhysKind, createReadTagKeysSource) execute.RegisterSource(ReadTagValuesPhysKind, createReadTagValuesSource) } @@ -46,7 +47,7 @@ func (s *Source) Run(ctx context.Context) { labelValues := s.m.getLabelValues(ctx, s.orgID, s.op) start := time.Now() var err error - if flux.IsExperimentalTracingEnabled() { + if flux.IsExperimentalTracingEnabled(ctx) { span, ctxWithSpan := tracing.StartSpanFromContextWithOperationName(ctx, "source-"+s.op) err = s.runner.run(ctxWithSpan) span.Finish() @@ -63,14 +64,14 @@ func (s *Source) AddTransformation(t execute.Transformation) { s.ts = append(s.ts, t) } -func (s *Source) Metadata() flux.Metadata { - return flux.Metadata{ +func (s *Source) Metadata() metadata.Metadata { + return metadata.Metadata{ "influxdb/scanned-bytes": []interface{}{s.stats.ScannedBytes}, "influxdb/scanned-values": []interface{}{s.stats.ScannedValues}, } } -func (s *Source) processTables(ctx context.Context, tables TableIterator, watermark execute.Time) error { +func (s *Source) processTables(ctx context.Context, tables query.TableIterator, watermark execute.Time) error { err := tables.Do(func(tbl flux.Table) error { return s.processTable(ctx, 
tbl) }) @@ -117,11 +118,11 @@ func (s *Source) processTable(ctx context.Context, tbl flux.Table) error { type readFilterSource struct { Source - reader Reader - readSpec ReadFilterSpec + reader query.StorageReader + readSpec query.ReadFilterSpec } -func ReadFilterSource(id execute.DatasetID, r Reader, readSpec ReadFilterSpec, a execute.Administration) execute.Source { +func ReadFilterSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadFilterSpec, a execute.Administration) execute.Source { src := new(readFilterSource) src.id = id @@ -181,18 +182,14 @@ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execut return nil, err } - var filter *semantic.FunctionExpression - if spec.FilterSet { - filter = spec.Filter - } return ReadFilterSource( id, deps.Reader, - ReadFilterSpec{ + query.ReadFilterSpec{ OrganizationID: orgID, BucketID: bucketID, Bounds: *bounds, - Predicate: filter, + Predicate: spec.Filter, }, a, ), nil @@ -200,11 +197,11 @@ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execut type readGroupSource struct { Source - reader Reader - readSpec ReadGroupSpec + reader query.StorageReader + readSpec query.ReadGroupSpec } -func ReadGroupSource(id execute.DatasetID, r Reader, readSpec ReadGroupSpec, a execute.Administration) execute.Source { +func ReadGroupSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadGroupSpec, a execute.Administration) execute.Source { src := new(readGroupSource) src.id = id @@ -215,7 +212,7 @@ func ReadGroupSource(id execute.DatasetID, r Reader, readSpec ReadGroupSpec, a e src.m = GetStorageDependencies(a.Context()).FromDeps.Metrics src.orgID = readSpec.OrganizationID - src.op = "readGroup" + src.op = readSpec.Name() src.runner = src return src @@ -258,21 +255,17 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute return nil, err } - var filter *semantic.FunctionExpression - if spec.FilterSet { - filter = spec.Filter - } return ReadGroupSource( id, deps.Reader, - ReadGroupSpec{ - ReadFilterSpec: ReadFilterSpec{ + query.ReadGroupSpec{ + ReadFilterSpec: query.ReadFilterSpec{ OrganizationID: orgID, BucketID: bucketID, Bounds: *bounds, - Predicate: filter, + Predicate: spec.Filter, }, - GroupMode: ToGroupMode(spec.GroupMode), + GroupMode: query.ToGroupMode(spec.GroupMode), GroupKeys: spec.GroupKeys, AggregateMethod: spec.AggregateMethod, }, @@ -280,6 +273,93 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute ), nil } +type readWindowAggregateSource struct { + Source + reader query.WindowAggregateReader + readSpec query.ReadWindowAggregateSpec +} + +func ReadWindowAggregateSource(id execute.DatasetID, r query.WindowAggregateReader, readSpec query.ReadWindowAggregateSpec, a execute.Administration) execute.Source { + src := new(readWindowAggregateSource) + + src.id = id + src.alloc = a.Allocator() + + src.reader = r + src.readSpec = readSpec + + src.m = GetStorageDependencies(a.Context()).FromDeps.Metrics + src.orgID = readSpec.OrganizationID + src.op = readSpec.Name() + + src.runner = src + return src +} + +func (s *readWindowAggregateSource) run(ctx context.Context) error { + stop := s.readSpec.Bounds.Stop + tables, err := s.reader.ReadWindowAggregate( + ctx, + s.readSpec, + s.alloc, + ) + if err != nil { + return err + } + return s.processTables(ctx, tables, stop) +} + +func createReadWindowAggregateSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) { + span, ctx := 
tracing.StartSpanFromContext(a.Context()) + defer span.Finish() + + spec := s.(*ReadWindowAggregatePhysSpec) + + bounds := a.StreamContext().Bounds() + if bounds == nil { + return nil, &flux.Error{ + Code: codes.Internal, + Msg: "nil bounds passed to from", + } + } + + deps := GetStorageDependencies(a.Context()).FromDeps + reader := deps.Reader.(query.WindowAggregateReader) + + req := query.RequestFromContext(a.Context()) + if req == nil { + return nil, &flux.Error{ + Code: codes.Internal, + Msg: "missing request on context", + } + } + + orgID := req.OrganizationID + bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup) + if err != nil { + return nil, err + } + + return ReadWindowAggregateSource( + id, + reader, + query.ReadWindowAggregateSpec{ + ReadFilterSpec: query.ReadFilterSpec{ + OrganizationID: orgID, + BucketID: bucketID, + Bounds: *bounds, + Predicate: spec.Filter, + }, + WindowEvery: spec.WindowEvery, + Offset: spec.Offset, + Aggregates: spec.Aggregates, + CreateEmpty: spec.CreateEmpty, + TimeColumn: spec.TimeColumn, + }, + a, + ), nil +} + func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) { span, ctx := tracing.StartSpanFromContext(a.Context()) defer span.Finish() @@ -297,21 +377,16 @@ func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, return nil, err } - var filter *semantic.FunctionExpression - if spec.FilterSet { - filter = spec.Filter - } - bounds := a.StreamContext().Bounds() return ReadTagKeysSource( dsid, deps.Reader, - ReadTagKeysSpec{ - ReadFilterSpec: ReadFilterSpec{ + query.ReadTagKeysSpec{ + ReadFilterSpec: query.ReadFilterSpec{ OrganizationID: orgID, BucketID: bucketID, Bounds: *bounds, - Predicate: filter, + Predicate: spec.Filter, }, }, a, @@ -321,11 +396,11 @@ func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, type readTagKeysSource struct { Source - reader Reader - readSpec ReadTagKeysSpec + reader query.StorageReader + readSpec query.ReadTagKeysSpec } -func ReadTagKeysSource(id execute.DatasetID, r Reader, readSpec ReadTagKeysSpec, a execute.Administration) execute.Source { +func ReadTagKeysSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadTagKeysSpec, a execute.Administration) execute.Source { src := &readTagKeysSource{ reader: r, readSpec: readSpec, @@ -366,21 +441,16 @@ func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID return nil, err } - var filter *semantic.FunctionExpression - if spec.FilterSet { - filter = spec.Filter - } - bounds := a.StreamContext().Bounds() return ReadTagValuesSource( dsid, deps.Reader, - ReadTagValuesSpec{ - ReadFilterSpec: ReadFilterSpec{ + query.ReadTagValuesSpec{ + ReadFilterSpec: query.ReadFilterSpec{ OrganizationID: orgID, BucketID: bucketID, Bounds: *bounds, - Predicate: filter, + Predicate: spec.Filter, }, TagKey: spec.TagKey, }, @@ -391,11 +461,11 @@ func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID type readTagValuesSource struct { Source - reader Reader - readSpec ReadTagValuesSpec + reader query.StorageReader + readSpec query.ReadTagValuesSpec } -func ReadTagValuesSource(id execute.DatasetID, r Reader, readSpec ReadTagValuesSpec, a execute.Administration) execute.Source { +func ReadTagValuesSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadTagValuesSpec, a execute.Administration) execute.Source { src := &readTagValuesSource{ reader: r, readSpec: readSpec, diff --git 
a/query/stdlib/influxdata/influxdb/source_internal_test.go b/query/stdlib/influxdata/influxdb/source_internal_test.go new file mode 100644 index 0000000000..a8afefe465 --- /dev/null +++ b/query/stdlib/influxdata/influxdb/source_internal_test.go @@ -0,0 +1,10 @@ +package influxdb + +import ( + "github.com/influxdata/flux/execute" + "github.com/influxdata/flux/plan" +) + +func CreateReadWindowAggregateSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) { + return createReadWindowAggregateSource(s, id, a) +} diff --git a/query/stdlib/influxdata/influxdb/source_test.go b/query/stdlib/influxdata/influxdb/source_test.go index 6304f03024..71776aac9c 100644 --- a/query/stdlib/influxdata/influxdb/source_test.go +++ b/query/stdlib/influxdata/influxdb/source_test.go @@ -5,13 +5,18 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/influxdata/flux" "github.com/influxdata/flux/dependencies/dependenciestest" "github.com/influxdata/flux/execute" + "github.com/influxdata/flux/execute/executetest" "github.com/influxdata/flux/memory" + "github.com/influxdata/flux/plan" + "github.com/influxdata/flux/stdlib/universe" platform "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/kit/prom/promtest" "github.com/influxdata/influxdb/v2/mock" + "github.com/influxdata/influxdb/v2/query" "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" "github.com/influxdata/influxdb/v2/tsdb/cursors" "github.com/influxdata/influxdb/v2/uuid" @@ -32,19 +37,19 @@ func (mockTableIterator) Statistics() cursors.CursorStats { type mockReader struct { } -func (mockReader) ReadFilter(ctx context.Context, spec influxdb.ReadFilterSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) { +func (mockReader) ReadFilter(ctx context.Context, spec query.ReadFilterSpec, alloc *memory.Allocator) (query.TableIterator, error) { return &mockTableIterator{}, nil } -func (mockReader) ReadGroup(ctx context.Context, spec influxdb.ReadGroupSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) { +func (mockReader) ReadGroup(ctx context.Context, spec query.ReadGroupSpec, alloc *memory.Allocator) (query.TableIterator, error) { return &mockTableIterator{}, nil } -func (mockReader) ReadTagKeys(ctx context.Context, spec influxdb.ReadTagKeysSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) { +func (mockReader) ReadTagKeys(ctx context.Context, spec query.ReadTagKeysSpec, alloc *memory.Allocator) (query.TableIterator, error) { return &mockTableIterator{}, nil } -func (mockReader) ReadTagValues(ctx context.Context, spec influxdb.ReadTagValuesSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) { +func (mockReader) ReadTagValues(ctx context.Context, spec query.ReadTagValuesSpec, alloc *memory.Allocator) (query.TableIterator, error) { return &mockTableIterator{}, nil } @@ -52,7 +57,8 @@ func (mockReader) Close() { } type mockAdministration struct { - Ctx context.Context + Ctx context.Context + StreamBounds *execute.Bounds } func (a mockAdministration) Context() context.Context { @@ -63,8 +69,12 @@ func (mockAdministration) ResolveTime(qt flux.Time) execute.Time { return 0 } -func (mockAdministration) StreamContext() execute.StreamContext { - return nil +func (a mockAdministration) StreamContext() execute.StreamContext { + return a +} + +func (a mockAdministration) Bounds() *execute.Bounds { + return a.StreamBounds } func (mockAdministration) Allocator() *memory.Allocator { @@ -110,7 +120,7 @@ func TestMetrics(t *testing.T) { 
rfs := influxdb.ReadFilterSource( execute.DatasetID(uuid.FromTime(time.Now())), &mockReader{}, - influxdb.ReadFilterSpec{ + query.ReadFilterSpec{ OrganizationID: *orgID, }, a, @@ -129,3 +139,150 @@ func TestMetrics(t *testing.T) { t.Fatalf("expected sample count of %v, got %v", want, got) } } + +type TableIterator struct { + Tables []*executetest.Table +} + +func (t *TableIterator) Do(f func(flux.Table) error) error { + for _, table := range t.Tables { + if err := f(table); err != nil { + return err + } + } + return nil +} + +func (t *TableIterator) Statistics() cursors.CursorStats { + return cursors.CursorStats{} +} + +func TestReadWindowAggregateSource(t *testing.T) { + t.Skip("test panics in CI; issue: https://github.com/influxdata/influxdb/issues/17847") + + orgID, bucketID := platform.ID(1), platform.ID(2) + executetest.RunSourceHelper(t, + []*executetest.Table{ + { + ColMeta: []flux.ColMeta{ + {Label: "_time", Type: flux.TTime}, + {Label: "_measurement", Type: flux.TString}, + {Label: "_field", Type: flux.TString}, + {Label: "host", Type: flux.TString}, + {Label: "_value", Type: flux.TFloat}, + }, + KeyCols: []string{"_measurement", "_field", "host"}, + Data: [][]interface{}{ + {execute.Time(0), "cpu", "usage_user", "server01", 2.0}, + {execute.Time(10), "cpu", "usage_user", "server01", 1.5}, + {execute.Time(20), "cpu", "usage_user", "server01", 5.0}, + }, + }, + { + ColMeta: []flux.ColMeta{ + {Label: "_time", Type: flux.TTime}, + {Label: "_measurement", Type: flux.TString}, + {Label: "_field", Type: flux.TString}, + {Label: "host", Type: flux.TString}, + {Label: "_value", Type: flux.TFloat}, + }, + KeyCols: []string{"_measurement", "_field", "host"}, + Data: [][]interface{}{ + {execute.Time(0), "cpu", "usage_system", "server01", 8.0}, + {execute.Time(10), "cpu", "usage_system", "server01", 3.0}, + {execute.Time(20), "cpu", "usage_system", "server01", 6.0}, + }, + }, + }, + nil, + func(id execute.DatasetID) execute.Source { + pspec := &influxdb.ReadWindowAggregatePhysSpec{ + ReadRangePhysSpec: influxdb.ReadRangePhysSpec{ + BucketID: bucketID.String(), + }, + WindowEvery: 10, + Aggregates: []plan.ProcedureKind{ + universe.SumKind, + }, + } + reader := &mock.WindowAggregateStoreReader{ + ReadWindowAggregateFn: func(ctx context.Context, spec query.ReadWindowAggregateSpec, alloc *memory.Allocator) (query.TableIterator, error) { + if want, got := orgID, spec.OrganizationID; want != got { + t.Errorf("unexpected organization id -want/+got:\n\t- %s\n\t+ %s", want, got) + } + if want, got := bucketID, spec.BucketID; want != got { + t.Errorf("unexpected bucket id -want/+got:\n\t- %s\n\t+ %s", want, got) + } + if want, got := (execute.Bounds{Start: 0, Stop: 30}), spec.Bounds; want != got { + t.Errorf("unexpected bounds -want/+got:\n%s", cmp.Diff(want, got)) + } + if want, got := int64(10), spec.WindowEvery; want != got { + t.Errorf("unexpected window every value -want/+got:\n\t- %d\n\t+ %d", want, got) + } + if want, got := []plan.ProcedureKind{universe.SumKind}, spec.Aggregates; !cmp.Equal(want, got) { + t.Errorf("unexpected aggregates -want/+got:\n%s", cmp.Diff(want, got)) + } + return &TableIterator{ + Tables: []*executetest.Table{ + { + ColMeta: []flux.ColMeta{ + {Label: "_time", Type: flux.TTime}, + {Label: "_measurement", Type: flux.TString}, + {Label: "_field", Type: flux.TString}, + {Label: "host", Type: flux.TString}, + {Label: "_value", Type: flux.TFloat}, + }, + KeyCols: []string{"_measurement", "_field", "host"}, + Data: [][]interface{}{ + {execute.Time(0), "cpu", "usage_user", 
"server01", 2.0}, + {execute.Time(10), "cpu", "usage_user", "server01", 1.5}, + {execute.Time(20), "cpu", "usage_user", "server01", 5.0}, + }, + }, + { + ColMeta: []flux.ColMeta{ + {Label: "_time", Type: flux.TTime}, + {Label: "_measurement", Type: flux.TString}, + {Label: "_field", Type: flux.TString}, + {Label: "host", Type: flux.TString}, + {Label: "_value", Type: flux.TFloat}, + }, + KeyCols: []string{"_measurement", "_field", "host"}, + Data: [][]interface{}{ + {execute.Time(0), "cpu", "usage_system", "server01", 8.0}, + {execute.Time(10), "cpu", "usage_system", "server01", 3.0}, + {execute.Time(20), "cpu", "usage_system", "server01", 6.0}, + }, + }, + }, + }, nil + }, + } + + metrics := influxdb.NewMetrics(nil) + deps := influxdb.StorageDependencies{ + FromDeps: influxdb.FromDependencies{ + Reader: reader, + Metrics: metrics, + }, + } + ctx := deps.Inject(context.Background()) + ctx = query.ContextWithRequest(ctx, &query.Request{ + OrganizationID: orgID, + }) + a := mockAdministration{ + Ctx: ctx, + StreamBounds: &execute.Bounds{ + Start: execute.Time(0), + Stop: execute.Time(30), + }, + } + + s, err := influxdb.CreateReadWindowAggregateSource(pspec, id, a) + if err != nil { + t.Fatal(err) + } + return s + }, + ) +} diff --git a/query/stdlib/influxdata/influxdb/storage.go b/query/stdlib/influxdata/influxdb/storage.go index 03d8d43cc9..07d59ec580 100644 --- a/query/stdlib/influxdata/influxdb/storage.go +++ b/query/stdlib/influxdata/influxdb/storage.go @@ -2,15 +2,10 @@ package influxdb import ( "context" - "fmt" - "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/semantic" platform "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/tsdb/cursors" + "github.com/influxdata/influxdb/v2/query" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" ) @@ -22,16 +17,14 @@ type HostLookup interface { type BucketLookup interface { Lookup(ctx context.Context, orgID platform.ID, name string) (platform.ID, bool) - LookupName(ctx context.Context, orgID platform.ID, id platform.ID) string } type OrganizationLookup interface { Lookup(ctx context.Context, name string) (platform.ID, bool) - LookupName(ctx context.Context, id platform.ID) string } type FromDependencies struct { - Reader Reader + Reader query.StorageReader BucketLookup BucketLookup OrganizationLookup OrganizationLookup Metrics *metrics @@ -79,83 +72,3 @@ func (l StaticLookup) Watch() <-chan struct{} { // A nil channel always blocks, since hosts never change this is appropriate. return nil } - -type GroupMode int - -const ( - // GroupModeNone merges all series into a single group. - GroupModeNone GroupMode = iota - // GroupModeBy produces a table for each unique value of the specified GroupKeys. - GroupModeBy -) - -// ToGroupMode accepts the group mode from Flux and produces the appropriate storage group mode. 
-func ToGroupMode(fluxMode flux.GroupMode) GroupMode {
- switch fluxMode {
- case flux.GroupModeNone:
- return GroupModeNone
- case flux.GroupModeBy:
- return GroupModeBy
- default:
- panic(fmt.Sprint("unknown group mode: ", fluxMode))
- }
-}
-
-type ReadFilterSpec struct {
- OrganizationID platform.ID
- BucketID platform.ID
- Database string
- RetentionPolicy string
-
- Bounds execute.Bounds
-
- Predicate *semantic.FunctionExpression
-}
-
-type ReadGroupSpec struct {
- ReadFilterSpec
-
- GroupMode GroupMode
- GroupKeys []string
-
- AggregateMethod string
-}
-
-type ReadTagKeysSpec struct {
- ReadFilterSpec
-}
-
-type ReadTagValuesSpec struct {
- ReadFilterSpec
- TagKey string
-}
-
-type Reader interface {
- ReadFilter(ctx context.Context, spec ReadFilterSpec, alloc *memory.Allocator) (TableIterator, error)
- ReadGroup(ctx context.Context, spec ReadGroupSpec, alloc *memory.Allocator) (TableIterator, error)
-
- ReadTagKeys(ctx context.Context, spec ReadTagKeysSpec, alloc *memory.Allocator) (TableIterator, error)
- ReadTagValues(ctx context.Context, spec ReadTagValuesSpec, alloc *memory.Allocator) (TableIterator, error)
-
- Close()
-}
-
-// TableIterator is a table iterator that also keeps track of cursor statistics from the storage engine.
-type TableIterator interface {
- flux.TableIterator
- Statistics() cursors.CursorStats
-}
-
-type ReadWindowAggregateSpec struct {
- ReadFilterSpec
- // TODO(issue #17784): add attributes for the window aggregate spec.
-}
-
-// WindowAggregateReader implements the WindowAggregate capability.
-type WindowAggregateReader interface {
- // HasWindowAggregateCapability will test if this Reader source supports the ReadWindowAggregate capability.
- HasWindowAggregateCapability(ctx context.Context) bool
-
- // ReadWindowAggregate will read a table using the WindowAggregate method.
- ReadWindowAggregate(ctx context.Context, spec ReadWindowAggregateSpec, alloc *memory.Allocator) (TableIterator, error)
-}
diff --git a/storage/flux/predicate.go b/query/stdlib/influxdata/influxdb/storage_predicate.go
similarity index 80%
rename from storage/flux/predicate.go
rename to query/stdlib/influxdata/influxdb/storage_predicate.go
index f6dd6e3159..a5e6cd282d 100644
--- a/storage/flux/predicate.go
+++ b/query/stdlib/influxdata/influxdb/storage_predicate.go
@@ -1,4 +1,4 @@
-package storageflux
+package influxdb
 
 import (
 "fmt"
@@ -10,12 +10,10 @@ import (
 "github.com/pkg/errors"
 )
 
-func toStoragePredicate(f *semantic.FunctionExpression) (*datatypes.Predicate, error) {
- if f.Block.Parameters == nil || len(f.Block.Parameters.List) != 1 {
- return nil, errors.New("storage predicate functions must have exactly one parameter")
- }
-
- root, err := toStoragePredicateHelper(f.Block.Body.(semantic.Expression), f.Block.Parameters.List[0].Key.Name)
+// ToStoragePredicate will convert a semantic.Expression into a predicate that can be
+// sent down to the storage layer.
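+// The objectName is the name of the parameter through which the expression
+// addresses each row, for example "r" in a predicate like (r) => r._measurement == "cpu".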
+func ToStoragePredicate(n semantic.Expression, objectName string) (*datatypes.Predicate, error) { + root, err := toStoragePredicateHelper(n, objectName) if err != nil { return nil, err } @@ -25,6 +23,39 @@ func toStoragePredicate(f *semantic.FunctionExpression) (*datatypes.Predicate, e }, nil } +func mergePredicates(op ast.LogicalOperatorKind, predicates ...*datatypes.Predicate) (*datatypes.Predicate, error) { + if len(predicates) == 0 { + return nil, errors.New("at least one predicate is needed") + } + + var value datatypes.Node_Logical + switch op { + case ast.AndOperator: + value = datatypes.LogicalAnd + case ast.OrOperator: + value = datatypes.LogicalOr + default: + return nil, fmt.Errorf("unknown logical operator %v", op) + } + + // Nest the predicates backwards. This way we get a tree like this: + // a AND (b AND c) + root := predicates[len(predicates)-1].Root + for i := len(predicates) - 2; i >= 0; i-- { + root = &datatypes.Node{ + NodeType: datatypes.NodeTypeLogicalExpression, + Value: &datatypes.Node_Logical_{Logical: value}, + Children: []*datatypes.Node{ + predicates[i].Root, + root, + }, + } + } + return &datatypes.Predicate{ + Root: root, + }, nil +} + func toStoragePredicateHelper(n semantic.Expression, objectName string) (*datatypes.Node, error) { switch n := n.(type) { case *semantic.LogicalExpression: diff --git a/query/stdlib/influxdata/influxdb/to.go b/query/stdlib/influxdata/influxdb/to.go index 195cc7b823..62f07e5034 100644 --- a/query/stdlib/influxdata/influxdb/to.go +++ b/query/stdlib/influxdata/influxdb/to.go @@ -2,7 +2,6 @@ package influxdb import ( "context" - "errors" "fmt" "sort" "time" @@ -13,6 +12,7 @@ import ( "github.com/influxdata/flux/execute" "github.com/influxdata/flux/interpreter" "github.com/influxdata/flux/plan" + "github.com/influxdata/flux/runtime" "github.com/influxdata/flux/semantic" "github.com/influxdata/flux/stdlib/influxdata/influxdb" "github.com/influxdata/flux/stdlib/kafka" @@ -24,13 +24,17 @@ import ( "github.com/influxdata/influxdb/v2/storage" ) -// ToKind is the kind for the `to` flux function -const ToKind = influxdb.ToKind +const ( + // ToKind is the kind for the `to` flux function + ToKind = influxdb.ToKind -// TODO(jlapacik) remove this once we have execute.DefaultFieldColLabel -const defaultFieldColLabel = "_field" -const DefaultMeasurementColLabel = "_measurement" -const DefaultBufferSize = 1 << 14 + // TODO(jlapacik) remove this once we have execute.DefaultFieldColLabel + defaultFieldColLabel = "_field" + DefaultMeasurementColLabel = "_measurement" + DefaultBufferSize = 1 << 14 + + toOp = "influxdata/influxdb/to" +) // ToOpSpec is the flux.OperationSpec for the `to` flux function. 
type ToOpSpec struct { @@ -47,29 +51,8 @@ type ToOpSpec struct { } func init() { - toSignature := flux.FunctionSignature( - map[string]semantic.PolyType{ - "bucket": semantic.String, - "bucketID": semantic.String, - "org": semantic.String, - "orgID": semantic.String, - "host": semantic.String, - "token": semantic.String, - "timeColumn": semantic.String, - "measurementColumn": semantic.String, - "tagColumns": semantic.Array, - "fieldFn": semantic.NewFunctionPolyType(semantic.FunctionPolySignature{ - Parameters: map[string]semantic.PolyType{ - "r": semantic.Tvar(1), - }, - Required: semantic.LabelSet{"r"}, - Return: semantic.Tvar(2), - }), - }, - []string{}, - ) - - flux.ReplacePackageValue("influxdata/influxdb", "to", flux.FunctionValueWithSideEffect(ToKind, createToOpSpec, toSignature)) + toSignature := runtime.MustLookupBuiltinType("influxdata/influxdb", ToKind) + runtime.ReplacePackageValue("influxdata/influxdb", "to", flux.MustValue(flux.FunctionValueWithSideEffect(ToKind, createToOpSpec, toSignature))) flux.RegisterOpSpec(ToKind, func() flux.OperationSpec { return &ToOpSpec{} }) plan.RegisterProcedureSpecWithSideEffect(ToKind, newToProcedure, ToKind) execute.RegisterTransformation(ToKind, createToTransformation) @@ -257,8 +240,15 @@ func createToTransformation(id execute.DatasetID, mode execute.AccumulationMode, } cache := execute.NewTableBuilderCache(a.Allocator()) d := execute.NewDataset(id, mode, cache) - deps := GetStorageDependencies(a.Context()).ToDeps - t, err := NewToTransformation(a.Context(), d, cache, s, deps) + deps := GetStorageDependencies(a.Context()) + if deps == (StorageDependencies{}) { + return nil, nil, &flux.Error{ + Code: codes.Unimplemented, + Msg: "cannot return storage dependencies; storage dependencies are unimplemented", + } + } + toDeps := deps.ToDeps + t, err := NewToTransformation(a.Context(), d, cache, s, toDeps) if err != nil { return nil, nil, err } @@ -287,13 +277,10 @@ func (t *ToTransformation) RetractTable(id execute.DatasetID, key flux.GroupKey) // NewToTransformation returns a new *ToTransformation with the appropriate fields set. func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.TableBuilderCache, toSpec *ToProcedureSpec, deps ToDependencies) (x *ToTransformation, err error) { var fn *execute.RowMapFn - //var err error spec := toSpec.Spec var bucketID, orgID *platform.ID if spec.FieldFn.Fn != nil { - if fn, err = execute.NewRowMapFn(spec.FieldFn.Fn, compiler.ToScope(spec.FieldFn.Scope)); err != nil { - return nil, err - } + fn = execute.NewRowMapFn(spec.FieldFn.Fn, compiler.ToScope(spec.FieldFn.Scope)) } // Get organization ID if spec.Org != "" { @@ -313,7 +300,11 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T // No org or orgID provided as an arg, use the orgID from the context req := query.RequestFromContext(ctx) if req == nil { - return nil, errors.New("missing request on context") + return nil, &platform.Error{ + Code: platform.EInternal, + Msg: "missing request on context", + Op: toOp, + } } orgID = &req.OrganizationID } @@ -360,23 +351,26 @@ func (t *ToTransformation) Process(id execute.DatasetID, tbl flux.Table) error { if t.implicitTagColumns { // If no tag columns are specified, by default we exclude - // _field and _value from being tag columns. + // _field, _value and _measurement from being tag columns. 
excludeColumns := map[string]bool{ execute.DefaultValueColLabel: true, defaultFieldColLabel: true, + DefaultMeasurementColLabel: true, } // If a field function is specified then we exclude any column that // is referenced in the function expression from being a tag column. if t.spec.Spec.FieldFn.Fn != nil { - recordParam := t.spec.Spec.FieldFn.Fn.Block.Parameters.List[0].Key.Name + recordParam := t.spec.Spec.FieldFn.Fn.Parameters.List[0].Key.Name exprNode := t.spec.Spec.FieldFn.Fn colVisitor := newFieldFunctionVisitor(recordParam, tbl.Cols()) // Walk the field function expression and record which columns // are referenced. None of these columns will be used as tag columns. semantic.Walk(colVisitor, exprNode) - excludeColumns = colVisitor.captured + for k, v := range colVisitor.captured { + excludeColumns[k] = v + } } addTagsFromTable(t.spec.Spec, tbl, excludeColumns) @@ -471,13 +465,25 @@ type ToDependencies struct { // Validate returns an error if any required field is unset. func (d ToDependencies) Validate() error { if d.BucketLookup == nil { - return errors.New("missing bucket lookup dependency") + return &platform.Error{ + Code: platform.EInternal, + Msg: "missing bucket lookup dependency", + Op: toOp, + } } if d.OrganizationLookup == nil { - return errors.New("missing organization lookup dependency") + return &platform.Error{ + Code: platform.EInternal, + Msg: "missing organization lookup dependency", + Op: toOp, + } } if d.PointsWriter == nil { - return errors.New("missing points writer dependency") + return &platform.Error{ + Code: platform.EInternal, + Msg: "missing points writer dependency", + Op: toOp, + } } return nil } @@ -540,8 +546,10 @@ func writeTable(ctx context.Context, t *ToTransformation, tbl flux.Table) (err e } // prepare field function if applicable and record the number of values to write per row + var fn *execute.RowMapPreparedFn if spec.FieldFn.Fn != nil { - if err = t.fn.Prepare(columns); err != nil { + var err error + if fn, err = t.fn.Prepare(columns); err != nil { return err } @@ -580,7 +588,11 @@ func writeTable(ctx context.Context, t *ToTransformation, tbl flux.Table) (err e pointTime = valueTime.Time().Time() case isTag[j]: if col.Type != flux.TString { - return errors.New("invalid type for tag column") + return &platform.Error{ + Code: platform.EInvalid, + Msg: "invalid type for tag column", + Op: toOp, + } } // TODO(docmerlin): instead of doing this sort of thing, it would be nice if we had a way that allocated a lot less. 
kv = append(kv, []byte(col.Label), er.Strings(j).Value(i)) @@ -602,11 +614,11 @@ func writeTable(ctx context.Context, t *ToTransformation, tbl flux.Table) (err e } var fieldValues values.Object - if spec.FieldFn.Fn == nil { + if fn == nil { if fieldValues, err = defaultFieldMapping(er, i); err != nil { return err } - } else if fieldValues, err = t.fn.Eval(t.Ctx, i, er); err != nil { + } else if fieldValues, err = fn.Eval(t.Ctx, i, er); err != nil { return err } @@ -615,7 +627,7 @@ func writeTable(ctx context.Context, t *ToTransformation, tbl flux.Table) (err e fields[k] = nil return } - switch v.Type() { + switch v.Type().Nature() { case semantic.Float: fields[k] = v.Float() case semantic.Int: @@ -680,10 +692,14 @@ func defaultFieldMapping(er flux.ColReader, row int) (values.Object, error) { } value := execute.ValueForRow(er, row, valueColumnIdx) - - fieldValueMapping := values.NewObject() field := execute.ValueForRow(er, row, fieldColumnIdx) + props := []semantic.PropertyType{ + { + Key: []byte(field.Str()), + Value: value.Type(), + }, + } + fieldValueMapping := values.NewObject(semantic.NewObjectType(props)) fieldValueMapping.Set(field.Str(), value) - return fieldValueMapping, nil } diff --git a/query/stdlib/influxdata/influxdb/to_test.go b/query/stdlib/influxdata/influxdb/to_test.go index dc4694f3d5..9af6cbd9bc 100644 --- a/query/stdlib/influxdata/influxdb/to_test.go +++ b/query/stdlib/influxdata/influxdb/to_test.go @@ -2,24 +2,19 @@ package influxdb_test import ( "context" - "fmt" "testing" "github.com/google/go-cmp/cmp" "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" "github.com/influxdata/flux/dependencies/dependenciestest" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/execute/executetest" "github.com/influxdata/flux/interpreter" "github.com/influxdata/flux/querytest" - "github.com/influxdata/flux/semantic" "github.com/influxdata/flux/values/valuestest" - platform "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/models" _ "github.com/influxdata/influxdb/v2/query/builtin" - pquerytest "github.com/influxdata/influxdb/v2/query/querytest" "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" ) @@ -31,9 +26,9 @@ func TestTo_Query(t *testing.T) { Want: &flux.Spec{ Operations: []*flux.Operation{ { - ID: "influxDBFrom0", + ID: "from0", Spec: &influxdb.FromOpSpec{ - Bucket: "mydb", + Bucket: influxdb.NameOrID{Name: "mydb"}, }, }, { @@ -46,35 +41,14 @@ func TestTo_Query(t *testing.T) { TimeColumn: execute.DefaultTimeColLabel, MeasurementColumn: influxdb.DefaultMeasurementColLabel, FieldFn: interpreter.ResolvedFunction{ - Scope: valuestest.NowScope(), - Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{ - { - Key: &semantic.Identifier{Name: "r"}, - }, - }, - }, - Body: &semantic.ObjectExpression{ - Properties: []*semantic.Property{ - { - Key: &semantic.Identifier{Name: "col"}, - Value: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "col", - }, - }, - }, - }, - }, - }, + Scope: valuestest.Scope(), + Fn: executetest.FunctionExpression(t, `(r) => ({col: r.col})`), }, }, }, }, Edges: []flux.Edge{ - {Parent: "influxDBFrom0", Child: "to1"}, + {Parent: "from0", Child: "to1"}, }, }, }, @@ -88,49 +62,6 @@ func TestTo_Query(t *testing.T) { } } -func TestToOpSpec_BucketsAccessed(t *testing.T) { - bucketName := "my_bucket" - bucketIDString := 
"ddddccccbbbbaaaa" - bucketID, err := platform.IDFromString(bucketIDString) - if err != nil { - t.Fatal(err) - } - orgName := "my_org" - orgIDString := "aaaabbbbccccdddd" - orgID, err := platform.IDFromString(orgIDString) - if err != nil { - t.Fatal(err) - } - tests := []pquerytest.BucketsAccessedTestCase{ - { - Name: "from() with bucket and to with org and bucket", - Raw: fmt.Sprintf(`from(bucket:"%s") |> to(bucket:"%s", org:"%s")`, bucketName, bucketName, orgName), - WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, - WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, Org: &orgName}}, - }, - { - Name: "from() with bucket and to with orgID and bucket", - Raw: fmt.Sprintf(`from(bucket:"%s") |> to(bucket:"%s", orgID:"%s")`, bucketName, bucketName, orgIDString), - WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, - WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, OrganizationID: orgID}}, - }, - { - Name: "from() with bucket and to with orgID and bucketID", - Raw: fmt.Sprintf(`from(bucket:"%s") |> to(bucketID:"%s", orgID:"%s")`, bucketName, bucketIDString, orgIDString), - WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, - WantWriteBuckets: &[]platform.BucketFilter{{ID: bucketID, OrganizationID: orgID}}, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - pquerytest.BucketsAccessedTestHelper(t, tc) - }) - } -} - func TestTo_Process(t *testing.T) { type wanted struct { result *mock.PointsWriter @@ -413,29 +344,8 @@ m,tag1=c,tag2=ee _value=4 41`), TimeColumn: "_time", MeasurementColumn: "_measurement", FieldFn: interpreter.ResolvedFunction{ - Scope: valuestest.NowScope(), - Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{ - { - Key: &semantic.Identifier{Name: "r"}, - }, - }, - }, - Body: &semantic.ObjectExpression{ - Properties: []*semantic.Property{ - { - Key: &semantic.Identifier{Name: "temperature"}, - Value: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "temperature", - }, - }, - }, - }, - }, - }, + Scope: valuestest.Scope(), + Fn: executetest.FunctionExpression(t, `(r) => ({temperature: r.temperature})`), }, }, }, @@ -486,74 +396,29 @@ c temperature=4 41`), TimeColumn: "_time", MeasurementColumn: "tag", FieldFn: interpreter.ResolvedFunction{ - Scope: valuestest.NowScope(), - Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{ - { - Key: &semantic.Identifier{Name: "r"}, - }, - }, - }, - Body: &semantic.ObjectExpression{ - Properties: []*semantic.Property{ - { - Key: &semantic.Identifier{Name: "day"}, - Value: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "day", - }, - }, - { - Key: &semantic.Identifier{Name: "temperature"}, - Value: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "temperature", - }, - }, - { - Key: &semantic.Identifier{Name: "humidity"}, - Value: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "humidity", - }, - }, - { - Key: &semantic.Identifier{Name: "ratio"}, - Value: &semantic.BinaryExpression{ - Operator: ast.DivisionOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "temperature", - }, - Right: 
&semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "humidity", - }, - }, - }, - }, - }, - }, - }, + Scope: valuestest.Scope(), + Fn: executetest.FunctionExpression(t, `(r) => ({day: r.day, temperature: r.temperature, humidity: r.humidity, ratio: r.temperature / r.humidity})`), }, }, }, data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ ColMeta: []flux.ColMeta{ + {Label: "_measurement", Type: flux.TString}, + {Label: "_field", Type: flux.TString}, {Label: "_time", Type: flux.TTime}, {Label: "day", Type: flux.TString}, {Label: "tag", Type: flux.TString}, {Label: "temperature", Type: flux.TFloat}, {Label: "humidity", Type: flux.TFloat}, + {Label: "_value", Type: flux.TString}, }, + KeyCols: []string{"_measurement", "_field"}, Data: [][]interface{}{ - {execute.Time(11), "Monday", "a", 2.0, 1.0}, - {execute.Time(21), "Tuesday", "a", 2.0, 2.0}, - {execute.Time(21), "Wednesday", "b", 1.0, 4.0}, - {execute.Time(31), "Thursday", "a", 3.0, 3.0}, - {execute.Time(41), "Friday", "c", 4.0, 5.0}, + {"m", "f", execute.Time(11), "Monday", "a", 2.0, 1.0, "bogus"}, + {"m", "f", execute.Time(21), "Tuesday", "a", 2.0, 2.0, "bogus"}, + {"m", "f", execute.Time(21), "Wednesday", "b", 1.0, 4.0, "bogus"}, + {"m", "f", execute.Time(31), "Thursday", "a", 3.0, 3.0, "bogus"}, + {"m", "f", execute.Time(41), "Friday", "c", 4.0, 5.0, "bogus"}, }, })}, want: wanted{ @@ -566,18 +431,22 @@ c day="Friday",humidity=5,ratio=0.8,temperature=4 41`), }, tables: []*executetest.Table{{ ColMeta: []flux.ColMeta{ + {Label: "_measurement", Type: flux.TString}, + {Label: "_field", Type: flux.TString}, {Label: "_time", Type: flux.TTime}, {Label: "day", Type: flux.TString}, {Label: "tag", Type: flux.TString}, {Label: "temperature", Type: flux.TFloat}, {Label: "humidity", Type: flux.TFloat}, + {Label: "_value", Type: flux.TString}, }, + KeyCols: []string{"_measurement", "_field"}, Data: [][]interface{}{ - {execute.Time(11), "Monday", "a", 2.0, 1.0}, - {execute.Time(21), "Tuesday", "a", 2.0, 2.0}, - {execute.Time(21), "Wednesday", "b", 1.0, 4.0}, - {execute.Time(31), "Thursday", "a", 3.0, 3.0}, - {execute.Time(41), "Friday", "c", 4.0, 5.0}, + {"m", "f", execute.Time(11), "Monday", "a", 2.0, 1.0, "bogus"}, + {"m", "f", execute.Time(21), "Tuesday", "a", 2.0, 2.0, "bogus"}, + {"m", "f", execute.Time(21), "Wednesday", "b", 1.0, 4.0, "bogus"}, + {"m", "f", execute.Time(31), "Thursday", "a", 3.0, 3.0, "bogus"}, + {"m", "f", execute.Time(41), "Friday", "c", 4.0, 5.0, "bogus"}, }, }}, }, @@ -592,36 +461,8 @@ c day="Friday",humidity=5,ratio=0.8,temperature=4 41`), MeasurementColumn: "tag1", TagColumns: []string{"tag2"}, FieldFn: interpreter.ResolvedFunction{ - Scope: valuestest.NowScope(), - Fn: &semantic.FunctionExpression{ - Block: &semantic.FunctionBlock{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{ - { - Key: &semantic.Identifier{Name: "r"}, - }, - }, - }, - Body: &semantic.ObjectExpression{ - Properties: []*semantic.Property{ - { - Key: &semantic.Identifier{Name: "temperature"}, - Value: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "temperature", - }, - }, - { - Key: &semantic.Identifier{Name: "humidity"}, - Value: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{Name: "r"}, - Property: "humidity", - }, - }, - }, - }, - }, - }, + Scope: valuestest.Scope(), + Fn: executetest.FunctionExpression(t, `(r) => ({temperature: r.temperature, humidity: r.humidity})`), }, }, }, diff --git 
a/query/stdlib/influxdata/influxdb/v1/databases.go b/query/stdlib/influxdata/influxdb/v1/databases.go index a8c50b9da9..230cfaa2bd 100644 --- a/query/stdlib/influxdata/influxdb/v1/databases.go +++ b/query/stdlib/influxdata/influxdb/v1/databases.go @@ -6,6 +6,7 @@ import ( "time" "github.com/influxdata/flux" + "github.com/influxdata/flux/codes" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/memory" "github.com/influxdata/flux/plan" @@ -16,54 +17,24 @@ import ( "github.com/pkg/errors" ) -const DatabasesKind = v1.DatabasesKind - -type DatabasesOpSpec struct { -} - -func init() { - flux.ReplacePackageValue("influxdata/influxdb/v1", DatabasesKind, flux.FunctionValue(DatabasesKind, createDatabasesOpSpec, v1.DatabasesSignature)) - flux.RegisterOpSpec(DatabasesKind, newDatabasesOp) - plan.RegisterProcedureSpec(DatabasesKind, newDatabasesProcedure, DatabasesKind) -} - -func createDatabasesOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) { - spec := new(DatabasesOpSpec) - return spec, nil -} - -func newDatabasesOp() flux.OperationSpec { - return new(DatabasesOpSpec) -} - -func (s *DatabasesOpSpec) Kind() flux.OperationKind { - return DatabasesKind -} - -type DatabasesProcedureSpec struct { - plan.DefaultCost -} - -func newDatabasesProcedure(qs flux.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { - _, ok := qs.(*DatabasesOpSpec) - if !ok { - return nil, fmt.Errorf("invalid spec type %T", qs) - } - - return &DatabasesProcedureSpec{}, nil -} - -func (s *DatabasesProcedureSpec) Kind() plan.ProcedureKind { - return DatabasesKind -} - -func (s *DatabasesProcedureSpec) Copy() plan.ProcedureSpec { - ns := new(DatabasesProcedureSpec) - return ns -} +const DatabasesKind = "influxdata/influxdb/v1.localDatabases" func init() { execute.RegisterSource(DatabasesKind, createDatabasesSource) + plan.RegisterPhysicalRules(LocalDatabasesRule{}) +} + +type LocalDatabasesProcedureSpec struct { + plan.DefaultCost +} + +func (s *LocalDatabasesProcedureSpec) Kind() plan.ProcedureKind { + return DatabasesKind +} + +func (s *LocalDatabasesProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(LocalDatabasesProcedureSpec) + return ns } type DatabasesDecoder struct { @@ -177,7 +148,7 @@ func (bd *DatabasesDecoder) Close() error { } func createDatabasesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) { - _, ok := prSpec.(*DatabasesProcedureSpec) + _, ok := prSpec.(*LocalDatabasesProcedureSpec) if !ok { return nil, fmt.Errorf("invalid spec type %T", prSpec) } @@ -219,3 +190,27 @@ func (d DatabasesDependencies) Validate() error { } return nil } + +type LocalDatabasesRule struct{} + +func (rule LocalDatabasesRule) Name() string { + return "influxdata/influxdb.LocalDatabasesRule" +} + +func (rule LocalDatabasesRule) Pattern() plan.Pattern { + return plan.Pat(v1.DatabasesKind) +} + +func (rule LocalDatabasesRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { + fromSpec := node.ProcedureSpec().(*v1.DatabasesProcedureSpec) + if fromSpec.Host != nil { + return node, false, nil + } else if fromSpec.Org != nil { + return node, false, &flux.Error{ + Code: codes.Unimplemented, + Msg: "buckets cannot list from a separate organization; please specify a host or remove the organization", + } + } + + return plan.CreateLogicalNode("localDatabases", &LocalDatabasesProcedureSpec{}), true, nil +} diff --git a/query/stdlib/testing/end_to_end_test.go b/query/stdlib/testing/end_to_end_test.go index 
f8197f1501..71a96c4a76 100644 --- a/query/stdlib/testing/end_to_end_test.go +++ b/query/stdlib/testing/end_to_end_test.go @@ -1,25 +1,25 @@ -//lint:file-ignore U1000 ignore these flagger-related dead code issues until we can circle back package testing_test import ( "bufio" "bytes" "context" + "encoding/json" "strings" "testing" - "github.com/influxdata/flux" "github.com/influxdata/flux/ast" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/lang" "github.com/influxdata/flux/parser" + "github.com/influxdata/flux/runtime" "github.com/influxdata/flux/stdlib" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/kit/feature/override" platform "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" influxdbcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/kit/feature" + "github.com/influxdata/influxdb/v2/kit/feature/override" "github.com/influxdata/influxdb/v2/mock" "github.com/influxdata/influxdb/v2/query" _ "github.com/influxdata/influxdb/v2/query/stdlib" @@ -77,11 +77,10 @@ func (f Flagger) Flags(ctx context.Context, _f ...feature.Flag) (map[string]inte var ctx = influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(true, nil)) func init() { - flux.FinalizeBuiltIns() + runtime.FinalizeBuiltIns() } func TestFluxEndToEnd(t *testing.T) { - t.Skip("Skipping per https://github.com/influxdata/influxdb/issues/19299") runEndToEnd(t, stdlib.FluxTestPackages) } func BenchmarkFluxEndToEnd(b *testing.B) { @@ -110,6 +109,8 @@ func runEndToEnd(t *testing.T, pkgs []*ast.Package) { if reason, ok := itesting.FluxEndToEndSkipList[pkg.Path][name]; ok { t.Skip(reason) } + + flagger.SetActiveTestCase(pkg.Path, name) testFlux(t, l, file) }) } @@ -153,12 +154,15 @@ func makeTestPackage(file *ast.File) *ast.Package { var optionsSource = ` import "testing" import c "csv" +import "experimental" // Options bucket and org are defined dynamically per test option testing.loadStorage = (csv) => { - c.from(csv: csv) |> to(bucket: bucket, org: org) - return from(bucket: bucket) + return experimental.chain( + first: c.from(csv: csv) |> to(bucket: bucket, org: org), + second: from(bucket:bucket) + ) } ` var optionsAST *ast.File @@ -173,8 +177,6 @@ func init() { func testFlux(t testing.TB, l *launcher.TestLauncher, file *ast.File) { - // Query server to ensure write persists. - b := &platform.Bucket{ OrgID: l.Org.ID, Name: t.Name(), @@ -206,70 +208,32 @@ func testFlux(t testing.TB, l *launcher.TestLauncher, file *ast.File) { pkg := makeTestPackage(file) pkg.Files = append(pkg.Files, options) - // Add testing.inspect call to ensure the data is loaded + // Use testing.inspect call to get all of diff, want, and got inspectCalls := stdlib.TestingInspectCalls(pkg) pkg.Files = append(pkg.Files, inspectCalls) - req := &query.Request{ - OrganizationID: l.Org.ID, - Compiler: lang.ASTCompiler{AST: pkg}, - } - if r, err := l.FluxQueryService().Query(ctx, req); err != nil { - t.Fatal(err) - } else { - for r.More() { - v := r.Next() - if err := v.Tables().Do(func(tbl flux.Table) error { - return tbl.Do(func(reader flux.ColReader) error { - return nil - }) - }); err != nil { - t.Error(err) - } - } - } - - // quirk: our execution engine doesn't guarantee the order of execution for disconnected DAGS - // so that our function-with-side effects call to `to` may run _after_ the test instead of before. - // running twice makes sure that `to` happens at least once before we run the test. 
- // this time we use a call to `run` so that the assertion error is triggered - runCalls := stdlib.TestingRunCalls(pkg) - pkg.Files[len(pkg.Files)-1] = runCalls - r, err := l.FluxQueryService().Query(ctx, req) + bs, err := json.Marshal(pkg) if err != nil { t.Fatal(err) } - for r.More() { - v := r.Next() - if err := v.Tables().Do(func(tbl flux.Table) error { - return tbl.Do(func(reader flux.ColReader) error { - return nil - }) - }); err != nil { - t.Error(err) - } + req := &query.Request{ + OrganizationID: l.Org.ID, + Compiler: lang.ASTCompiler{AST: bs}, } - if err := r.Err(); err != nil { - t.Error(err) - // Replace the testing.run calls with testing.inspect calls. - pkg.Files[len(pkg.Files)-1] = inspectCalls - r, err := l.FluxQueryService().Query(ctx, req) - if err != nil { - t.Fatal(err) - } - var out bytes.Buffer - defer func() { - if t.Failed() { - scanner := bufio.NewScanner(&out) - for scanner.Scan() { - t.Log(scanner.Text()) - } - } - }() + + if r, err := l.FluxQueryService().Query(ctx, req); err != nil { + t.Fatal(err) + } else { + results := make(map[string]*bytes.Buffer) + for r.More() { v := r.Next() - err := execute.FormatResult(&out, v) + + if _, ok := results[v.Name()]; !ok { + results[v.Name()] = &bytes.Buffer{} + } + err := execute.FormatResult(results[v.Name()], v) if err != nil { t.Error(err) } @@ -277,5 +241,22 @@ func testFlux(t testing.TB, l *launcher.TestLauncher, file *ast.File) { if err := r.Err(); err != nil { t.Error(err) } + + logFormatted := func(name string, results map[string]*bytes.Buffer) { + if _, ok := results[name]; ok { + scanner := bufio.NewScanner(results[name]) + for scanner.Scan() { + t.Log(scanner.Text()) + } + } else { + t.Log("table ", name, " not present in results") + } + } + if _, ok := results["diff"]; ok { + t.Error("diff table was not empty") + logFormatted("diff", results) + logFormatted("want", results) + logFormatted("got", results) + } } } diff --git a/query/stdlib/testing/testing.go b/query/stdlib/testing/testing.go index 5e8585c06b..9b9f192e8b 100644 --- a/query/stdlib/testing/testing.go +++ b/query/stdlib/testing/testing.go @@ -51,6 +51,7 @@ var FluxEndToEndSkipList = map[string]map[string]string{ "integral_columns": "unbounded test", "map": "unbounded test", "join_missing_on_col": "unbounded test", + "join_use_previous": "unbounded test (https://github.com/influxdata/flux/issues/2996)", "rowfn_with_import": "unbounded test", // the following tests have a difference between the CSV-decoded input table, and the storage-retrieved version of that table @@ -90,6 +91,11 @@ var FluxEndToEndSkipList = map[string]map[string]string{ "to_uint": "dateTime conversion issue: https://github.com/influxdata/influxdb/issues/14575", "holt_winters_panic": "Expected output is an empty table which breaks the testing framework (https://github.com/influxdata/influxdb/issues/14749)", + "map_nulls": "to cannot write null values", + + "range_stop": "pushed down range stop no longer exclusive https://github.com/influxdata/influxdb/issues/19564", + + "to_time": "Flaky test https://github.com/influxdata/influxdb/issues/19577", }, "experimental": { "set": "Reason TBD", @@ -137,7 +143,9 @@ var FluxEndToEndSkipList = map[string]map[string]string{ "join": "unbounded test", }, "testing/chronograf": { - "buckets": "unbounded test", + "buckets": "unbounded test", + "aggregate_window_count": "flakey test: https://github.com/influxdata/influxdb/issues/18463", + "aggregate_window_median": "failing with \"field type conflict\": 
https://github.com/influxdata/influxdb/issues/19565", }, "testing/kapacitor": { "fill_default": "unknown field type for f1", @@ -147,14 +155,32 @@ var FluxEndToEndSkipList = map[string]map[string]string{ "partition_strings_splitN": "pandas. map does not correctly handled returned arrays (https://github.com/influxdata/flux/issues/1387)", }, "testing/promql": { - "emptyTable": "tests a source", - "year": "flakey test: https://github.com/influxdata/influxdb/issues/15667", + "emptyTable": "tests a source", + "year": "flakey test: https://github.com/influxdata/influxdb/issues/15667", + "extrapolatedRate_counter_rate": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", + "extrapolatedRate_nocounter": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", + "extrapolatedRate_norate": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", + "linearRegression_nopredict": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", + "linearRegression_predict": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", }, "testing/influxql": { "cumulative_sum": "invalid test data requires loadStorage to be overridden. See https://github.com/influxdata/flux/issues/3145", + "elapsed": "failing since split with Flux upgrade: https://github.com/influxdata/influxdb/issues/19568", }, } type PerTestFeatureFlagMap = map[string]map[string]map[string]string -var FluxEndToEndFeatureFlags = PerTestFeatureFlagMap{} +var FluxEndToEndFeatureFlags = PerTestFeatureFlagMap{ + "planner": { + "bare_mean_push": { + "pushDownWindowAggregateMean": "true", + }, + "window_mean_push": { + "pushDownWindowAggregateMean": "true", + }, + "merge_filters": { + "mergeFilterRule": "true", + }, + }, +} diff --git a/storage/flux/reader.go b/storage/flux/reader.go index 76e3ce4548..ec273bb24a 100644 --- a/storage/flux/reader.go +++ b/storage/flux/reader.go @@ -11,7 +11,7 @@ import ( "github.com/influxdata/flux/memory" "github.com/influxdata/flux/values" "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" + "github.com/influxdata/influxdb/v2/query" storage "github.com/influxdata/influxdb/v2/storage/reads" "github.com/influxdata/influxdb/v2/storage/reads/datatypes" "github.com/influxdata/influxdb/v2/tsdb/cursors" @@ -55,11 +55,11 @@ type storeReader struct { } // NewReader returns a new storageflux reader -func NewReader(s storage.Store) influxdb.Reader { +func NewReader(s storage.Store) query.StorageReader { return &storeReader{s: s} } -func (r *storeReader) ReadFilter(ctx context.Context, spec influxdb.ReadFilterSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) { +func (r *storeReader) ReadFilter(ctx context.Context, spec query.ReadFilterSpec, alloc *memory.Allocator) (query.TableIterator, error) { return &filterIterator{ ctx: ctx, s: r.s, @@ -69,7 +69,7 @@ func (r *storeReader) ReadFilter(ctx context.Context, spec influxdb.ReadFilterSp }, nil } -func (r *storeReader) ReadGroup(ctx context.Context, spec influxdb.ReadGroupSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) { +func (r *storeReader) ReadGroup(ctx context.Context, spec query.ReadGroupSpec, alloc *memory.Allocator) (query.TableIterator, error) { return &groupIterator{ ctx: ctx, s: r.s, @@ -79,42 +79,24 @@ func (r *storeReader) ReadGroup(ctx context.Context, spec influxdb.ReadGroupSpec }, nil } -func (r 
*storeReader) ReadTagKeys(ctx context.Context, spec influxdb.ReadTagKeysSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) { - var predicate *datatypes.Predicate - if spec.Predicate != nil { - p, err := toStoragePredicate(spec.Predicate) - if err != nil { - return nil, err - } - predicate = p - } - +func (r *storeReader) ReadTagKeys(ctx context.Context, spec query.ReadTagKeysSpec, alloc *memory.Allocator) (query.TableIterator, error) { return &tagKeysIterator{ ctx: ctx, bounds: spec.Bounds, s: r.s, readSpec: spec, - predicate: predicate, + predicate: spec.Predicate, alloc: alloc, }, nil } -func (r *storeReader) ReadTagValues(ctx context.Context, spec influxdb.ReadTagValuesSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) { - var predicate *datatypes.Predicate - if spec.Predicate != nil { - p, err := toStoragePredicate(spec.Predicate) - if err != nil { - return nil, err - } - predicate = p - } - +func (r *storeReader) ReadTagValues(ctx context.Context, spec query.ReadTagValuesSpec, alloc *memory.Allocator) (query.TableIterator, error) { return &tagValuesIterator{ ctx: ctx, bounds: spec.Bounds, s: r.s, readSpec: spec, - predicate: predicate, + predicate: spec.Predicate, alloc: alloc, }, nil } @@ -124,7 +106,7 @@ func (r *storeReader) Close() {} type filterIterator struct { ctx context.Context s storage.Store - spec influxdb.ReadFilterSpec + spec query.ReadFilterSpec stats cursors.CursorStats cache *tagsCache alloc *memory.Allocator @@ -144,18 +126,9 @@ func (fi *filterIterator) Do(f func(flux.Table) error) error { return err } - var predicate *datatypes.Predicate - if fi.spec.Predicate != nil { - p, err := toStoragePredicate(fi.spec.Predicate) - if err != nil { - return err - } - predicate = p - } - var req datatypes.ReadFilterRequest req.ReadSource = any - req.Predicate = predicate + req.Predicate = fi.spec.Predicate req.Range.Start = int64(fi.spec.Bounds.Start) req.Range.End = int64(fi.spec.Bounds.Stop) @@ -248,7 +221,7 @@ READ: type groupIterator struct { ctx context.Context s storage.Store - spec influxdb.ReadGroupSpec + spec query.ReadGroupSpec stats cursors.CursorStats cache *tagsCache alloc *memory.Allocator @@ -268,18 +241,9 @@ func (gi *groupIterator) Do(f func(flux.Table) error) error { return err } - var predicate *datatypes.Predicate - if gi.spec.Predicate != nil { - p, err := toStoragePredicate(gi.spec.Predicate) - if err != nil { - return err - } - predicate = p - } - var req datatypes.ReadGroupRequest req.ReadSource = any - req.Predicate = predicate + req.Predicate = gi.spec.Predicate req.Range.Start = int64(gi.spec.Bounds.Start) req.Range.End = int64(gi.spec.Bounds.Stop) @@ -402,11 +366,11 @@ func determineAggregateMethod(agg string) (datatypes.Aggregate_AggregateType, er return 0, fmt.Errorf("unknown aggregate type %q", agg) } -func convertGroupMode(m influxdb.GroupMode) datatypes.ReadGroupRequest_Group { +func convertGroupMode(m query.GroupMode) datatypes.ReadGroupRequest_Group { switch m { - case influxdb.GroupModeNone: + case query.GroupModeNone: return datatypes.GroupNone - case influxdb.GroupModeBy: + case query.GroupModeBy: return datatypes.GroupBy } panic(fmt.Sprint("invalid group mode: ", m)) @@ -501,7 +465,7 @@ func determineTableColsForGroup(tagKeys [][]byte, typ flux.ColType) ([]flux.ColM return cols, defs } -func groupKeyForGroup(kv [][]byte, spec *influxdb.ReadGroupSpec, bnds execute.Bounds) flux.GroupKey { +func groupKeyForGroup(kv [][]byte, spec *query.ReadGroupSpec, bnds execute.Bounds) flux.GroupKey { cols := make([]flux.ColMeta, 2, 
len(spec.GroupKeys)+2) vs := make([]values.Value, 2, len(spec.GroupKeys)+2) cols[startColIdx] = flux.ColMeta{ @@ -531,7 +495,7 @@ type tagKeysIterator struct { ctx context.Context bounds execute.Bounds s storage.Store - readSpec influxdb.ReadTagKeysSpec + readSpec query.ReadTagKeysSpec predicate *datatypes.Predicate alloc *memory.Allocator } @@ -614,7 +578,7 @@ type tagValuesIterator struct { ctx context.Context bounds execute.Bounds s storage.Store - readSpec influxdb.ReadTagValuesSpec + readSpec query.ReadTagValuesSpec predicate *datatypes.Predicate alloc *memory.Allocator } diff --git a/task.go b/task.go index 111ea43df9..079ac3b40b 100644 --- a/task.go +++ b/task.go @@ -9,8 +9,8 @@ import ( "time" "github.com/influxdata/flux/ast" + "github.com/influxdata/flux/ast/edit" "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/pkg/flux/ast/edit" "github.com/influxdata/influxdb/v2/task/options" ) diff --git a/task/backend/executor/executor.go b/task/backend/executor/executor.go index 993e14f7c9..806c6b1dff 100644 --- a/task/backend/executor/executor.go +++ b/task/backend/executor/executor.go @@ -2,12 +2,15 @@ package executor import ( "context" + "encoding/json" "fmt" "sync" "time" "github.com/influxdata/flux" + "github.com/influxdata/flux/ast" "github.com/influxdata/flux/lang" + "github.com/influxdata/flux/runtime" "github.com/influxdata/influxdb/v2" icontext "github.com/influxdata/influxdb/v2/context" "github.com/influxdata/influxdb/v2/kit/feature" @@ -21,6 +24,8 @@ import ( const ( maxPromises = 1000 defaultMaxWorkers = 100 + + lastSuccessOption = "tasks.lastSuccessTime" ) var _ scheduler.Executor = (*Executor)(nil) @@ -69,7 +74,31 @@ func WithMaxWorkers(n int) executorOption { // CompilerBuilderFunc is a function that yields a new flux.Compiler. The // context.Context provided can be assumed to be an authorized context. -type CompilerBuilderFunc func(ctx context.Context, query string, now time.Time) (flux.Compiler, error) +type CompilerBuilderFunc func(ctx context.Context, query string, ts CompilerBuilderTimestamps) (flux.Compiler, error) + +// CompilerBuilderTimestamps contains timestamps which should be provided along +// with a Task query. +type CompilerBuilderTimestamps struct { + Now time.Time + LatestSuccess time.Time +} + +func (ts CompilerBuilderTimestamps) Extern() *ast.File { + var body []ast.Statement + + if !ts.LatestSuccess.IsZero() { + body = append(body, &ast.OptionStatement{ + Assignment: &ast.VariableAssignment{ + ID: &ast.Identifier{Name: lastSuccessOption}, + Init: &ast.DateTimeLiteral{ + Value: ts.LatestSuccess, + }, + }, + }) + } + + return &ast.File{Body: body} +} // WithSystemCompilerBuilder is an Executor option that configures a // CompilerBuilderFunc to be used when compiling queries for System Tasks. 
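To make the lastSuccessTime change above concrete, here is a minimal, illustrative sketch; it is not part of the patch. It assumes only the exported names introduced in this patch (CompilerBuilderTimestamps and its Extern method) plus the standard library, with the import path inferred from this repository's layout:

	package main

	import (
		"encoding/json"
		"fmt"
		"time"

		"github.com/influxdata/influxdb/v2/task/backend/executor"
	)

	func main() {
		now := time.Date(2020, 9, 17, 0, 0, 0, 0, time.UTC)
		ts := executor.CompilerBuilderTimestamps{
			Now:           now,
			LatestSuccess: now.Add(-time.Hour),
		}
		// Extern yields an *ast.File equivalent to the Flux source:
		//   option tasks.lastSuccessTime = 2020-09-16T23:00:00Z
		// which the executor marshals to JSON and attaches to the compiler.
		extern := ts.Extern()
		b, err := json.Marshal(extern)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b))
	}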
@@ -415,8 +444,6 @@ func (w *worker) start(p *promise) {
 }
 
 func (w *worker) finish(p *promise, rs influxdb.RunStatus, err error) {
-
-	// trace
 	span, ctx := tracing.StartSpanFromContext(p.ctx)
 	defer span.Finish()
 
@@ -470,7 +497,10 @@ func (w *worker) executeQuery(p *promise) {
 	if p.task.Type != influxdb.TaskSystemType {
 		buildCompiler = w.nonSystemBuildCompiler
 	}
-	compiler, err := buildCompiler(ctx, p.task.Flux, p.run.ScheduledFor)
+	compiler, err := buildCompiler(ctx, p.task.Flux, CompilerBuilderTimestamps{
+		Now:           p.run.ScheduledFor,
+		LatestSuccess: p.task.LatestSuccess,
+	})
 	if err != nil {
 		w.finish(p, influxdb.RunFail, influxdb.ErrFluxParseError(err))
 		return
@@ -591,21 +621,45 @@ func exhaustResultIterators(res flux.Result) error {
 }
 
 // NewASTCompiler parses a Flux query string into an AST representation.
-func NewASTCompiler(_ context.Context, query string, now time.Time) (flux.Compiler, error) {
-	pkg, err := flux.Parse(query)
+func NewASTCompiler(ctx context.Context, query string, ts CompilerBuilderTimestamps) (flux.Compiler, error) {
+	pkg, err := runtime.ParseToJSON(query)
 	if err != nil {
 		return nil, err
 	}
+	var externBytes []byte
+	if feature.InjectLatestSuccessTime().Enabled(ctx) {
+		extern := ts.Extern()
+		if len(extern.Body) > 0 {
+			var err error
+			externBytes, err = json.Marshal(extern)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
 	return lang.ASTCompiler{
-		AST: pkg,
-		Now: now,
+		AST:    pkg,
+		Now:    ts.Now,
+		Extern: externBytes,
 	}, nil
 }
 
 // NewFluxCompiler wraps a Flux query string in a raw-query representation.
-func NewFluxCompiler(_ context.Context, query string, _ time.Time) (flux.Compiler, error) {
+func NewFluxCompiler(ctx context.Context, query string, ts CompilerBuilderTimestamps) (flux.Compiler, error) {
+	var externBytes []byte
+	if feature.InjectLatestSuccessTime().Enabled(ctx) {
+		extern := ts.Extern()
+		if len(extern.Body) > 0 {
+			var err error
+			externBytes, err = json.Marshal(extern)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
 	return lang.FluxCompiler{
-		Query: query,
+		Query:  query,
+		Extern: externBytes,
 		// TODO(brett): This mitigates an immediate problem where
 		// Checks/Notifications breaks when sending Now, and system Tasks do not
 		// break when sending Now. We are currently sending C+N through using
@@ -616,7 +670,13 @@ func NewFluxCompiler(_ context.Context, query string, _ time.Time) (flux.Compile
 		// we are able to locate the root cause and use Flux Compiler for all
 		// Task types.
 		//
-		// This should be removed once we diagnose the problem.
+		// It turns out this is due to the exclusive nature of the stop time in
+		// Flux "from" and that we weren't including the left-hand boundary of
+		// the range check for notifications. We're shipping a fix soon in
+		//
+		// https://github.com/influxdata/influxdb/pull/19392
+		//
+		// Once this has merged, we can send Now again.
// // Now: now, }, nil diff --git a/task/backend/executor/support_test.go b/task/backend/executor/support_test.go index ddc98b16db..8a552958d0 100644 --- a/task/backend/executor/support_test.go +++ b/task/backend/executor/support_test.go @@ -12,6 +12,7 @@ import ( "github.com/influxdata/flux/execute" "github.com/influxdata/flux/lang" "github.com/influxdata/flux/memory" + "github.com/influxdata/flux/runtime" "github.com/influxdata/flux/values" "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/kv" @@ -31,7 +32,7 @@ type fakeQueryService struct { var _ query.AsyncQueryService = (*fakeQueryService)(nil) func makeAST(q string) lang.ASTCompiler { - pkg, err := flux.Parse(q) + pkg, err := runtime.ParseToJSON(q) if err != nil { panic(err) } @@ -170,10 +171,11 @@ type fakeQuery struct { var _ flux.Query = (*fakeQuery)(nil) -func (q *fakeQuery) Done() {} -func (q *fakeQuery) Cancel() { close(q.results) } -func (q *fakeQuery) Statistics() flux.Statistics { return flux.Statistics{} } -func (q *fakeQuery) Results() <-chan flux.Result { return q.results } +func (q *fakeQuery) Done() {} +func (q *fakeQuery) Cancel() { close(q.results) } +func (q *fakeQuery) Statistics() flux.Statistics { return flux.Statistics{} } +func (q *fakeQuery) Results() <-chan flux.Result { return q.results } +func (q *fakeQuery) ProfilerResults() (flux.ResultIterator, error) { return nil, nil } func (q *fakeQuery) Err() error { if q.ctxErr != nil { diff --git a/task/options/options.go b/task/options/options.go index 04e6f9a206..11b9e4e897 100644 --- a/task/options/options.go +++ b/task/options/options.go @@ -11,11 +11,10 @@ import ( "github.com/influxdata/cron" "github.com/influxdata/flux" "github.com/influxdata/flux/ast" + "github.com/influxdata/flux/ast/edit" "github.com/influxdata/flux/interpreter" "github.com/influxdata/flux/semantic" "github.com/influxdata/flux/values" - ast2 "github.com/influxdata/influxdb/v2/pkg/flux/ast" - "github.com/influxdata/influxdb/v2/pkg/flux/ast/edit" "github.com/influxdata/influxdb/v2/pkg/pointer" ) @@ -293,7 +292,7 @@ func extractNameOption(opts *Options, objExpr *ast.ObjectExpression) error { if !ok { return errParseTaskOptionField(optName) } - opts.Name = ast2.StringFromLiteral(nameStr) + opts.Name = ast.StringFromLiteral(nameStr) return nil } @@ -313,7 +312,7 @@ func extractScheduleOptions(opts *Options, objExpr *ast.ObjectExpression) error if !ok { return errParseTaskOptionField(optCron) } - opts.Cron = ast2.StringFromLiteral(cronExprStr) + opts.Cron = ast.StringFromLiteral(cronExprStr) } if everyErr == nil { @@ -359,7 +358,7 @@ func extractConcurrencyOption(opts *Options, objExpr *ast.ObjectExpression) erro if !ok { return errParseTaskOptionField(optConcurrency) } - val := ast2.IntegerFromLiteral(concurInt) + val := ast.IntegerFromLiteral(concurInt) opts.Concurrency = &val return nil @@ -375,7 +374,7 @@ func extractRetryOption(opts *Options, objExpr *ast.ObjectExpression) error { if !ok { return errParseTaskOptionField(optRetry) } - val := ast2.IntegerFromLiteral(retryInt) + val := ast.IntegerFromLiteral(retryInt) opts.Retry = &val return nil diff --git a/ui/cypress/e2e/tasks.test.ts b/ui/cypress/e2e/tasks.test.ts index 443b58e6cc..e40384d4ef 100644 --- a/ui/cypress/e2e/tasks.test.ts +++ b/ui/cypress/e2e/tasks.test.ts @@ -39,7 +39,7 @@ from(bucket: "${name}"{rightarrow} cy.getByTestID('notification-error').should( 'contain', - 'error calling function "to": missing required keyword argument "bucketID"' + 'error calling function "to" @12:8-12:26: missing required keyword 
argument "bucketID"'
    )
  })

From 025319c3875a73ede0345687e256ebc3249318ab Mon Sep 17 00:00:00 2001
From: "Jonathan A. Sternberg"
Date: Thu, 17 Sep 2020 14:28:24 -0500
Subject: [PATCH 29/34] fix(services/storage): multi measurement queries return all applicable series (#19566)

This fixes multi-measurement queries that go through the storage
service so they correctly pick up all series that match the filter.

Previously, negative queries such as `!=` and `!~`, and predicates
attempting to match empty tags, did not work correctly with the storage
service when multiple measurements or `OR` conditions were included.
This was because such predicates were categorized as "multiple
measurements", and the storage service would then attempt to use the
field keys iterator to find the fields for each measurement. The meta
queries behind that iterator did not correctly account for negative
equality operators or empty tags when finding the appropriate
measurements, and they could not be changed because doing so would also
be a breaking change to influxql.

This modifies the storage service to use new methods that correctly
account for the above situations rather than the field keys iterator.

Note that some queries that appear to be single-measurement queries are
also treated as multi-measurement queries: any query with an `OR`
condition is considered a multiple-measurement query.

This bug did not apply to single-measurement queries where one
measurement was selected and all of the logical operators were `AND`s,
because those used a different code path that handled these situations
correctly.
---
 tsdb/index.go                        | 327 ++++++++++++++++++++++++++-
 tsdb/index_test.go                   |  90 ++++++++
 tsdb/shard.go                        |  44 +++-
 v1/services/storage/series_cursor.go |  85 ++-----
 4 files changed, 468 insertions(+), 78 deletions(-)

diff --git a/tsdb/index.go b/tsdb/index.go
index be020b2ca3..9a64fe6659 100644
--- a/tsdb/index.go
+++ b/tsdb/index.go
@@ -1326,7 +1326,7 @@ func (is IndexSet) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.E
 
 		// Determine if there exists at least one authorised series for the
 		// measurement name.
-		if is.measurementAuthorizedSeries(auth, e) {
+		if is.measurementAuthorizedSeries(auth, e, nil) {
 			names = append(names, e)
 		}
 	}
@@ -1430,7 +1430,7 @@ func (is IndexSet) measurementNamesByNameFilter(auth query.Authorizer, op influx
 			matched = !regex.Match(e)
 		}
 
-		if matched && is.measurementAuthorizedSeries(auth, e) {
+		if matched && is.measurementAuthorizedSeries(auth, e, nil) {
 			names = append(names, e)
 		}
 	}
@@ -1438,6 +1438,116 @@ func (is IndexSet) measurementNamesByNameFilter(auth query.Authorizer, op influx
 	return names, nil
 }
 
+// MeasurementNamesByPredicate returns a slice of measurement names matching the
+// provided condition. If no condition is provided then all names are returned.
+// This behaves differently from MeasurementNamesByExpr because it will
+// return measurements using flux predicates.
+func (is IndexSet) MeasurementNamesByPredicate(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
+	release := is.SeriesFile.Retain()
+	defer release()
+
+	// Return filtered list if expression exists.
+	if expr != nil {
+		names, err := is.measurementNamesByPredicate(auth, expr)
+		if err != nil {
+			return nil, err
+		}
+		return slices.CopyChunkedByteSlices(names, 1000), nil
+	}
+
+	itr, err := is.measurementIterator()
+	if err != nil {
+		return nil, err
+	} else if itr == nil {
+		return nil, nil
+	}
+	defer itr.Close()
+
+	// Iterate over all measurements if no condition exists.
+ var names [][]byte + for { + e, err := itr.Next() + if err != nil { + return nil, err + } else if e == nil { + break + } + + // Determine if there exists at least one authorised series for the + // measurement name. + if is.measurementAuthorizedSeries(auth, e, nil) { + names = append(names, e) + } + } + return slices.CopyChunkedByteSlices(names, 1000), nil +} + +func (is IndexSet) measurementNamesByPredicate(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { + if expr == nil { + return nil, nil + } + + switch e := expr.(type) { + case *influxql.BinaryExpr: + switch e.Op { + case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: + tag, ok := e.LHS.(*influxql.VarRef) + if !ok { + return nil, fmt.Errorf("left side of '%s' must be a tag key", e.Op.String()) + } + + // Retrieve value or regex expression from RHS. + var value string + var regex *regexp.Regexp + if influxql.IsRegexOp(e.Op) { + re, ok := e.RHS.(*influxql.RegexLiteral) + if !ok { + return nil, fmt.Errorf("right side of '%s' must be a regular expression", e.Op.String()) + } + regex = re.Val + } else { + s, ok := e.RHS.(*influxql.StringLiteral) + if !ok { + return nil, fmt.Errorf("right side of '%s' must be a tag value string", e.Op.String()) + } + value = s.Val + } + + // Match on name, if specified. + if tag.Val == "_name" { + return is.measurementNamesByNameFilter(auth, e.Op, value, regex) + } else if influxql.IsSystemName(tag.Val) { + return nil, nil + } + return is.measurementNamesByTagPredicate(auth, e.Op, tag.Val, value, regex) + + case influxql.OR, influxql.AND: + lhs, err := is.measurementNamesByPredicate(auth, e.LHS) + if err != nil { + return nil, err + } + + rhs, err := is.measurementNamesByPredicate(auth, e.RHS) + if err != nil { + return nil, err + } + + if e.Op == influxql.OR { + return bytesutil.Union(lhs, rhs), nil + } + return bytesutil.Intersect(lhs, rhs), nil + + default: + return nil, fmt.Errorf("invalid tag comparison operator") + } + + case *influxql.ParenExpr: + return is.measurementNamesByPredicate(auth, e.Expr) + default: + return nil, fmt.Errorf("%#v", expr) + } +} + func (is IndexSet) measurementNamesByTagFilter(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) ([][]byte, error) { var names [][]byte @@ -1546,7 +1656,7 @@ func (is IndexSet) measurementNamesByTagFilter(auth query.Authorizer, op influxq // an authorized series belonging to the measurement must be located. // Then, the measurement can be added iff !tagMatch && authorized. 
if (op == influxql.NEQ || op == influxql.NEQREGEX) && !tagMatch { - authorized = is.measurementAuthorizedSeries(auth, me) + authorized = is.measurementAuthorizedSeries(auth, me, nil) } // tags match | operation is EQ | measurement matches @@ -1565,13 +1675,80 @@ func (is IndexSet) measurementNamesByTagFilter(auth query.Authorizer, op influxq return names, nil } +func (is IndexSet) measurementNamesByTagPredicate(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) ([][]byte, error) { + var names [][]byte + + mitr, err := is.measurementIterator() + if err != nil { + return nil, err + } else if mitr == nil { + return nil, nil + } + defer mitr.Close() + + var checkMeasurement func(auth query.Authorizer, me []byte) (bool, error) + switch op { + case influxql.EQ: + checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { + return is.measurementHasTagValue(auth, me, []byte(key), []byte(val)) + } + case influxql.NEQ: + checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { + // If there is an authorized series in this measurement and that series + // does not contain the tag key/value. + ok := is.measurementAuthorizedSeries(auth, me, func(tags models.Tags) bool { + return tags.GetString(key) == val + }) + return ok, nil + } + case influxql.EQREGEX: + checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { + return is.measurementHasTagValueRegex(auth, me, []byte(key), regex) + } + case influxql.NEQREGEX: + checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { + // If there is an authorized series in this measurement and that series + // does not contain the tag key/value. + ok := is.measurementAuthorizedSeries(auth, me, func(tags models.Tags) bool { + return regex.MatchString(tags.GetString(key)) + }) + return ok, nil + } + default: + return nil, fmt.Errorf("unsupported operand: %s", op) + } + + for { + me, err := mitr.Next() + if err != nil { + return nil, err + } else if me == nil { + break + } + + ok, err := checkMeasurement(auth, me) + if err != nil { + return nil, err + } else if ok { + names = append(names, me) + } + } + + bytesutil.Sort(names) + return names, nil +} + // measurementAuthorizedSeries determines if the measurement contains a series // that is authorized to be read. -func (is IndexSet) measurementAuthorizedSeries(auth query.Authorizer, name []byte) bool { - if query.AuthorizerIsOpen(auth) { +func (is IndexSet) measurementAuthorizedSeries(auth query.Authorizer, name []byte, exclude func(tags models.Tags) bool) bool { + if query.AuthorizerIsOpen(auth) && exclude == nil { return true } + if auth == nil { + auth = query.OpenAuthorizer + } + sitr, err := is.measurementSeriesIDIterator(name) if err != nil || sitr == nil { return false @@ -1591,11 +1768,151 @@ func (is IndexSet) measurementAuthorizedSeries(auth query.Authorizer, name []byt name, tags := is.SeriesFile.Series(series.SeriesID) if auth.AuthorizeSeriesRead(is.Database(), name, tags) { + if exclude != nil && exclude(tags) { + continue + } return true } } } +func (is IndexSet) measurementHasTagValue(auth query.Authorizer, me, key, value []byte) (bool, error) { + if len(value) == 0 { + return is.measurementHasEmptyTagValue(auth, me, key) + } + + hasTagValue, err := is.HasTagValue(me, key, value) + if err != nil || !hasTagValue { + return false, err + } + + // If the authorizer is open, return true. 
+	if query.AuthorizerIsOpen(auth) {
+		return true, nil
+	}
+
+	// When an authorizer is present, the measurement should be
+	// included only if one of its series is authorized.
+	sitr, err := is.tagValueSeriesIDIterator(me, key, value)
+	if err != nil || sitr == nil {
+		return false, err
+	}
+	defer sitr.Close()
+	sitr = FilterUndeletedSeriesIDIterator(is.SeriesFile, sitr)
+
+	// Locate a series with this matching tag value that's authorized.
+	for {
+		se, err := sitr.Next()
+		if err != nil || se.SeriesID == 0 {
+			return false, err
+		}
+
+		name, tags := is.SeriesFile.Series(se.SeriesID)
+		if auth.AuthorizeSeriesRead(is.Database(), name, tags) {
+			return true, nil
+		}
+	}
+}
+
+func (is IndexSet) measurementHasEmptyTagValue(auth query.Authorizer, me, key []byte) (bool, error) {
+	// Any series that does not have a tag key
+	// has an empty tag value for that key.
+	// Iterate through all of the series to find one
+	// series that does not have the tag key.
+	sitr, err := is.measurementSeriesIDIterator(me)
+	if err != nil || sitr == nil {
+		return false, err
+	}
+	defer sitr.Close()
+	sitr = FilterUndeletedSeriesIDIterator(is.SeriesFile, sitr)
+
+	for {
+		series, err := sitr.Next()
+		if err != nil || series.SeriesID == 0 {
+			return false, err
+		}
+
+		name, tags := is.SeriesFile.Series(series.SeriesID)
+		if len(tags.Get(key)) > 0 {
+			// The tag key exists in this series. We need
+			// at least one series that does not have the tag
+			// key.
+			continue
+		}
+
+		// Verify that we can see this series.
+		if query.AuthorizerIsOpen(auth) {
+			return true, nil
+		} else if auth.AuthorizeSeriesRead(is.Database(), name, tags) {
+			return true, nil
+		}
+	}
+}
+
+func (is IndexSet) measurementHasTagValueRegex(auth query.Authorizer, me, key []byte, value *regexp.Regexp) (bool, error) {
+	// If the regex matches the empty string, do a special check to see
+	// if we have an empty tag value.
+	if matchEmpty := value.MatchString(""); matchEmpty {
+		if ok, err := is.measurementHasEmptyTagValue(auth, me, key); err != nil {
+			return false, err
+		} else if ok {
+			return true, nil
+		}
+	}
+
+	// Iterate over the tag values and find one that matches the value.
+	vitr, err := is.tagValueIterator(me, key)
+	if err != nil || vitr == nil {
+		return false, err
+	}
+	defer vitr.Close()
+
+	for {
+		ve, err := vitr.Next()
+		if err != nil || ve == nil {
+			return false, err
+		}
+
+		if !value.Match(ve) {
+			// The regex does not match this tag value.
+			continue
+		}
+
+		// If the authorizer is open, then we have found a suitable tag value.
+		if query.AuthorizerIsOpen(auth) {
+			return true, nil
+		}
+
+		// When an authorizer is present, the measurement should only be included
+		// if one of the series is authorized.
+		if authorized, err := func() (bool, error) {
+			sitr, err := is.tagValueSeriesIDIterator(me, key, ve)
+			if err != nil || sitr == nil {
+				return false, err
+			}
+			defer sitr.Close()
+			sitr = FilterUndeletedSeriesIDIterator(is.SeriesFile, sitr)
+
+			// Locate an authorized series.
+			for {
+				se, err := sitr.Next()
+				if err != nil || se.SeriesID == 0 {
+					return false, err
+				}
+
+				name, tags := is.SeriesFile.Series(se.SeriesID)
+				if auth.AuthorizeSeriesRead(is.Database(), name, tags) {
+					return true, nil
+				}
+			}
+		}(); err != nil {
+			return false, err
+		} else if authorized {
+			return true, nil
+		}
+	}
+}
+
 // HasTagKey returns true if the tag key exists in any index for the provided
 // measurement.
func (is IndexSet) HasTagKey(name, key []byte) (bool, error) { diff --git a/tsdb/index_test.go b/tsdb/index_test.go index 53678137cd..10071b2bca 100644 --- a/tsdb/index_test.go +++ b/tsdb/index_test.go @@ -137,6 +137,96 @@ func TestIndexSet_MeasurementNamesByExpr(t *testing.T) { } } +func TestIndexSet_MeasurementNamesByPredicate(t *testing.T) { + // Setup indexes + indexes := map[string]*Index{} + for _, name := range tsdb.RegisteredIndexes() { + idx := MustOpenNewIndex(name) + idx.AddSeries("cpu", map[string]string{"region": "east"}) + idx.AddSeries("cpu", map[string]string{"region": "west", "secret": "foo"}) + idx.AddSeries("disk", map[string]string{"secret": "foo"}) + idx.AddSeries("mem", map[string]string{"region": "west"}) + idx.AddSeries("gpu", map[string]string{"region": "east"}) + idx.AddSeries("pci", map[string]string{"region": "east", "secret": "foo"}) + indexes[name] = idx + defer idx.Close() + } + + authorizer := &internal.AuthorizerMock{ + AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { + if tags.GetString("secret") != "" { + t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) + return false + } + return true + }, + } + + type example struct { + name string + expr influxql.Expr + expected [][]byte + } + + // These examples should be run without any auth. + examples := []example{ + {name: "all", expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")}, + {name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("cpu", "mem")}, + {name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "pci")}, + {name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem", "pci")}, + {name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "pci")}, + // None of the series have this tag so all should be selected. + {name: "EQ empty", expr: influxql.MustParseExpr(`host = ''`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")}, + // Measurements that have this tag at all should be returned. + {name: "NEQ empty", expr: influxql.MustParseExpr(`region != ''`), expected: slices.StringsToBytes("cpu", "gpu", "mem", "pci")}, + {name: "EQREGEX empty", expr: influxql.MustParseExpr(`host =~ /.*/`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")}, + {name: "NEQ empty", expr: influxql.MustParseExpr(`region !~ /.*/`), expected: slices.StringsToBytes()}, + } + + // These examples should be run with the authorizer. 
+	authExamples := []example{
+		{name: "all", expected: slices.StringsToBytes("cpu", "gpu", "mem")},
+		{name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("mem")},
+		{name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("cpu", "gpu")},
+		{name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem")},
+		{name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("cpu", "gpu")},
+		{name: "EQ empty", expr: influxql.MustParseExpr(`host = ''`), expected: slices.StringsToBytes("cpu", "gpu", "mem")},
+		{name: "NEQ empty", expr: influxql.MustParseExpr(`region != ''`), expected: slices.StringsToBytes("cpu", "gpu", "mem")},
+		{name: "EQREGEX empty", expr: influxql.MustParseExpr(`host =~ /.*/`), expected: slices.StringsToBytes("cpu", "gpu", "mem")},
+		{name: "NEQ empty", expr: influxql.MustParseExpr(`region !~ /.*/`), expected: slices.StringsToBytes()},
+	}
+
+	for _, idx := range tsdb.RegisteredIndexes() {
+		t.Run(idx, func(t *testing.T) {
+			t.Run("no authorization", func(t *testing.T) {
+				for _, example := range examples {
+					t.Run(example.name, func(t *testing.T) {
+						names, err := indexes[idx].IndexSet().MeasurementNamesByPredicate(nil, example.expr)
+						if err != nil {
+							t.Fatal(err)
+						} else if !reflect.DeepEqual(names, example.expected) {
+							t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected))
+						}
+					})
+				}
+			})
+
+			t.Run("with authorization", func(t *testing.T) {
+				for _, example := range authExamples {
+					t.Run(example.name, func(t *testing.T) {
+						names, err := indexes[idx].IndexSet().MeasurementNamesByPredicate(authorizer, example.expr)
+						if err != nil {
+							t.Fatal(err)
+						} else if !reflect.DeepEqual(names, example.expected) {
+							t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected))
+						}
+					})
+				}
+			})
+		})
+	}
+}
+
 func TestIndexSet_DedupeInmemIndexes(t *testing.T) {
 	testCases := []struct {
 		tsiN int // Quantity of TSI indexes
diff --git a/tsdb/shard.go b/tsdb/shard.go
index 4457cac57b..b958f0ccf9 100644
--- a/tsdb/shard.go
+++ b/tsdb/shard.go
@@ -799,9 +799,17 @@ func (s *Shard) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte
 	return indexSet.MeasurementTagKeyValuesByExpr(auth, name, key, expr, keysSorted)
 }
 
+// MeasurementNamesByPredicate returns the measurement names matching the given predicate expression.
+func (s *Shard) MeasurementNamesByPredicate(expr influxql.Expr) ([][]byte, error) {
+	index, err := s.Index()
+	if err != nil {
+		return nil, err
+	}
+	indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}
+	return indexSet.MeasurementNamesByPredicate(query.OpenAuthorizer, expr)
+}
+
 // MeasurementFields returns fields for a measurement.
-// TODO(edd): This method is currently only being called from tests; do we
-// really need it?
 func (s *Shard) MeasurementFields(name []byte) *MeasurementFields {
 	engine, err := s.Engine()
 	if err != nil {
@@ -1256,6 +1264,38 @@ func (a Shards) FieldKeysByMeasurement(name []byte) []string {
 	return slices.MergeSortedStrings(all...)
 }
 
+// MeasurementNamesByPredicate returns the measurements that match the given predicate.
+func (a Shards) MeasurementNamesByPredicate(expr influxql.Expr) ([][]byte, error) { + if len(a) == 1 { + return a[0].MeasurementNamesByPredicate(expr) + } + + all := make([][][]byte, len(a)) + for i, shard := range a { + names, err := shard.MeasurementNamesByPredicate(expr) + if err != nil { + return nil, err + } + all[i] = names + } + return slices.MergeSortedBytes(all...), nil +} + +// FieldKeysByPredicate returns the field keys for series that match +// the given predicate. +func (a Shards) FieldKeysByPredicate(expr influxql.Expr) (map[string][]string, error) { + names, err := a.MeasurementNamesByPredicate(expr) + if err != nil { + return nil, err + } + + all := make(map[string][]string, len(names)) + for _, name := range names { + all[string(name)] = a.FieldKeysByMeasurement(name) + } + return all, nil +} + func (a Shards) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { fields = make(map[string]influxql.DataType) dimensions = make(map[string]struct{}) diff --git a/v1/services/storage/series_cursor.go b/v1/services/storage/series_cursor.go index 17e0c226e6..9bc0e4aaa9 100644 --- a/v1/services/storage/series_cursor.go +++ b/v1/services/storage/series_cursor.go @@ -2,8 +2,6 @@ package storage import ( "context" - "errors" - "sort" "github.com/influxdata/influxdb/v2/influxql/query" "github.com/influxdata/influxdb/v2/models" @@ -112,22 +110,21 @@ func newIndexSeriesCursor(ctx context.Context, predicate *datatypes.Predicate, s return p, nil } - var ( - itr query.Iterator - fi query.FloatIterator - ) - if itr, err = sg.CreateIterator(ctx, &influxql.Measurement{SystemIterator: "_fieldKeys"}, opt); itr != nil && err == nil { - if fi, err = toFloatIterator(itr); err != nil { - goto CLEANUP - } - - p.fields = extractFields(fi) - fi.Close() - if len(p.fields) == 0 { - goto CLEANUP - } - return p, nil + var mfkeys map[string][]string + mfkeys, err = sg.FieldKeysByPredicate(opt.Condition) + if err != nil { + goto CLEANUP } + + p.fields = make(map[string][]field, len(mfkeys)) + for name, fkeys := range mfkeys { + fields := make([]field, 0, len(fkeys)) + for _, key := range fkeys { + fields = append(fields, field{n: key, nb: []byte(key)}) + } + p.fields[name] = fields + } + return p, nil } CLEANUP: @@ -230,57 +227,3 @@ type field struct { n string nb []byte } - -func extractFields(itr query.FloatIterator) measurementFields { - mf := make(measurementFields) - - for { - p, err := itr.Next() - if err != nil { - return nil - } else if p == nil { - break - } - - // Aux is populated by `fieldKeysIterator#Next` - fields := append(mf[p.Name], field{ - n: p.Aux[0].(string), - }) - - mf[p.Name] = fields - } - - if len(mf) == 0 { - return nil - } - - for k, fields := range mf { - sort.Slice(fields, func(i, j int) bool { - return fields[i].n < fields[j].n - }) - - // deduplicate - i := 1 - fields[0].nb = []byte(fields[0].n) - for j := 1; j < len(fields); j++ { - if fields[j].n != fields[j-1].n { - fields[i] = fields[j] - fields[i].nb = []byte(fields[i].n) - i++ - } - } - - mf[k] = fields[:i] - } - - return mf -} - -func toFloatIterator(iter query.Iterator) (query.FloatIterator, error) { - sitr, ok := iter.(query.FloatIterator) - if !ok { - return nil, errors.New("expected FloatIterator") - } - - return sitr, nil -} From 46db75d9a9c43bdb7e49f50c4ea56b1565f5108f Mon Sep 17 00:00:00 2001 From: Stuart Carnie Date: Thu, 17 Sep 2020 17:16:21 -0700 Subject: [PATCH 30/34] fix: ShardGroupDuration is updated for retention period updates Fixes #19518 
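For context, a RetentionPolicyUpdate whose ShardGroupDuration is set to
zero asks the meta client to recompute the shard group duration from
the new retention period instead of keeping the stale value. A minimal
sketch of that derivation follows, assuming the normalization rules
used by the OSS meta package (the function name and exact thresholds
here are illustrative, not a verbatim copy):

    package meta // illustrative sketch only

    import "time"

    // shardGroupDuration picks a shard group duration for a retention
    // period: infinite or long retention gets 7d shard groups,
    // mid-range retention gets 1d, and short retention gets 1h.
    func shardGroupDuration(d time.Duration) time.Duration {
        if d >= 180*24*time.Hour || d == 0 { // 6 months, or infinite
            return 7 * 24 * time.Hour
        } else if d >= 2*24*time.Hour { // 2 days
            return 24 * time.Hour
        }
        return time.Hour
    }

Under rules like these, shrinking a bucket's retention period to one
hour, as the new launcher test below does, should also shrink its shard
group duration to one hour.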
---
 cmd/influxd/launcher/storage_test.go | 18 ++++++++++++++++++
 storage/engine.go | 10 +++++++---
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/cmd/influxd/launcher/storage_test.go b/cmd/influxd/launcher/storage_test.go
index 9e3808e098..8f8bf3d3b5 100644
--- a/cmd/influxd/launcher/storage_test.go
+++ b/cmd/influxd/launcher/storage_test.go
@@ -5,11 +5,14 @@ import (
 "io/ioutil"
 nethttp "net/http"
 "testing"
+ "time"
 
 "github.com/google/go-cmp/cmp"
 "github.com/influxdata/influxdb/v2"
 "github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
 "github.com/influxdata/influxdb/v2/http"
+ "github.com/influxdata/influxdb/v2/pkg/testing/assert"
+ "github.com/stretchr/testify/require"
 )
 
 func TestStorage_WriteAndQuery(t *testing.T) {
@@ -152,3 +155,18 @@ func TestLauncher_BucketDelete(t *testing.T) {
 t.Fatalf("after bucket delete got %d, exp %d", got, exp)
 }
 }
+
+func TestLauncher_UpdateRetentionPolicy(t *testing.T) {
+ l := launcher.RunTestLauncherOrFail(t, ctx, nil)
+ l.SetupOrFail(t)
+ defer l.ShutdownOrFail(t, ctx)
+
+ bucket, err := l.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{ID: &l.Bucket.ID})
+ require.NoError(t, err)
+ require.NotNil(t, bucket)
+
+ newRetentionPeriod := 1 * time.Hour
+ bucket, err = l.BucketService(t).UpdateBucket(ctx, bucket.ID, influxdb.BucketUpdate{RetentionPeriod: &newRetentionPeriod})
+ require.NoError(t, err)
+ assert.Equal(t, bucket.RetentionPeriod, newRetentionPeriod)
+}
diff --git a/storage/engine.go b/storage/engine.go
index 2c1d3f465b..91bf7e41be 100644
--- a/storage/engine.go
+++ b/storage/engine.go
@@ -238,7 +238,6 @@ func (e *Engine) WritePoints(ctx context.Context, orgID influxdb.ID, bucketID in
 defer span.Finish()
 
 //TODO - remember to add back unicode validation...
- //TODO - remember to check that there is a _field key / \xff key added.
 
 e.mu.RLock()
 defer e.mu.RUnlock()
@@ -266,13 +265,18 @@ func (e *Engine) CreateBucket(ctx context.Context, b *influxdb.Bucket) (err erro
 return nil
 }
 
-func (e *Engine) UpdateBucketRetentionPeriod(ctx context.Context, bucketID influxdb.ID, d time.Duration) (err error) {
+func (e *Engine) UpdateBucketRetentionPeriod(ctx context.Context, bucketID influxdb.ID, d time.Duration) error {
 span, _ := tracing.StartSpanFromContext(ctx)
 defer span.Finish()
 
+ // A value of zero ensures the ShardGroupDuration is adjusted to an
+ // appropriate value based on the specified duration.
+ zero := time.Duration(0)
 rpu := meta.RetentionPolicyUpdate{
- Duration: &d,
+ Duration: &d,
+ ShardGroupDuration: &zero,
 }
+
 return e.metaClient.UpdateRetentionPolicy(bucketID.String(), meta.DefaultRetentionPolicyName, &rpu, true)
 }
 
From f144d8d6140259df87023c5d59008336afb7ee5e Mon Sep 17 00:00:00 2001
From: Jacob Marble
Date: Mon, 21 Sep 2020 09:32:05 -0700
Subject: [PATCH 31/34] chore(storage): remove storage-team from CODEOWNERS
 (#19574)

The cloud 2 storage team doesn't "own" OSS code any more. We are still
happy to review storage PRs, but we don't need to review all of them.
--- .github/CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 10701ea33b..d11a55289e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -12,5 +12,5 @@ http/swagger.yml @influxdata/monitoring-team /pkger/ @influxdata/tools-team # Storage code -/storage/ @influxdata/storage-team -/tsdb/ @influxdata/storage-team +#/storage/ @influxdata/storage-team +#/tsdb/ @influxdata/storage-team From fbe56d7e235dfe3381196573e2cbe49fb5d63051 Mon Sep 17 00:00:00 2001 From: Timmy Luong Date: Mon, 21 Sep 2020 11:02:51 -0700 Subject: [PATCH 32/34] feat: add legendOrientationThreshold (#19584) * feat: add legendOrientationThreshold * feat: add legendOpacity * chore: fix float definition in swagger and use single-value context in Resource float64 --- dashboard.go | 262 +++++++------ dashboard_test.go | 8 +- http/dashboard_test.go | 42 ++- http/swagger.yml | 45 +++ pkger/clone_resource.go | 37 +- pkger/parser.go | 54 +-- pkger/parser_models.go | 350 +++++++++--------- pkger/parser_test.go | 14 + pkger/service_test.go | 264 +++++++------ pkger/testdata/dashboard_band.yml | 2 + pkger/testdata/dashboard_heatmap.json | 64 +++- pkger/testdata/dashboard_heatmap.yml | 2 + pkger/testdata/dashboard_histogram.json | 16 +- pkger/testdata/dashboard_histogram.yml | 2 + pkger/testdata/dashboard_mosaic.yml | 2 + pkger/testdata/dashboard_scatter.json | 16 +- pkger/testdata/dashboard_scatter.yml | 2 + .../dashboard_single_stat_plus_line.json | 4 +- .../dashboard_single_stat_plus_line.yml | 2 + pkger/testdata/dashboard_xy.json | 5 +- pkger/testdata/dashboard_xy.yml | 3 +- 21 files changed, 706 insertions(+), 490 deletions(-) diff --git a/dashboard.go b/dashboard.go index e92505d482..63308fac25 100644 --- a/dashboard.go +++ b/dashboard.go @@ -706,160 +706,178 @@ func (u ViewUpdate) MarshalJSON() ([]byte, error) { // LinePlusSingleStatProperties represents options for line plus single stat view in Chronograf type LinePlusSingleStatProperties struct { - Queries []DashboardQuery `json:"queries"` - Axes map[string]Axis `json:"axes"` - Type string `json:"type"` - Legend Legend `json:"legend"` - ViewColors []ViewColor `json:"colors"` - Prefix string `json:"prefix"` - Suffix string `json:"suffix"` - DecimalPlaces DecimalPlaces `json:"decimalPlaces"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - XColumn string `json:"xColumn"` - YColumn string `json:"yColumn"` - ShadeBelow bool `json:"shadeBelow"` - Position string `json:"position"` - TimeFormat string `json:"timeFormat"` - HoverDimension string `json:"hoverDimension"` + Queries []DashboardQuery `json:"queries"` + Axes map[string]Axis `json:"axes"` + Type string `json:"type"` + Legend Legend `json:"legend"` + ViewColors []ViewColor `json:"colors"` + Prefix string `json:"prefix"` + Suffix string `json:"suffix"` + DecimalPlaces DecimalPlaces `json:"decimalPlaces"` + Note string `json:"note"` + ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` + XColumn string `json:"xColumn"` + YColumn string `json:"yColumn"` + ShadeBelow bool `json:"shadeBelow"` + Position string `json:"position"` + TimeFormat string `json:"timeFormat"` + HoverDimension string `json:"hoverDimension"` + LegendOpacity float64 `json:"legendOpacity"` + LegendOrientationThreshold int `json:"legendOrientationThreshold"` } // XYViewProperties represents options for line, bar, step, or stacked view in Chronograf type XYViewProperties struct { - Queries []DashboardQuery `json:"queries"` - Axes map[string]Axis 
`json:"axes"` - Type string `json:"type"` - Legend Legend `json:"legend"` - Geom string `json:"geom"` // Either "line", "step", "stacked", or "bar" - ViewColors []ViewColor `json:"colors"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - XColumn string `json:"xColumn"` - YColumn string `json:"yColumn"` - ShadeBelow bool `json:"shadeBelow"` - Position string `json:"position"` - TimeFormat string `json:"timeFormat"` - HoverDimension string `json:"hoverDimension"` + Queries []DashboardQuery `json:"queries"` + Axes map[string]Axis `json:"axes"` + Type string `json:"type"` + Legend Legend `json:"legend"` + Geom string `json:"geom"` // Either "line", "step", "stacked", or "bar" + ViewColors []ViewColor `json:"colors"` + Note string `json:"note"` + ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` + XColumn string `json:"xColumn"` + YColumn string `json:"yColumn"` + ShadeBelow bool `json:"shadeBelow"` + Position string `json:"position"` + TimeFormat string `json:"timeFormat"` + HoverDimension string `json:"hoverDimension"` + LegendOpacity float64 `json:"legendOpacity"` + LegendOrientationThreshold int `json:"legendOrientationThreshold"` } // BandViewProperties represents options for the band view type BandViewProperties struct { - Queries []DashboardQuery `json:"queries"` - Axes map[string]Axis `json:"axes"` - Type string `json:"type"` - Legend Legend `json:"legend"` - Geom string `json:"geom"` - ViewColors []ViewColor `json:"colors"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - TimeFormat string `json:"timeFormat"` - HoverDimension string `json:"hoverDimension"` - XColumn string `json:"xColumn"` - YColumn string `json:"yColumn"` - UpperColumn string `json:"upperColumn"` - MainColumn string `json:"mainColumn"` - LowerColumn string `json:"lowerColumn"` + Queries []DashboardQuery `json:"queries"` + Axes map[string]Axis `json:"axes"` + Type string `json:"type"` + Legend Legend `json:"legend"` + Geom string `json:"geom"` + ViewColors []ViewColor `json:"colors"` + Note string `json:"note"` + ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` + TimeFormat string `json:"timeFormat"` + HoverDimension string `json:"hoverDimension"` + XColumn string `json:"xColumn"` + YColumn string `json:"yColumn"` + UpperColumn string `json:"upperColumn"` + MainColumn string `json:"mainColumn"` + LowerColumn string `json:"lowerColumn"` + LegendOpacity float64 `json:"legendOpacity"` + LegendOrientationThreshold int `json:"legendOrientationThreshold"` } // CheckViewProperties represents options for a view representing a check type CheckViewProperties struct { - Type string `json:"type"` - CheckID string `json:"checkID"` - Queries []DashboardQuery `json:"queries"` - ViewColors []string `json:"colors"` + Type string `json:"type"` + CheckID string `json:"checkID"` + Queries []DashboardQuery `json:"queries"` + ViewColors []string `json:"colors"` + LegendOpacity float64 `json:"legendOpacity"` + LegendOrientationThreshold int `json:"legendOrientationThreshold"` } // SingleStatViewProperties represents options for single stat view in Chronograf type SingleStatViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - Prefix string `json:"prefix"` - TickPrefix string `json:"tickPrefix"` - Suffix string `json:"suffix"` - TickSuffix string `json:"tickSuffix"` - ViewColors []ViewColor `json:"colors"` - DecimalPlaces DecimalPlaces `json:"decimalPlaces"` - Note string `json:"note"` - ShowNoteWhenEmpty bool 
`json:"showNoteWhenEmpty"` + Type string `json:"type"` + Queries []DashboardQuery `json:"queries"` + Prefix string `json:"prefix"` + TickPrefix string `json:"tickPrefix"` + Suffix string `json:"suffix"` + TickSuffix string `json:"tickSuffix"` + ViewColors []ViewColor `json:"colors"` + DecimalPlaces DecimalPlaces `json:"decimalPlaces"` + Note string `json:"note"` + ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` + LegendOpacity float64 `json:"legendOpacity"` + LegendOrientationThreshold int `json:"legendOrientationThreshold"` } // HistogramViewProperties represents options for histogram view in Chronograf type HistogramViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - ViewColors []ViewColor `json:"colors"` - XColumn string `json:"xColumn"` - FillColumns []string `json:"fillColumns"` - XDomain []float64 `json:"xDomain,omitempty"` - XAxisLabel string `json:"xAxisLabel"` - Position string `json:"position"` - BinCount int `json:"binCount"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` + Type string `json:"type"` + Queries []DashboardQuery `json:"queries"` + ViewColors []ViewColor `json:"colors"` + XColumn string `json:"xColumn"` + FillColumns []string `json:"fillColumns"` + XDomain []float64 `json:"xDomain,omitempty"` + XAxisLabel string `json:"xAxisLabel"` + Position string `json:"position"` + BinCount int `json:"binCount"` + Note string `json:"note"` + ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` + LegendOpacity float64 `json:"legendOpacity"` + LegendOrientationThreshold int `json:"legendOrientationThreshold"` } // HeatmapViewProperties represents options for heatmap view in Chronograf type HeatmapViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - ViewColors []string `json:"colors"` - BinSize int32 `json:"binSize"` - XColumn string `json:"xColumn"` - YColumn string `json:"yColumn"` - XDomain []float64 `json:"xDomain,omitempty"` - YDomain []float64 `json:"yDomain,omitempty"` - XAxisLabel string `json:"xAxisLabel"` - YAxisLabel string `json:"yAxisLabel"` - XPrefix string `json:"xPrefix"` - XSuffix string `json:"xSuffix"` - YPrefix string `json:"yPrefix"` - YSuffix string `json:"ySuffix"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - TimeFormat string `json:"timeFormat"` + Type string `json:"type"` + Queries []DashboardQuery `json:"queries"` + ViewColors []string `json:"colors"` + BinSize int32 `json:"binSize"` + XColumn string `json:"xColumn"` + YColumn string `json:"yColumn"` + XDomain []float64 `json:"xDomain,omitempty"` + YDomain []float64 `json:"yDomain,omitempty"` + XAxisLabel string `json:"xAxisLabel"` + YAxisLabel string `json:"yAxisLabel"` + XPrefix string `json:"xPrefix"` + XSuffix string `json:"xSuffix"` + YPrefix string `json:"yPrefix"` + YSuffix string `json:"ySuffix"` + Note string `json:"note"` + ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` + TimeFormat string `json:"timeFormat"` + LegendOpacity float64 `json:"legendOpacity"` + LegendOrientationThreshold int `json:"legendOrientationThreshold"` } // ScatterViewProperties represents options for scatter view in Chronograf type ScatterViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - ViewColors []string `json:"colors"` - FillColumns []string `json:"fillColumns"` - SymbolColumns []string `json:"symbolColumns"` - XColumn string `json:"xColumn"` - YColumn string `json:"yColumn"` - XDomain []float64 
`json:"xDomain,omitempty"` - YDomain []float64 `json:"yDomain,omitempty"` - XAxisLabel string `json:"xAxisLabel"` - YAxisLabel string `json:"yAxisLabel"` - XPrefix string `json:"xPrefix"` - XSuffix string `json:"xSuffix"` - YPrefix string `json:"yPrefix"` - YSuffix string `json:"ySuffix"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - TimeFormat string `json:"timeFormat"` + Type string `json:"type"` + Queries []DashboardQuery `json:"queries"` + ViewColors []string `json:"colors"` + FillColumns []string `json:"fillColumns"` + SymbolColumns []string `json:"symbolColumns"` + XColumn string `json:"xColumn"` + YColumn string `json:"yColumn"` + XDomain []float64 `json:"xDomain,omitempty"` + YDomain []float64 `json:"yDomain,omitempty"` + XAxisLabel string `json:"xAxisLabel"` + YAxisLabel string `json:"yAxisLabel"` + XPrefix string `json:"xPrefix"` + XSuffix string `json:"xSuffix"` + YPrefix string `json:"yPrefix"` + YSuffix string `json:"ySuffix"` + Note string `json:"note"` + ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` + TimeFormat string `json:"timeFormat"` + LegendOpacity float64 `json:"legendOpacity"` + LegendOrientationThreshold int `json:"legendOrientationThreshold"` } // MosaicViewProperties represents options for mosaic view in Chronograf type MosaicViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - ViewColors []string `json:"colors"` - FillColumns []string `json:"fillColumns"` - XColumn string `json:"xColumn"` - YSeriesColumns []string `json:"ySeriesColumns"` - XDomain []float64 `json:"xDomain,omitempty"` - YDomain []float64 `json:"yDomain,omitempty"` - XAxisLabel string `json:"xAxisLabel"` - YAxisLabel string `json:"yAxisLabel"` - XPrefix string `json:"xPrefix"` - XSuffix string `json:"xSuffix"` - YPrefix string `json:"yPrefix"` - YSuffix string `json:"ySuffix"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - TimeFormat string `json:"timeFormat"` + Type string `json:"type"` + Queries []DashboardQuery `json:"queries"` + ViewColors []string `json:"colors"` + FillColumns []string `json:"fillColumns"` + XColumn string `json:"xColumn"` + YSeriesColumns []string `json:"ySeriesColumns"` + XDomain []float64 `json:"xDomain,omitempty"` + YDomain []float64 `json:"yDomain,omitempty"` + XAxisLabel string `json:"xAxisLabel"` + YAxisLabel string `json:"yAxisLabel"` + XPrefix string `json:"xPrefix"` + XSuffix string `json:"xSuffix"` + YPrefix string `json:"yPrefix"` + YSuffix string `json:"ySuffix"` + Note string `json:"note"` + ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` + TimeFormat string `json:"timeFormat"` + LegendOpacity float64 `json:"legendOpacity"` + LegendOrientationThreshold int `json:"legendOrientationThreshold"` } // GaugeViewProperties represents options for gauge view in Chronograf diff --git a/dashboard_test.go b/dashboard_test.go index 7fc978922e..45e748a726 100644 --- a/dashboard_test.go +++ b/dashboard_test.go @@ -51,9 +51,11 @@ func TestView_MarshalJSON(t *testing.T) { "xColumn": "", "yColumn": "", "shadeBelow": false, - "position": "", - "timeFormat": "", - "hoverDimension": "" + "position": "", + "timeFormat": "", + "hoverDimension": "", + "legendOpacity": 0, + "legendOrientationThreshold": 0 } } `, diff --git a/http/dashboard_test.go b/http/dashboard_test.go index d19f3b645d..05ad5a54ed 100644 --- a/http/dashboard_test.go +++ b/http/dashboard_test.go @@ -448,24 +448,26 @@ func TestService_handleGetDashboard(t *testing.T) { "x": 1, "y": 2, "w": 3, - "h": 
4, - "name": "the cell name", - "properties": { - "shape": "chronograf-v2", - "axes": null, - "colors": null, - "geom": "", - "legend": {}, - "position": "", - "note": "", - "queries": null, - "shadeBelow": false, - "hoverDimension": "", - "showNoteWhenEmpty": false, - "timeFormat": "", - "type": "xy", - "xColumn": "", - "yColumn": "" + "h": 4, + "name": "the cell name", + "properties": { + "shape": "chronograf-v2", + "axes": null, + "colors": null, + "geom": "", + "legend": {}, + "position": "", + "note": "", + "queries": null, + "shadeBelow": false, + "hoverDimension": "", + "showNoteWhenEmpty": false, + "timeFormat": "", + "type": "xy", + "xColumn": "", + "yColumn": "", + "legendOpacity": 0, + "legendOrientationThreshold": 0 }, "links": { "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550", @@ -983,7 +985,9 @@ func TestService_handlePostDashboard(t *testing.T) { "type": "", "xColumn": "", "yColumn": "", - "type": "xy" + "type": "xy", + "legendOpacity": 0, + "legendOrientationThreshold": 0 }, "links": { "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550", diff --git a/http/swagger.yml b/http/swagger.yml index 32e0e2da9d..38ae8ca04d 100644 --- a/http/swagger.yml +++ b/http/swagger.yml @@ -8956,6 +8956,11 @@ components: enum: [overlaid, stacked] geom: $ref: "#/components/schemas/XYGeom" + legendOpacity: + type: number + format: float + legendOrientationThreshold: + type: integer XYGeom: type: string enum: [line, step, stacked, bar, monotoneX] @@ -9013,6 +9018,11 @@ components: enum: [auto, x, y, xy] geom: $ref: "#/components/schemas/XYGeom" + legendOpacity: + type: number + format: float + legendOrientationThreshold: + type: integer LinePlusSingleStatProperties: type: object required: @@ -9073,6 +9083,11 @@ components: type: string decimalPlaces: $ref: "#/components/schemas/DecimalPlaces" + legendOpacity: + type: number + format: float + legendOrientationThreshold: + type: integer MosaicViewProperties: type: object required: @@ -9148,6 +9163,11 @@ components: type: string ySuffix: type: string + legendOpacity: + type: number + format: float + legendOrientationThreshold: + type: integer ScatterViewProperties: type: object required: @@ -9226,6 +9246,11 @@ components: type: string ySuffix: type: string + legendOpacity: + type: number + format: float + legendOrientationThreshold: + type: integer HeatmapViewProperties: type: object required: @@ -9297,6 +9322,11 @@ components: type: string binSize: type: number + legendOpacity: + type: number + format: float + legendOrientationThreshold: + type: integer SingleStatViewProperties: type: object required: @@ -9345,6 +9375,11 @@ components: $ref: "#/components/schemas/Legend" decimalPlaces: $ref: "#/components/schemas/DecimalPlaces" + legendOpacity: + type: number + format: float + legendOrientationThreshold: + type: integer HistogramViewProperties: type: object required: @@ -9399,6 +9434,11 @@ components: enum: [overlaid, stacked] binCount: type: integer + legendOpacity: + type: number + format: float + legendOrientationThreshold: + type: integer GaugeViewProperties: type: object required: @@ -9561,6 +9601,11 @@ components: type: array items: $ref: "#/components/schemas/DashboardColor" + legendOpacity: + type: number + format: float + legendOrientationThreshold: + type: integer Axes: description: The viewport for a View's visualizations type: object diff --git a/pkger/clone_resource.go b/pkger/clone_resource.go index 84c1d5e9ac..40d9345454 100644 --- a/pkger/clone_resource.go +++ b/pkger/clone_resource.go @@ -612,6 
+612,8 @@ func convertCellView(cell influxdb.Cell) chart { ch.Note = p.Note ch.NoteOnEmpty = p.ShowNoteWhenEmpty ch.BinSize = int(p.BinSize) + ch.LegendOpacity = float64(p.LegendOpacity) + ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) case influxdb.HistogramViewProperties: ch.Kind = chartKindHistogram ch.Queries = convertQueries(p.Queries) @@ -623,6 +625,8 @@ func convertCellView(cell influxdb.Cell) chart { ch.NoteOnEmpty = p.ShowNoteWhenEmpty ch.BinCount = p.BinCount ch.Position = p.Position + ch.LegendOpacity = float64(p.LegendOpacity) + ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) case influxdb.MarkdownViewProperties: ch.Kind = chartKindMarkdown ch.Note = p.Note @@ -636,11 +640,15 @@ func convertCellView(cell influxdb.Cell) chart { ch.XCol = p.XColumn ch.YCol = p.YColumn ch.Position = p.Position + ch.LegendOpacity = float64(p.LegendOpacity) + ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) case influxdb.SingleStatViewProperties: setCommon(chartKindSingleStat, p.ViewColors, p.DecimalPlaces, p.Queries) setNoteFixes(p.Note, p.ShowNoteWhenEmpty, p.Prefix, p.Suffix) ch.TickPrefix = p.TickPrefix ch.TickSuffix = p.TickSuffix + ch.LegendOpacity = float64(p.LegendOpacity) + ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) case influxdb.MosaicViewProperties: ch.Kind = chartKindMosaic ch.Queries = convertQueries(p.Queries) @@ -653,6 +661,8 @@ func convertCellView(cell influxdb.Cell) chart { } ch.Note = p.Note ch.NoteOnEmpty = p.ShowNoteWhenEmpty + ch.LegendOpacity = float64(p.LegendOpacity) + ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) case influxdb.ScatterViewProperties: ch.Kind = chartKindScatter ch.Queries = convertQueries(p.Queries) @@ -665,6 +675,8 @@ func convertCellView(cell influxdb.Cell) chart { } ch.Note = p.Note ch.NoteOnEmpty = p.ShowNoteWhenEmpty + ch.LegendOpacity = float64(p.LegendOpacity) + ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) case influxdb.TableViewProperties: setCommon(chartKindTable, p.ViewColors, p.DecimalPlaces, p.Queries) setNoteFixes(p.Note, p.ShowNoteWhenEmpty, "", "") @@ -694,6 +706,8 @@ func convertCellView(cell influxdb.Cell) chart { ch.UpperColumn = p.UpperColumn ch.MainColumn = p.MainColumn ch.LowerColumn = p.LowerColumn + ch.LegendOpacity = float64(p.LegendOpacity) + ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) case influxdb.XYViewProperties: setCommon(chartKindXY, p.ViewColors, influxdb.DecimalPlaces{}, p.Queries) setNoteFixes(p.Note, p.ShowNoteWhenEmpty, "", "") @@ -705,6 +719,8 @@ func convertCellView(cell influxdb.Cell) chart { ch.XCol = p.XColumn ch.YCol = p.YColumn ch.Position = p.Position + ch.LegendOpacity = float64(p.LegendOpacity) + ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) } sort.Slice(ch.Axes, func(i, j int) bool { @@ -808,10 +824,15 @@ func convertChartToResource(ch chart) Resource { }) assignNonZeroInts(r, map[string]int{ - fieldChartXPos: ch.XPos, - fieldChartYPos: ch.YPos, - fieldChartBinCount: ch.BinCount, - fieldChartBinSize: ch.BinSize, + fieldChartXPos: ch.XPos, + fieldChartYPos: ch.YPos, + fieldChartBinCount: ch.BinCount, + fieldChartBinSize: ch.BinSize, + fieldChartLegendOrientationThreshold: ch.LegendOrientationThreshold, + }) + + assignNonZeroFloats(r, map[string]float64{ + fieldChartLegendOpacity: ch.LegendOpacity, }) return r @@ -1123,6 +1144,14 @@ func assignNonZeroInts(r Resource, m map[string]int) { } } +func assignNonZeroFloats(r Resource, m map[string]float64) { + for 
k, v := range m { + if v != 0 { + r[k] = v + } + } +} + func assignNonZeroStrings(r Resource, m map[string]string) { for k, v := range m { if v != "" { diff --git a/pkger/parser.go b/pkger/parser.go index 0119764a09..924de07c6f 100644 --- a/pkger/parser.go +++ b/pkger/parser.go @@ -1442,32 +1442,34 @@ func (p *Template) parseChart(dashMetaName string, chartIdx int, r Resource) (*c } c := chart{ - Kind: ck, - Name: r.Name(), - BinSize: r.intShort(fieldChartBinSize), - BinCount: r.intShort(fieldChartBinCount), - Geom: r.stringShort(fieldChartGeom), - Height: r.intShort(fieldChartHeight), - Note: r.stringShort(fieldChartNote), - NoteOnEmpty: r.boolShort(fieldChartNoteOnEmpty), - Position: r.stringShort(fieldChartPosition), - Prefix: r.stringShort(fieldPrefix), - Shade: r.boolShort(fieldChartShade), - HoverDimension: r.stringShort(fieldChartHoverDimension), - Suffix: r.stringShort(fieldSuffix), - TickPrefix: r.stringShort(fieldChartTickPrefix), - TickSuffix: r.stringShort(fieldChartTickSuffix), - TimeFormat: r.stringShort(fieldChartTimeFormat), - Width: r.intShort(fieldChartWidth), - XCol: r.stringShort(fieldChartXCol), - YCol: r.stringShort(fieldChartYCol), - XPos: r.intShort(fieldChartXPos), - YPos: r.intShort(fieldChartYPos), - FillColumns: r.slcStr(fieldChartFillColumns), - YSeriesColumns: r.slcStr(fieldChartYSeriesColumns), - UpperColumn: r.stringShort(fieldChartUpperColumn), - MainColumn: r.stringShort(fieldChartMainColumn), - LowerColumn: r.stringShort(fieldChartLowerColumn), + Kind: ck, + Name: r.Name(), + BinSize: r.intShort(fieldChartBinSize), + BinCount: r.intShort(fieldChartBinCount), + Geom: r.stringShort(fieldChartGeom), + Height: r.intShort(fieldChartHeight), + Note: r.stringShort(fieldChartNote), + NoteOnEmpty: r.boolShort(fieldChartNoteOnEmpty), + Position: r.stringShort(fieldChartPosition), + Prefix: r.stringShort(fieldPrefix), + Shade: r.boolShort(fieldChartShade), + HoverDimension: r.stringShort(fieldChartHoverDimension), + Suffix: r.stringShort(fieldSuffix), + TickPrefix: r.stringShort(fieldChartTickPrefix), + TickSuffix: r.stringShort(fieldChartTickSuffix), + TimeFormat: r.stringShort(fieldChartTimeFormat), + Width: r.intShort(fieldChartWidth), + XCol: r.stringShort(fieldChartXCol), + YCol: r.stringShort(fieldChartYCol), + XPos: r.intShort(fieldChartXPos), + YPos: r.intShort(fieldChartYPos), + FillColumns: r.slcStr(fieldChartFillColumns), + YSeriesColumns: r.slcStr(fieldChartYSeriesColumns), + UpperColumn: r.stringShort(fieldChartUpperColumn), + MainColumn: r.stringShort(fieldChartMainColumn), + LowerColumn: r.stringShort(fieldChartLowerColumn), + LegendOpacity: r.float64Short(fieldChartLegendOpacity), + LegendOrientationThreshold: r.intShort(fieldChartLegendOrientationThreshold), } if presLeg, ok := r[fieldChartLegend].(legend); ok { diff --git a/pkger/parser_models.go b/pkger/parser_models.go index dfdb67a262..6c78363cb6 100644 --- a/pkger/parser_models.go +++ b/pkger/parser_models.go @@ -531,70 +531,74 @@ func (d *dashboard) valid() []validationErr { } const ( - fieldChartAxes = "axes" - fieldChartBinCount = "binCount" - fieldChartBinSize = "binSize" - fieldChartColors = "colors" - fieldChartDecimalPlaces = "decimalPlaces" - fieldChartDomain = "domain" - fieldChartFillColumns = "fillColumns" - fieldChartGeom = "geom" - fieldChartHeight = "height" - fieldChartLegend = "legend" - fieldChartNote = "note" - fieldChartNoteOnEmpty = "noteOnEmpty" - fieldChartPosition = "position" - fieldChartQueries = "queries" - fieldChartShade = "shade" - fieldChartHoverDimension = 
"hoverDimension" - fieldChartFieldOptions = "fieldOptions" - fieldChartTableOptions = "tableOptions" - fieldChartTickPrefix = "tickPrefix" - fieldChartTickSuffix = "tickSuffix" - fieldChartTimeFormat = "timeFormat" - fieldChartYSeriesColumns = "ySeriesColumns" - fieldChartUpperColumn = "upperColumn" - fieldChartMainColumn = "mainColumn" - fieldChartLowerColumn = "lowerColumn" - fieldChartWidth = "width" - fieldChartXCol = "xCol" - fieldChartXPos = "xPos" - fieldChartYCol = "yCol" - fieldChartYPos = "yPos" + fieldChartAxes = "axes" + fieldChartBinCount = "binCount" + fieldChartBinSize = "binSize" + fieldChartColors = "colors" + fieldChartDecimalPlaces = "decimalPlaces" + fieldChartDomain = "domain" + fieldChartFillColumns = "fillColumns" + fieldChartGeom = "geom" + fieldChartHeight = "height" + fieldChartLegend = "legend" + fieldChartNote = "note" + fieldChartNoteOnEmpty = "noteOnEmpty" + fieldChartPosition = "position" + fieldChartQueries = "queries" + fieldChartShade = "shade" + fieldChartHoverDimension = "hoverDimension" + fieldChartFieldOptions = "fieldOptions" + fieldChartTableOptions = "tableOptions" + fieldChartTickPrefix = "tickPrefix" + fieldChartTickSuffix = "tickSuffix" + fieldChartTimeFormat = "timeFormat" + fieldChartYSeriesColumns = "ySeriesColumns" + fieldChartUpperColumn = "upperColumn" + fieldChartMainColumn = "mainColumn" + fieldChartLowerColumn = "lowerColumn" + fieldChartWidth = "width" + fieldChartXCol = "xCol" + fieldChartXPos = "xPos" + fieldChartYCol = "yCol" + fieldChartYPos = "yPos" + fieldChartLegendOpacity = "legendOpacity" + fieldChartLegendOrientationThreshold = "legendOrientationThreshold" ) type chart struct { - Kind chartKind - Name string - Prefix string - TickPrefix string - Suffix string - TickSuffix string - Note string - NoteOnEmpty bool - DecimalPlaces int - EnforceDecimals bool - Shade bool - HoverDimension string - Legend legend - Colors colors - Queries queries - Axes axes - Geom string - YSeriesColumns []string - XCol, YCol string - UpperColumn string - MainColumn string - LowerColumn string - XPos, YPos int - Height, Width int - BinSize int - BinCount int - Position string - FieldOptions []fieldOption - FillColumns []string - TableOptions tableOptions - TimeFormat string + Kind chartKind + Name string + Prefix string + TickPrefix string + Suffix string + TickSuffix string + Note string + NoteOnEmpty bool + DecimalPlaces int + EnforceDecimals bool + Shade bool + HoverDimension string + Legend legend + Colors colors + Queries queries + Axes axes + Geom string + YSeriesColumns []string + XCol, YCol string + UpperColumn string + MainColumn string + LowerColumn string + XPos, YPos int + Height, Width int + BinSize int + BinCount int + Position string + FieldOptions []fieldOption + FillColumns []string + TableOptions tableOptions + TimeFormat string + LegendOpacity float64 + LegendOrientationThreshold int } func (c *chart) properties() influxdb.ViewProperties { @@ -617,37 +621,41 @@ func (c *chart) properties() influxdb.ViewProperties { } case chartKindHeatMap: return influxdb.HeatmapViewProperties{ - Type: influxdb.ViewPropertyTypeHeatMap, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.strings(), - BinSize: int32(c.BinSize), - XColumn: c.XCol, - YColumn: c.YCol, - XDomain: c.Axes.get("x").Domain, - YDomain: c.Axes.get("y").Domain, - XPrefix: c.Axes.get("x").Prefix, - YPrefix: c.Axes.get("y").Prefix, - XSuffix: c.Axes.get("x").Suffix, - YSuffix: c.Axes.get("y").Suffix, - XAxisLabel: c.Axes.get("x").Label, - YAxisLabel: 
c.Axes.get("y").Label, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - TimeFormat: c.TimeFormat, + Type: influxdb.ViewPropertyTypeHeatMap, + Queries: c.Queries.influxDashQueries(), + ViewColors: c.Colors.strings(), + BinSize: int32(c.BinSize), + XColumn: c.XCol, + YColumn: c.YCol, + XDomain: c.Axes.get("x").Domain, + YDomain: c.Axes.get("y").Domain, + XPrefix: c.Axes.get("x").Prefix, + YPrefix: c.Axes.get("y").Prefix, + XSuffix: c.Axes.get("x").Suffix, + YSuffix: c.Axes.get("y").Suffix, + XAxisLabel: c.Axes.get("x").Label, + YAxisLabel: c.Axes.get("y").Label, + Note: c.Note, + ShowNoteWhenEmpty: c.NoteOnEmpty, + TimeFormat: c.TimeFormat, + LegendOpacity: float64(c.LegendOpacity), + LegendOrientationThreshold: int(c.LegendOrientationThreshold), } case chartKindHistogram: return influxdb.HistogramViewProperties{ - Type: influxdb.ViewPropertyTypeHistogram, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - FillColumns: c.FillColumns, - XColumn: c.XCol, - XDomain: c.Axes.get("x").Domain, - XAxisLabel: c.Axes.get("x").Label, - Position: c.Position, - BinCount: c.BinCount, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, + Type: influxdb.ViewPropertyTypeHistogram, + Queries: c.Queries.influxDashQueries(), + ViewColors: c.Colors.influxViewColors(), + FillColumns: c.FillColumns, + XColumn: c.XCol, + XDomain: c.Axes.get("x").Domain, + XAxisLabel: c.Axes.get("x").Label, + Position: c.Position, + BinCount: c.BinCount, + Note: c.Note, + ShowNoteWhenEmpty: c.NoteOnEmpty, + LegendOpacity: float64(c.LegendOpacity), + LegendOrientationThreshold: int(c.LegendOrientationThreshold), } case chartKindMarkdown: return influxdb.MarkdownViewProperties{ @@ -656,59 +664,65 @@ func (c *chart) properties() influxdb.ViewProperties { } case chartKindMosaic: return influxdb.MosaicViewProperties{ - Type: influxdb.ViewPropertyTypeMosaic, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.strings(), - XColumn: c.XCol, - YSeriesColumns: c.YSeriesColumns, - XDomain: c.Axes.get("x").Domain, - YDomain: c.Axes.get("y").Domain, - XPrefix: c.Axes.get("x").Prefix, - YPrefix: c.Axes.get("y").Prefix, - XSuffix: c.Axes.get("x").Suffix, - YSuffix: c.Axes.get("y").Suffix, - XAxisLabel: c.Axes.get("x").Label, - YAxisLabel: c.Axes.get("y").Label, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - TimeFormat: c.TimeFormat, + Type: influxdb.ViewPropertyTypeMosaic, + Queries: c.Queries.influxDashQueries(), + ViewColors: c.Colors.strings(), + XColumn: c.XCol, + YSeriesColumns: c.YSeriesColumns, + XDomain: c.Axes.get("x").Domain, + YDomain: c.Axes.get("y").Domain, + XPrefix: c.Axes.get("x").Prefix, + YPrefix: c.Axes.get("y").Prefix, + XSuffix: c.Axes.get("x").Suffix, + YSuffix: c.Axes.get("y").Suffix, + XAxisLabel: c.Axes.get("x").Label, + YAxisLabel: c.Axes.get("y").Label, + Note: c.Note, + ShowNoteWhenEmpty: c.NoteOnEmpty, + TimeFormat: c.TimeFormat, + LegendOpacity: float64(c.LegendOpacity), + LegendOrientationThreshold: int(c.LegendOrientationThreshold), } case chartKindBand: return influxdb.BandViewProperties{ - Type: influxdb.ViewPropertyTypeBand, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - Legend: c.Legend.influxLegend(), - HoverDimension: c.HoverDimension, - XColumn: c.XCol, - YColumn: c.YCol, - UpperColumn: c.UpperColumn, - MainColumn: c.MainColumn, - LowerColumn: c.LowerColumn, - Axes: c.Axes.influxAxes(), - Geom: c.Geom, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - TimeFormat: c.TimeFormat, + Type: 
influxdb.ViewPropertyTypeBand, + Queries: c.Queries.influxDashQueries(), + ViewColors: c.Colors.influxViewColors(), + Legend: c.Legend.influxLegend(), + HoverDimension: c.HoverDimension, + XColumn: c.XCol, + YColumn: c.YCol, + UpperColumn: c.UpperColumn, + MainColumn: c.MainColumn, + LowerColumn: c.LowerColumn, + Axes: c.Axes.influxAxes(), + Geom: c.Geom, + Note: c.Note, + ShowNoteWhenEmpty: c.NoteOnEmpty, + TimeFormat: c.TimeFormat, + LegendOpacity: float64(c.LegendOpacity), + LegendOrientationThreshold: int(c.LegendOrientationThreshold), } case chartKindScatter: return influxdb.ScatterViewProperties{ - Type: influxdb.ViewPropertyTypeScatter, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.strings(), - XColumn: c.XCol, - YColumn: c.YCol, - XDomain: c.Axes.get("x").Domain, - YDomain: c.Axes.get("y").Domain, - XPrefix: c.Axes.get("x").Prefix, - YPrefix: c.Axes.get("y").Prefix, - XSuffix: c.Axes.get("x").Suffix, - YSuffix: c.Axes.get("y").Suffix, - XAxisLabel: c.Axes.get("x").Label, - YAxisLabel: c.Axes.get("y").Label, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - TimeFormat: c.TimeFormat, + Type: influxdb.ViewPropertyTypeScatter, + Queries: c.Queries.influxDashQueries(), + ViewColors: c.Colors.strings(), + XColumn: c.XCol, + YColumn: c.YCol, + XDomain: c.Axes.get("x").Domain, + YDomain: c.Axes.get("y").Domain, + XPrefix: c.Axes.get("x").Prefix, + YPrefix: c.Axes.get("y").Prefix, + XSuffix: c.Axes.get("x").Suffix, + YSuffix: c.Axes.get("y").Suffix, + XAxisLabel: c.Axes.get("x").Label, + YAxisLabel: c.Axes.get("y").Label, + Note: c.Note, + ShowNoteWhenEmpty: c.NoteOnEmpty, + TimeFormat: c.TimeFormat, + LegendOpacity: float64(c.LegendOpacity), + LegendOrientationThreshold: int(c.LegendOrientationThreshold), } case chartKindSingleStat: return influxdb.SingleStatViewProperties{ @@ -721,10 +735,12 @@ func (c *chart) properties() influxdb.ViewProperties { IsEnforced: c.EnforceDecimals, Digits: int32(c.DecimalPlaces), }, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), + Note: c.Note, + ShowNoteWhenEmpty: c.NoteOnEmpty, + Queries: c.Queries.influxDashQueries(), + ViewColors: c.Colors.influxViewColors(), + LegendOpacity: float64(c.LegendOpacity), + LegendOrientationThreshold: int(c.LegendOrientationThreshold), } case chartKindSingleStatPlusLine: return influxdb.LinePlusSingleStatProperties{ @@ -735,17 +751,19 @@ func (c *chart) properties() influxdb.ViewProperties { IsEnforced: c.EnforceDecimals, Digits: int32(c.DecimalPlaces), }, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - XColumn: c.XCol, - YColumn: c.YCol, - ShadeBelow: c.Shade, - HoverDimension: c.HoverDimension, - Legend: c.Legend.influxLegend(), - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - Axes: c.Axes.influxAxes(), - Position: c.Position, + Note: c.Note, + ShowNoteWhenEmpty: c.NoteOnEmpty, + XColumn: c.XCol, + YColumn: c.YCol, + ShadeBelow: c.Shade, + HoverDimension: c.HoverDimension, + Legend: c.Legend.influxLegend(), + Queries: c.Queries.influxDashQueries(), + ViewColors: c.Colors.influxViewColors(), + Axes: c.Axes.influxAxes(), + Position: c.Position, + LegendOpacity: float64(c.LegendOpacity), + LegendOrientationThreshold: int(c.LegendOrientationThreshold), } case chartKindTable: fieldOptions := make([]influxdb.RenamableField, 0, len(c.FieldOptions)) @@ -780,20 +798,22 @@ func (c *chart) properties() influxdb.ViewProperties { } case chartKindXY: return 
influxdb.XYViewProperties{ - Type: influxdb.ViewPropertyTypeXY, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - XColumn: c.XCol, - YColumn: c.YCol, - ShadeBelow: c.Shade, - HoverDimension: c.HoverDimension, - Legend: c.Legend.influxLegend(), - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - Axes: c.Axes.influxAxes(), - Geom: c.Geom, - Position: c.Position, - TimeFormat: c.TimeFormat, + Type: influxdb.ViewPropertyTypeXY, + Note: c.Note, + ShowNoteWhenEmpty: c.NoteOnEmpty, + XColumn: c.XCol, + YColumn: c.YCol, + ShadeBelow: c.Shade, + HoverDimension: c.HoverDimension, + Legend: c.Legend.influxLegend(), + Queries: c.Queries.influxDashQueries(), + ViewColors: c.Colors.influxViewColors(), + Axes: c.Axes.influxAxes(), + Geom: c.Geom, + Position: c.Position, + TimeFormat: c.TimeFormat, + LegendOpacity: float64(c.LegendOpacity), + LegendOrientationThreshold: int(c.LegendOrientationThreshold), } default: return nil diff --git a/pkger/parser_test.go b/pkger/parser_test.go index a15cde51d5..ade8a15e45 100644 --- a/pkger/parser_test.go +++ b/pkger/parser_test.go @@ -1056,6 +1056,8 @@ spec: assert.Equal(t, "heatmap", props.GetType()) assert.Equal(t, "heatmap note", props.Note) assert.Equal(t, int32(10), props.BinSize) + assert.Equal(t, 1.0, props.LegendOpacity) + assert.Equal(t, 5, props.LegendOrientationThreshold) assert.True(t, props.ShowNoteWhenEmpty) assert.Equal(t, []float64{0, 10}, props.XDomain) @@ -1173,6 +1175,8 @@ spec: assert.Equal(t, "histogram", props.GetType()) assert.Equal(t, "histogram note", props.Note) assert.Equal(t, 30, props.BinCount) + assert.Equal(t, 1.0, props.LegendOpacity) + assert.Equal(t, 5, props.LegendOrientationThreshold) assert.True(t, props.ShowNoteWhenEmpty) assert.Equal(t, []float64{0, 10}, props.XDomain) assert.Equal(t, []string{"a", "b"}, props.FillColumns) @@ -1287,6 +1291,8 @@ spec: assert.Equal(t, "y_prefix", props.YPrefix) assert.Equal(t, "x_suffix", props.XSuffix) assert.Equal(t, "y_suffix", props.YSuffix) + assert.Equal(t, 1.0, props.LegendOpacity) + assert.Equal(t, 5, props.LegendOrientationThreshold) }) }) }) @@ -1317,6 +1323,8 @@ spec: assert.Equal(t, "foo", props.UpperColumn) assert.Equal(t, "baz", props.MainColumn) assert.Equal(t, "bar", props.LowerColumn) + assert.Equal(t, 1.0, props.LegendOpacity) + assert.Equal(t, 5, props.LegendOrientationThreshold) require.Len(t, props.ViewColors, 1) c := props.ViewColors[0] @@ -1380,6 +1388,8 @@ spec: assert.Equal(t, "y_prefix", props.YPrefix) assert.Equal(t, "x_suffix", props.XSuffix) assert.Equal(t, "y_suffix", props.YSuffix) + assert.Equal(t, 1.0, props.LegendOpacity) + assert.Equal(t, 5, props.LegendOrientationThreshold) }) }) @@ -1818,6 +1828,8 @@ spec: assert.Equal(t, "overlaid", props.Position) assert.Equal(t, "leg_type", props.Legend.Type) assert.Equal(t, "horizontal", props.Legend.Orientation) + assert.Equal(t, 1.0, props.LegendOpacity) + assert.Equal(t, 5, props.LegendOrientationThreshold) require.Len(t, props.Queries, 1) q := props.Queries[0] @@ -2272,6 +2284,8 @@ spec: assert.Equal(t, "xy chart note", props.Note) assert.True(t, props.ShowNoteWhenEmpty) assert.Equal(t, "stacked", props.Position) + assert.Equal(t, 1.0, props.LegendOpacity) + assert.Equal(t, 5, props.LegendOrientationThreshold) require.Len(t, props.Queries, 1) q := props.Queries[0] diff --git a/pkger/service_test.go b/pkger/service_test.go index e82cec8c98..93181ba297 100644 --- a/pkger/service_test.go +++ b/pkger/service_test.go @@ -2038,23 +2038,25 @@ func TestService(t *testing.T) { Name: "view 
name", }, Properties: influxdb.HeatmapViewProperties{ - Type: influxdb.ViewPropertyTypeHeatMap, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, - XColumn: "x", - YColumn: "y", - XDomain: []float64{0, 10}, - YDomain: []float64{0, 100}, - XAxisLabel: "x_label", - XPrefix: "x_prefix", - XSuffix: "x_suffix", - YAxisLabel: "y_label", - YPrefix: "y_prefix", - YSuffix: "y_suffix", - BinSize: 10, - TimeFormat: "", + Type: influxdb.ViewPropertyTypeHeatMap, + Note: "a note", + Queries: []influxdb.DashboardQuery{newQuery()}, + ShowNoteWhenEmpty: true, + ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, + XColumn: "x", + YColumn: "y", + XDomain: []float64{0, 10}, + YDomain: []float64{0, 100}, + XAxisLabel: "x_label", + XPrefix: "x_prefix", + XSuffix: "x_suffix", + YAxisLabel: "y_label", + YPrefix: "y_prefix", + YSuffix: "y_suffix", + BinSize: 10, + TimeFormat: "", + LegendOpacity: 1.0, + LegendOrientationThreshold: 5, }, }, }, @@ -2066,17 +2068,19 @@ func TestService(t *testing.T) { Name: "view name", }, Properties: influxdb.HistogramViewProperties{ - Type: influxdb.ViewPropertyTypeHistogram, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}}, - FillColumns: []string{"a", "b"}, - XColumn: "_value", - XDomain: []float64{0, 10}, - XAxisLabel: "x_label", - BinCount: 30, - Position: "stacked", + Type: influxdb.ViewPropertyTypeHistogram, + Note: "a note", + Queries: []influxdb.DashboardQuery{newQuery()}, + ShowNoteWhenEmpty: true, + ViewColors: []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}}, + FillColumns: []string{"a", "b"}, + XColumn: "_value", + XDomain: []float64{0, 10}, + XAxisLabel: "x_label", + BinCount: 30, + Position: "stacked", + LegendOpacity: 1.0, + LegendOrientationThreshold: 5, }, }, }, @@ -2088,22 +2092,24 @@ func TestService(t *testing.T) { Name: "view name", }, Properties: influxdb.ScatterViewProperties{ - Type: influxdb.ViewPropertyTypeScatter, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, - XColumn: "x", - YColumn: "y", - XDomain: []float64{0, 10}, - YDomain: []float64{0, 100}, - XAxisLabel: "x_label", - XPrefix: "x_prefix", - XSuffix: "x_suffix", - YAxisLabel: "y_label", - YPrefix: "y_prefix", - YSuffix: "y_suffix", - TimeFormat: "", + Type: influxdb.ViewPropertyTypeScatter, + Note: "a note", + Queries: []influxdb.DashboardQuery{newQuery()}, + ShowNoteWhenEmpty: true, + ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, + XColumn: "x", + YColumn: "y", + XDomain: []float64{0, 10}, + YDomain: []float64{0, 100}, + XAxisLabel: "x_label", + XPrefix: "x_prefix", + XSuffix: "x_suffix", + YAxisLabel: "y_label", + YPrefix: "y_prefix", + YSuffix: "y_suffix", + TimeFormat: "", + LegendOpacity: 1.0, + LegendOrientationThreshold: 5, }, }, }, @@ -2114,21 +2120,23 @@ func TestService(t *testing.T) { Name: "view name", }, Properties: influxdb.MosaicViewProperties{ - Type: influxdb.ViewPropertyTypeMosaic, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, - XColumn: "x", - 
YSeriesColumns: []string{"y"}, - XDomain: []float64{0, 10}, - YDomain: []float64{0, 100}, - XAxisLabel: "x_label", - XPrefix: "x_prefix", - XSuffix: "x_suffix", - YAxisLabel: "y_label", - YPrefix: "y_prefix", - YSuffix: "y_suffix", + Type: influxdb.ViewPropertyTypeMosaic, + Note: "a note", + Queries: []influxdb.DashboardQuery{newQuery()}, + ShowNoteWhenEmpty: true, + ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, + XColumn: "x", + YSeriesColumns: []string{"y"}, + XDomain: []float64{0, 10}, + YDomain: []float64{0, 100}, + XAxisLabel: "x_label", + XPrefix: "x_prefix", + XSuffix: "x_suffix", + YAxisLabel: "y_label", + YPrefix: "y_prefix", + YSuffix: "y_suffix", + LegendOpacity: 1.0, + LegendOrientationThreshold: 5, }, }, }, @@ -2139,16 +2147,18 @@ func TestService(t *testing.T) { Name: "view name", }, Properties: influxdb.SingleStatViewProperties{ - Type: influxdb.ViewPropertyTypeSingleStat, - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - Prefix: "pre", - TickPrefix: "false", - ShowNoteWhenEmpty: true, - Suffix: "suf", - TickSuffix: "true", - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, + Type: influxdb.ViewPropertyTypeSingleStat, + DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, + Note: "a note", + Queries: []influxdb.DashboardQuery{newQuery()}, + Prefix: "pre", + TickPrefix: "false", + ShowNoteWhenEmpty: true, + Suffix: "suf", + TickSuffix: "true", + ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, + LegendOpacity: 1.0, + LegendOrientationThreshold: 5, }, }, }, @@ -2160,16 +2170,18 @@ func TestService(t *testing.T) { Name: "view name", }, Properties: influxdb.SingleStatViewProperties{ - Type: influxdb.ViewPropertyTypeSingleStat, - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - Prefix: "pre", - TickPrefix: "false", - ShowNoteWhenEmpty: true, - Suffix: "suf", - TickSuffix: "true", - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, + Type: influxdb.ViewPropertyTypeSingleStat, + DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, + Note: "a note", + Queries: []influxdb.DashboardQuery{newQuery()}, + Prefix: "pre", + TickPrefix: "false", + ShowNoteWhenEmpty: true, + Suffix: "suf", + TickSuffix: "true", + ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, + LegendOpacity: 1.0, + LegendOrientationThreshold: 5, }, }, }, @@ -2181,21 +2193,23 @@ func TestService(t *testing.T) { Name: "view name", }, Properties: influxdb.LinePlusSingleStatProperties{ - Type: influxdb.ViewPropertyTypeSingleStatPlusLine, - Axes: newAxes(), - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Legend: influxdb.Legend{Type: "type", Orientation: "horizontal"}, - Note: "a note", - Prefix: "pre", - Suffix: "suf", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShadeBelow: true, - HoverDimension: "y", - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - XColumn: "x", - YColumn: "y", - Position: "stacked", + Type: influxdb.ViewPropertyTypeSingleStatPlusLine, + Axes: newAxes(), + DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, + Legend: influxdb.Legend{Type: "type", Orientation: "horizontal"}, + Note: "a note", + Prefix: "pre", + Suffix: "suf", + Queries: []influxdb.DashboardQuery{newQuery()}, + ShadeBelow: true, + HoverDimension: "y", + ShowNoteWhenEmpty: true, + 
ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, + XColumn: "x", + YColumn: "y", + Position: "stacked", + LegendOpacity: 1.0, + LegendOrientationThreshold: 5, }, }, }, @@ -2207,20 +2221,22 @@ func TestService(t *testing.T) { Name: "view name", }, Properties: influxdb.XYViewProperties{ - Type: influxdb.ViewPropertyTypeXY, - Axes: newAxes(), - Geom: "step", - Legend: influxdb.Legend{Type: "type", Orientation: "horizontal"}, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShadeBelow: true, - HoverDimension: "y", - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - XColumn: "x", - YColumn: "y", - Position: "overlaid", - TimeFormat: "", + Type: influxdb.ViewPropertyTypeXY, + Axes: newAxes(), + Geom: "step", + Legend: influxdb.Legend{Type: "type", Orientation: "horizontal"}, + Note: "a note", + Queries: []influxdb.DashboardQuery{newQuery()}, + ShadeBelow: true, + HoverDimension: "y", + ShowNoteWhenEmpty: true, + ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, + XColumn: "x", + YColumn: "y", + Position: "overlaid", + TimeFormat: "", + LegendOpacity: 1.0, + LegendOrientationThreshold: 5, }, }, }, @@ -2232,21 +2248,23 @@ func TestService(t *testing.T) { Name: "view name", }, Properties: influxdb.BandViewProperties{ - Type: influxdb.ViewPropertyTypeBand, - Axes: newAxes(), - Geom: "step", - Legend: influxdb.Legend{Type: "type", Orientation: "horizontal"}, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - HoverDimension: "y", - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - XColumn: "x", - YColumn: "y", - UpperColumn: "upper", - MainColumn: "main", - LowerColumn: "lower", - TimeFormat: "", + Type: influxdb.ViewPropertyTypeBand, + Axes: newAxes(), + Geom: "step", + Legend: influxdb.Legend{Type: "type", Orientation: "horizontal"}, + Note: "a note", + Queries: []influxdb.DashboardQuery{newQuery()}, + HoverDimension: "y", + ShowNoteWhenEmpty: true, + ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, + XColumn: "x", + YColumn: "y", + UpperColumn: "upper", + MainColumn: "main", + LowerColumn: "lower", + TimeFormat: "", + LegendOpacity: 1.0, + LegendOrientationThreshold: 5, }, }, }, diff --git a/pkger/testdata/dashboard_band.yml b/pkger/testdata/dashboard_band.yml index 38fe98b266..ceed092f45 100644 --- a/pkger/testdata/dashboard_band.yml +++ b/pkger/testdata/dashboard_band.yml @@ -22,6 +22,8 @@ spec: geom: line width: 6 height: 3 + legendOpacity: 1.0 + legendOrientationThreshold: 5 queries: - query: > from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") diff --git a/pkger/testdata/dashboard_heatmap.json b/pkger/testdata/dashboard_heatmap.json index f6e9c9e9fb..2be6d8fed8 100644 --- a/pkger/testdata/dashboard_heatmap.json +++ b/pkger/testdata/dashboard_heatmap.json @@ -20,43 +20,75 @@ "xCol": "_time", "yCol": "_value", "binSize": 10, + "legendOpacity": 1.0, + "legendOrientationThreshold": 5, "queries": [ { "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"mem\") |> filter(fn: (r) => r._field == \"used_percent\") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: \"mean\")" } ], - "axes":[ + "axes": [ { "name": "x", "label": "x_label", "prefix": "x_prefix", 
"suffix": "x_suffix", - "domain": [0, 10] + "domain": [ + 0, + 10 + ] }, { "name": "y", "label": "y_label", "prefix": "y_prefix", "suffix": "y_suffix", - "domain": [0, 100] + "domain": [ + 0, + 100 + ] } ], "colors": [ - { "hex": "#000004" }, - { "hex": "#110a30" }, - { "hex": "#320a5e" }, - { "hex": "#57106e" }, - { "hex": "#781c6d" }, - { "hex": "#9a2865" }, - { "hex": "#bc3754" }, - { "hex": "#d84c3e" }, - { "hex": "#ed6925" }, - { "hex": "#f98e09" }, - { "hex": "#fbb61a" }, - { "hex": "#f4df53" } + { + "hex": "#000004" + }, + { + "hex": "#110a30" + }, + { + "hex": "#320a5e" + }, + { + "hex": "#57106e" + }, + { + "hex": "#781c6d" + }, + { + "hex": "#9a2865" + }, + { + "hex": "#bc3754" + }, + { + "hex": "#d84c3e" + }, + { + "hex": "#ed6925" + }, + { + "hex": "#f98e09" + }, + { + "hex": "#fbb61a" + }, + { + "hex": "#f4df53" + } ] } ] } } -] +] \ No newline at end of file diff --git a/pkger/testdata/dashboard_heatmap.yml b/pkger/testdata/dashboard_heatmap.yml index fc45c907a1..a0ed081a55 100644 --- a/pkger/testdata/dashboard_heatmap.yml +++ b/pkger/testdata/dashboard_heatmap.yml @@ -14,6 +14,8 @@ spec: width: 6 height: 3 binSize: 10 + legendOpacity: 1.0 + legendOrientationThreshold: 5 xCol: _time yCol: _value queries: diff --git a/pkger/testdata/dashboard_histogram.json b/pkger/testdata/dashboard_histogram.json index e3949e5cf9..4b4063c43c 100644 --- a/pkger/testdata/dashboard_histogram.json +++ b/pkger/testdata/dashboard_histogram.json @@ -18,17 +18,25 @@ "xCol": "_value", "position": "stacked", "binCount": 30, - "fillColumns": ["a", "b"], + "legendOpacity": 1.0, + "legendOrientationThreshold": 5, + "fillColumns": [ + "a", + "b" + ], "queries": [ { "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == \"boltdb_reads_total\") |> filter(fn: (r) => r._field == \"counter\")" } ], - "axes":[ + "axes": [ { "name": "x", "label": "x_label", - "domain": [0, 10] + "domain": [ + 0, + 10 + ] } ], "colors": [ @@ -55,4 +63,4 @@ ] } } -] +] \ No newline at end of file diff --git a/pkger/testdata/dashboard_histogram.yml b/pkger/testdata/dashboard_histogram.yml index ea9c16373a..3ccd770faf 100644 --- a/pkger/testdata/dashboard_histogram.yml +++ b/pkger/testdata/dashboard_histogram.yml @@ -14,6 +14,8 @@ spec: height: 3 binCount: 30 fillColumns: ["a", "b"] + legendOpacity: 1.0 + legendOrientationThreshold: 5 queries: - query: > from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_reads_total") |> filter(fn: (r) => r._field == "counter") diff --git a/pkger/testdata/dashboard_mosaic.yml b/pkger/testdata/dashboard_mosaic.yml index 01a3cc0144..5383759915 100644 --- a/pkger/testdata/dashboard_mosaic.yml +++ b/pkger/testdata/dashboard_mosaic.yml @@ -18,6 +18,8 @@ spec: width: 6 height: 3 ySeriesColumns: ["_value", "foo"] + legendOpacity: 1.0 + legendOrientationThreshold: 5 queries: - query: > from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") diff --git a/pkger/testdata/dashboard_scatter.json b/pkger/testdata/dashboard_scatter.json index 63fb14303a..c764e803e4 100644 --- a/pkger/testdata/dashboard_scatter.json +++ b/pkger/testdata/dashboard_scatter.json @@ -19,25 +19,33 @@ "height": 3, "xCol": "_time", "yCol": "_value", + "legendOpacity": 1.0, + "legendOrientationThreshold": 
5, "queries": [ { "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"mem\") |> filter(fn: (r) => r._field == \"used_percent\") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: \"mean\")" } ], - "axes":[ + "axes": [ { "name": "x", "label": "x_label", "prefix": "x_prefix", "suffix": "x_suffix", - "domain": [0, 10] + "domain": [ + 0, + 10 + ] }, { "name": "y", "label": "y_label", "prefix": "y_prefix", "suffix": "y_suffix", - "domain": [0, 100] + "domain": [ + 0, + 100 + ] } ], "colors": [ @@ -55,4 +63,4 @@ ] } } -] +] \ No newline at end of file diff --git a/pkger/testdata/dashboard_scatter.yml b/pkger/testdata/dashboard_scatter.yml index 173358b887..fee5cdba98 100644 --- a/pkger/testdata/dashboard_scatter.yml +++ b/pkger/testdata/dashboard_scatter.yml @@ -17,6 +17,8 @@ spec: yCol: _value width: 6 height: 3 + legendOpacity: 1.0 + legendOrientationThreshold: 5 queries: - query: > from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") diff --git a/pkger/testdata/dashboard_single_stat_plus_line.json b/pkger/testdata/dashboard_single_stat_plus_line.json index 8b1f28c55a..e93f52285c 100644 --- a/pkger/testdata/dashboard_single_stat_plus_line.json +++ b/pkger/testdata/dashboard_single_stat_plus_line.json @@ -25,6 +25,8 @@ "xColumn": "_time", "yColumn": "_value", "position": "overlaid", + "legendOpacity": 1.0, + "legendOrientationThreshold": 5, "legend": { "type": "leg_type", "orientation": "horizontal" @@ -70,4 +72,4 @@ ] } } -] +] \ No newline at end of file diff --git a/pkger/testdata/dashboard_single_stat_plus_line.yml b/pkger/testdata/dashboard_single_stat_plus_line.yml index c9248db311..6686782366 100644 --- a/pkger/testdata/dashboard_single_stat_plus_line.yml +++ b/pkger/testdata/dashboard_single_stat_plus_line.yml @@ -19,6 +19,8 @@ spec: shade: true hoverDimension: "y" position: overlaid + legendOpacity: 1.0 + legendOrientationThreshold: 5 legend: type: leg_type orientation: horizontal diff --git a/pkger/testdata/dashboard_xy.json b/pkger/testdata/dashboard_xy.json index 75ee399fbb..a20b9b208e 100644 --- a/pkger/testdata/dashboard_xy.json +++ b/pkger/testdata/dashboard_xy.json @@ -24,7 +24,8 @@ "hoverDimension": "y", "xColumn": "_time", "yColumn": "_value", - "legend": {}, + "legendOpacity": 1.0, + "legendOrientationThreshold": 5, "queries": [ { "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == \"boltdb_writes_total\") |> filter(fn: (r) => r._field == \"counter\")" @@ -61,4 +62,4 @@ ] } } -] +] \ No newline at end of file diff --git a/pkger/testdata/dashboard_xy.yml b/pkger/testdata/dashboard_xy.yml index e34d611deb..aca4142563 100644 --- a/pkger/testdata/dashboard_xy.yml +++ b/pkger/testdata/dashboard_xy.yml @@ -17,7 +17,8 @@ spec: hoverDimension: "y" geom: line position: stacked - legend: + legendOpacity: 1.0 + legendOrientationThreshold: 5 queries: - query: > from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") From b17acf8b315825d659f461fdb26cac08aad3603b Mon Sep 17 00:00:00 2001 From: Stuart Carnie Date: Mon, 21 Sep 2020 11:39:08 -0700 Subject: [PATCH 33/34] fix: Ensure temporary bucket and test 
This is required to keep system resource usage low when running the Flux
end-to-end tests, which create a bucket for each test.

A bucket creates at least 17 files after the first write:

* 8 for the `_series` segment files
* 8 for the `index` log files
* 1 for the `wal`
---
 query/stdlib/testing/end_to_end_test.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/query/stdlib/testing/end_to_end_test.go b/query/stdlib/testing/end_to_end_test.go
index 71a96c4a76..135aa9699b 100644
--- a/query/stdlib/testing/end_to_end_test.go
+++ b/query/stdlib/testing/end_to_end_test.go
@@ -187,6 +187,11 @@ func testFlux(t testing.TB, l *launcher.TestLauncher, file *ast.File) {
 	if err := s.CreateBucket(context.Background(), b); err != nil {
 		t.Fatal(err)
 	}
+	defer func() {
+		if err := s.DeleteBucket(context.Background(), b.ID); err != nil {
+			t.Logf("Failed to delete bucket: %s", err)
+		}
+	}()
 
 	// Define bucket and org options
 	bucketOpt := &ast.OptionStatement{

From a0ce7c38ef64833fc9cb788797bf1efaeb1f3204 Mon Sep 17 00:00:00 2001
From: Stuart Carnie
Date: Mon, 21 Sep 2020 11:40:41 -0700
Subject: [PATCH 34/34] fix: Use DeleteDatabase to guarantee all files are
 closed and removed

A bucket is represented as a single database and retention policy, and
therefore `DeleteDatabase` is the appropriate API to remove a bucket
from TSDB.

Fixes #19600
---
 storage/engine.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/storage/engine.go b/storage/engine.go
index 91bf7e41be..792fe553b6 100644
--- a/storage/engine.go
+++ b/storage/engine.go
@@ -284,7 +284,7 @@ func (e *Engine) UpdateBucketRetentionPeriod(ctx context.Context, bucketID influ
 func (e *Engine) DeleteBucket(ctx context.Context, orgID, bucketID influxdb.ID) error {
 	span, _ := tracing.StartSpanFromContext(ctx)
 	defer span.Finish()
-	return e.tsdbStore.DeleteRetentionPolicy(bucketID.String(), meta.DefaultRetentionPolicyName)
+	return e.tsdbStore.DeleteDatabase(bucketID.String())
 }
 
 // DeleteBucketRange deletes an entire range of data from the storage engine.
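
Editor's note (illustrative, not part of the patch series): PATCH 33 releases
each test's bucket with a defer inside the test helper. On Go 1.14 or later,
the same cleanup can be registered with t.Cleanup, which also runs when the
test fails partway through. Below is a minimal sketch of that variant; the
bucketService interface and bucket struct are hypothetical stand-ins for the
influxdb types used in end_to_end_test.go, not the repository's actual API.

    package endtoend_test

    import (
        "context"
        "testing"
    )

    // bucketService captures only the two calls the cleanup needs; the real
    // tests obtain an equivalent service from the test launcher.
    // (Hypothetical stand-in for the influxdb bucket service interface.)
    type bucketService interface {
        CreateBucket(ctx context.Context, b *bucket) error
        DeleteBucket(ctx context.Context, id uint64) error
    }

    // bucket is a hypothetical stand-in for influxdb.Bucket with only the
    // fields this sketch uses.
    type bucket struct {
        ID   uint64
        Name string
    }

    // createTestBucket creates b and registers its removal with t.Cleanup.
    // Cleanup callbacks run once the test and its subtests complete, even on
    // failure, so per-test buckets cannot accumulate file handles and
    // on-disk segments across a long suite.
    func createTestBucket(t testing.TB, s bucketService, b *bucket) {
        if err := s.CreateBucket(context.Background(), b); err != nil {
            t.Fatal(err)
        }
        t.Cleanup(func() {
            if err := s.DeleteBucket(context.Background(), b.ID); err != nil {
                t.Logf("Failed to delete bucket: %s", err)
            }
        })
    }

For PATCH 34, the two commit messages together imply the on-disk layout that
motivates the fix: database-level artifacts such as the _series segment files
live beside, not inside, the retention policy directory, so
DeleteRetentionPolicy removes only the shard data while DeleteDatabase removes
the bucket's entire directory tree.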