chore(query/influxql): remove the influxql transpiler (#23000)

Jonathan A. Sternberg 2022-01-31 10:34:37 -06:00 committed by GitHub
parent 888f82c9c8
commit 2a957c9a56
367 changed files with 23 additions and 6337 deletions


@@ -733,7 +733,6 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) {
SourceService: sourceSvc,
VariableService: variableSvc,
PasswordsService: ts.PasswordsService,
InfluxQLService: storageQueryService,
InfluxqldService: iqlquery.NewProxyExecutor(m.log, qe),
FluxService: storageQueryService,
FluxLanguageService: fluxlang.DefaultService,


@@ -88,7 +88,6 @@ type APIBackend struct {
SourceService influxdb.SourceService
VariableService influxdb.VariableService
PasswordsService influxdb.PasswordsService
InfluxQLService query.ProxyQueryService
InfluxqldService influxql.ProxyQueryService
FluxService query.ProxyQueryService
FluxLanguageService fluxlang.FluxLanguageService
@@ -214,11 +213,11 @@ func NewAPIHandler(b *APIBackend, opts ...APIHandlerOptFn) *APIHandler {
writeBackend := NewWriteBackend(b.Logger.With(zap.String("handler", "write")), b)
h.Mount(prefixWrite, NewWriteHandler(b.Logger, writeBackend,
WithMaxBatchSizeBytes(b.MaxBatchSizeBytes),
//WithParserOptions(
// WithParserOptions(
// models.WithParserMaxBytes(b.WriteParserMaxBytes),
// models.WithParserMaxLines(b.WriteParserMaxLines),
// models.WithParserMaxValues(b.WriteParserMaxValues),
//),
// ),
))
for _, o := range opts {


@@ -17,7 +17,6 @@ import (
platform2 "github.com/influxdata/influxdb/v2/kit/platform"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/influxql"
)
type SourceProxyQueryService struct {
@@ -30,8 +29,6 @@ type SourceProxyQueryService struct {
func (s *SourceProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {
switch req.Request.Compiler.CompilerType() {
case influxql.CompilerType:
return s.influxQuery(ctx, w, req)
case lang.FluxCompilerType:
return s.fluxQuery(ctx, w, req)
}
@@ -105,63 +102,6 @@ func (s *SourceProxyQueryService) fluxQuery(ctx context.Context, w io.Writer, re
return flux.Statistics{}, nil
}
func (s *SourceProxyQueryService) influxQuery(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {
span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()
if len(s.URL) == 0 {
return flux.Statistics{}, tracing.LogError(span, fmt.Errorf("URL from source cannot be empty if the compiler type is influxql"))
}
u, err := newURL(s.URL, "/query")
if err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
hreq, err := http.NewRequest("POST", u.String(), nil)
if err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
// TODO(fntlnz): configure authentication methods username/password and stuff
hreq = hreq.WithContext(ctx)
params := hreq.URL.Query()
compiler, ok := req.Request.Compiler.(*influxql.Compiler)
if !ok {
return flux.Statistics{}, tracing.LogError(span, fmt.Errorf("passed compiler is not of type 'influxql'"))
}
params.Set("q", compiler.Query)
params.Set("db", compiler.DB)
params.Set("rp", compiler.RP)
hreq.URL.RawQuery = params.Encode()
hc := newTraceClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(hreq)
if err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
defer resp.Body.Close()
if err := platformhttp.CheckError(resp); err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
res := &influxql.Response{}
if err := json.NewDecoder(resp.Body).Decode(res); err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
csvDialect, ok := req.Dialect.(csv.Dialect)
if !ok {
return flux.Statistics{}, tracing.LogError(span, fmt.Errorf("unsupported dialect %T", req.Dialect))
}
if _, err = csv.NewMultiResultEncoder(csvDialect.ResultEncoderConfig).Encode(w, influxql.NewResponseIterator(res)); err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
return flux.Statistics{}, nil
}
func (s *SourceProxyQueryService) Check(context.Context) check.Response {
return platformhttp.QueryHealthCheck(s.URL, s.InsecureSkipVerify)
}


@@ -10,13 +10,12 @@ func newLegacyBackend(b *APIBackend) *legacy.Backend {
HTTPErrorHandler: b.HTTPErrorHandler,
Logger: b.Logger,
// TODO(sgc): /write support
//MaxBatchSizeBytes: b.APIBackend.MaxBatchSizeBytes,
// MaxBatchSizeBytes: b.APIBackend.MaxBatchSizeBytes,
AuthorizationService: b.AuthorizationService,
OrganizationService: b.OrganizationService,
BucketService: b.BucketService,
PointsWriter: b.PointsWriter,
DBRPMappingService: b.DBRPService,
ProxyQueryService: b.InfluxQLService,
InfluxqldQueryService: b.InfluxqldService,
WriteEventRecorder: b.WriteEventRecorder,
}


@@ -8,7 +8,6 @@ import (
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/cli"
"github.com/influxdata/influxdb/v2/kit/platform/errors"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/storage"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
@@ -33,7 +32,6 @@ type Backend struct {
BucketService influxdb.BucketService
PointsWriter storage.PointsWriter
DBRPMappingService influxdb.DBRPMappingService
ProxyQueryService query.ProxyQueryService
InfluxqldQueryService influxql.ProxyQueryService
}


@@ -9,8 +9,6 @@ import (
"io/ioutil"
"mime"
"net/http"
"regexp"
"strconv"
"time"
"unicode/utf8"
@@ -22,8 +20,6 @@ import (
"github.com/influxdata/influxdb/v2/jsonweb"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/fluxlang"
transpiler "github.com/influxdata/influxdb/v2/query/influxql"
"github.com/influxdata/influxql"
)
// QueryRequest is a flux query request.
@@ -37,9 +33,6 @@ type QueryRequest struct {
Dialect QueryDialect `json:"dialect"`
Now time.Time `json:"now"`
// InfluxQL fields
Bucket string `json:"bucket,omitempty"`
Org *influxdb.Organization `json:"-"`
// PreferNoContent specifies if the Response to this request should
@@ -93,14 +86,10 @@ func (r QueryRequest) Validate() error {
return errors.New(`request body requires either query or AST`)
}
if r.Type != "flux" && r.Type != "influxql" {
if r.Type != "flux" {
return fmt.Errorf(`unknown query type: %s`, r.Type)
}
if r.Type == "influxql" && r.Bucket == "" {
return fmt.Errorf("bucket parameter is required for influxql queries")
}
if len(r.Dialect.CommentPrefix) > 1 {
return fmt.Errorf("invalid dialect comment prefix: must be length 0 or 1")
}
@@ -149,8 +138,6 @@ func (r QueryRequest) Analyze(l fluxlang.FluxLanguageService) (*QueryAnalysis, e
switch r.Type {
case "flux":
return r.analyzeFluxQuery(l)
case "influxql":
return r.analyzeInfluxQLQuery()
}
return nil, fmt.Errorf("unknown query request type %s", r.Type)
@@ -181,61 +168,6 @@ func (r QueryRequest) analyzeFluxQuery(l fluxlang.FluxLanguageService) (*QueryAn
return a, nil
}
func (r QueryRequest) analyzeInfluxQLQuery() (*QueryAnalysis, error) {
a := &QueryAnalysis{}
_, err := influxql.ParseQuery(r.Query)
if err == nil {
a.Errors = []queryParseError{}
return a, nil
}
ms := influxqlParseErrorRE.FindAllStringSubmatch(err.Error(), -1)
a.Errors = make([]queryParseError, 0, len(ms))
for _, m := range ms {
if len(m) != 4 {
return nil, fmt.Errorf("influxql query error is not formatted as expected: got %d matches expected 4", len(m))
}
msg := m[1]
lineStr := m[2]
line, err := strconv.Atoi(lineStr)
if err != nil {
return nil, fmt.Errorf("failed to parse line number from error mesage: %s -> %v", lineStr, err)
}
charStr := m[3]
char, err := strconv.Atoi(charStr)
if err != nil {
return nil, fmt.Errorf("failed to parse character number from error mesage: %s -> %v", charStr, err)
}
a.Errors = append(a.Errors, queryParseError{
Line: line,
Column: columnFromCharacter(r.Query, char),
Character: char,
Message: msg,
})
}
return a, nil
}
func columnFromCharacter(q string, char int) int {
col := 0
for i, c := range q {
if c == '\n' {
col = 0
}
if i == char {
break
}
col++
}
return col
}
var influxqlParseErrorRE = regexp.MustCompile(`^(.+) at line (\d+), char (\d+)$`)
// ProxyRequest returns a request to proxy from the flux.
func (r QueryRequest) ProxyRequest() (*query.ProxyRequest, error) {
return r.proxyRequest(time.Now)
@@ -255,12 +187,6 @@ func (r QueryRequest) proxyRequest(now func() time.Time) (*query.ProxyRequest, e
var compiler flux.Compiler
if r.Query != "" {
switch r.Type {
case "influxql":
compiler = &transpiler.Compiler{
Now: &n,
Query: r.Query,
Bucket: r.Bucket,
}
case "flux":
fallthrough
default:
@@ -290,25 +216,20 @@ func (r QueryRequest) proxyRequest(now func() time.Time) (*query.ProxyRequest, e
if r.PreferNoContent {
dialect = &query.NoContentDialect{}
} else {
if r.Type == "influxql" {
// Use default transpiler dialect
dialect = &transpiler.Dialect{}
} else {
// TODO(nathanielc): Use commentPrefix and dateTimeFormat
// once they are supported.
encConfig := csv.ResultEncoderConfig{
NoHeader: noHeader,
Delimiter: delimiter,
Annotations: r.Dialect.Annotations,
// TODO(nathanielc): Use commentPrefix and dateTimeFormat
// once they are supported.
encConfig := csv.ResultEncoderConfig{
NoHeader: noHeader,
Delimiter: delimiter,
Annotations: r.Dialect.Annotations,
}
if r.PreferNoContentWithError {
dialect = &query.NoContentWithErrorDialect{
ResultEncoderConfig: encConfig,
}
if r.PreferNoContentWithError {
dialect = &query.NoContentWithErrorDialect{
ResultEncoderConfig: encConfig,
}
} else {
dialect = &csv.Dialect{
ResultEncoderConfig: encConfig,
}
} else {
dialect = &csv.Dialect{
ResultEncoderConfig: encConfig,
}
}
}


@@ -9,7 +9,6 @@ import (
"io/ioutil"
"net/http"
"net/url"
"sort"
"time"
"github.com/NYTimes/gziphandler"
@@ -31,7 +30,6 @@ import (
"github.com/influxdata/influxdb/v2/logger"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/fluxlang"
"github.com/influxdata/influxdb/v2/query/influxql"
"github.com/pkg/errors"
prom "github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
@@ -60,15 +58,12 @@ type FluxBackend struct {
// NewFluxBackend returns a new instance of FluxBackend.
func NewFluxBackend(log *zap.Logger, b *APIBackend) *FluxBackend {
return &FluxBackend{
HTTPErrorHandler: b.HTTPErrorHandler,
log: log,
FluxLogEnabled: b.FluxLogEnabled,
QueryEventRecorder: b.QueryEventRecorder,
AlgoWProxy: b.AlgoWProxy,
ProxyQueryService: routingQueryService{
InfluxQLService: b.InfluxQLService,
DefaultService: b.FluxService,
},
HTTPErrorHandler: b.HTTPErrorHandler,
log: log,
FluxLogEnabled: b.FluxLogEnabled,
QueryEventRecorder: b.QueryEventRecorder,
AlgoWProxy: b.AlgoWProxy,
ProxyQueryService: b.FluxService,
OrganizationService: b.OrganizationService,
FluxLanguageService: b.FluxLanguageService,
Flagger: b.Flagger,
@@ -658,38 +653,3 @@ func QueryHealthCheck(url string, insecureSkipVerify bool) check.Response {
return healthResponse
}
// routingQueryService routes queries to specific query services based on their compiler type.
type routingQueryService struct {
// InfluxQLService handles queries with compiler type of "influxql"
InfluxQLService query.ProxyQueryService
// DefaultService handles all other queries
DefaultService query.ProxyQueryService
}
func (s routingQueryService) Check(ctx context.Context) check.Response {
// Produce combined check response
response := check.Response{
Name: "internal-routingQueryService",
Status: check.StatusPass,
}
def := s.DefaultService.Check(ctx)
influxql := s.InfluxQLService.Check(ctx)
if def.Status == check.StatusFail {
response.Status = def.Status
response.Message = def.Message
} else if influxql.Status == check.StatusFail {
response.Status = influxql.Status
response.Message = influxql.Message
}
response.Checks = []check.Response{def, influxql}
sort.Sort(response.Checks)
return response
}
func (s routingQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {
if req.Request.Compiler.CompilerType() == influxql.CompilerType {
return s.InfluxQLService.Query(ctx, w, req)
}
return s.DefaultService.Query(ctx, w, req)
}


@@ -7,8 +7,6 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/influxdata/flux"
"github.com/influxdata/flux/lang"
@@ -16,7 +14,6 @@ import (
"github.com/influxdata/influxdb/v2/kit/check"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/influxql"
)
type SourceProxyQueryService struct {
@@ -27,8 +24,6 @@ type SourceProxyQueryService struct {
func (s *SourceProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {
switch req.Request.Compiler.CompilerType() {
case influxql.CompilerType:
return s.queryInfluxQL(ctx, w, req)
case lang.FluxCompilerType:
return s.queryFlux(ctx, w, req)
}
@@ -72,51 +67,6 @@ func (s *SourceProxyQueryService) queryFlux(ctx context.Context, w io.Writer, re
return flux.Statistics{}, nil
}
func (s *SourceProxyQueryService) queryInfluxQL(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {
span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()
compiler, ok := req.Request.Compiler.(*influxql.Compiler)
if !ok {
return flux.Statistics{}, tracing.LogError(span, fmt.Errorf("compiler is not of type 'influxql'"))
}
u, err := NewURL(s.Addr, "/query")
if err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
body := url.Values{}
body.Add("db", compiler.DB)
body.Add("org", compiler.Cluster)
body.Add("q", compiler.Query)
body.Add("rp", compiler.RP)
hreq, err := http.NewRequest("POST", u.String(), strings.NewReader(body.Encode()))
if err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
hreq.Header.Set("Content-Type", "application/x-www-form-urlencoded")
hreq.Header.Set("Authorization", fmt.Sprintf("Token %s", s.Token))
hreq = hreq.WithContext(ctx)
hc := NewClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(hreq)
if err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
defer resp.Body.Close()
if err := CheckError(resp); err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
if _, err = io.Copy(w, resp.Body); err != nil {
return flux.Statistics{}, tracing.LogError(span, err)
}
return flux.Statistics{}, nil
}
func (s *SourceProxyQueryService) Check(context.Context) check.Response {
return QueryHealthCheck(s.Addr, s.InsecureSkipVerify)
}


@@ -16,7 +16,6 @@ import (
"github.com/influxdata/influxdb/v2/kit/platform/errors"
"github.com/influxdata/influxdb/v2/pkg/httpc"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/influxql"
"go.uber.org/zap"
)
@@ -170,13 +169,6 @@ func decodeSourceQueryRequest(r *http.Request) (*query.ProxyRequest, error) {
req.Request.Compiler = lang.FluxCompiler{
Query: request.Query,
}
case influxql.CompilerType:
req.Request.Compiler = &influxql.Compiler{
Cluster: request.Cluster,
DB: request.DB,
RP: request.RP,
Query: request.Query,
}
default:
return nil, fmt.Errorf("compiler type not supported")
}


@@ -1,367 +0,0 @@
# InfluxQL Transpiler
The InfluxQL Transpiler exists to rewrite an InfluxQL query into its equivalent query in Flux. The transpiler works off of a few simple rules that match with the equivalent method of constructing queries in InfluxDB.
**NOTE:** The transpiler code is not finished and may not necessarily reflect what is in this document. When they conflict, this document is considered to be the correct way to do it. If you wish to change how the transpiler works, modify this file first.
1. [Select Statement](#select-statement)
1. [Identify the cursors](#identify-cursors)
2. [Identify the query type](#identify-query-type)
3. [Group the cursors](#group-cursors)
4. [Create the cursors for each group](#create-groups)
1. [Create cursor](#create-cursor)
2. [Filter by measurement and fields](#filter-cursor)
3. [Generate the pivot table](#generate-pivot-table)
4. [Evaluate the condition](#evaluate-condition)
5. [Perform the grouping](#perform-grouping)
6. [Evaluate the function](#evaluate-function)
7. [Normalize the time column](#normalize-time)
8. [Combine windows](#combine-windows)
3. [Join the groups](#join-groups)
4. [Map and eval columns](#map-and-eval)
2. [Show Databases](#show-databases)
1. [Create cursor](#show-databases-cursor)
2. [Rename and Keep the databaseName Column](#show-databases-name)
3. [Show Retention Policies](#show-retention-policies)
1. [Create cursor](#show-retention-policies-cursor)
2. [Filter by the database name](#show-retention-policies-database-filter)
3. [Rename Columns](#show-retention-policies-rename)
4. [Set Static Columns](#show-retention-policies-static-cols)
5. [Keep Specific Columns](#show-retention-policies-keep)
4. [Show Tag Values](#show-tag-values)
1. [Create cursor](#show-tag-values-cursor)
2. [Filter by the measurement](#show-tag-values-measurement-filter)
3. [Evaluate the condition](#show-tag-values-evaluate-condition)
4. [Retrieve the key values](#show-tag-values-key-values)
5. [Find the distinct key values](#show-tag-values-distinct-key-values)
3. [Encoding the results](#encoding)
## <a name="select-statement"></a> Select Statement
### <a name="identify-cursors"></a> Identify the cursors
The InfluxQL query engine works by filling in variables and evaluating the query for the values in each row. The first step of transforming a query is identifying the cursors so we can figure out how to fill them correctly. A cursor is any point in the query that has a **variable or a function call**. Math functions do not count as function calls and are handled in the eval phase.
For the following query, it is easy to identify the cursors:
```
SELECT max(usage_user), usage_system FROM telegraf..cpu
```
`max(usage_user)` and `usage_system` are the cursors that we need to fill in for each row. Cursors are global and are not per-field.
### <a name="identify-query-type"></a> Identify the query type
There are four types of queries: meta, raw, aggregate, and selector. A meta query retrieves descriptive information about a measurement or series rather than the data within it. A raw query is one where all of the cursors reference a variable. An aggregate is one where all of the cursors reference a function call. A selector is one where there is exactly one function call that is a selector (such as `max()` or `min()`) and the remaining cursors, if any, are variables. If there is only one function call and no variables, and that function is a selector, then the query type is a selector.
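For illustration, here are hypothetical queries of each non-meta type, reusing the schema from the example above:
```
SELECT usage_user, usage_system FROM telegraf..cpu
SELECT mean(usage_user) FROM telegraf..cpu
SELECT max(usage_user), usage_system FROM telegraf..cpu
```
The first is a raw query (both cursors are variables), the second is an aggregate (the only cursor is a function call), and the third is a selector (`max()` is a selector and the remaining cursor is a variable).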
### <a name="group-cursors"></a> Group the cursors
We group the cursors based on the query type. For raw queries and selectors, all of the cursors are put into the same group. For aggregates, each function call is put into a separate group so they can be joined at the end.
### <a name="create-groups"></a> Create the cursors for each group
We create the cursors within each group. This process is repeated for every group.
#### <a name="create-cursor"></a> Create cursor
The cursor is generated using the following template:
```
create_cursor = (db, rp="autogen", start, stop=now()) => from(bucket: db+"/"+rp)
|> range(start: start, stop: stop)
```
This is called once per group.
#### <a name="identify-variables"></a> Identify the variables
Each of the variables in the group is identified. This involves inspecting the condition to collect the common variables in the expression while also retrieving the variables for each expression within the group. For a function call, this retrieves the variable used as a function argument rather than the function itself.
If a wildcard is identified in the fields, then the field filter is cleared and only the measurement filter is used. If a regex wildcard is identified, it is added as one of the field filters.
#### <a name="filter-cursor"></a> Filter by measurement and fields
A filter expression is generated by using the measurement and the fields that were identified. It follows this template:
```
... |> filter(fn: (r) => r._measurement == <measurement> and <field_expr>)
```
The `<measurement>` is equal to the measurement name from the `FROM` clause. The `<field_expr>` section is generated differently depending on the fields that were found. If more than one field was selected, then each of the field filters is combined by using `or` and the expression itself is surrounded by parentheses. For a non-wildcard field, the following expression is used:
```
r._field == <name>
```
For a regex wildcard, the following is used:
```
r._field =~ <regex>
```
If a star wildcard was used, the `<field_expr>` is omitted from the filter expression.
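Putting these rules together, a query selecting two explicit fields, such as `SELECT usage_user, usage_system FROM telegraf..cpu`, would generate a filter along these lines (a sketch following the template above):
```
... |> filter(fn: (r) => r._measurement == "cpu" and (r._field == "usage_user" or r._field == "usage_system"))
```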
#### <a name="generate-pivot-table"></a> Generate the pivot table
If there was more than one field selected or if one of the fields was some form of wildcard, a pivot expression is generated.
```
... |> pivot(rowKey: ["_time"], colKey: ["_field"], valueCol: "_value")
```
#### <a name="evaluate-condition"></a> Evaluate the condition
At this point, we generate the `filter` call to evaluate the condition. If there is no condition outside of the time selector, then this step is skipped.
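As a sketch, an illustrative tag condition such as `WHERE host = 'server01'` (with the time constraint already consumed by `range()`) would generate something like:
```
... |> filter(fn: (r) => r.host == "server01")
```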
#### <a name="perform-grouping"></a> Perform the grouping
We group together the streams based on the `GROUP BY` clause. As an example:
```
> SELECT mean(usage_user) FROM telegraf..cpu WHERE time >= now() - 5m GROUP BY time(5m), host
... |> group(columns: ["_measurement", "_start", "host"]) |> window(every: 5m)
```
If the `GROUP BY time(...)` doesn't exist, `window()` is skipped. Grouping will have a default of [`_measurement`, `_start`], regardless of whether a GROUP BY clause is present. If there are keys in the group by clause, they are concatenated with the default list. If a wildcard is used for grouping, then this step is skipped.
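For example, when no `GROUP BY` clause is present at all, only the default grouping is generated (a sketch):
```
... |> group(columns: ["_measurement", "_start"])
```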
#### <a name="evaluate-function"></a> Evaluate the function
If this group contains a function call, the function is evaluated at this stage and invoked on the specific column. As an example:
```
> SELECT max(usage_user), usage_system FROM telegraf..cpu
val1 = create_cursor(bucket: "telegraf/autogen", start: -5m, m: "cpu", f: "usage_user")
val2 = create_cursor(bucket: "telegraf/autogen", start: -5m, m: "cpu", f: "usage_system")
inner_join(tables: {val1: val1, val2: val2}, except: ["_field"], fn: (tables) => {val1: tables.val1, val2: tables.val2})
|> max(column: "val1")
```
For an aggregate, the following is used instead:
```
> SELECT mean(usage_user) FROM telegraf..cpu
create_cursor(bucket: "telegraf/autogen", start: -5m, m: "cpu", f: "usage_user")
|> group(columns: ["_field"], mode: "except")
|> mean(timeSrc: "_start", columns: ["_value"])
```
If the aggregate is combined with conditions, the column name of `_value` is replaced with whatever the generated column name is.
#### <a name="normalize-time"></a> Normalize the time column
If a function was evaluated and the query type is an aggregate type or if we are grouping by time, then all of the functions need to have their time normalized. If the function is an aggregate, the following is added:
```
... |> mean() |> duplicate(column: "_start", as: "_time")
```
If it is a selector, then we need to also drop the existing `_time` column with the following:
```
... |> max() |> drop(columns: ["_time"]) |> duplicate(column: "_start", as: "_time")
```
This step does not apply if there are no functions.
#### <a name="combine-windows"></a> Combine windows
If a window operation was added, we then combine each of the function results from the windows back into a single table.
```
... |> window(every: inf)
```
This step is skipped if there was no window function.
### <a name="join-groups"></a> Join the groups
If there is only one group, this does not need to be done and can be skipped.
If there are multiple groups, as is the case when there are multiple function calls, we perform an `outer_join` using the time and any remaining group keys.
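A sketch of that join for the two-group case, assuming `outer_join` takes the same arguments as the `inner_join` shown earlier:
```
outer_join(tables: {val1: val1, val2: val2}, except: ["_field"], fn: (tables) => {val1: tables.val1, val2: tables.val2})
```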
### <a name="map-and-eval"></a> Map and eval the columns
After joining the results (if a join was required), a `map` call is used to both evaluate the math functions and name the columns. The time is also passed through the `map()` function so it is available to the encoder.
```
result |> map(fn: (r) => {_time: r._time, max: r.val1, usage_system: r.val2})
```
This is the final result. It will also include any tags in the group key and the time will be located in the `_time` variable.
TODO(jsternberg): The `_time` variable is only needed for selectors and raw queries. We can actually drop this variable for aggregate queries and use the `_start` time from the group key. Consider whether or not we should do this and if it is worth it.
## <a name="show-databases"></a> Show Databases
In 2.0, not all "buckets" will be conceptually equivalent to a 1.X database. If a bucket is intended to represent a collection of 1.X data, it will be specifically identified as such. `flux` provides a special function `databases()` that will retrieve information about all registered 1.X compatible buckets.
### <a name="show-databases-cursor"></a> Create Cursor
The cursor is trivially implemented as a no-argument call to the `databases` function:
```
databases()
```
### <a name="show-databases-name"></a>Rename and Keep the databaseName Column
The result of `databases()` has several columns. In this application, we only need the `databaseName` column, but in 1.X output the label is `name`:
```
databases()
|> rename(columns: {databaseName: "name"})
|> keep(columns: ["name"])
```
## <a name="show-retention-policies"></a> Show Retention Policies
Similar to `SHOW DATABASES`, `SHOW RETENTION POLICIES` returns information only for 1.X compatible buckets. It uses different columns from the same `databases()` function.
### <a name="show-retention-policies-cursor"></a> Create cursor
The cursor is trivially implemented as a no-argument call to the `databases` function:
```
databases()
```
### <a name="show-retention-policies-database-filter"></a> Filter by the database name
The databases function will return rows of database/retention policy pairs for all databases. The result of `SHOW RETENTION POLICIES` is defined for a single database, so we filter:
```
databases() |> filter(fn: (r) => r.databaseName == <DBNAME>)
```
### <a name="show-retention-policies-rename"></a> Rename Columns
Several columns must be renamed to match the 1.X format:
```
... |> rename(columns: {retentionPolicy: "name", retentionPeriod: "duration"})
```
### <a name="show-retention-policies-static-cols"></a> Set Static Columns
Two static columns are set. In 1.X, the columns for `shardGroupDuration` and `replicaN` could vary depending on the database/retention policy definition. In 2.0, there are no shard groups to configure, and the replication level is always 2.
```
... |> set(key: "shardGroupDuration", value: "0") |> set(key: "replicaN", value: "2")
```
### <a name="show-retention-policies-keep"></a> Keep Specific Columns
Finally, we will identify the columns in the table that we wish to keep:
```
... |> keep(columns: ["name", "duration", "shardGroupDuration", "replicaN", "default"])
```
## <a name="show-tag-values"></a> Show Tag Values
In Flux, retrieving tag values is different from InfluxQL. In InfluxDB 1.x, tags were included in the index, and restricting them by time was neither possible nor meaningful. In the 2.0 platform, tag keys and values are scoped by time, and it is more expensive to retrieve all of the tag values for all time. For this reason, there are some small changes to how the command works and therefore how it is transpiled.
### <a name="show-tag-values-cursor"></a> Create cursor
The first step is to construct the initial cursor. This is done similarly to a select statement, but we do not filter on the fields.
```
from(bucket: "telegraf/autogen")
|> range(start: -1h)
```
If no time range is specified, as is expected for most transpiled queries, we default to the last hour. If a time range is present in the `WHERE` clause, that time is used instead.
### <a name="show-tag-values-measurement-filter"></a> Filter by the measurement
If a `FROM <measurement>` clause is present in the statement, then we filter by the measurement name.
```
... |> filter(fn: (r) => r._measurement == <measurement>)
```
This step may be skipped if the `FROM` clause is not present, in which case the tag values for every measurement are returned.
### <a name="show-tag-values-evaluate-condition"></a> Evaluate the condition
The condition within the `WHERE` clause is evaluated. It generates a filter in the same way that a [select statement](#evaluate-condition) would, but with the added assumption that all of the values refer to tags. There is no attempt made at determining whether a value is a field or a tag.
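For instance, a hypothetical `SHOW TAG VALUES WITH KEY = "host" WHERE region = 'us-west'` would generate a filter that simply treats `region` as a tag:
```
... |> filter(fn: (r) => r.region == "us-west")
```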
### <a name="show-tag-values-key-values"></a> Retrieve the key values
The key values are retrieved using the `keyValues` function. The `SHOW TAG VALUES` statement requires a tag key filter.
If a single value is specified with the `=` operator, then that value is used as the single argument to the function.
```
# SHOW TAG VALUES WITH KEY = "host"
... |> keyValues(keyCols: ["host"])
```
If the `IN` operator is used, then all of the values are used as a list argument to `keyValues()`.
```
# SHOW TAG VALUES WITH KEY IN ("host", "region")
... |> keyValues(keyCols: ["host", "region"])
```
If any other operation is used, such as `!=` or a regex operator, then a schema function must be used, as follows:
```
# SHOW TAG VALUES WITH KEY != "host"
... |> keyValues(fn: (schema) => schema.keys |> filter(fn: (col) => col.name != "host"))
# SHOW TAG VALUES WITH KEY =~ /host|region/
... |> keyValues(fn: (schema) => schema.keys |> filter(fn: (col) => col.name =~ /host|region/))
# SHOW TAG VALUES WITH KEY !~ /host|region/
... |> keyValues(fn: (schema) => schema.keys |> filter(fn: (col) => col.name !~ /host|region/))
```
TODO(jsternberg): The schema function has not been solidified, but the basics are that we take the list of group keys and then run a filter using the condition.
At this point, we have a table whose group key is organized by the keys and values of the selected columns.
### <a name="show-tag-values-distinct-key-values"></a> Find the distinct key values
We group by the measurement and the key and then use `distinct` on the values. After we find the distinct values, we group these values back by their measurements again so all of the tag values for a measurement are grouped together. We then rename the columns to the expected names.
```
... |> group(columns: ["_measurement", "_key"])
|> distinct(column: "_value")
|> group(columns: ["_measurement"])
|> rename(columns: {_key: "key", _value: "value"})
```
### <a name="encoding"></a> Encoding the results
Each statement will be terminated by a `yield()` call. This call will embed the statement id as the result name. The result name is always of type string, but the transpiler will encode an integer in this field so it can be parsed by the encoder. For example:
```
result |> yield(name: "0")
```
The edge nodes from the query specification will be used to encode the results back to the user in the JSON format used in 1.x. The JSON format from 1.x is below:
```
{
"results": [
{
"statement_id": 0,
"series": [
{
"name": "_measurement",
"tags": {
"key": "value"
},
"columns": [
"time",
"value"
],
"values": [
[
"2015-01-29T21:55:43.702900257Z",
2
]
]
}
]
}
]
}
```
The measurement name is retrieved from the `_measurement` column in the results. For the tags, the values in the group key that are of type string are included, with the keys and values mapped to each other. Any values in the group key that are not strings, like the start and stop times, are ignored and discarded. If the `_field` key is still present in the group key, it is also discarded. All normal fields are included in the array of values for each row. The `_time` field will be renamed to `time` (or whatever the time alias is set to by the query).
The chunking options that existed in 1.x are not supported by the encoder and should not be used. To minimize breakage, a chunking option is ignored and the encoder operates as normal, but a message is included in the result so the user is informed that an invalid query option was used. The 1.x format already has a field for sending back informational messages.
**TODO(jsternberg):** Find a way for a column to be both used as a tag and a field. This is not currently possible because the encoder can't tell the difference between the two.


@@ -1,85 +0,0 @@
package influxql
import (
"context"
"encoding/json"
"time"
"github.com/influxdata/flux"
"github.com/influxdata/flux/lang"
"github.com/influxdata/flux/plan"
platform "github.com/influxdata/influxdb/v2"
)
const CompilerType = "influxql"
// AddCompilerMappings adds the influxql specific compiler mappings.
func AddCompilerMappings(mappings flux.CompilerMappings, dbrpMappingSvc platform.DBRPMappingService) error {
return mappings.Add(CompilerType, func() flux.Compiler {
return NewCompiler(dbrpMappingSvc)
})
}
// Compiler is the transpiler to convert InfluxQL to a Flux specification.
type Compiler struct {
Cluster string `json:"cluster,omitempty"`
DB string `json:"db,omitempty"`
RP string `json:"rp,omitempty"`
Bucket string `json:"bucket,omitempty"`
Query string `json:"query"`
Now *time.Time `json:"now,omitempty"`
logicalPlannerOptions []plan.LogicalOption
dbrpMappingSvc platform.DBRPMappingService
}
var _ flux.Compiler = &Compiler{}
func NewCompiler(dbrpMappingSvc platform.DBRPMappingService) *Compiler {
return &Compiler{
dbrpMappingSvc: dbrpMappingSvc,
}
}
// Compile transpiles the query into a Program.
func (c *Compiler) Compile(ctx context.Context, runtime flux.Runtime) (flux.Program, error) {
var now time.Time
if c.Now != nil {
now = *c.Now
} else {
now = time.Now()
}
transpiler := NewTranspilerWithConfig(
c.dbrpMappingSvc,
Config{
Bucket: c.Bucket,
Cluster: c.Cluster,
DefaultDatabase: c.DB,
DefaultRetentionPolicy: c.RP,
Now: now,
},
)
astPkg, err := transpiler.Transpile(ctx, c.Query)
if err != nil {
return nil, err
}
compileOptions := lang.WithLogPlanOpts(c.logicalPlannerOptions...)
bs, err := json.Marshal(astPkg)
if err != nil {
return nil, err
}
hdl, err := runtime.JSONToHandle(bs)
if err != nil {
return nil, err
}
return lang.CompileAST(hdl, runtime, now, compileOptions), nil
}
func (c *Compiler) CompilerType() flux.CompilerType {
return CompilerType
}
func (c *Compiler) WithLogicalPlannerOptions(opts ...plan.LogicalOption) {
c.logicalPlannerOptions = opts
}


@@ -1,12 +0,0 @@
package influxql_test
import (
"testing"
"github.com/influxdata/flux"
"github.com/influxdata/influxdb/v2/query/influxql"
)
func TestCompiler(t *testing.T) {
var _ flux.Compiler = (*influxql.Compiler)(nil)
}


@@ -1,19 +0,0 @@
package influxql
import (
"time"
)
// Config modifies the behavior of the Transpiler.
type Config struct {
// Bucket is the name of a bucket to use instead of the db/rp from the query.
// If bucket is empty then the dbrp mapping is used.
Bucket string
DefaultDatabase string
DefaultRetentionPolicy string
Cluster string
Now time.Time
// FallbackToDBRP if true will use the naming convention of `db/rp`
// for a bucket name when a mapping is not found
FallbackToDBRP bool
}


@@ -1,181 +0,0 @@
package influxql
import (
"errors"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/execute"
"github.com/influxdata/influxql"
)
// cursor holds known information about the current stream. It maps the influxql ast information
// to the attributes on a table.
type cursor interface {
// Expr is the AST expression that produces this table.
Expr() ast.Expression
// Keys returns all of the expressions that this cursor contains.
Keys() []influxql.Expr
// Value returns the string that can be used to access the computed expression.
// If this cursor does not produce the expression, this returns false for the second
// return argument.
Value(expr influxql.Expr) (string, bool)
}
// varRefCursor contains a cursor for a single variable. This is usually the raw value
// coming from the database and points to the default value column property.
type varRefCursor struct {
expr ast.Expression
ref *influxql.VarRef
}
// createVarRefCursor creates a new cursor from a variable reference using the sources
// in the transpilerState.
func createVarRefCursor(t *transpilerState, ref *influxql.VarRef) (cursor, error) {
if len(t.stmt.Sources) != 1 {
// TODO(jsternberg): Support multiple sources.
return nil, errors.New("unimplemented: only one source is allowed")
}
// Only support a direct measurement. Subqueries are not supported yet.
mm, ok := t.stmt.Sources[0].(*influxql.Measurement)
if !ok {
return nil, errors.New("unimplemented: source must be a measurement")
}
// Create the from spec and add it to the list of operations.
from, err := t.from(mm)
if err != nil {
return nil, err
}
valuer := influxql.NowValuer{Now: t.config.Now}
_, tr, err := influxql.ConditionExpr(t.stmt.Condition, &valuer)
if err != nil {
return nil, err
}
// If the maximum is not set and we have a windowing function, then
// the end time will be set to now.
if tr.Max.IsZero() {
if window, err := t.stmt.GroupByInterval(); err == nil && window > 0 {
tr.Max = t.config.Now
}
}
range_ := &ast.PipeExpression{
Argument: from,
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "range",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{
{
Key: &ast.Identifier{
Name: "start",
},
Value: &ast.DateTimeLiteral{
Value: tr.MinTime().UTC(),
},
},
{
Key: &ast.Identifier{
Name: "stop",
},
Value: &ast.DateTimeLiteral{
Value: tr.MaxTime().UTC(),
},
},
},
},
},
},
}
expr := &ast.PipeExpression{
Argument: range_,
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "filter",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{
{
Key: &ast.Identifier{
Name: "fn",
},
Value: &ast.FunctionExpression{
Params: []*ast.Property{{
Key: &ast.Identifier{
Name: "r",
},
}},
Body: &ast.LogicalExpression{
Operator: ast.AndOperator,
Left: &ast.BinaryExpression{
Operator: ast.EqualOperator,
Left: &ast.MemberExpression{
Object: &ast.Identifier{Name: "r"},
Property: &ast.Identifier{Name: "_measurement"},
},
Right: &ast.StringLiteral{
Value: mm.Name,
},
},
Right: &ast.BinaryExpression{
Operator: ast.EqualOperator,
Left: &ast.MemberExpression{
Object: &ast.Identifier{Name: "r"},
Property: &ast.Identifier{Name: "_field"},
},
Right: &ast.StringLiteral{
Value: ref.Val,
},
},
},
},
},
},
},
},
},
}
return &varRefCursor{
expr: expr,
ref: ref,
}, nil
}
func (c *varRefCursor) Expr() ast.Expression {
return c.expr
}
func (c *varRefCursor) Keys() []influxql.Expr {
return []influxql.Expr{c.ref}
}
func (c *varRefCursor) Value(expr influxql.Expr) (string, bool) {
ref, ok := expr.(*influxql.VarRef)
if !ok {
return "", false
}
// If these are the same variable reference (by pointer), then they are equal.
if ref == c.ref || *ref == *c.ref {
return execute.DefaultValueColLabel, true
}
return "", false
}
// pipeCursor wraps a cursor with a new expression while delegating all calls to the
// wrapped cursor.
type pipeCursor struct {
expr ast.Expression
cursor
}
func (c *pipeCursor) Expr() ast.Expression { return c.expr }


@@ -1,91 +0,0 @@
package influxql
import (
"net/http"
"github.com/influxdata/flux"
)
const DialectType = "influxql"
// AddDialectMappings adds the influxql specific dialect mappings.
func AddDialectMappings(mappings flux.DialectMappings) error {
return mappings.Add(DialectType, func() flux.Dialect {
return new(Dialect)
})
}
// Dialect describes the output format of InfluxQL queries.
type Dialect struct {
TimeFormat TimeFormat // TimeFormat is the format of the timestamp; defaults to RFC3339Nano.
Encoding EncodingFormat // Encoding is the format of the results; defaults to JSON.
ChunkSize int // Chunks is the number of points per chunk encoding batch; defaults to 0 or no chunking.
Compression CompressionFormat // Compression is the compression of the result output; defaults to None.
}
func (d *Dialect) SetHeaders(w http.ResponseWriter) {
switch d.Encoding {
case JSON, JSONPretty:
w.Header().Set("Content-Type", "application/json")
case CSV:
w.Header().Set("Content-Type", "text/csv")
case Msgpack:
w.Header().Set("Content-Type", "application/x-msgpack")
}
}
func (d *Dialect) Encoder() flux.MultiResultEncoder {
switch d.Encoding {
case JSON, JSONPretty:
return new(MultiResultEncoder)
default:
panic("not implemented")
}
}
func (d *Dialect) DialectType() flux.DialectType {
return DialectType
}
// TimeFormat specifies the format of the timestamp in the query results.
type TimeFormat int
const (
// RFC3339Nano is the default format for timestamps for InfluxQL.
RFC3339Nano TimeFormat = iota
// Hour formats time as the number of hours in the unix epoch.
Hour
// Minute formats time as the number of minutes in the unix epoch.
Minute
// Second formats time as the number of seconds in the unix epoch.
Second
// Millisecond formats time as the number of milliseconds in the unix epoch.
Millisecond
// Microsecond formats time as the number of microseconds in the unix epoch.
Microsecond
// Nanosecond formats time as the number of nanoseconds in the unix epoch.
Nanosecond
)
// CompressionFormat is the format to compress the query results.
type CompressionFormat int
const (
// None does not compress the results and is the default.
None CompressionFormat = iota
// Gzip compresses the query results with gzip.
Gzip
)
// EncodingFormat is the output format for the query response content.
type EncodingFormat int
const (
// JSON marshals the response to JSON octets.
JSON EncodingFormat = iota
// JSONPretty marshals the response to JSON octets with indents.
JSONPretty
// CSV marshals the response to CSV.
CSV
// Msgpack has a similar structure as the JSON response. Used?
Msgpack
)


@@ -1,12 +0,0 @@
package influxql_test
import (
"testing"
"github.com/influxdata/flux"
"github.com/influxdata/influxdb/v2/query/influxql"
)
func TestDialect(t *testing.T) {
var _ flux.Dialect = (*influxql.Dialect)(nil)
}


@@ -1,281 +0,0 @@
package influxql_test
import (
"bufio"
"bytes"
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/execute/executetest"
ifql "github.com/influxdata/flux/influxql"
"github.com/influxdata/flux/memory"
fluxquerytest "github.com/influxdata/flux/querytest"
platform "github.com/influxdata/influxdb/v2"
_ "github.com/influxdata/influxdb/v2/fluxinit/static"
platform2 "github.com/influxdata/influxdb/v2/kit/platform"
"github.com/influxdata/influxdb/v2/mock"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/influxql"
"github.com/influxdata/influxdb/v2/query/querytest"
platformtesting "github.com/influxdata/influxdb/v2/testing"
)
const generatedInfluxQLDataDir = "testdata"
var dbrpMappingSvcE2E = &mock.DBRPMappingService{}
func init() {
mapping := platform.DBRPMapping{
Database: "db0",
RetentionPolicy: "autogen",
Default: true,
OrganizationID: platformtesting.MustIDBase16("cadecadecadecade"),
BucketID: platformtesting.MustIDBase16("da7aba5e5eedca5e"),
}
dbrpMappingSvcE2E.FindByIDFn = func(ctx context.Context, orgID, id platform2.ID) (*platform.DBRPMapping, error) {
return &mapping, nil
}
dbrpMappingSvcE2E.FindManyFn = func(ctx context.Context, filter platform.DBRPMappingFilter, opt ...platform.FindOptions) ([]*platform.DBRPMapping, int, error) {
return []*platform.DBRPMapping{&mapping}, 1, nil
}
}
var skipTests = map[string]string{
"hardcoded_literal_1": "transpiler count query is off by 1 https://github.com/influxdata/influxdb/issues/10744",
"hardcoded_literal_3": "transpiler count query is off by 1 https://github.com/influxdata/influxdb/issues/10744",
"fuzz_join_within_cursor": "transpiler does not implement joining fields within a cursor https://github.com/influxdata/influxdb/issues/10743",
"derivative_count": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_first": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_last": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_max": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_mean": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_median": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_min": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_mode": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_percentile_10": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_percentile_50": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_percentile_90": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"derivative_sum": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"regex_measurement_0": "Transpiler: regex on measurements not evaluated https://github.com/influxdata/influxdb/issues/10740",
"regex_measurement_1": "Transpiler: regex on measurements not evaluated https://github.com/influxdata/influxdb/issues/10740",
"regex_measurement_2": "Transpiler: regex on measurements not evaluated https://github.com/influxdata/influxdb/issues/10740",
"regex_measurement_3": "Transpiler: regex on measurements not evaluated https://github.com/influxdata/influxdb/issues/10740",
"regex_measurement_4": "Transpiler: regex on measurements not evaluated https://github.com/influxdata/influxdb/issues/10740",
"regex_measurement_5": "Transpiler: regex on measurements not evaluated https://github.com/influxdata/influxdb/issues/10740",
"regex_tag_0": "Transpiler: Returns results in wrong sort order for regex filter on tags https://github.com/influxdata/influxdb/issues/10739",
"regex_tag_1": "Transpiler: Returns results in wrong sort order for regex filter on tags https://github.com/influxdata/influxdb/issues/10739",
"regex_tag_2": "Transpiler: Returns results in wrong sort order for regex filter on tags https://github.com/influxdata/influxdb/issues/10739",
"regex_tag_3": "Transpiler: Returns results in wrong sort order for regex filter on tags https://github.com/influxdata/influxdb/issues/10739",
"explicit_type_0": "Transpiler should remove _start column https://github.com/influxdata/influxdb/issues/10742",
"explicit_type_1": "Transpiler should remove _start column https://github.com/influxdata/influxdb/issues/10742",
"fills_0": "need fill/Interpolate function https://github.com/influxdata/flux/issues/436",
"random_math_0": "transpiler does not implement joining fields within a cursor https://github.com/influxdata/influxdb/issues/10743",
"selector_0": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"selector_1": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"selector_2": "Transpiler: first function uses different series than influxQL https://github.com/influxdata/influxdb/issues/10737",
"selector_6": "Transpiler: first function uses different series than influxQL https://github.com/influxdata/influxdb/issues/10737",
"selector_7": "Transpiler: first function uses different series than influxQL https://github.com/influxdata/influxdb/issues/10737",
"series_agg_3": "Transpiler: Implement elapsed https://github.com/influxdata/influxdb/issues/10733",
"series_agg_4": "Transpiler: Implement cumulative_sum https://github.com/influxdata/influxdb/issues/10732",
"series_agg_5": "add derivative support to the transpiler https://github.com/influxdata/influxdb/issues/10759",
"series_agg_6": "Transpiler: Implement non_negative_derivative https://github.com/influxdata/influxdb/issues/10731",
"Subquery_0": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"Subquery_1": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"Subquery_2": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"Subquery_3": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"Subquery_4": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"Subquery_5": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"NestedSubquery_0": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"NestedSubquery_1": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"NestedSubquery_2": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"NestedSubquery_3": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"SimulatedHTTP_0": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"SimulatedHTTP_1": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"SimulatedHTTP_2": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"SimulatedHTTP_3": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"SimulatedHTTP_4": "Implement subqueries in the transpiler https://github.com/influxdata/influxdb/issues/10660",
"SelectorMath_0": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_1": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_2": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_3": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_4": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_5": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_6": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_7": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_8": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_9": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_10": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_11": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_12": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_13": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_14": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_15": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_16": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_17": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_18": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_19": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_20": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_21": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_22": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_23": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_24": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_25": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_26": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_27": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_28": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_29": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_30": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_31": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"ands": "algo-w: https://github.com/influxdata/influxdb/issues/16811",
"ors": "algo-w: https://github.com/influxdata/influxdb/issues/16811",
}
var querier = fluxquerytest.NewQuerier()
func withEachInfluxQLFile(t testing.TB, fn func(prefix, caseName string)) {
dir, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
path := filepath.Join(dir, generatedInfluxQLDataDir)
influxqlFiles, err := filepath.Glob(filepath.Join(path, "*.influxql"))
if err != nil {
t.Fatalf("error searching for influxQL files: %s", err)
}
for _, influxqlFile := range influxqlFiles {
ext := filepath.Ext(influxqlFile)
prefix := influxqlFile[0 : len(influxqlFile)-len(ext)]
_, caseName := filepath.Split(prefix)
fn(prefix, caseName)
}
}
func Test_GeneratedInfluxQLQueries(t *testing.T) {
withEachInfluxQLFile(t, func(prefix, caseName string) {
reason, skip := skipTests[caseName]
influxqlName := caseName + ".influxql"
t.Run(influxqlName, func(t *testing.T) {
if skip {
t.Skip(reason)
}
testGeneratedInfluxQL(t, prefix, ".influxql")
})
})
}
func testGeneratedInfluxQL(t testing.TB, prefix, queryExt string) {
q, err := ioutil.ReadFile(prefix + queryExt)
if err != nil {
if !os.IsNotExist(err) {
t.Fatal(err)
}
t.Skip("influxql query is missing")
}
inFile := prefix + ".in.json"
outFile := prefix + ".out.json"
out, err := jsonToResultIterator(outFile)
if err != nil {
t.Fatalf("failed to read expected JSON results: %v", err)
}
defer out.Release()
var exp []flux.Result
for out.More() {
exp = append(exp, out.Next())
}
res, err := resultsFromQuerier(querier, influxQLCompiler(string(q), inFile))
if err != nil {
t.Fatalf("failed to run query: %v", err)
}
defer res.Release()
var got []flux.Result
for res.More() {
got = append(got, res.Next())
}
if err := executetest.EqualResults(exp, got); err != nil {
t.Errorf("result not as expected: %v", err)
expBuffer := new(bytes.Buffer)
for _, e := range exp {
e.Tables().Do(func(tbl flux.Table) error {
_, err := execute.NewFormatter(tbl, nil).WriteTo(expBuffer)
return err
})
}
gotBuffer := new(bytes.Buffer)
for _, e := range got {
e.Tables().Do(func(tbl flux.Table) error {
_, err := execute.NewFormatter(tbl, nil).WriteTo(gotBuffer)
return err
})
}
t.Logf("\nExpected Tables:\n%s\nActualTables:\n%s\n", expBuffer.String(), gotBuffer.String())
}
}
func resultsFromQuerier(querier *fluxquerytest.Querier, compiler flux.Compiler) (flux.ResultIterator, error) {
req := &query.ProxyRequest{
Request: query.Request{
Compiler: compiler,
},
Dialect: new(influxql.Dialect),
}
jsonBuf, err := queryToJSON(querier, req)
if err != nil {
return nil, err
}
decoder := ifql.NewResultDecoder(new(memory.Allocator))
return decoder.Decode(ioutil.NopCloser(jsonBuf))
}
func influxQLCompiler(query, filename string) flux.Compiler {
compiler := influxql.NewCompiler(dbrpMappingSvcE2E)
compiler.Cluster = "cluster"
compiler.DB = "db0"
compiler.Query = query
querytest.MakeFromInfluxJSONCompiler(compiler, filename)
return compiler
}
func queryToJSON(querier *fluxquerytest.Querier, req *query.ProxyRequest) (io.ReadCloser, error) {
var buf bytes.Buffer
_, err := querier.Query(context.Background(), &buf, req.Request.Compiler, req.Dialect)
if err != nil {
return nil, err
}
return ioutil.NopCloser(&buf), nil
}
func jsonToResultIterator(file string) (flux.ResultIterator, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
// Reader for influxql json file
jsonReader := bufio.NewReaderSize(f, 8192)
// InfluxQL json -> Flux tables decoder
decoder := ifql.NewResultDecoder(new(memory.Allocator))
// Decode json into Flux tables
results, err := decoder.Decode(ioutil.NopCloser(jsonReader))
if err != nil {
return nil, err
}
return results, nil
}

View File

@ -1,7 +0,0 @@
package influxql
import "errors"
var (
errDatabaseNameRequired = errors.New("database name required")
)

View File

@ -1,415 +0,0 @@
package influxql
import (
"errors"
"fmt"
"time"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/execute"
"github.com/influxdata/influxql"
)
func isTransformation(expr influxql.Expr) bool {
if call, ok := expr.(*influxql.Call); ok {
switch call.Name {
// TODO(ethan): more to be added here.
case "difference", "derivative", "cumulative_sum", "elapsed":
return true
}
}
return false
}
// function contains the prototype for invoking a function.
// TODO(jsternberg): This should do a lot more heavy lifting, but it mostly just
// pre-validates that we know the function exists. The cursor creation should be
// done by this struct, but it isn't at the moment.
type function struct {
Ref *influxql.VarRef
call *influxql.Call
}
// parseFunction parses a call AST and creates the function for it.
func parseFunction(expr *influxql.Call) (*function, error) {
switch expr.Name {
case "count":
if exp, got := 1, len(expr.Args); exp != got {
return nil, fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got)
}
switch ref := expr.Args[0].(type) {
case *influxql.VarRef:
return &function{
Ref: ref,
call: expr,
}, nil
case *influxql.Call:
if ref.Name == "distinct" {
return nil, errors.New("unimplemented: count(distinct)")
}
return nil, fmt.Errorf("expected field argument in %s()", expr.Name)
case *influxql.Distinct:
return nil, errors.New("unimplemented: count(distinct)")
case *influxql.Wildcard:
return nil, errors.New("unimplemented: wildcard function")
case *influxql.RegexLiteral:
return nil, errors.New("unimplemented: wildcard regex function")
default:
return nil, fmt.Errorf("expected field argument in %s()", expr.Name)
}
case "min", "max", "sum", "first", "last", "mean", "median", "difference", "stddev", "spread":
if exp, got := 1, len(expr.Args); exp != got {
return nil, fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got)
}
switch ref := expr.Args[0].(type) {
case *influxql.VarRef:
return &function{
Ref: ref,
call: expr,
}, nil
case *influxql.Wildcard:
return nil, errors.New("unimplemented: wildcard function")
case *influxql.RegexLiteral:
return nil, errors.New("unimplemented: wildcard regex function")
default:
return nil, fmt.Errorf("expected field argument in %s()", expr.Name)
}
case "percentile":
if exp, got := 2, len(expr.Args); exp != got {
return nil, fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got)
}
var functionRef *influxql.VarRef
switch ref := expr.Args[0].(type) {
case *influxql.VarRef:
functionRef = ref
case *influxql.Wildcard:
return nil, errors.New("unimplemented: wildcard function")
case *influxql.RegexLiteral:
return nil, errors.New("unimplemented: wildcard regex function")
default:
return nil, fmt.Errorf("expected field argument in %s()", expr.Name)
}
switch expr.Args[1].(type) {
case *influxql.IntegerLiteral:
case *influxql.NumberLiteral:
default:
return nil, fmt.Errorf("expected float argument in %s()", expr.Name)
}
return &function{
Ref: functionRef,
call: expr,
}, nil
default:
return nil, fmt.Errorf("unimplemented function: %q", expr.Name)
}
}
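// Illustrative sketch (not part of the original file): parseFunction accepts a
// single-field aggregate and rejects wildcards. Assuming the parser from
// github.com/influxdata/influxql:
//
//	expr, _ := influxql.ParseExpr(`count(value)`)
//	fn, _ := parseFunction(expr.(*influxql.Call)) // fn.Ref.Val == "value"
//
//	expr, _ = influxql.ParseExpr(`count(*)`)
//	_, err := parseFunction(expr.(*influxql.Call)) // err: unimplemented: wildcard function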
// createFunctionCursor creates a new cursor that calls a function on one of the columns
// and returns the result.
func createFunctionCursor(t *transpilerState, call *influxql.Call, in cursor, normalize bool) (cursor, error) {
cur := &functionCursor{
call: call,
parent: in,
}
switch call.Name {
case "count", "min", "max", "sum", "first", "last", "mean", "difference", "stddev", "spread":
value, ok := in.Value(call.Args[0])
if !ok {
return nil, fmt.Errorf("undefined variable: %s", call.Args[0])
}
cur.expr = &ast.PipeExpression{
Argument: in.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: call.Name,
},
},
}
cur.value = value
cur.exclude = map[influxql.Expr]struct{}{call.Args[0]: {}}
case "elapsed":
// TODO(ethan): https://github.com/influxdata/influxdb/issues/10733 to enable this.
value, ok := in.Value(call.Args[0])
if !ok {
return nil, fmt.Errorf("undefined variable: %s", call.Args[0])
}
unit := []ast.Duration{{
Magnitude: 1,
Unit: "ns",
}}
// elapsed has an optional unit parameter, defaulting to 1ns.
// https://docs.influxdata.com/influxdb/v1.7/query_language/functions/#elapsed
if len(call.Args) == 2 {
switch arg := call.Args[1].(type) {
case *influxql.DurationLiteral:
unit = durationLiteral(arg.Val)
default:
return nil, errors.New("argument unit must be a duration type")
}
}
cur.expr = &ast.PipeExpression{
Argument: in.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: call.Name,
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{
{
Key: &ast.Identifier{
Name: "unit",
},
Value: &ast.DurationLiteral{
Values: unit,
},
},
},
},
},
},
}
cur.value = value
case "median":
value, ok := in.Value(call.Args[0])
if !ok {
return nil, fmt.Errorf("undefined variable: %s", call.Args[0])
}
cur.expr = &ast.PipeExpression{
Argument: in.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "median",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{
{
Key: &ast.Identifier{
Name: "method",
},
Value: &ast.StringLiteral{
Value: "exact_mean",
},
},
},
},
},
},
}
cur.value = value
cur.exclude = map[influxql.Expr]struct{}{call.Args[0]: {}}
case "percentile":
if len(call.Args) != 2 {
return nil, errors.New("percentile function requires two arguments field_key and N")
}
fieldName, ok := in.Value(call.Args[0])
if !ok {
return nil, fmt.Errorf("undefined variable: %s", call.Args[0])
}
var percentile float64
switch arg := call.Args[1].(type) {
case *influxql.NumberLiteral:
percentile = arg.Val / 100.0
case *influxql.IntegerLiteral:
percentile = float64(arg.Val) / 100.0
default:
return nil, errors.New("argument N must be a float type")
}
if percentile < 0 || percentile > 1 {
return nil, errors.New("argument N must be between 0 and 100")
}
args := []*ast.Property{
{
Key: &ast.Identifier{
Name: "q",
},
Value: &ast.FloatLiteral{
Value: percentile,
},
},
{
Key: &ast.Identifier{
Name: "method",
},
Value: &ast.StringLiteral{
Value: "exact_selector",
},
},
}
if fieldName != execute.DefaultValueColLabel {
args = append(args, &ast.Property{
Key: &ast.Identifier{
Name: "columns",
},
Value: &ast.ArrayExpression{
Elements: []ast.Expression{
&ast.StringLiteral{Value: fieldName},
},
},
})
}
cur.expr = &ast.PipeExpression{
Argument: in.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "quantile",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: args,
},
},
},
}
cur.value = fieldName
cur.exclude = map[influxql.Expr]struct{}{call.Args[0]: {}}
default:
return nil, fmt.Errorf("unimplemented function: %q", call.Name)
}
// If we have been told to normalize the time, we do it here.
if normalize {
if influxql.IsSelector(call) {
cur.expr = &ast.PipeExpression{
Argument: cur.expr,
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "drop",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{{
Key: &ast.Identifier{
Name: "columns",
},
Value: &ast.ArrayExpression{
Elements: []ast.Expression{
&ast.StringLiteral{Value: execute.DefaultTimeColLabel},
},
},
}},
},
},
},
}
}
// err checked in caller
interval, _ := t.stmt.GroupByInterval()
var timeValue ast.Expression
if interval > 0 {
timeValue = &ast.MemberExpression{
Object: &ast.Identifier{
Name: "r",
},
Property: &ast.Identifier{
Name: execute.DefaultStartColLabel,
},
}
} else if isTransformation(call) || influxql.IsSelector(call) {
timeValue = &ast.MemberExpression{
Object: &ast.Identifier{
Name: "r",
},
Property: &ast.Identifier{
Name: execute.DefaultTimeColLabel,
},
}
} else {
valuer := influxql.NowValuer{Now: t.config.Now}
_, tr, err := influxql.ConditionExpr(t.stmt.Condition, &valuer)
if err != nil {
return nil, err
}
if tr.MinTime().UnixNano() == influxql.MinTime {
timeValue = &ast.DateTimeLiteral{Value: time.Unix(0, 0).UTC()}
} else {
timeValue = &ast.MemberExpression{
Object: &ast.Identifier{
Name: "r",
},
Property: &ast.Identifier{
Name: execute.DefaultStartColLabel,
},
}
}
}
cur.expr = &ast.PipeExpression{
Argument: cur.expr,
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "map",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{
{
Key: &ast.Identifier{
Name: "fn",
},
Value: &ast.FunctionExpression{
Params: []*ast.Property{{
Key: &ast.Identifier{Name: "r"},
}},
Body: &ast.ObjectExpression{
With: &ast.Identifier{Name: "r"},
Properties: []*ast.Property{{
Key: &ast.Identifier{Name: execute.DefaultTimeColLabel},
Value: timeValue,
}},
},
},
},
},
},
},
},
}
}
return cur, nil
}
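// Illustrative sketch (not part of the original file): for the InfluxQL call
// percentile(value, 90), createFunctionCursor builds a pipe expression that
// renders roughly as the Flux
//
//	... |> quantile(q: 0.9, method: "exact_selector")
//
// and, when normalization is requested for a selector, it appends
// drop(columns: ["_time"]) followed by a map() that rewrites _time from the
// window start or original time column.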
type functionCursor struct {
expr ast.Expression
call *influxql.Call
value string
exclude map[influxql.Expr]struct{}
parent cursor
}
func (c *functionCursor) Expr() ast.Expression {
return c.expr
}
func (c *functionCursor) Keys() []influxql.Expr {
keys := []influxql.Expr{c.call}
if a := c.parent.Keys(); len(a) > 0 {
for _, e := range a {
if _, ok := c.exclude[e]; ok {
continue
}
keys = append(keys, e)
}
}
return keys
}
func (c *functionCursor) Value(expr influxql.Expr) (string, bool) {
if expr == c.call {
return c.value, true
} else if _, ok := c.exclude[expr]; ok {
return "", false
}
return c.parent.Value(expr)
}

View File

@ -1,542 +0,0 @@
package influxql
import (
"fmt"
"strings"
"time"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/execute"
"github.com/influxdata/influxql"
"github.com/pkg/errors"
)
type groupInfo struct {
call *influxql.Call
refs []*influxql.VarRef
needNormalization bool
}
type groupVisitor struct {
calls []*function
refs []*influxql.VarRef
err error
}
func (v *groupVisitor) Visit(n influxql.Node) influxql.Visitor {
if v.err != nil {
return nil
}
// TODO(jsternberg): Identify duplicates so they are a single common instance.
switch expr := n.(type) {
case *influxql.Call:
// TODO(jsternberg): Identify math functions so we visit their arguments instead of recording them.
fn, err := parseFunction(expr)
if err != nil {
v.err = err
return nil
}
v.calls = append(v.calls, fn)
return nil
case *influxql.Distinct:
v.err = errors.New("unimplemented: distinct expression")
return nil
case *influxql.VarRef:
if expr.Val == "time" {
return nil
}
v.refs = append(v.refs, expr)
return nil
case *influxql.Wildcard:
v.err = errors.New("unimplemented: field wildcard")
return nil
case *influxql.RegexLiteral:
v.err = errors.New("unimplemented: field regex wildcard")
return nil
}
return v
}
// identifyGroups will identify the groups for creating data access cursors.
func identifyGroups(stmt *influxql.SelectStatement) ([]*groupInfo, error) {
v := &groupVisitor{}
influxql.Walk(v, stmt.Fields)
if v.err != nil {
return nil, v.err
}
// Attempt to take the calls and variables and put them into groups.
if len(v.refs) > 0 {
// If any of the calls are not selectors, mixing them with raw fields is an error.
for _, fn := range v.calls {
if !influxql.IsSelector(fn.call) {
return nil, errors.New("mixing aggregate and non-aggregate queries is not supported")
}
}
// All of the functions are selectors. More than one selector combined with fields is also an error.
if len(v.calls) > 1 {
return nil, errors.New("mixing multiple selector functions with tags or fields is not supported")
}
// Otherwise, we create a single group.
var call *influxql.Call
if len(v.calls) == 1 {
call = v.calls[0].call
}
return []*groupInfo{{
call: call,
refs: v.refs,
needNormalization: false, // Always a selector if we are here.
}}, nil
}
// We do not have any auxiliary fields so each of the function calls goes into
// its own group.
groups := make([]*groupInfo, 0, len(v.calls))
for _, fn := range v.calls {
groups = append(groups, &groupInfo{call: fn.call})
}
// If there is exactly one group and it contains a selector or a transformation function,
// mark it as not needing normalization.
if len(groups) == 1 {
groups[0].needNormalization = !isTransformation(groups[0].call) && !influxql.IsSelector(groups[0].call)
}
return groups, nil
}
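// Illustrative sketch (not part of the original file): for
// SELECT max(usage_user), host FROM cpu the visitor records one call and one
// variable reference, and because max is a selector identifyGroups returns a
// single group holding both; substituting mean() for max() instead fails with
// "mixing aggregate and non-aggregate queries is not supported".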
func (gr *groupInfo) createCursor(t *transpilerState) (cursor, error) {
// Create all of the cursors for every variable reference.
// TODO(jsternberg): Determine which of these cursors are from fields and which are tags.
var cursors []cursor
if gr.call != nil {
ref, ok := gr.call.Args[0].(*influxql.VarRef)
if !ok {
// TODO(jsternberg): This should be validated and figured out somewhere else.
return nil, fmt.Errorf("first argument to %q must be a variable", gr.call.Name)
}
cur, err := createVarRefCursor(t, ref)
if err != nil {
return nil, err
}
cursors = append(cursors, cur)
}
for _, ref := range gr.refs {
cur, err := createVarRefCursor(t, ref)
if err != nil {
return nil, err
}
cursors = append(cursors, cur)
}
// TODO(jsternberg): Establish which variables in the condition are tags and which are fields.
// We need to create the references to fields here so they can be joined.
var (
tags map[influxql.VarRef]struct{}
cond influxql.Expr
)
valuer := influxql.NowValuer{Now: t.config.Now}
if t.stmt.Condition != nil {
var err error
if cond, _, err = influxql.ConditionExpr(t.stmt.Condition, &valuer); err != nil {
return nil, err
} else if cond != nil {
tags = make(map[influxql.VarRef]struct{})
// Walk through the condition for every variable reference. There will be no function
// calls here.
var condErr error
influxql.WalkFunc(cond, func(node influxql.Node) {
if condErr != nil {
return
}
ref, ok := node.(*influxql.VarRef)
if !ok {
return
}
// If the variable reference is in any of the cursors, it is definitely
// a field and we do not have to inspect it further.
for _, cur := range cursors {
if _, ok := cur.Value(ref); ok {
return
}
}
// This may be a field or a tag. If it is a field, we need to create the cursor
// and add it to the listing of cursors so it can be joined before we evaluate the condition.
switch typ := t.mapType(ref); typ {
case influxql.Tag:
// Add this variable name to the listing of tags.
tags[*ref] = struct{}{}
default:
cur, err := createVarRefCursor(t, ref)
if err != nil {
condErr = err
return
}
cursors = append(cursors, cur)
}
})
}
}
// Join the cursors using an inner join.
// TODO(jsternberg): We need to differentiate between various join types and this needs to be
// except: ["_field"] rather than joining on the _measurement. This also needs to specify what the time
// column should be.
if len(cursors) > 1 {
return nil, errors.New("unimplemented: joining fields within a cursor")
}
cur := Join(t, cursors, []string{"_measurement"})
if len(tags) > 0 {
cur = &tagsCursor{cursor: cur, tags: tags}
}
// Evaluate the conditional and insert a filter if a condition exists.
if cond != nil {
// Generate a filter expression by evaluating the condition and wrapping it in a filter op.
expr, err := t.mapField(cond, cur, true)
if err != nil {
return nil, errors.Wrap(err, "unable to evaluate condition")
}
cur = &pipeCursor{
expr: &ast.PipeExpression{
Argument: cur.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "filter",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{{
Key: &ast.Identifier{Name: "fn"},
Value: &ast.FunctionExpression{
Params: []*ast.Property{{
Key: &ast.Identifier{Name: "r"},
}},
Body: expr,
},
}},
},
},
},
},
cursor: cur,
}
}
// Group together the results.
if c, err := gr.group(t, cur); err != nil {
return nil, err
} else {
cur = c
}
interval, err := t.stmt.GroupByInterval()
if err != nil {
return nil, err
}
// If a function call is present, evaluate the function call.
if gr.call != nil {
c, err := createFunctionCursor(t, gr.call, cur, gr.needNormalization || interval > 0)
if err != nil {
return nil, err
}
cur = c
// If there was a window operation, we now need to undo that and sort by the start column
// so they stay in the same table and are joined in the correct order.
if interval > 0 {
cur = &pipeCursor{
expr: &ast.PipeExpression{
Argument: cur.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{Name: "window"},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{{
Key: &ast.Identifier{Name: "every"},
Value: &ast.Identifier{Name: "inf"},
}},
},
},
},
},
cursor: cur,
}
}
} else {
// If we do not have a function, validate that the GROUP BY interval and
// fill options are legal without one and return the appropriate error.
if interval > 0 {
return nil, errors.New("using GROUP BY requires at least one aggregate function")
}
// TODO(jsternberg): Fill needs to be somewhere and it's probably here somewhere.
// Move this to the correct location once we've figured it out.
switch t.stmt.Fill {
case influxql.NoFill:
return nil, errors.New("fill(none) must be used with a function")
case influxql.LinearFill:
return nil, errors.New("fill(linear) must be used with a function")
}
}
return cur, nil
}
func (gr *groupInfo) group(t *transpilerState, in cursor) (cursor, error) {
var windowEvery time.Duration
var windowStart time.Time
tags := []ast.Expression{
&ast.StringLiteral{Value: "_measurement"},
&ast.StringLiteral{Value: "_start"},
&ast.StringLiteral{Value: "_stop"},
&ast.StringLiteral{Value: "_field"},
}
if len(t.stmt.Dimensions) > 0 {
// Maintain a set of the dimensions we have encountered.
// This is so we don't duplicate groupings, but we still maintain the
// listing of tags in the tags slice so it is deterministic.
m := make(map[string]struct{})
for _, d := range t.stmt.Dimensions {
// Reduce the expression before attempting anything. Do not evaluate the call.
expr := influxql.Reduce(d.Expr, nil)
switch expr := expr.(type) {
case *influxql.VarRef:
if strings.ToLower(expr.Val) == "time" {
return nil, errors.New("time() is a function and expects at least one argument")
} else if _, ok := m[expr.Val]; ok {
continue
}
tags = append(tags, &ast.StringLiteral{
Value: expr.Val,
})
m[expr.Val] = struct{}{}
case *influxql.Call:
// Ensure the call is time() and it has one or two duration arguments.
if expr.Name != "time" {
return nil, errors.New("only time() calls allowed in dimensions")
} else if got := len(expr.Args); got < 1 || got > 2 {
return nil, errors.New("time dimension expected 1 or 2 arguments")
} else if lit, ok := expr.Args[0].(*influxql.DurationLiteral); !ok {
return nil, errors.New("time dimension must have duration argument")
} else if windowEvery != 0 {
return nil, errors.New("multiple time dimensions not allowed")
} else {
windowEvery = lit.Val
var windowOffset time.Duration
if len(expr.Args) == 2 {
switch lit2 := expr.Args[1].(type) {
case *influxql.DurationLiteral:
windowOffset = lit2.Val % windowEvery
case *influxql.TimeLiteral:
windowOffset = lit2.Val.Sub(lit2.Val.Truncate(windowEvery))
case *influxql.Call:
if lit2.Name != "now" {
return nil, errors.New("time dimension offset function must be now()")
} else if len(lit2.Args) != 0 {
return nil, errors.New("time dimension offset now() function requires no arguments")
}
now := t.config.Now
windowOffset = now.Sub(now.Truncate(windowEvery))
// Use the evaluated offset to replace the argument. Ideally, we would
// use the interval assigned above, but the query engine hasn't been changed
// to use the compiler information yet.
expr.Args[1] = &influxql.DurationLiteral{Val: windowOffset}
case *influxql.StringLiteral:
// If literal looks like a date time then parse it as a time literal.
if lit2.IsTimeLiteral() {
tl, err := lit2.ToTimeLiteral(t.stmt.Location)
if err != nil {
return nil, err
}
windowOffset = tl.Val.Sub(tl.Val.Truncate(windowEvery))
} else {
return nil, errors.New("time dimension offset must be duration or now()")
}
default:
return nil, errors.New("time dimension offset must be duration or now()")
}
// Anchor the window start at the Unix epoch shifted by the offset.
windowStart = time.Unix(0, 0).Add(windowOffset)
}
}
case *influxql.Wildcard:
// Do not add a group call for wildcard, which means group by everything
return in, nil
case *influxql.RegexLiteral:
return nil, errors.New("unimplemented: dimension regex wildcards")
default:
return nil, errors.New("only time and tag dimensions allowed")
}
}
}
// Perform the grouping by the tags we found. There is always a group by because
// there is always something to group in influxql.
// TODO(jsternberg): A wildcard will skip this step.
in = &pipeCursor{
expr: &ast.PipeExpression{
Argument: in.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "group",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{
{
Key: &ast.Identifier{
Name: "columns",
},
Value: &ast.ArrayExpression{
Elements: tags,
},
},
{
Key: &ast.Identifier{
Name: "mode",
},
Value: &ast.StringLiteral{
Value: "by",
},
},
},
},
},
},
},
cursor: in,
}
in = &pipeCursor{
expr: &ast.PipeExpression{
Argument: in.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "keep",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{{
Key: &ast.Identifier{
Name: "columns",
},
Value: &ast.ArrayExpression{
Elements: append(tags,
&ast.StringLiteral{Value: execute.DefaultTimeColLabel},
&ast.StringLiteral{Value: execute.DefaultValueColLabel}),
},
}},
},
},
},
},
cursor: in,
}
if windowEvery > 0 {
args := []*ast.Property{{
Key: &ast.Identifier{
Name: "every",
},
Value: &ast.DurationLiteral{
Values: durationLiteral(windowEvery),
},
}}
if !windowStart.IsZero() {
args = append(args, &ast.Property{
Key: &ast.Identifier{
Name: "start",
},
Value: &ast.DateTimeLiteral{
Value: windowStart.UTC(),
},
})
}
in = &pipeCursor{
expr: &ast.PipeExpression{
Argument: in.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "window",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: args,
},
},
},
},
cursor: in,
}
}
return in, nil
}
// tagsCursor is a pseudo-cursor that can be used to access tags within the cursor.
type tagsCursor struct {
cursor
tags map[influxql.VarRef]struct{}
}
func (c *tagsCursor) Value(expr influxql.Expr) (string, bool) {
if value, ok := c.cursor.Value(expr); ok {
return value, ok
}
if ref, ok := expr.(*influxql.VarRef); ok {
if _, ok := c.tags[*ref]; ok {
return ref.Val, true
}
}
return "", false
}
func durationLiteral(d time.Duration) (dur []ast.Duration) {
for d != 0 {
switch {
case d/time.Hour > 0:
dur = append(dur, ast.Duration{
Magnitude: int64(d / time.Hour),
Unit: "h",
})
d = d % time.Hour
case d/time.Minute > 0:
dur = append(dur, ast.Duration{
Magnitude: int64(d / time.Minute),
Unit: "m",
})
d = d % time.Minute
case d/time.Second > 0:
dur = append(dur, ast.Duration{
Magnitude: int64(d / time.Second),
Unit: "s",
})
d = d % time.Second
default:
dur = append(dur, ast.Duration{
Magnitude: int64(d),
Unit: "ns",
})
return dur
}
}
if len(dur) == 0 {
dur = append(dur, ast.Duration{
Magnitude: 0,
Unit: "s",
})
}
return dur
}
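// Illustrative sketch (not part of the original file): durationLiteral
// decomposes a duration greedily into descending units, e.g.
//
//	durationLiteral(90 * time.Second) // => [{1 "m"} {30 "s"}], rendered as 1m30s
//	durationLiteral(0)                // => [{0 "s"}], rendered as 0s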

View File

@ -1,98 +0,0 @@
package influxql
import (
"fmt"
"github.com/influxdata/flux/ast"
"github.com/influxdata/influxql"
)
type joinCursor struct {
expr ast.Expression
m map[influxql.Expr]string
exprs []influxql.Expr
}
func Join(t *transpilerState, cursors []cursor, on []string) cursor {
if len(cursors) == 1 {
return cursors[0]
}
// Iterate through each cursor and each expression within each cursor to assign them an id.
var exprs []influxql.Expr
m := make(map[influxql.Expr]string)
tables := make([]*ast.Property, 0, len(cursors))
for _, cur := range cursors {
// Perform a variable assignment and use it for the table name.
ident := t.assignment(cur.Expr())
tableName := ident.Name
tables = append(tables, &ast.Property{
Key: ident,
Value: &ast.Identifier{
Name: ident.Name,
},
})
for _, k := range cur.Keys() {
// Combine the table name with the name to access this attribute so we can know
// what it will be mapped to.
varName, _ := cur.Value(k)
name := fmt.Sprintf("%s_%s", tableName, varName)
exprs = append(exprs, k)
m[k] = name
}
}
// Construct the expression for the on parameter.
onExpr := make([]ast.Expression, 0, len(on))
for _, name := range on {
onExpr = append(onExpr, &ast.StringLiteral{
Value: name,
})
}
expr := &ast.CallExpression{
Callee: &ast.Identifier{
Name: "join",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{
{
Key: &ast.Identifier{Name: "tables"},
Value: &ast.ObjectExpression{
Properties: tables,
},
},
{
Key: &ast.Identifier{Name: "on"},
Value: &ast.ArrayExpression{
Elements: onExpr,
},
},
},
},
},
}
return &joinCursor{
expr: expr,
m: m,
exprs: exprs,
}
}
func (c *joinCursor) Expr() ast.Expression {
return c.expr
}
func (c *joinCursor) Keys() []influxql.Expr {
keys := make([]influxql.Expr, 0, len(c.m))
for k := range c.m {
keys = append(keys, k)
}
return keys
}
func (c *joinCursor) Value(expr influxql.Expr) (string, bool) {
value, ok := c.m[expr]
return value, ok
}
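// Illustrative sketch (not part of the original file): joining two cursors
// whose expressions were assigned to hypothetical identifiers t0 and t1
// produces a call that renders roughly as
//
//	join(tables: {t0: t0, t1: t1}, on: ["_measurement"])
//
// while each upstream key is re-addressed as "<table>_<column>", so the
// _value column of t0 becomes t0__value.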

View File

@ -1,205 +0,0 @@
package influxql
import (
"fmt"
"strings"
"time"
"github.com/influxdata/flux/ast"
"github.com/influxdata/influxql"
)
// mapCursor holds the mapping of expressions to specific fields that happens at the end of
// the transpilation.
// TODO(jsternberg): This abstraction might be useful for subqueries, but we only need the expr
// at the moment so just hold that.
type mapCursor struct {
expr ast.Expression
}
func (c *mapCursor) Expr() ast.Expression {
return c.expr
}
func (c *mapCursor) Keys() []influxql.Expr {
panic("unimplemented")
}
func (c *mapCursor) Value(expr influxql.Expr) (string, bool) {
panic("unimplemented")
}
// mapFields takes the list of symbols and maps each of the operations
// using the column names.
func (t *transpilerState) mapFields(in cursor) (cursor, error) {
columns := t.stmt.ColumnNames()
if len(columns) != len(t.stmt.Fields) {
// TODO(jsternberg): This scenario should not be possible. Replace the use of ColumnNames with a more
// statically verifiable list of columns when we process the fields from the select statement instead
// of doing this in the future.
panic("number of columns does not match the number of fields")
}
properties := make([]*ast.Property, 0, len(t.stmt.Fields))
for i, f := range t.stmt.Fields {
if ref, ok := f.Expr.(*influxql.VarRef); ok && ref.Val == "time" {
// Skip past any time columns.
continue
}
fieldName, err := t.mapField(f.Expr, in, false)
if err != nil {
return nil, err
}
properties = append(properties, &ast.Property{
Key: fieldName.(ast.PropertyKey),
Value: &ast.StringLiteral{Value: columns[i]},
})
}
return &mapCursor{
expr: &ast.PipeExpression{
Argument: in.Expr(),
Call: &ast.CallExpression{
Callee: &ast.Identifier{
Name: "rename",
},
Arguments: []ast.Expression{
&ast.ObjectExpression{
Properties: []*ast.Property{{
Key: &ast.Identifier{
Name: "columns",
},
Value: &ast.ObjectExpression{
Properties: properties,
},
}},
},
},
},
},
}, nil
}
func (t *transpilerState) mapField(expr influxql.Expr, in cursor, returnMemberExpr bool) (ast.Expression, error) {
if sym, ok := in.Value(expr); ok {
var mappedName ast.Expression
if strings.HasPrefix(sym, "_") {
mappedName = &ast.Identifier{Name: sym}
} else {
mappedName = &ast.StringLiteral{Value: sym}
}
if returnMemberExpr {
return &ast.MemberExpression{
Object: &ast.Identifier{Name: "r"},
Property: mappedName.(ast.PropertyKey),
}, nil
}
return mappedName, nil
}
switch expr := expr.(type) {
case *influxql.Call:
if isMathFunction(expr) {
return nil, fmt.Errorf("unimplemented math function: %q", expr.Name)
}
return nil, fmt.Errorf("missing symbol for %s", expr)
case *influxql.VarRef:
return nil, fmt.Errorf("missing symbol for %s", expr)
case *influxql.BinaryExpr:
return t.evalBinaryExpr(expr, in)
case *influxql.ParenExpr:
return t.mapField(expr.Expr, in, returnMemberExpr)
case *influxql.StringLiteral:
if ts, err := expr.ToTimeLiteral(time.UTC); err == nil {
return &ast.DateTimeLiteral{Value: ts.Val}, nil
}
return &ast.StringLiteral{Value: expr.Val}, nil
case *influxql.NumberLiteral:
return &ast.FloatLiteral{Value: expr.Val}, nil
case *influxql.IntegerLiteral:
return &ast.IntegerLiteral{Value: expr.Val}, nil
case *influxql.BooleanLiteral:
return &ast.BooleanLiteral{Value: expr.Val}, nil
case *influxql.DurationLiteral:
return &ast.DurationLiteral{
Values: durationLiteral(expr.Val),
}, nil
case *influxql.TimeLiteral:
return &ast.DateTimeLiteral{Value: expr.Val}, nil
case *influxql.RegexLiteral:
return &ast.RegexpLiteral{Value: expr.Val}, nil
default:
// TODO(jsternberg): Handle the other expressions by turning them into
// an equivalent expression.
return nil, fmt.Errorf("unimplemented: %T", expr)
}
}
func (t *transpilerState) evalBinaryExpr(expr *influxql.BinaryExpr, in cursor) (ast.Expression, error) {
fn := func() func(left, right ast.Expression) ast.Expression {
b := evalBuilder{}
switch expr.Op {
case influxql.EQ:
return b.eval(ast.EqualOperator)
case influxql.NEQ:
return b.eval(ast.NotEqualOperator)
case influxql.GT:
return b.eval(ast.GreaterThanOperator)
case influxql.GTE:
return b.eval(ast.GreaterThanEqualOperator)
case influxql.LT:
return b.eval(ast.LessThanOperator)
case influxql.LTE:
return b.eval(ast.LessThanEqualOperator)
case influxql.ADD:
return b.eval(ast.AdditionOperator)
case influxql.SUB:
return b.eval(ast.SubtractionOperator)
case influxql.AND:
return b.logical(ast.AndOperator)
case influxql.OR:
return b.logical(ast.OrOperator)
case influxql.EQREGEX:
return b.eval(ast.RegexpMatchOperator)
case influxql.NEQREGEX:
return b.eval(ast.NotRegexpMatchOperator)
default:
return nil
}
}()
if fn == nil {
return nil, fmt.Errorf("unimplemented binary expression: %s", expr.Op)
}
lhs, err := t.mapField(expr.LHS, in, true)
if err != nil {
return nil, err
}
rhs, err := t.mapField(expr.RHS, in, true)
if err != nil {
return nil, err
}
return fn(lhs, rhs), nil
}
// evalBuilder is used for namespacing the logical and eval wrapping functions.
type evalBuilder struct{}
func (evalBuilder) logical(op ast.LogicalOperatorKind) func(left, right ast.Expression) ast.Expression {
return func(left, right ast.Expression) ast.Expression {
return &ast.LogicalExpression{
Operator: op,
Left: left,
Right: right,
}
}
}
func (evalBuilder) eval(op ast.OperatorKind) func(left, right ast.Expression) ast.Expression {
return func(left, right ast.Expression) ast.Expression {
return &ast.BinaryExpression{
Operator: op,
Left: left,
Right: right,
}
}
}
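// Illustrative sketch (not part of the original file): a condition such as
// host = 'a' AND usage > 10 maps through evalBinaryExpr to the Flux row
// predicate
//
//	r["host"] == "a" and r["usage"] > 10
//
// where symbols starting with "_" become dotted identifiers and all other
// symbols become bracketed string members.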

View File

@ -1,12 +0,0 @@
package influxql
import "github.com/influxdata/influxql"
// isMathFunction returns true if the call is a math function.
func isMathFunction(expr *influxql.Call) bool {
switch expr.Name {
case "abs", "sin", "cos", "tan", "asin", "acos", "atan", "atan2", "exp", "log", "ln", "log2", "log10", "sqrt", "pow", "floor", "ceil", "round":
return true
}
return false
}

View File

@ -1,44 +0,0 @@
package influxql
// all of this code is copied more or less verbatim from the influxdb repo.
// we copy instead of sharing because we want to prevent inadvertent breaking
// changes introduced by the transpiler vs the actual InfluxQL engine.
// By copying the code, we'll be able to detect more explicitly that the
// results generated by the transpiler diverge from InfluxQL.
type Response struct {
Results []Result `json:"results,omitempty"`
Err string `json:"error,omitempty"`
}
func (r *Response) error(err error) {
r.Results = nil
r.Err = err.Error()
}
// Message represents a user-facing message to be included with the result.
type Message struct {
Level string `json:"level"`
Text string `json:"text"`
}
// Result represents a resultset returned from a single statement.
// Series holds the rows, which can be sorted consistently by name/tag.
type Result struct {
// StatementID is just the statement's position in the query. It's used
// to combine statement results if they're being buffered in memory.
StatementID int `json:"statement_id"`
Series []*Row `json:"series,omitempty"`
Messages []*Message `json:"messages,omitempty"`
Partial bool `json:"partial,omitempty"`
Err string `json:"error,omitempty"`
}
// Row represents a single row returned from the execution of a statement.
type Row struct {
Name string `json:"name,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Columns []string `json:"columns,omitempty"`
Values [][]interface{} `json:"values,omitempty"`
Partial bool `json:"partial,omitempty"`
}
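// Illustrative sketch (not part of the original file): a minimal marshaled
// Response has the familiar 1.x shape, e.g.
//
//	{"results":[{"statement_id":0,"series":[{"name":"cpu",
//	    "columns":["time","value"],"values":[["2018-05-24T09:00:00Z",2]]}]}]}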

View File

@ -1,402 +0,0 @@
package influxql
import (
"fmt"
"sort"
"strconv"
"time"
"github.com/influxdata/flux"
"github.com/influxdata/flux/array"
"github.com/influxdata/flux/arrow"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/values"
)
// responseIterator implements flux.ResultIterator for a Response.
type responseIterator struct {
response *Response
resultIdx int
}
// NewResponseIterator constructs a flux.ResultIterator from a Response.
func NewResponseIterator(r *Response) flux.ResultIterator {
return &responseIterator{
response: r,
}
}
// More returns true if there are results left to iterate through.
// It is used to implement flux.ResultIterator.
func (r *responseIterator) More() bool {
return r.resultIdx < len(r.response.Results)
}
// Next retrieves the next flux.Result.
// It is used to implement flux.ResultIterator.
func (r *responseIterator) Next() flux.Result {
res := r.response.Results[r.resultIdx]
r.resultIdx++
return newQueryResult(&res)
}
// Release is a noop.
// It is used to implement flux.ResultIterator.
func (r *responseIterator) Release() {}
// Err returns an error if the response contained an error.
// It is used to implement flux.ResultIterator.
func (r *responseIterator) Err() error {
if r.response.Err != "" {
return fmt.Errorf("%s", r.response.Err)
}
return nil
}
func (r *responseIterator) Statistics() flux.Statistics {
return flux.Statistics{}
}
// seriesIterator is a simple wrapper for Result that implements flux.Result and flux.TableIterator.
type seriesIterator struct {
result *Result
}
func newQueryResult(r *Result) *seriesIterator {
return &seriesIterator{
result: r,
}
}
// Name returns the result's statement id.
// It is used to implement flux.Result.
func (r *seriesIterator) Name() string {
return strconv.Itoa(r.result.StatementID)
}
// Tables returns the result as a flux.TableIterator.
// It is used to implement flux.Result.
func (r *seriesIterator) Tables() flux.TableIterator {
return r
}
// Do iterates through the series of a Result.
// It is used to implement flux.TableIterator.
func (r *seriesIterator) Do(f func(flux.Table) error) error {
for _, row := range r.result.Series {
t, err := newQueryTable(row)
if err != nil {
return err
}
if err := f(t); err != nil {
return err
}
}
return nil
}
func (r *seriesIterator) Statistics() flux.Statistics {
return flux.Statistics{}
}
// queryTable implements flux.Table and flux.ColReader.
type queryTable struct {
row *Row
groupKey flux.GroupKey
colMeta []flux.ColMeta
cols []array.Interface
}
func newQueryTable(r *Row) (*queryTable, error) {
t := &queryTable{
row: r,
}
if err := t.translateRowsToColumns(); err != nil {
return nil, err
}
return t, nil
}
func (t *queryTable) Statistics() flux.Statistics {
return flux.Statistics{}
}
// Data in a column is laid out in the following way:
// [ r.row.Columns... , r.tagKeys()... , r.row.Name ]
func (t *queryTable) translateRowsToColumns() error {
t.cols = make([]array.Interface, len(t.Cols()))
for i := range t.row.Columns {
col := t.Cols()[i]
switch col.Type {
case flux.TFloat:
b := arrow.NewFloatBuilder(&memory.Allocator{})
b.Reserve(t.Len())
for _, row := range t.row.Values {
val, ok := row[i].(float64)
if !ok {
return fmt.Errorf("unsupported type %T found in column %s of type %s", val, col.Label, col.Type)
}
b.Append(val)
}
t.cols[i] = b.NewArray()
b.Release()
case flux.TInt:
b := arrow.NewIntBuilder(&memory.Allocator{})
b.Reserve(t.Len())
for _, row := range t.row.Values {
val, ok := row[i].(int64)
if !ok {
return fmt.Errorf("unsupported type %T found in column %s of type %s", val, col.Label, col.Type)
}
b.Append(val)
}
t.cols[i] = b.NewArray()
b.Release()
case flux.TUInt:
b := arrow.NewUintBuilder(&memory.Allocator{})
b.Reserve(t.Len())
for _, row := range t.row.Values {
val, ok := row[i].(uint64)
if !ok {
return fmt.Errorf("unsupported type %T found in column %s of type %s", val, col.Label, col.Type)
}
b.Append(val)
}
t.cols[i] = b.NewArray()
b.Release()
case flux.TString:
b := arrow.NewStringBuilder(&memory.Allocator{})
b.Reserve(t.Len())
for _, row := range t.row.Values {
val, ok := row[i].(string)
if !ok {
return fmt.Errorf("unsupported type %T found in column %s of type %s", val, col.Label, col.Type)
}
b.Append(val)
}
t.cols[i] = b.NewArray()
b.Release()
case flux.TBool:
b := arrow.NewBoolBuilder(&memory.Allocator{})
b.Reserve(t.Len())
for _, row := range t.row.Values {
val, ok := row[i].(bool)
if !ok {
return fmt.Errorf("unsupported type %T found in column %s of type %s", val, col.Label, col.Type)
}
b.Append(val)
}
t.cols[i] = b.NewArray()
b.Release()
case flux.TTime:
b := arrow.NewIntBuilder(&memory.Allocator{})
b.Reserve(t.Len())
for _, row := range t.row.Values {
switch val := row[i].(type) {
case int64:
b.Append(val)
case float64:
b.Append(int64(val))
case string:
tm, err := time.Parse(time.RFC3339, val)
if err != nil {
return fmt.Errorf("could not parse string %q as time: %v", val, err)
}
b.Append(tm.UnixNano())
default:
return fmt.Errorf("unsupported type %T found in column %s", val, col.Label)
}
}
t.cols[i] = b.NewArray()
b.Release()
default:
return fmt.Errorf("invalid type %T found in column %s", col.Type, col.Label)
}
}
for j := len(t.row.Columns); j < len(t.Cols()); j++ {
b := arrow.NewStringBuilder(&memory.Allocator{})
b.Reserve(t.Len())
var value string
if key := t.Cols()[j].Label; key == "_measurement" {
value = t.row.Name
} else {
value = t.row.Tags[key]
}
for i := 0; i < t.Len(); i++ {
b.Append(value)
}
t.cols[j] = b.NewArray()
b.Release()
}
return nil
}
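// Illustrative sketch (not part of the original file): a Row with
// Columns ["time", "usage"], Tags {"host": "a"}, and Name "cpu" produces the
// column layout [_time, usage, host, _measurement], where the trailing tag and
// measurement columns are constant string arrays repeated Len() times.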
// Key constructs the flux.GroupKey for a Row from the row's
// tags and measurement.
// It is used to implement flux.Table and flux.ColReader.
func (r *queryTable) Key() flux.GroupKey {
if r.groupKey == nil {
cols := make([]flux.ColMeta, len(r.row.Tags)+1) // plus one is for measurement
vs := make([]values.Value, len(r.row.Tags)+1)
kvs := make([]interface{}, len(r.row.Tags)+1)
colMeta := r.Cols()
labels := append(r.tagKeys(), "_measurement")
for j, label := range labels {
idx := execute.ColIdx(label, colMeta)
if idx < 0 {
panic(fmt.Errorf("table invalid: missing group column %q", label))
}
cols[j] = colMeta[idx]
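// Note: the key value below is the placeholder "string"; the actual tag
// values live in the column arrays built by translateRowsToColumns.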
kvs[j] = "string"
v := values.New(kvs[j])
if v == values.InvalidValue {
panic(fmt.Sprintf("unsupported value kind %T", kvs[j]))
}
vs[j] = v
}
r.groupKey = execute.NewGroupKey(cols, vs)
}
return r.groupKey
}
// tagKeys returns the sorted tag keys for a Row.
func (r *queryTable) tagKeys() []string {
tags := []string{}
for t := range r.row.Tags {
tags = append(tags, t)
}
sort.Strings(tags)
return tags
}
// Cols returns the columns for a row where the data is laid out in the following way:
// [ r.row.Columns... , r.tagKeys()... , r.row.Name ]
// It is used to implement flux.Table and flux.ColReader.
func (r *queryTable) Cols() []flux.ColMeta {
if r.colMeta == nil {
colMeta := make([]flux.ColMeta, len(r.row.Columns)+len(r.row.Tags)+1)
for i, col := range r.row.Columns {
colMeta[i] = flux.ColMeta{
Label: col,
Type: flux.TInvalid,
}
if col == "time" {
// rename the time column
colMeta[i].Label = "_time"
colMeta[i].Type = flux.TTime
}
}
if len(r.row.Values) < 1 {
panic("must have at least one value")
}
data := r.row.Values[0]
for i := range r.row.Columns {
v := data[i]
if colMeta[i].Label == "_time" {
continue
}
switch v.(type) {
case float64:
colMeta[i].Type = flux.TFloat
case int64:
colMeta[i].Type = flux.TInt
case uint64:
colMeta[i].Type = flux.TUInt
case bool:
colMeta[i].Type = flux.TBool
case string:
colMeta[i].Type = flux.TString
}
}
tags := r.tagKeys()
leng := len(r.row.Columns)
for i, tag := range tags {
colMeta[leng+i] = flux.ColMeta{
Label: tag,
Type: flux.TString,
}
}
leng = leng + len(tags)
colMeta[leng] = flux.ColMeta{
Label: "_measurement",
Type: flux.TString,
}
r.colMeta = colMeta
}
return r.colMeta
}
// Do applies f to the table itself, since queryTable is also a flux.ColReader.
// It is used to implement flux.Table.
func (r *queryTable) Do(f func(flux.ColReader) error) error {
return f(r)
}
func (r *queryTable) Done() {}
// Empty returns true if a Row has no values.
// It is used to implement flux.Table.
func (r *queryTable) Empty() bool { return r.Len() == 0 }
// Len returns the length of r.row.Values.
// It is used to implement flux.ColReader.
func (r *queryTable) Len() int {
return len(r.row.Values)
}
func (r *queryTable) Retain() {}
func (r *queryTable) Release() {}
// Bools returns the values in column index j as bools.
// It will panic if the column is not a []bool.
// It is used to implement flux.ColReader.
func (r *queryTable) Bools(j int) *array.Boolean {
return r.cols[j].(*array.Boolean)
}
// Ints returns the values in column index j as ints.
// It will panic if the column is not a []int64.
// It is used to implement flux.ColReader.
func (r *queryTable) Ints(j int) *array.Int {
return r.cols[j].(*array.Int)
}
// UInts returns the values in column index j as uints.
// It will panic if the column is not a []uint64.
// It is used to implement flux.ColReader.
func (r *queryTable) UInts(j int) *array.Uint {
return r.cols[j].(*array.Uint)
}
// Floats returns the values in column index j as floats.
// It will panic if the column is not a []float64.
// It is used to implement flux.ColReader.
func (r *queryTable) Floats(j int) *array.Float {
return r.cols[j].(*array.Float)
}
// Strings returns the values in column index j as strings.
// It will panic if the column is not a []string.
// It is used to implement flux.ColReader.
func (r *queryTable) Strings(j int) *array.String {
return r.cols[j].(*array.String)
}
// Times returns the values in column index j as values.Times.
// It will panic if the column is not a []values.Time.
// It is used to implement flux.ColReader.
func (r *queryTable) Times(j int) *array.Int {
return r.cols[j].(*array.Int)
}

View File

@ -1,285 +0,0 @@
package influxql_test
import (
"bytes"
"regexp"
"testing"
"github.com/andreyvit/diff"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux/csv"
"github.com/influxdata/influxdb/v2/query/influxql"
)
var crlfPattern = regexp.MustCompile(`\r?\n`)
func toCRLF(data string) []byte {
return []byte(crlfPattern.ReplaceAllString(data, "\r\n"))
}
func TestResponse_ResultIterator(t *testing.T) {
type testCase struct {
name string
encoded []byte
response *influxql.Response
err error
}
tests := []testCase{
{
name: "single series",
encoded: toCRLF(`#datatype,string,long,dateTime:RFC3339,double,long,string,boolean,string,string,string
#group,false,false,false,false,false,false,false,true,true,true
#default,0,,,,,,,,,
,result,table,_time,usage_user,test,mystr,this,cpu,host,_measurement
,,0,2018-08-29T13:08:47Z,10.2,10,yay,true,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,12.1,20,nay,false,cpu-total,a,cpu
,,0,2018-08-29T13:08:47Z,112,30,way,false,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,123.2,40,pay,true,cpu-total,a,cpu
`),
response: &influxql.Response{
Results: []influxql.Result{
{
StatementID: 0,
Series: []*influxql.Row{
{
Name: "cpu",
Tags: map[string]string{
"cpu": "cpu-total",
"host": "a",
},
Columns: []string{"time", "usage_user", "test", "mystr", "this"},
Values: [][]interface{}{
{int64(1535548127000000000), 10.2, int64(10), "yay", true},
{int64(1535548128000000000), 12.1, int64(20), "nay", false},
{int64(1535548127000000000), 112.0, int64(30), "way", false},
{int64(1535548128000000000), 123.2, int64(40), "pay", true},
},
},
},
},
},
},
},
{
name: "multiple series",
encoded: toCRLF(`#datatype,string,long,dateTime:RFC3339,double,long,string,string,string,string
#group,false,false,false,false,false,false,true,true,true
#default,0,,,,,,,,
,result,table,_time,usage_user,test,mystr,cpu,host,_measurement
,,0,2018-08-29T13:08:47Z,10.2,10,yay,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,12.1,20,nay,cpu-total,a,cpu
,,0,2018-08-29T13:08:47Z,112,30,way,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,123.2,40,pay,cpu-total,a,cpu
,,1,2018-08-29T18:27:31Z,10.2,10,yay,cpu-total,b,cpu
,,1,2018-08-29T18:27:31Z,12.1,20,nay,cpu-total,b,cpu
,,1,2018-08-29T18:27:31Z,112,30,way,cpu-total,b,cpu
,,1,2018-08-29T18:27:31Z,123.2,40,pay,cpu-total,b,cpu
`),
response: &influxql.Response{
Results: []influxql.Result{
{
StatementID: 0,
Series: []*influxql.Row{
{
Name: "cpu",
Tags: map[string]string{
"cpu": "cpu-total",
"host": "a",
},
Columns: []string{"time", "usage_user", "test", "mystr"},
Values: [][]interface{}{
{float64(1535548127000000000), 10.2, int64(10), "yay"},
{float64(1535548128000000000), 12.1, int64(20), "nay"},
{float64(1535548127000000000), 112.0, int64(30), "way"},
{float64(1535548128000000000), 123.2, int64(40), "pay"},
},
},
{
Name: "cpu",
Tags: map[string]string{
"cpu": "cpu-total",
"host": "b",
},
Columns: []string{"time", "usage_user", "test", "mystr"},
Values: [][]interface{}{
{"2018-08-29T18:27:31Z", 10.2, int64(10), "yay"},
{"2018-08-29T18:27:31Z", 12.1, int64(20), "nay"},
{"2018-08-29T18:27:31Z", 112.0, int64(30), "way"},
{"2018-08-29T18:27:31Z", 123.2, int64(40), "pay"},
},
},
},
},
},
},
},
{
name: "multiple series with same columns but different types",
encoded: toCRLF(`#datatype,string,long,dateTime:RFC3339,double,long,string,string,string,string
#group,false,false,false,false,false,false,true,true,true
#default,0,,,,,,,,
,result,table,_time,usage_user,test,mystr,cpu,host,_measurement
,,0,2018-08-29T13:08:47Z,10.2,1,yay,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,12.1,2,nay,cpu-total,a,cpu
,,0,2018-08-29T13:08:47Z,112,3,way,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,123.2,4,pay,cpu-total,a,cpu
#datatype,string,long,dateTime:RFC3339,double,double,string,string,string,string
#group,false,false,false,false,false,false,true,true,true
#default,0,,,,,,,,
,result,table,_time,usage_user,test,mystr,cpu,host,_measurement
,,1,2018-08-29T13:08:47Z,10.2,10,yay,cpu-total,a,cpu
,,1,2018-08-29T13:08:48Z,12.1,20,nay,cpu-total,a,cpu
,,1,2018-08-29T13:08:47Z,112,30,way,cpu-total,a,cpu
,,1,2018-08-29T13:08:48Z,123.2,40,pay,cpu-total,a,cpu
`),
response: &influxql.Response{
Results: []influxql.Result{
{
StatementID: 0,
Series: []*influxql.Row{
{
Name: "cpu",
Tags: map[string]string{
"cpu": "cpu-total",
"host": "a",
},
Columns: []string{"time", "usage_user", "test", "mystr"},
Values: [][]interface{}{
{int64(1535548127000000000), 10.2, int64(1), "yay"},
{int64(1535548128000000000), 12.1, int64(2), "nay"},
{int64(1535548127000000000), 112.0, int64(3), "way"},
{int64(1535548128000000000), 123.2, int64(4), "pay"},
},
},
{
Name: "cpu",
Tags: map[string]string{
"cpu": "cpu-total",
"host": "a",
},
Columns: []string{"time", "usage_user", "test", "mystr"},
Values: [][]interface{}{
{int64(1535548127000000000), 10.2, float64(10), "yay"},
{int64(1535548128000000000), 12.1, float64(20), "nay"},
{int64(1535548127000000000), 112.0, float64(30), "way"},
{int64(1535548128000000000), 123.2, float64(40), "pay"},
},
},
},
},
},
},
},
{
name: "multiple results",
encoded: toCRLF(`#datatype,string,long,dateTime:RFC3339,double,long,string,string,string,string
#group,false,false,false,false,false,false,true,true,true
#default,0,,,,,,,,
,result,table,_time,usage_user,test,mystr,cpu,host,_measurement
,,0,2018-08-29T13:08:47Z,10.2,10,yay,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,12.1,20,nay,cpu-total,a,cpu
,,0,2018-08-29T13:08:47Z,112,30,way,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,123.2,40,pay,cpu-total,a,cpu
,,1,2018-08-29T13:08:47Z,10.2,10,yay,cpu-total,b,cpu
,,1,2018-08-29T13:08:48Z,12.1,20,nay,cpu-total,b,cpu
,,1,2018-08-29T13:08:47Z,112,30,way,cpu-total,b,cpu
,,1,2018-08-29T13:08:48Z,123.2,40,pay,cpu-total,b,cpu
#datatype,string,long,dateTime:RFC3339,double,long,string,string,string,string
#group,false,false,false,false,false,false,true,true,true
#default,1,,,,,,,,
,result,table,_time,usage_user,test,mystr,cpu,host,_measurement
,,0,2018-08-29T13:08:47Z,10.2,10,yay,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,12.1,20,nay,cpu-total,a,cpu
,,0,2018-08-29T13:08:47Z,112,30,way,cpu-total,a,cpu
,,0,2018-08-29T13:08:48Z,123.2,40,pay,cpu-total,a,cpu
`),
response: &influxql.Response{
Results: []influxql.Result{
{
StatementID: 0,
Series: []*influxql.Row{
{
Name: "cpu",
Tags: map[string]string{
"cpu": "cpu-total",
"host": "a",
},
Columns: []string{"time", "usage_user", "test", "mystr"},
Values: [][]interface{}{
{int64(1535548127000000000), 10.2, int64(10), "yay"},
{int64(1535548128000000000), 12.1, int64(20), "nay"},
{int64(1535548127000000000), 112.0, int64(30), "way"},
{int64(1535548128000000000), 123.2, int64(40), "pay"},
},
},
{
Name: "cpu",
Tags: map[string]string{
"cpu": "cpu-total",
"host": "b",
},
Columns: []string{"time", "usage_user", "test", "mystr"},
Values: [][]interface{}{
{int64(1535548127000000000), 10.2, int64(10), "yay"},
{int64(1535548128000000000), 12.1, int64(20), "nay"},
{int64(1535548127000000000), 112.0, int64(30), "way"},
{int64(1535548128000000000), 123.2, int64(40), "pay"},
},
},
},
},
{
StatementID: 1,
Series: []*influxql.Row{
{
Name: "cpu",
Tags: map[string]string{
"cpu": "cpu-total",
"host": "a",
},
Columns: []string{"time", "usage_user", "test", "mystr"},
Values: [][]interface{}{
{int64(1535548127000000000), 10.2, int64(10), "yay"},
{int64(1535548128000000000), 12.1, int64(20), "nay"},
{int64(1535548127000000000), 112.0, int64(30), "way"},
{int64(1535548128000000000), 123.2, int64(40), "pay"},
},
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
encoderConfig := csv.DefaultEncoderConfig()
encoder := csv.NewMultiResultEncoder(encoderConfig)
var got bytes.Buffer
n, err := encoder.Encode(&got, influxql.NewResponseIterator(tt.response))
if err != nil && tt.err != nil {
if err.Error() != tt.err.Error() {
t.Errorf("unexpected error want: %s\n got: %s\n", tt.err.Error(), err.Error())
}
} else if err != nil {
t.Errorf("unexpected error want: none\n got: %s\n", err.Error())
} else if tt.err != nil {
t.Errorf("unexpected error want: %s\n got: none", tt.err.Error())
}
if g, w := got.String(), string(tt.encoded); g != w {
t.Errorf("unexpected encoding -want/+got:\n%s", diff.LineDiff(w, g))
}
if g, w := n, int64(len(tt.encoded)); g != w {
t.Errorf("unexpected encoding count -want/+got:\n%s", cmp.Diff(w, g))
}
})
}
}

View File

@ -1,190 +0,0 @@
package influxql
import (
"encoding/json"
"fmt"
"io"
"strconv"
"time"
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/iocounter"
)
// MultiResultEncoder encodes results as InfluxQL JSON format.
type MultiResultEncoder struct{}
// Encode writes a collection of results to the influxdb 1.X http response format.
// Expectations/Assumptions:
// 1. Each result will be published as a 'statement' in the top-level list of results. The result name
// will be interpreted as an integer and used as the statement id.
// 2. If the _measurement name is present in the group key, it will be used as the result name instead
// of as a normal tag.
// 3. All columns in the group key must be strings and they will be used as tags. There is no current way
// to have a tag and field be the same name in the results.
// TODO(jsternberg): For full compatibility, the above must be possible.
// 4. All other columns are fields and will be output in the order they are found.
// TODO(jsternberg): This function currently requires the first column to be a time field, but this isn't
// a strict requirement and will be lifted when we begin to work on transpiling meta queries.
func (e *MultiResultEncoder) Encode(w io.Writer, results flux.ResultIterator) (int64, error) {
resp := Response{}
wc := &iocounter.Writer{Writer: w}
for results.More() {
res := results.Next()
name := res.Name()
id, err := strconv.Atoi(name)
if err != nil {
resp.error(fmt.Errorf("unable to parse statement id from result name: %s", err))
results.Release()
break
}
tables := res.Tables()
result := Result{StatementID: id}
if err := tables.Do(func(tbl flux.Table) error {
var row Row
for j, c := range tbl.Key().Cols() {
if c.Type != flux.TString {
// Skip any columns that aren't strings. They are extra ones that
// flux includes by default like the start and end times that we do not
// care about.
continue
}
v := tbl.Key().Value(j).Str()
if c.Label == "_measurement" {
row.Name = v
} else if c.Label == "_field" {
// If the field key was not removed by a previous operation, we explicitly
// ignore it here when encoding the result back.
} else {
if row.Tags == nil {
row.Tags = make(map[string]string)
}
row.Tags[c.Label] = v
}
}
// TODO: resultColMap should be constructed from query metadata once it is provided.
// for now we know that an influxql query ALWAYS has time first, so we put this placeholder
// here to catch this most obvious requirement. Column orderings should be explicitly determined
// from the ordering given in the original flux.
resultColMap := map[string]int{}
j := 1
for _, c := range tbl.Cols() {
if c.Label == execute.DefaultTimeColLabel {
resultColMap[c.Label] = 0
} else if !tbl.Key().HasCol(c.Label) {
resultColMap[c.Label] = j
j++
}
}
if _, ok := resultColMap[execute.DefaultTimeColLabel]; !ok {
for k, v := range resultColMap {
resultColMap[k] = v - 1
}
}
row.Columns = make([]string, len(resultColMap))
for k, v := range resultColMap {
if k == execute.DefaultTimeColLabel {
k = "time"
}
row.Columns[v] = k
}
if err := tbl.Do(func(cr flux.ColReader) error {
// Preallocate the number of rows for the response to make this section
// of code easier to read. Find a time column which should exist
// in the output.
values := make([][]interface{}, cr.Len())
for j := range values {
values[j] = make([]interface{}, len(row.Columns))
}
j := 0
for idx, c := range tbl.Cols() {
if cr.Key().HasCol(c.Label) {
continue
}
j = resultColMap[c.Label]
// Fill in the values for each column.
switch c.Type {
case flux.TFloat:
vs := cr.Floats(idx)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
values[i][j] = vs.Value(i)
}
}
case flux.TInt:
vs := cr.Ints(idx)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
values[i][j] = vs.Value(i)
}
}
case flux.TString:
vs := cr.Strings(idx)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
values[i][j] = vs.Value(i)
}
}
case flux.TUInt:
vs := cr.UInts(idx)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
values[i][j] = vs.Value(i)
}
}
case flux.TBool:
vs := cr.Bools(idx)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
values[i][j] = vs.Value(i)
}
}
case flux.TTime:
vs := cr.Times(idx)
for i := 0; i < vs.Len(); i++ {
if vs.IsValid(i) {
values[i][j] = execute.Time(vs.Value(i)).Time().Format(time.RFC3339Nano)
}
}
default:
return fmt.Errorf("unsupported column type: %s", c.Type)
}
}
row.Values = append(row.Values, values...)
return nil
}); err != nil {
return err
}
result.Series = append(result.Series, &row)
return nil
}); err != nil {
resp.error(err)
results.Release()
break
}
resp.Results = append(resp.Results, result)
}
if err := results.Err(); err != nil && resp.Err == "" {
resp.error(err)
}
err := json.NewEncoder(wc).Encode(resp)
return wc.Count(), err
}
func NewMultiResultEncoder() *MultiResultEncoder {
return new(MultiResultEncoder)
}

View File

@ -1,136 +0,0 @@
package influxql_test
import (
"bytes"
"errors"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/execute/executetest"
"github.com/influxdata/influxdb/v2/query/influxql"
)
func TestMultiResultEncoder_Encode(t *testing.T) {
for _, tt := range []struct {
name string
in flux.ResultIterator
out string
}{
{
name: "Default",
in: flux.NewSliceResultIterator(
[]flux.Result{&executetest.Result{
Nm: "0",
Tbls: []*executetest.Table{{
KeyCols: []string{"_measurement", "host"},
ColMeta: []flux.ColMeta{
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "host", Type: flux.TString},
{Label: "value", Type: flux.TFloat},
},
Data: [][]interface{}{
{ts("2018-05-24T09:00:00Z"), "m0", "server01", float64(2)},
},
}},
}},
),
out: `{"results":[{"statement_id":0,"series":[{"name":"m0","tags":{"host":"server01"},"columns":["time","value"],"values":[["2018-05-24T09:00:00Z",2]]}]}]}`,
},
{
name: "No _time column",
in: flux.NewSliceResultIterator(
[]flux.Result{&executetest.Result{
Nm: "0",
Tbls: []*executetest.Table{{
KeyCols: []string{"_measurement", "host"},
ColMeta: []flux.ColMeta{
{Label: "_measurement", Type: flux.TString},
{Label: "host", Type: flux.TString},
{Label: "value", Type: flux.TFloat},
},
Data: [][]interface{}{
{"m0", "server01", float64(2)},
},
}},
}},
),
out: `{"results":[{"statement_id":0,"series":[{"name":"m0","tags":{"host":"server01"},"columns":["value"],"values":[[2]]}]}]}`,
},
{
name: "Just One Value Column",
in: flux.NewSliceResultIterator(
[]flux.Result{&executetest.Result{
Nm: "0",
Tbls: []*executetest.Table{{
KeyCols: []string{},
ColMeta: []flux.ColMeta{
{Label: "name", Type: flux.TString},
},
Data: [][]interface{}{
{"telegraf"},
},
}},
}},
),
out: `{"results":[{"statement_id":0,"series":[{"columns":["name"],"values":[["telegraf"]]}]}]}`,
},
{
name: "Error",
in: &resultErrorIterator{Error: "expected"},
out: `{"error":"expected"}`,
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
// Add expected newline to end of output
tt.out += "\n"
var buf bytes.Buffer
enc := influxql.NewMultiResultEncoder()
n, err := enc.Encode(&buf, tt.in)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if got, exp := buf.String(), tt.out; got != exp {
t.Fatalf("unexpected output:\nexp=%s\ngot=%s", exp, got)
}
if g, w := n, int64(len(tt.out)); g != w {
t.Errorf("unexpected encoding count -want/+got:\n%s", cmp.Diff(w, g))
}
})
}
}
type resultErrorIterator struct {
Error string
}
func (*resultErrorIterator) Statistics() flux.Statistics {
return flux.Statistics{}
}
func (*resultErrorIterator) Release() {}
func (*resultErrorIterator) More() bool { return false }
func (*resultErrorIterator) Next() flux.Result { panic("no results") }
func (ri *resultErrorIterator) Err() error {
return errors.New(ri.Error)
}
func mustParseTime(s string) time.Time {
t, err := time.Parse(time.RFC3339, s)
if err != nil {
panic(err)
}
return t
}
// ts parses an RFC3339 time string and returns it as an execute.Time in Unix nanoseconds.
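// For example, ts("1970-01-01T00:00:01Z") returns execute.Time(1000000000).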
func ts(s string) execute.Time {
return execute.Time(mustParseTime(s).UnixNano())
}


@ -1,123 +0,0 @@
package influxql
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"github.com/influxdata/flux"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/influxdata/influxdb/v2/query"
)
// Endpoint contains the necessary information to connect to a specific cluster.
type Endpoint struct {
URL string `json:"url"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
}
// Service is a client for the influxdb 1.x endpoint that implements the QueryService
// for the influxql compiler type.
type Service struct {
// Endpoints maps a cluster name to the influxdb 1.x endpoint.
Endpoints map[string]Endpoint
}
// Query will execute a query for the influxql.Compiler type against an influxdb 1.x endpoint,
// and return results using the default decoder.
func (s *Service) Query(ctx context.Context, req *query.Request) (flux.ResultIterator, error) {
span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()
resp, err := s.query(ctx, req)
if err != nil {
return nil, tracing.LogError(span, err)
}
// Decode the response into the JSON structure.
var results Response
if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
return nil, tracing.LogError(span, err)
}
// Return a result iterator using the response.
return NewResponseIterator(&results), nil
}
// QueryRawJSON will execute a query for the influxql.Compiler type against an influxdb 1.x endpoint,
// and return the body of the response as a byte array.
func (s *Service) QueryRawJSON(ctx context.Context, req *query.Request) ([]byte, error) {
span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()
resp, err := s.query(ctx, req)
if err != nil {
return nil, tracing.LogError(span, err)
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, tracing.LogError(span, err)
}
return b, nil
}
func (s *Service) query(ctx context.Context, req *query.Request) (*http.Response, error) {
span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()
// Verify that this is an influxql query in the compiler.
compiler, ok := req.Compiler.(*Compiler)
if !ok {
err := fmt.Errorf("influxql query service does not support the '%s' compiler type", req.Compiler.CompilerType())
return nil, tracing.LogError(span, err)
}
// Lookup the endpoint information for the cluster.
endpoint, ok := s.Endpoints[compiler.Cluster]
if !ok {
err := fmt.Errorf("no endpoint found for cluster %s", compiler.Cluster)
return nil, tracing.LogError(span, err)
}
// Prepare the HTTP request.
u, err := url.Parse(endpoint.URL)
if err != nil {
return nil, tracing.LogError(span, err)
}
u.Path += "/query"
params := url.Values{}
params.Set("q", compiler.Query)
if compiler.DB != "" {
params.Set("db", compiler.DB)
}
if compiler.RP != "" {
params.Set("rp", compiler.RP)
}
u.RawQuery = params.Encode()
hreq, err := http.NewRequest("POST", u.String(), nil)
if err != nil {
return nil, tracing.LogError(span, err)
}
hreq = hreq.WithContext(ctx)
hreq.SetBasicAuth(endpoint.Username, endpoint.Password)
tracing.InjectToHTTPRequest(span, hreq)
// Perform the request and look at the status code.
resp, err := http.DefaultClient.Do(hreq)
if err != nil {
return nil, tracing.LogError(span, err)
} else if resp.StatusCode/100 != 2 {
err = fmt.Errorf("unexpected http status: %s", resp.Status)
return nil, tracing.LogError(span, err)
}
return resp, nil
}


@ -1,69 +0,0 @@
package influxql_test
import (
"context"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/influxql"
)
func TestService(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Verify the parameters were passed correctly.
if want, got := "POST", r.Method; want != got {
t.Errorf("unexpected method -want/+got\n\t- %q\n\t+ %q", want, got)
}
if want, got := "SHOW DATABASES", r.FormValue("q"); want != got {
t.Errorf("unexpected query -want/+got\n\t- %q\n\t+ %q", want, got)
}
if want, got := "db0", r.FormValue("db"); want != got {
t.Errorf("unexpected database -want/+got\n\t- %q\n\t+ %q", want, got)
}
if want, got := "rp0", r.FormValue("rp"); want != got {
t.Errorf("unexpected retention policy -want/+got\n\t- %q\n\t+ %q", want, got)
}
user, pass, ok := r.BasicAuth()
if !ok {
w.WriteHeader(http.StatusUnauthorized)
return
}
if want, got := "me", user; want != got {
t.Errorf("unexpected username -want/+got\n\t- %q\n\t+ %q", want, got)
}
if want, got := "secretpassword", pass; want != got {
t.Errorf("unexpected password -want/+got\n\t- %q\n\t+ %q", want, got)
}
io.WriteString(w, `{"results":[{"statement_id":0,"series":[{"name":"databases","columns":["name"],"values":[["db0"]]}]}]}`)
}))
defer server.Close()
service := &influxql.Service{
Endpoints: map[string]influxql.Endpoint{
"myserver": {
URL: server.URL,
Username: "me",
Password: "secretpassword",
},
},
}
req := &query.Request{Compiler: &influxql.Compiler{
Cluster: "myserver",
DB: "db0",
RP: "rp0",
Query: "SHOW DATABASES",
}}
results, err := service.Query(context.Background(), req)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
defer results.Release()
_, err = service.QueryRawJSON(context.Background(), req)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
}


@ -1,45 +0,0 @@
package spectests
import (
"fmt"
"path/filepath"
"runtime"
)
var aggregateFuncNames = []string{
"count",
"mean",
"sum",
}
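// AggregateTest builds a Fixture that invokes fn once for each aggregate
// function name above, recording the caller's file and line for error reporting.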
func AggregateTest(fn func(name string) (stmt, want string)) Fixture {
_, file, line, _ := runtime.Caller(1)
fixture := &collection{
file: filepath.Base(file),
line: line,
}
for _, name := range aggregateFuncNames {
stmt, want := fn(name)
fixture.Add(stmt, want)
}
return fixture
}
func init() {
RegisterFixture(
AggregateTest(func(name string) (stmt, want string) {
return fmt.Sprintf(`SELECT %s(value) FROM db0..cpu`, name),
`package main
` + fmt.Sprintf(`from(bucketID: "%s")`, bucketID.String()) + ` |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> ` + name + `()
|> map(fn: (r) => ({r with _time: 1970-01-01T00:00:00Z}))
|> rename(columns: {_value: "` + name + `"})
|> yield(name: "0")
`
}),
)
}


@ -1,22 +0,0 @@
package spectests
import "fmt"
func init() {
RegisterFixture(
AggregateTest(func(name string) (stmt, want string) {
return fmt.Sprintf(`SELECT %s(value) FROM db0..cpu WHERE host = 'server01'`, name),
`package main
` + fmt.Sprintf(`from(bucketID: "%s"`, bucketID.String()) + `) |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> filter(fn: (r) => r["host"] == "server01")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> ` + name + `()
|> map(fn: (r) => ({r with _time: 1970-01-01T00:00:00Z}))
|> rename(columns: {_value: "` + name + `"})
|> yield(name: "0")
`
}),
)
}


@ -1,21 +0,0 @@
package spectests
import "fmt"
func init() {
RegisterFixture(
AggregateTest(func(name string) (stmt, want string) {
return fmt.Sprintf(`SELECT %s(value) FROM db0..cpu GROUP BY host`, name),
`package main
` + fmt.Sprintf(`from(bucketID: "%s"`, bucketID.String()) + `) |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> group(columns: ["_measurement", "_start", "_stop", "_field", "host"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "host", "_time", "_value"])
|> ` + name + `()
|> map(fn: (r) => ({r with _time: 1970-01-01T00:00:00Z}))
|> rename(columns: {_value: "` + name + `"})
|> yield(name: "0")
`
}),
)
}


@ -1,23 +0,0 @@
package spectests
import "fmt"
func init() {
RegisterFixture(
AggregateTest(func(name string) (stmt, want string) {
return fmt.Sprintf(`SELECT %s(value) FROM db0..cpu WHERE time >= now() - 10m GROUP BY time(1m)`, name),
`package main
` + fmt.Sprintf(`from(bucketID: "%s"`, bucketID.String()) + `) |> range(start: 2010-09-15T08:50:00Z, stop: 2010-09-15T09:00:00Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> window(every: 1m)
|> ` + name + `()
|> map(fn: (r) => ({r with _time: r._start}))
|> window(every: inf)
|> rename(columns: {_value: "` + name + `"})
|> yield(name: "0")
`
}),
)
}


@ -1,23 +0,0 @@
package spectests
import "fmt"
func init() {
RegisterFixture(
AggregateTest(func(name string) (stmt, want string) {
return fmt.Sprintf(`SELECT %s(value) FROM db0..cpu WHERE time >= now() - 10m GROUP BY time(5m, 12m)`, name),
`package main
` + fmt.Sprintf(`from(bucketID: "%s"`, bucketID.String()) + `) |> range(start: 2010-09-15T08:50:00Z, stop: 2010-09-15T09:00:00Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> window(every: 5m, start: 1970-01-01T00:02:00Z)
|> ` + name + `()
|> map(fn: (r) => ({r with _time: r._start}))
|> window(every: inf)
|> rename(columns: {_value: "` + name + `"})
|> yield(name: "0")
`
}),
)
}


@ -1,2 +0,0 @@
// Package spectests contains the influxql transpiler specification tests.
package spectests


@ -1,29 +0,0 @@
package spectests
func init() {
// TODO(ethan): https://github.com/influxdata/flux/issues/2594
// RegisterFixture(
// NewFixture(
// `SELECT mean(value), max(value) FROM db0..cpu`,
// `package main
//
//t0 = from(bucketID: "")
// |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z)
// |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
// |> group(columns: ["_measurement", "_start"], mode: "by")
// |> mean()
// |> duplicate(column: "_start", as: "_time")
//t1 = from(bucketID: "")
// |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z)
// |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
// |> group(columns: ["_measurement", "_start"], mode: "by")
// |> max()
// |> drop(columns: ["_time"])
// |> duplicate(column: "_start", as: "_time")
//join(tables: {t0: t0, t1: t1}, on: ["_time", "_measurement"])
// |> map(fn: (r) => ({_time: r._time, mean: r["t0__value"], max: r["t1__value"]}), mergeKey: true)
// |> yield(name: "0")
//`,
// ),
// )
}


@ -1,25 +0,0 @@
package spectests
func init() {
RegisterFixture(
NewFixture(
`SELECT mean(value) FROM db0..cpu; SELECT max(value) FROM db0..cpu`,
`package main
from(bucketID: "") |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> mean()
|> map(fn: (r) => ({r with _time: 1970-01-01T00:00:00Z}))
|> rename(columns: {_value: "mean"})
|> yield(name: "0")
from(bucketID: "") |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> max()
|> rename(columns: {_value: "max"})
|> yield(name: "1")
`,
),
)
}


@ -1,17 +0,0 @@
package spectests
func init() {
RegisterFixture(
NewFixture(
`SELECT value FROM db0..cpu`,
`package main
from(bucketID: "") |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> rename(columns: {_value: "value"})
|> yield(name: "0")
`,
),
)
}


@ -1,18 +0,0 @@
package spectests
func init() {
RegisterFixture(
NewFixture(
`SELECT value FROM db0..cpu WHERE host = 'server01'`,
`package main
from(bucketID: "") |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> filter(fn: (r) => r["host"] == "server01")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> rename(columns: {_value: "value"})
|> yield(name: "0")
`,
),
)
}


@ -1,18 +0,0 @@
package spectests
func init() {
RegisterFixture(
NewFixture(
`SELECT value FROM db0..cpu WHERE host =~ /.*er01/`,
`package main
from(bucketID: "") |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> filter(fn: (r) => r["host"] =~ /.*er01/)
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> rename(columns: {_value: "value"})
|> yield(name: "0")
`,
),
)
}


@ -1,19 +0,0 @@
package spectests
import "fmt"
func init() {
RegisterFixture(
NewFixture(
`SELECT value FROM db0.alternate.cpu`,
`package main
`+fmt.Sprintf(`from(bucketID: "%s")`, altBucketID.String())+` |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> rename(columns: {_value: "value"})
|> yield(name: "0")
`,
),
)
}


@ -1,45 +0,0 @@
package spectests
import (
"fmt"
"path/filepath"
"runtime"
)
var selectorFuncNames = []string{
"first",
"last",
"max",
"min",
}
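// SelectorTest builds a Fixture that invokes fn once for each selector
// function name above, recording the caller's file and line for error reporting.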
func SelectorTest(fn func(name string) (stmt, want string)) Fixture {
_, file, line, _ := runtime.Caller(1)
fixture := &collection{
file: filepath.Base(file),
line: line,
}
for _, name := range selectorFuncNames {
stmt, want := fn(name)
fixture.Add(stmt, want)
}
return fixture
}
func init() {
RegisterFixture(
SelectorTest(func(name string) (stmt, want string) {
return fmt.Sprintf(`SELECT %s(value) FROM db0..cpu`, name),
`package main
` + fmt.Sprintf(`from(bucketID: "%s")`, bucketID.String()) + ` |> range(start: 1677-09-21T00:12:43.145224194Z, stop: 2262-04-11T23:47:16.854775806Z) |> filter(fn: (r) => r._measurement == "cpu" and r._field == "value")
|> group(columns: ["_measurement", "_start", "_stop", "_field"], mode: "by")
|> keep(columns: ["_measurement", "_start", "_stop", "_field", "_time", "_value"])
|> ` + name + `()
|> rename(columns: {_value: "` + name + `"})
|> yield(name: "0")
`
}),
)
}


@ -1,14 +0,0 @@
package spectests
func init() {
RegisterFixture(
NewFixture(
`SHOW DATABASES`,
`package main
import v1 "influxdata/influxdb/v1"
v1.databases() |> rename(columns: {databaseName: "name"}) |> keep(columns: ["name"]) |> yield(name: "0")`,
),
)
}

View File

@ -1,19 +0,0 @@
package spectests
func init() {
RegisterFixture(
NewFixture(
`SHOW RETENTION POLICIES ON telegraf`,
`package main
import v1 "influxdata/influxdb/v1"
v1.databases() |> filter(fn: (r) => r.databaseName == "telegraf") |> rename(columns: {retentionPolicy: "name", retentionPeriod: "duration"})
|> set(key: "shardGroupDuration", value: "0")
|> set(key: "replicaN", value: "2")
|> keep(columns: ["name", "duration", "shardGroupDuration", "replicaN", "default"])
|> yield(name: "0")
`,
),
)
}


@ -1,18 +0,0 @@
package spectests
func init() {
RegisterFixture(
NewFixture(
`SHOW TAG VALUES ON "db0" WITH KEY = "host"`,
`package main
from(bucketID: "") |> range(start: -1h) |> keyValues(keyColumns: ["host"])
|> group(columns: ["_measurement", "_key"], mode: "by")
|> distinct()
|> group(columns: ["_measurement"], mode: "by")
|> rename(columns: {_key: "key", _value: "value"})
|> yield(name: "0")
`,
),
)
}


@ -1,18 +0,0 @@
package spectests
func init() {
RegisterFixture(
NewFixture(
`SHOW TAG VALUES ON "db0" WITH KEY IN ("host", "region")`,
`package main
from(bucketID: "") |> range(start: -1h) |> keyValues(keyColumns: ["host", "region"])
|> group(columns: ["_measurement", "_key"], mode: "by")
|> distinct()
|> group(columns: ["_measurement"], mode: "by")
|> rename(columns: {_key: "key", _value: "value"})
|> yield(name: "0")
`,
),
)
}


@ -1,19 +0,0 @@
package spectests
func init() {
RegisterFixture(
NewFixture(
`SHOW TAG VALUES ON "db0" FROM "cpu", "mem", "gpu" WITH KEY = "host"`,
`package main
from(bucketID: "") |> range(start: -1h) |> filter(fn: (r) => r._measurement == "cpu" or (r._measurement == "mem" or r._measurement == "gpu"))
|> keyValues(keyColumns: ["host"])
|> group(columns: ["_measurement", "_key"], mode: "by")
|> distinct()
|> group(columns: ["_measurement"], mode: "by")
|> rename(columns: {_key: "key", _value: "value"})
|> yield(name: "0")
`,
),
)
}


@ -1,155 +0,0 @@
package spectests
import (
"context"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/andreyvit/diff"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/ast/astutil"
"github.com/influxdata/flux/parser"
platform "github.com/influxdata/influxdb/v2"
platform2 "github.com/influxdata/influxdb/v2/kit/platform"
"github.com/influxdata/influxdb/v2/mock"
"github.com/influxdata/influxdb/v2/query/influxql"
platformtesting "github.com/influxdata/influxdb/v2/testing"
"github.com/stretchr/testify/require"
)
var dbrpMappingSvc = &mock.DBRPMappingService{}
var organizationID platform2.ID
var bucketID platform2.ID
var altBucketID platform2.ID
func init() {
mapping := platform.DBRPMapping{
Database: "db0",
RetentionPolicy: "autogen",
Default: true,
OrganizationID: organizationID,
BucketID: bucketID,
}
altMapping := platform.DBRPMapping{
Database: "db0",
RetentionPolicy: "autogen",
Default: true,
OrganizationID: organizationID,
BucketID: altBucketID,
}
dbrpMappingSvc.FindByIDFn = func(ctx context.Context, orgID, id platform2.ID) (*platform.DBRPMapping, error) {
return &mapping, nil
}
dbrpMappingSvc.FindManyFn = func(ctx context.Context, filter platform.DBRPMappingFilter, opt ...platform.FindOptions) ([]*platform.DBRPMapping, int, error) {
m := &mapping
if filter.RetentionPolicy != nil && *filter.RetentionPolicy == "alternate" {
m = &altMapping
}
return []*platform.DBRPMapping{m}, 1, nil
}
}
// Fixture is a test case that knows how to run itself.
type Fixture interface {
Run(t *testing.T)
}
type fixture struct {
stmt string
want string
file string
line int
}
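// NewFixture records the caller's file and line and returns a Fixture that
// transpiles stmt and compares the formatted result against want.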
func NewFixture(stmt, want string) Fixture {
_, file, line, _ := runtime.Caller(1)
return &fixture{
stmt: stmt,
want: want,
file: filepath.Base(file),
line: line,
}
}
func (f *fixture) Run(t *testing.T) {
organizationID = platformtesting.MustIDBase16("aaaaaaaaaaaaaaaa")
bucketID = platformtesting.MustIDBase16("bbbbbbbbbbbbbbbb")
altBucketID = platformtesting.MustIDBase16("cccccccccccccccc")
t.Run(f.stmt, func(t *testing.T) {
wantAST := parser.ParseSource(f.want)
if ast.Check(wantAST) > 0 {
err := ast.GetError(wantAST)
t.Fatalf("found parser errors in the want text: %s", err.Error())
}
want, err := astutil.Format(wantAST.Files[0])
require.NoError(t, err)
transpiler := influxql.NewTranspilerWithConfig(
dbrpMappingSvc,
influxql.Config{
DefaultDatabase: "db0",
Cluster: "cluster",
Now: Now(),
},
)
pkg, err := transpiler.Transpile(context.Background(), f.stmt)
if err != nil {
t.Fatalf("%s:%d: unexpected error: %s", f.file, f.line, err)
}
got, err := astutil.Format(pkg.Files[0])
require.NoError(t, err)
// Format both ASTs back to source and compare the resulting strings.
if want != got {
out := diff.LineDiff(want, got)
t.Fatalf("unexpected ast at %s:%d\n%s", f.file, f.line, out)
}
})
}
type collection struct {
stmts []string
wants []string
file string
line int
}
func (c *collection) Add(stmt, want string) {
c.stmts = append(c.stmts, stmt)
c.wants = append(c.wants, want)
}
func (c *collection) Run(t *testing.T) {
for i, stmt := range c.stmts {
f := fixture{
stmt: stmt,
want: c.wants[i],
file: c.file,
line: c.line,
}
f.Run(t)
}
}
var allFixtures []Fixture
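// RegisterFixture adds fixtures to the package-level list returned by All.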
func RegisterFixture(fixtures ...Fixture) {
allFixtures = append(allFixtures, fixtures...)
}
func All() []Fixture {
return allFixtures
}
func Now() time.Time {
t, err := time.Parse(time.RFC3339, "2010-09-15T09:00:00Z")
if err != nil {
panic(err)
}
return t
}
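
For reference, here is a minimal sketch (not part of this diff) of how the fixtures registered above would be driven from a Go test; the test and file names are hypothetical:

package spectests_test

import (
	"testing"

	"github.com/influxdata/influxdb/v2/query/influxql/spectests"
)

// TestTranspilerSpecs runs every fixture registered via RegisterFixture,
// one subtest per statement.
func TestTranspilerSpecs(t *testing.T) {
	for _, fixture := range spectests.All() {
		fixture.Run(t)
	}
}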


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"ctr","columns":["time","n"],"values":[["1970-01-01T00:00:00Z",0],["1970-01-01T00:00:00.000000001Z",1]]}]}]}


@ -1 +0,0 @@
SELECT n FROM (SELECT top(n, 1) AS n FROM (SELECT n FROM ctr))


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"ctr","columns":["time","n"],"values":[["1970-01-01T00:00:00.000000001Z",1]]}]}]}


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"ctr","columns":["time","n"],"values":[["1970-01-01T00:00:00Z",0],["1970-01-01T00:00:00.000000001Z",1]]}]}]}


@ -1 +0,0 @@
SELECT n FROM (SELECT top(n, 1) AS n FROM (SELECT n FROM ctr))


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"ctr","columns":["time","n"],"values":[["1970-01-01T00:00:00.000000001Z",1]]}]}]}


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"ctr","columns":["time","n"],"values":[["1970-01-01T00:00:00Z",0],["1970-01-01T00:00:00.000000001Z",1]]}]}]}


@ -1 +0,0 @@
SELECT n FROM (SELECT n FROM (SELECT n FROM ctr) LIMIT 1)


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"ctr","columns":["time","n"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"ctr","columns":["time","n"],"values":[["1970-01-01T00:00:00Z",0],["1970-01-01T00:00:00.000000001Z",1]]}]}]}


@ -1 +0,0 @@
SELECT n FROM (SELECT n FROM (SELECT n FROM ctr ORDER BY DESC) ORDER BY DESC LIMIT 1) ORDER BY DESC


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"ctr","columns":["time","n"],"values":[["1970-01-01T00:00:00.000000001Z",1]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT top(f3), f3 FROM m


@ -1 +0,0 @@
{"results":[{"statement_id":0,"error":"invalid number of arguments for top, expected at least 2, got 1"}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT top(f3, 1), f3 FROM m


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","top","f3"],"values":[["1970-01-01T00:01:16Z",0.9952755778325094,0.9952755778325094]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT bottom(f3, t0, t1), f3 FROM m


@ -1 +0,0 @@
{"results":[{"statement_id":0,"error":"expected integer as last argument in bottom(), found t1"}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT bottom(f3, t0, t1, 3), f3 FROM m


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","bottom","t0","t1","f3"],"values":[["1970-01-01T00:00:26Z",0.0021121066840707092,"0","1",0.0021121066840707092],["1970-01-01T00:00:45Z",0.0010706815382571008,"0","0",0.0010706815382571008],["1970-01-01T00:00:46Z",0.006158089902988762,"1","0",0.006158089902988762]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT first(f3), f3 FROM m


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","first","f3"],"values":[["1970-01-01T00:00:00Z",0.1744015390865842,0.1744015390865842]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT last(f3), f3 FROM m


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","last","f3"],"values":[["1970-01-01T00:01:39Z",0.974927114882693,0.974927114882693]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT max(f3), f3 FROM m


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","max","f3"],"values":[["1970-01-01T00:01:16Z",0.9952755778325094,0.9952755778325094]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT min(f3), f3 FROM m


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","min","f3"],"values":[["1970-01-01T00:00:45Z",0.0010706815382571008,0.0010706815382571008]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT top(f3), f3 FROM m WHERE time >= 0 AND time < 100s GROUP BY time(20s)


@ -1 +0,0 @@
{"results":[{"statement_id":0,"error":"invalid number of arguments for top, expected at least 2, got 1"}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT top(f3, 1), f3 FROM m WHERE time >= 0 AND time < 100s GROUP BY time(20s)


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","top","f3"],"values":[["1970-01-01T00:00:08Z",0.9899480437410666,0.9899480437410666],["1970-01-01T00:00:39Z",0.9291806569775031,0.9291806569775031],["1970-01-01T00:00:51Z",0.973351319236113,0.973351319236113],["1970-01-01T00:01:16Z",0.9952755778325094,0.9952755778325094],["1970-01-01T00:01:27Z",0.9758817359379315,0.9758817359379315]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT top(f3, 2), f3 FROM m WHERE time >= 0 AND time < 100s GROUP BY time(20s)


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","top","f3"],"values":[["1970-01-01T00:00:08Z",0.9899480437410666,0.9899480437410666],["1970-01-01T00:00:18Z",0.9751382010698921,0.9751382010698921],["1970-01-01T00:00:39Z",0.9291806569775031,0.9291806569775031],["1970-01-01T00:00:39Z",0.9170080200153685,0.9170080200153685],["1970-01-01T00:00:47Z",0.9657761219824897,0.9657761219824897],["1970-01-01T00:00:51Z",0.973351319236113,0.973351319236113],["1970-01-01T00:01:08Z",0.9876344023126428,0.9876344023126428],["1970-01-01T00:01:16Z",0.9952755778325094,0.9952755778325094],["1970-01-01T00:01:27Z",0.9758817359379315,0.9758817359379315],["1970-01-01T00:01:39Z",0.974927114882693,0.974927114882693]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT top(f3, 3), f3 FROM m WHERE time >= 0 AND time < 100s GROUP BY time(20s)


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","top","f3"],"values":[["1970-01-01T00:00:05Z",0.9615579192535703,0.9615579192535703],["1970-01-01T00:00:08Z",0.9899480437410666,0.9899480437410666],["1970-01-01T00:00:18Z",0.9751382010698921,0.9751382010698921],["1970-01-01T00:00:36Z",0.9143732087549132,0.9143732087549132],["1970-01-01T00:00:39Z",0.9291806569775031,0.9291806569775031],["1970-01-01T00:00:39Z",0.9170080200153685,0.9170080200153685],["1970-01-01T00:00:42Z",0.9651782198335684,0.9651782198335684],["1970-01-01T00:00:47Z",0.9657761219824897,0.9657761219824897],["1970-01-01T00:00:51Z",0.973351319236113,0.973351319236113],["1970-01-01T00:01:08Z",0.9876344023126428,0.9876344023126428],["1970-01-01T00:01:16Z",0.9952755778325094,0.9952755778325094],["1970-01-01T00:01:18Z",0.961049761969049,0.961049761969049],["1970-01-01T00:01:27Z",0.9758817359379315,0.9758817359379315],["1970-01-01T00:01:38Z",0.964011598308669,0.964011598308669],["1970-01-01T00:01:39Z",0.974927114882693,0.974927114882693]]}]}]}

File diff suppressed because one or more lines are too long


@ -1 +0,0 @@
SELECT top(f3, 2), f3 FROM m


@ -1 +0,0 @@
{"results":[{"statement_id":0,"series":[{"name":"m","columns":["time","top","f3"],"values":[["1970-01-01T00:00:08Z",0.9899480437410666,0.9899480437410666],["1970-01-01T00:01:16Z",0.9952755778325094,0.9952755778325094]]}]}]}

Some files were not shown because too many files have changed in this diff.