feat: upgrade Flux to v0.83.2 (#19569)

parent af0c328095
commit e7cbbaa722

@@ -48,7 +48,6 @@ import (
 	"github.com/influxdata/influxdb/v2/pkger"
 	infprom "github.com/influxdata/influxdb/v2/prometheus"
 	"github.com/influxdata/influxdb/v2/query"
-	"github.com/influxdata/influxdb/v2/query/builtinlazy"
 	"github.com/influxdata/influxdb/v2/query/control"
 	"github.com/influxdata/influxdb/v2/query/fluxlang"
 	"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"

@@ -137,8 +136,6 @@ func cmdRunE(ctx context.Context, l *Launcher) func() error {
 	// exit with SIGINT and SIGTERM
 	ctx = signals.WithStandardSignals(ctx)

-	builtinlazy.Initialize()
-
 	if err := l.run(ctx); err != nil {
 		return err
 	} else if !l.Running() {
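Note on the launcher hunks above: with the upgraded Flux, the runtime finalizes its
standard library itself, so the launcher drops the builtinlazy shim and its import.
A minimal sketch of the kind of helper builtinlazy provided; the FinalizeBuiltIns
call is an assumption about the underlying Flux API, not quoted from this commit:

    package builtinlazy

    import (
        "sync"

        "github.com/influxdata/flux/runtime"
    )

    var once sync.Once

    // Initialize finalizes the Flux standard library exactly once, deferring
    // the fairly expensive builtin registration until startup actually needs it.
    func Initialize() {
        once.Do(func() {
            runtime.FinalizeBuiltIns() // assumed Flux API
        })
    }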
@@ -19,7 +19,10 @@ import (
	"github.com/influxdata/flux/csv"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/execute/executetest"
	"github.com/influxdata/flux/execute/table"
	"github.com/influxdata/flux/lang"
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/runtime"
	"github.com/influxdata/flux/values"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"

@@ -27,7 +30,6 @@ import (
 	"github.com/influxdata/influxdb/v2/kit/feature"
 	"github.com/influxdata/influxdb/v2/kit/prom"
 	"github.com/influxdata/influxdb/v2/mock"
-	"github.com/influxdata/influxdb/v2/pkg/flux/execute/table"
 	"github.com/influxdata/influxdb/v2/query"
 )
@@ -221,7 +223,7 @@ func queryPoints(ctx context.Context, t *testing.T, l *launcher.TestLauncher, op
 	if d.verbose {
 		t.Logf("query:\n%s", qs)
 	}
-	pkg, err := flux.Parse(qs)
+	pkg, err := runtime.ParseToJSON(qs)
 	if err != nil {
 		t.Fatal(err)
 	}
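The helper now parses Flux source with runtime.ParseToJSON, which returns the AST
already serialized as JSON, instead of flux.Parse, which returned an in-memory
*ast.Package; this lines up with the HTTP layer's switch to json.RawMessage later
in this diff. A minimal sketch of the new flow (the Now value is illustrative):

    src := `from(bucket: "mybucket") |> range(start: -1h)`
    astJSON, err := runtime.ParseToJSON(src) // JSON-encoded AST, as used above
    if err != nil {
        panic(err) // example only
    }
    // The raw JSON feeds straight into the AST compiler.
    compiler := lang.ASTCompiler{AST: astJSON, Now: time.Now()}
    _ = compiler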
@@ -751,6 +753,193 @@ from(bucket: "%s")
	}
}

type TestQueryProfiler struct {
	start int64
}

func (s TestQueryProfiler) Name() string {
	return fmt.Sprintf("query%d", s.start)
}

func (s TestQueryProfiler) GetResult(q flux.Query, alloc *memory.Allocator) (flux.Table, error) {
	groupKey := execute.NewGroupKey(
		[]flux.ColMeta{
			{Label: "_measurement", Type: flux.TString},
		},
		[]values.Value{
			values.NewString(fmt.Sprintf("profiler/query%d", s.start)),
		},
	)
	b := execute.NewColListTableBuilder(groupKey, alloc)
	colMeta := []flux.ColMeta{
		{Label: "_measurement", Type: flux.TString},
		{Label: "TotalDuration", Type: flux.TInt},
		{Label: "CompileDuration", Type: flux.TInt},
		{Label: "QueueDuration", Type: flux.TInt},
		{Label: "PlanDuration", Type: flux.TInt},
		{Label: "RequeueDuration", Type: flux.TInt},
		{Label: "ExecuteDuration", Type: flux.TInt},
		{Label: "Concurrency", Type: flux.TInt},
		{Label: "MaxAllocated", Type: flux.TInt},
		{Label: "TotalAllocated", Type: flux.TInt},
		{Label: "RuntimeErrors", Type: flux.TString},
		{Label: "influxdb/scanned-bytes", Type: flux.TInt},
		{Label: "influxdb/scanned-values", Type: flux.TInt},
		{Label: "flux/query-plan", Type: flux.TString},
	}
	colData := []interface{}{
		fmt.Sprintf("profiler/query%d", s.start),
		s.start,
		s.start + 1,
		s.start + 2,
		s.start + 3,
		s.start + 4,
		s.start + 5,
		s.start + 6,
		s.start + 7,
		s.start + 8,
		"error1\nerror2",
		s.start + 9,
		s.start + 10,
		"query plan",
	}
	for _, col := range colMeta {
		if _, err := b.AddCol(col); err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(colData); i++ {
		if intValue, ok := colData[i].(int64); ok {
			b.AppendInt(i, intValue)
		} else {
			b.AppendString(i, colData[i].(string))
		}
	}
	tbl, err := b.Table()
	if err != nil {
		return nil, err
	}
	return tbl, nil
}

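TestQueryProfiler implements the profiler hook the upgraded Flux engine exposes:
Name() supplies the key a query lists in profiler.enabledProfilers, and GetResult()
returns one extra table that is emitted under the _profiler result stream (note the
#default,_profiler annotation in the expected CSV of the test below). Registration
is a single call, exactly as the test does it:

    // Two profilers registered under the names "query0" and "query100"; names
    // that match nothing (e.g. "NonExistentProfiler" below) are simply ignored.
    execute.RegisterProfilers(&TestQueryProfiler{}, &TestQueryProfiler{start: 100})

A query then opts in with `option profiler.enabledProfilers = ["query0", "query100"]`.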
func TestFluxProfiler(t *testing.T) {
	testcases := []struct {
		name  string
		data  []string
		query string
		want  string
	}{
		{
			name: "range last single point start time",
			data: []string{
				"m,tag=a f=1i 1",
			},
			query: `
option profiler.enabledProfilers = ["query0", "query100", "query100", "NonExistentProfiler"]
from(bucket: v.bucket)
	|> range(start: 1970-01-01T00:00:00.000000001Z, stop: 1970-01-01T01:00:00Z)
	|> last()
`,
			want: `
#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string
#group,false,false,true,true,false,false,true,true,true
#default,_result,,,,,,,,
,result,table,_start,_stop,_time,_value,_field,_measurement,tag
,,0,1970-01-01T00:00:00.000000001Z,1970-01-01T01:00:00Z,1970-01-01T00:00:00.000000001Z,1,f,m,a

#datatype,string,long,string,long,long,long,long,long,long,long,long,long,string,string,long,long
#group,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false
#default,_profiler,,,,,,,,,,,,,,,
,result,table,_measurement,TotalDuration,CompileDuration,QueueDuration,PlanDuration,RequeueDuration,ExecuteDuration,Concurrency,MaxAllocated,TotalAllocated,RuntimeErrors,flux/query-plan,influxdb/scanned-bytes,influxdb/scanned-values
,,0,profiler/query0,0,1,2,3,4,5,6,7,8,"error1
error2","query plan",9,10
,,1,profiler/query100,100,101,102,103,104,105,106,107,108,"error1
error2","query plan",109,110
`,
		},
	}
	execute.RegisterProfilers(&TestQueryProfiler{}, &TestQueryProfiler{start: 100})
	for _, tc := range testcases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			l := launcher.RunTestLauncherOrFail(t, ctx, nil)

			l.SetupOrFail(t)
			defer l.ShutdownOrFail(t, ctx)

			l.WritePointsOrFail(t, strings.Join(tc.data, "\n"))

			queryStr := "import \"profiler\"\nv = {bucket: " + "\"" + l.Bucket.Name + "\"" + "}\n" + tc.query
			req := &query.Request{
				Authorization:  l.Auth,
				OrganizationID: l.Org.ID,
				Compiler: lang.FluxCompiler{
					Query: queryStr,
				},
			}
			if got, err := l.FluxQueryService().Query(ctx, req); err != nil {
				t.Error(err)
			} else {
				dec := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{})
				want, err := dec.Decode(ioutil.NopCloser(strings.NewReader(tc.want)))
				if err != nil {
					t.Fatal(err)
				}
				defer want.Release()

				if err := executetest.EqualResultIterators(want, got); err != nil {
					t.Fatal(err)
				}
			}
		})
	}
}

func TestQueryPushDowns(t *testing.T) {
	t.Skip("Not supported yet")
	testcases := []struct {
@@ -7,10 +7,10 @@ import (
	"os"
	"time"

	"github.com/influxdata/flux"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
	"github.com/influxdata/influxdb/v2/cmd/influxd/upgrade"
	_ "github.com/influxdata/influxdb/v2/query/builtin"
	_ "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
	_ "github.com/influxdata/influxdb/v2/tsdb/index/tsi1"
	"github.com/spf13/cobra"

@@ -43,11 +43,6 @@ func main() {
 		},
 	)

-	// TODO: this should be removed in the future: https://github.com/influxdata/influxdb/issues/16220
-	if os.Getenv("QUERY_TRACING") == "1" {
-		flux.EnableExperimentalTracing()
-	}
-
 	if err := rootCmd.Execute(); err != nil {
 		os.Exit(1)
 	}

flags.yml (13 lines changed)
@@ -83,6 +83,13 @@
   contact: Query Team
   lifetime: temporary

+- name: Query Tracing
+  description: Turn on query tracing for queries that are sampled
+  key: queryTracing
+  default: false
+  contact: Query Team
+  lifetime: permanent
+
 - name: Simple Task Options Extraction
   description: Simplified task options extraction to avoid undefined functions when saving tasks
   key: simpleTaskOptionsExtraction

@@ -133,6 +140,12 @@
   default: false
   contact: Compute Team

+- name: Inject Latest Success Time
+  description: Inject the latest successful task run timestamp into a Task query extern when executing.
+  key: injectLatestSuccessTime
+  default: false
+  contact: Compute Team
+
 - name: Enforce Organization Dashboard Limits
   description: Enforces the default limit params for the dashboards api when orgs are set
   key: enforceOrgDashboardLimits
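The queryTracing entry replaces the QUERY_TRACING environment variable removed from
main.go above, and injectLatestSuccessTime gates the task-run timestamp injection;
both get generated accessors in the kit/feature section below. A hedged sketch of
consulting the new flag, assuming the feature package's usual context-based Enabled
helper (the helper name is an assumption, not shown in this diff):

    // Gate experimental query tracing on the feature flag instead of an env var.
    // feature.QueryTracing() is generated in kit/feature/list.go below.
    if feature.QueryTracing().Enabled(ctx) { // Enabled: assumed lookup helper
        flux.EnableExperimentalTracing()
    }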

go.mod (16 lines changed)
@@ -3,9 +3,7 @@ module github.com/influxdata/influxdb/v2
go 1.13

require (
	cloud.google.com/go/bigtable v1.3.0 // indirect
	github.com/BurntSushi/toml v0.3.1
	github.com/DATA-DOG/go-sqlmock v1.4.1 // indirect
	github.com/NYTimes/gziphandler v1.0.1
	github.com/RoaringBitmap/roaring v0.4.16
	github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883

@@ -31,7 +29,6 @@ require (
	github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 // indirect
	github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 // indirect
	github.com/go-chi/chi v4.1.0+incompatible
	github.com/go-sql-driver/mysql v1.5.0 // indirect
	github.com/go-stack/stack v1.8.0
	github.com/gogo/protobuf v1.3.1
	github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021

@@ -51,7 +48,7 @@ require (
 	github.com/hashicorp/vault/api v1.0.2
 	github.com/imdario/mergo v0.3.9 // indirect
 	github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6
-	github.com/influxdata/flux v0.66.1
+	github.com/influxdata/flux v0.83.1
 	github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69
 	github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6
 	github.com/influxdata/pkg-config v0.2.3

@@ -102,15 +99,14 @@ require (
 	github.com/yudai/pp v2.0.1+incompatible // indirect
 	go.uber.org/multierr v1.5.0
 	go.uber.org/zap v1.14.1
-	golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37
-	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
-	golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
+	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
+	golang.org/x/net v0.0.0-20200625001655-4c5254603344
 	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
-	golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4
+	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
+	golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
 	golang.org/x/text v0.3.2
 	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
-	golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f
+	golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a
 	google.golang.org/api v0.17.0
 	google.golang.org/grpc v1.27.1
 	gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect
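The substantive go.mod change is the Flux bump from v0.66.1 to v0.83.1; the
golang.org/x entries move forward to satisfy the newer Flux's requirements. A bump
like this is typically reproduced with `go get github.com/influxdata/flux@v0.83.1`
followed by `go mod tidy`, which also regenerates the go.sum entries below.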

go.sum (83 lines changed)
@@ -2,8 +2,6 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w=
cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=

@@ -29,10 +27,34 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest v0.10.1 h1:uaB8A32IZU9YKs9v50+/LWIWTDHJk2vlGzbfd7FfESI=
github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM=
github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=

@@ -68,6 +90,8 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.29.16 h1:Gbtod7Y4W/Ai7wPtesdvgGVTkFN8JxAaGouRLlcQfQs=
github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3 h1:wOysYcIdqv3WnvwqFFzrYCFALPED7qkUGaLXu359GSc=
github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3/go.mod h1:UMqtWQTnOe4byzwe7Zhwh8f8s+36uszN51sJrSIZlTE=
github.com/benbjohnson/tmpl v1.0.0 h1:T5QPGJD0W6JJxyEEAlVnX3co/IkUrfHen1/42nlgAHo=

@@ -81,12 +105,15 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5 h1:kS0dw4K730x7cxT+bVyTyYJZHuSoH7ofSr/Ijit56Qw=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5/go.mod h1:CDReaxg1cmLrtcasZy43l4EYPAknXLiQSrb7tLw5zXM=
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE=
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e/go.mod h1:errmMKH8tTB49UR2A8C8DPYkyudelsYJwJFaZHQ6ik8=
github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=

@@ -116,11 +143,15 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg=
github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU=
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.7.3-0.20180815000130-e05b657120a6/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=

@@ -164,6 +195,7 @@ github.com/go-chi/chi v4.1.0+incompatible h1:ETj3cggsVIY2Xao5ExCu6YhEh5MD6JTfcBz
github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=

@@ -171,12 +203,13 @@ github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=

@@ -185,6 +218,8 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021 h1:HYV500jCgk+IC68L5sWrLFIWMpaUFfXXpJSAb7XOoBk=
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=

@@ -318,8 +353,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6 h1:OtjKkeWDjUbyMi82C7XXy7Tvm2LXMwiBBXyFIGNPaGA=
 github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og=
-github.com/influxdata/flux v0.66.1 h1:d98L5k9mmP7bU7d2zAx6C3dCe5B8/PEa1wkWzZAE+Ok=
-github.com/influxdata/flux v0.66.1/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
+github.com/influxdata/flux v0.83.1 h1:KdJ19S2bj0jZvhICdS8d54BHYCJNuq9h3A/HkIKOD6o=
+github.com/influxdata/flux v0.83.1/go.mod h1:+6FzHdZdwYjEIa2iuQEJ92x+C2A8X1jI0qdpVT0DJfM=
 github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU=
 github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
 github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6 h1:CFx+pP90q/qg3spoiZjf8donE4WpAdjeJfPOcoNqkWo=

@@ -336,6 +371,8 @@ github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaF
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=

@@ -464,6 +501,8 @@ github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=

@@ -537,6 +576,8 @@ github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbd
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/snowflakedb/gosnowflake v1.3.4 h1:Gyoi6g4lMHsilEwW9+KV+bgYkJTgf5pVfvL7Utus920=
github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=

@@ -585,6 +626,10 @@ github.com/tylerb/graceful v1.2.15 h1:B0x01Y8fsJpogzZTkDg6BDi6eMf03s01lEKGdrv83o
github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II=
github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo=
github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg=
github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU=
github.com/uber/athenadriver v1.1.4 h1:k6k0RBeXjR7oZ8NO557MsRw3eX1cc/9B0GNx+W9eHiQ=
github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E=
github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY=
github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=

@@ -603,6 +648,7 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3Ifn
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=

@@ -624,6 +670,7 @@ go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=

@@ -634,6 +681,7 @@ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=

@@ -642,13 +690,15 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=

@@ -662,6 +712,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

@@ -683,8 +735,11 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -712,6 +767,8 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjut
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=

@@ -729,6 +786,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -758,6 +817,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@@ -766,6 +826,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -820,6 +882,8 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 h1:DFtSed2q3HtNuVazwVDZ4nS
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f h1:haxFuLhmPh0vRpVv5MeXoGyfCB39/Ohsq7A68h65qAg=
golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a h1:kVMPw4f6EVqYdfGQTedjrpw1dbE2PEMfw4jwXsNdn9s=
golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -858,8 +922,6 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU=
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=

@@ -934,6 +996,7 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
istio.io/api v0.0.0-20190515205759-982e5c3888c6/go.mod h1:hhLFQmpHia8zgaM37vb2ml9iS5NfNfqZGRt1pS9aVEo=
@@ -18,7 +18,6 @@ import (
 	"github.com/influxdata/flux/ast"
 	"github.com/influxdata/flux/csv"
 	"github.com/influxdata/flux/lang"
-	"github.com/influxdata/flux/repl"
 	"github.com/influxdata/influxdb/v2"
 	"github.com/influxdata/influxdb/v2/jsonweb"
 	"github.com/influxdata/influxdb/v2/query"

@@ -32,11 +31,10 @@ type QueryRequest struct {
 	Query string `json:"query"`

 	// Flux fields
-	Extern  *ast.File    `json:"extern,omitempty"`
-	Spec    *flux.Spec   `json:"spec,omitempty"`
-	AST     *ast.Package `json:"ast,omitempty"`
-	Dialect QueryDialect `json:"dialect"`
-	Now     time.Time    `json:"now"`
+	Extern  json.RawMessage `json:"extern,omitempty"`
+	AST     json.RawMessage `json:"ast,omitempty"`
+	Dialect QueryDialect    `json:"dialect"`
+	Now     time.Time       `json:"now"`

 	// InfluxQL fields
 	Bucket string `json:"bucket,omitempty"`

@@ -271,19 +269,13 @@ func (r QueryRequest) proxyRequest(now func() time.Time) (*query.ProxyRequest, e
 			Query: r.Query,
 		}
 	}
-	} else if r.AST != nil {
+	} else if len(r.AST) > 0 {
 		c := lang.ASTCompiler{
-			AST: r.AST,
-			Now: n,
+			Extern: r.Extern,
+			AST:    r.AST,
+			Now:    n,
 		}
-		if r.Extern != nil {
-			c.PrependFile(r.Extern)
-		}
 		compiler = c
-	} else if r.Spec != nil {
-		compiler = repl.Compiler{
-			Spec: r.Spec,
-		}
 	}

 	delimiter, _ := utf8.DecodeRuneInString(r.Dialect.Delimiter)
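The net effect of these query.go hunks: the HTTP layer stops decoding extern and ast
into typed AST structs (and drops the repl.Compiler/Spec path entirely); both fields
now travel as opaque json.RawMessage straight into the Flux compilers, so the extern
no longer has to be spliced into the AST with PrependFile. A sketch of a client-side
request body under the new shape (the extern payload here is illustrative):

    body := QueryRequest{
        Type:   "flux",
        Query:  `from(bucket: "mybucket") |> range(start: -1h)`,
        Extern: json.RawMessage(`{"type":"File","body":[]}`), // illustrative extern
    }
    buf, _ := json.Marshal(body) // POST this to the /api/v2/query endpoint
    _ = buf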

@@ -245,7 +245,7 @@ func TestFluxHandler_postFluxAST(t *testing.T) {
 			name: "get ast from()",
 			w:    httptest.NewRecorder(),
 			r:    httptest.NewRequest("POST", "/api/v2/query/ast", bytes.NewBufferString(`{"query": "from()"}`)),
-			want: `{"ast":{"type":"Package","package":"main","files":[{"type":"File","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"metadata":"parser-type=go","package":null,"imports":null,"body":[{"type":"ExpressionStatement","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"expression":{"type":"CallExpression","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"callee":{"type":"Identifier","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":5},"source":"from"},"name":"from"}}}]}]}}
+			want: `{"ast":{"type":"Package","package":"main","files":[{"type":"File","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"metadata":"parser-type=rust","package":null,"imports":null,"body":[{"type":"ExpressionStatement","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"expression":{"type":"CallExpression","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"callee":{"type":"Identifier","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":5},"source":"from"},"name":"from"}}}]}]}}
 `,
 			status: http.StatusOK,
 		},
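The only change in this fixture is the metadata field: parser-type=go becomes
parser-type=rust, reflecting that the upgraded Flux parses source with its Rust
parser (libflux) rather than the pure-Go parser.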
@@ -3,6 +3,7 @@ package http
 import (
 	"bytes"
 	"context"
+	"encoding/json"
 	"net/http"
 	"net/http/httptest"
 	"reflect"

@@ -33,7 +34,7 @@ var cmpOptions = cmp.Options{
 func TestQueryRequest_WithDefaults(t *testing.T) {
 	type fields struct {
 		Spec    *flux.Spec
-		AST     *ast.Package
+		AST     json.RawMessage
 		Query   string
 		Type    string
 		Dialect QueryDialect

@@ -59,7 +60,6 @@ func TestQueryRequest_WithDefaults(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			r := QueryRequest{
-				Spec:    tt.fields.Spec,
 				AST:     tt.fields.AST,
 				Query:   tt.fields.Query,
 				Type:    tt.fields.Type,

@@ -75,9 +75,8 @@ func TestQueryRequest_WithDefaults(t *testing.T) {

 func TestQueryRequest_Validate(t *testing.T) {
 	type fields struct {
-		Extern  *ast.File
-		Spec    *flux.Spec
-		AST     *ast.Package
+		Extern  json.RawMessage
+		AST     json.RawMessage
 		Query   string
 		Type    string
 		Dialect QueryDialect

@@ -95,19 +94,6 @@ func TestQueryRequest_Validate(t *testing.T) {
 			},
 			wantErr: true,
 		},
-		{
-			name: "query cannot have both extern and spec",
-			fields: fields{
-				Extern: &ast.File{},
-				Spec:   &flux.Spec{},
-				Type:   "flux",
-				Dialect: QueryDialect{
-					Delimiter:      ",",
-					DateTimeFormat: "RFC3339",
-				},
-			},
-			wantErr: true,
-		},
 		{
 			name: "requires flux type",
 			fields: fields{

@@ -189,7 +175,6 @@ func TestQueryRequest_Validate(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			r := QueryRequest{
 				Extern:  tt.fields.Extern,
-				Spec:    tt.fields.Spec,
 				AST:     tt.fields.AST,
 				Query:   tt.fields.Query,
 				Type:    tt.fields.Type,

@@ -205,9 +190,9 @@ func TestQueryRequest_Validate(t *testing.T) {

 func TestQueryRequest_proxyRequest(t *testing.T) {
 	type fields struct {
-		Extern  *ast.File
+		Extern  json.RawMessage
 		Spec    *flux.Spec
-		AST     *ast.Package
+		AST     json.RawMessage
 		Query   string
 		Type    string
 		Dialect QueryDialect

@@ -258,7 +243,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 		{
 			name: "valid AST",
 			fields: fields{
-				AST:  &ast.Package{},
+				AST:  mustMarshal(&ast.Package{}),
 				Type: "flux",
 				Dialect: QueryDialect{
 					Delimiter: ",",

@@ -271,7 +256,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 			want: &query.ProxyRequest{
 				Request: query.Request{
 					Compiler: lang.ASTCompiler{
-						AST: &ast.Package{},
+						AST: mustMarshal(&ast.Package{}),
 						Now: time.Unix(1, 1),
 					},
 				},

@@ -286,7 +271,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 		{
 			name: "valid AST with calculated now",
 			fields: fields{
-				AST:  &ast.Package{},
+				AST:  mustMarshal(&ast.Package{}),
 				Type: "flux",
 				Dialect: QueryDialect{
 					Delimiter: ",",

@@ -298,7 +283,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 			want: &query.ProxyRequest{
 				Request: query.Request{
 					Compiler: lang.ASTCompiler{
-						AST: &ast.Package{},
+						AST: mustMarshal(&ast.Package{}),
 						Now: time.Unix(2, 2),
 					},
 				},

@@ -313,7 +298,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 		{
 			name: "valid AST with extern",
 			fields: fields{
-				Extern: &ast.File{
+				Extern: mustMarshal(&ast.File{
 					Body: []ast.Statement{
 						&ast.OptionStatement{
 							Assignment: &ast.VariableAssignment{

@@ -322,8 +307,8 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 						},
 					},
 				},
-				},
-				AST:  &ast.Package{},
+				}),
+				AST:  mustMarshal(&ast.Package{}),
 				Type: "flux",
 				Dialect: QueryDialect{
 					Delimiter: ",",

@@ -335,20 +320,17 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 			want: &query.ProxyRequest{
 				Request: query.Request{
 					Compiler: lang.ASTCompiler{
-						AST: &ast.Package{
-							Files: []*ast.File{
-								{
-									Body: []ast.Statement{
-										&ast.OptionStatement{
-											Assignment: &ast.VariableAssignment{
-												ID:   &ast.Identifier{Name: "x"},
-												Init: &ast.IntegerLiteral{Value: 0},
-											},
-										},
-									},
-								},
-							},
-						},
+						Extern: mustMarshal(&ast.File{
+							Body: []ast.Statement{
+								&ast.OptionStatement{
+									Assignment: &ast.VariableAssignment{
+										ID:   &ast.Identifier{Name: "x"},
+										Init: &ast.IntegerLiteral{Value: 0},
+									},
+								},
+							},
+						}),
+						AST: mustMarshal(&ast.Package{}),
 						Now: time.Unix(1, 1),
 					},
 				},

@@ -365,7 +347,6 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			r := QueryRequest{
 				Extern:  tt.fields.Extern,
-				Spec:    tt.fields.Spec,
 				AST:     tt.fields.AST,
 				Query:   tt.fields.Query,
 				Type:    tt.fields.Type,

@@ -385,6 +366,14 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 	}
 }

+func mustMarshal(p ast.Node) []byte {
+	bs, err := json.Marshal(p)
+	if err != nil {
+		panic(err)
+	}
+	return bs
+}
+
 func Test_decodeQueryRequest(t *testing.T) {
 	type args struct {
 		ctx context.Context
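mustMarshal is the small helper that keeps the fixtures above readable now that the
compilers carry raw JSON; panicking on a marshal error is acceptable in a test
helper. Typical fixture usage, exactly as in the cases above:

    req := QueryRequest{
        Type: "flux",
        AST:  mustMarshal(&ast.Package{}), // any ast.Node as json.RawMessage
    }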
@@ -481,6 +470,25 @@ func Test_decodeQueryRequest(t *testing.T) {
 }

 func Test_decodeProxyQueryRequest(t *testing.T) {
+	externJSON := `{
+		"type": "File",
+		"body": [
+			{
+				"type": "OptionStatement",
+				"assignment": {
+					"type": "VariableAssignment",
+					"id": {
+						"type": "Identifier",
+						"name": "x"
+					},
+					"init": {
+						"type": "IntegerLiteral",
+						"value": "0"
+					}
+				}
+			}
+		]
+	}`
 	type args struct {
 		ctx context.Context
 		r   *http.Request

@@ -525,25 +533,7 @@ func Test_decodeProxyQueryRequest(t *testing.T) {
 			args: args{
 				r: httptest.NewRequest("POST", "/", bytes.NewBufferString(`
 {
-	"extern": {
-		"type": "File",
-		"body": [
-			{
-				"type": "OptionStatement",
-				"assignment": {
-					"type": "VariableAssignment",
-					"id": {
-						"type": "Identifier",
-						"name": "x"
-					},
-					"init": {
-						"type": "IntegerLiteral",
-						"value": "0"
-					}
-				}
-			}
-		]
-	},
+	"extern": `+externJSON+`,
 	"query": "from(bucket: \"mybucket\")"
 }
 `)),

@@ -559,17 +549,8 @@ func Test_decodeProxyQueryRequest(t *testing.T) {
 				Request: query.Request{
 					OrganizationID: func() platform.ID { s, _ := platform.IDFromString("deadbeefdeadbeef"); return *s }(),
 					Compiler: lang.FluxCompiler{
-						Extern: &ast.File{
-							Body: []ast.Statement{
-								&ast.OptionStatement{
-									Assignment: &ast.VariableAssignment{
-										ID:   &ast.Identifier{Name: "x"},
-										Init: &ast.IntegerLiteral{Value: 0},
-									},
-								},
-							},
-						},
-						Query: `from(bucket: "mybucket")`,
+						Extern: []byte(externJSON),
+						Query:  `from(bucket: "mybucket")`,
 					},
 				},
 				Dialect: &csv.Dialect{

@@ -629,3 +610,59 @@ func Test_decodeProxyQueryRequest(t *testing.T) {
		})
	}
}

func TestProxyRequestToQueryRequest_Compilers(t *testing.T) {
	tests := []struct {
		name string
		pr   query.ProxyRequest
		want QueryRequest
	}{
		{
			name: "flux compiler copied",
			pr: query.ProxyRequest{
				Dialect: &query.NoContentDialect{},
				Request: query.Request{
					Compiler: lang.FluxCompiler{
						Query: `howdy`,
						Now:   time.Unix(45, 45),
					},
				},
			},
			want: QueryRequest{
				Type:            "flux",
				Query:           `howdy`,
				PreferNoContent: true,
				Now:             time.Unix(45, 45),
			},
		},
		{
			name: "AST compiler copied",
			pr: query.ProxyRequest{
				Dialect: &query.NoContentDialect{},
				Request: query.Request{
					Compiler: lang.ASTCompiler{
						Now: time.Unix(45, 45),
						AST: mustMarshal(&ast.Package{}),
					},
				},
			},
			want: QueryRequest{
				Type:            "flux",
				PreferNoContent: true,
				AST:             mustMarshal(&ast.Package{}),
				Now:             time.Unix(45, 45),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := QueryRequestFromProxyRequest(&tt.pr)
			if err != nil {
				t.Error(err)
			} else if !reflect.DeepEqual(*got, tt.want) {
				t.Errorf("QueryRequestFromProxyRequest = %v, want %v", got, tt.want)
			}
		})
	}
}
|
@ -142,6 +142,20 @@ func MemoryOptimizedSchemaMutation() BoolFlag {
|
|||
return memoryOptimizedSchemaMutation
|
||||
}
|
||||
|
||||
var queryTracing = MakeBoolFlag(
|
||||
"Query Tracing",
|
||||
"queryTracing",
|
||||
"Query Team",
|
||||
false,
|
||||
Permanent,
|
||||
false,
|
||||
)
|
||||
|
||||
// QueryTracing - Turn on query tracing for queries that are sampled
|
||||
func QueryTracing() BoolFlag {
|
||||
return queryTracing
|
||||
}
|
||||
|
||||
var simpleTaskOptionsExtraction = MakeBoolFlag(
|
||||
"Simple Task Options Extraction",
|
||||
"simpleTaskOptionsExtraction",
|
||||
|
@ -240,6 +254,20 @@ func OrgOnlyMemberList() BoolFlag {
|
|||
return orgOnlyMemberList
|
||||
}
|
||||
|
||||
var injectLatestSuccessTime = MakeBoolFlag(
|
||||
"Inject Latest Success Time",
|
||||
"injectLatestSuccessTime",
|
||||
"Compute Team",
|
||||
false,
|
||||
Temporary,
|
||||
false,
|
||||
)
|
||||
|
||||
// InjectLatestSuccessTime - Inject the latest successful task run timestamp into a Task query extern when executing.
|
||||
func InjectLatestSuccessTime() BoolFlag {
|
||||
return injectLatestSuccessTime
|
||||
}
|
||||
|
||||
var enforceOrgDashboardLimits = MakeBoolFlag(
|
||||
"Enforce Organization Dashboard Limits",
|
||||
"enforceOrgDashboardLimits",
|
||||
|
@ -265,6 +293,7 @@ var all = []Flag{
|
|||
newLabels,
|
||||
memoryOptimizedFill,
|
||||
memoryOptimizedSchemaMutation,
|
||||
queryTracing,
|
||||
simpleTaskOptionsExtraction,
|
||||
mergeFiltersRule,
|
||||
bandPlotType,
|
||||
|
@ -272,6 +301,7 @@ var all = []Flag{
|
|||
notebooks,
|
||||
pushDownGroupAggregateMinMax,
|
||||
orgOnlyMemberList,
|
||||
injectLatestSuccessTime,
|
||||
enforceOrgDashboardLimits,
|
||||
}
|
||||
|
||||
|
@@ -286,6 +316,7 @@ var byKey = map[string]Flag{
	"newLabels":                     newLabels,
	"memoryOptimizedFill":           memoryOptimizedFill,
	"memoryOptimizedSchemaMutation": memoryOptimizedSchemaMutation,
	"queryTracing":                  queryTracing,
	"simpleTaskOptionsExtraction":   simpleTaskOptionsExtraction,
	"mergeFiltersRule":              mergeFiltersRule,
	"bandPlotType":                  bandPlotType,

@@ -293,5 +324,6 @@ var byKey = map[string]Flag{
	"notebooks":                    notebooks,
	"pushDownGroupAggregateMinMax": pushDownGroupAggregateMinMax,
	"orgOnlyMemberList":            orgOnlyMemberList,
	"injectLatestSuccessTime":      injectLatestSuccessTime,
	"enforceOrgDashboardLimits":    enforceOrgDashboardLimits,
}

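For context on how these registrations are consumed: a call site gates behavior on the annotated request context, exactly as the controller hunk later in this patch does. A minimal Go sketch, assuming a context annotated via feature.Annotate (the helper name is illustrative, not part of this patch):

	// sketch: true only when the request context carries a flagger that
	// enables queryTracing; otherwise the flag's default (false) applies.
	func tracingEnabled(ctx context.Context) bool {
		return feature.QueryTracing().Enabled(ctx)
	}
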
@@ -1,5 +0,0 @@
# Flux master packages

This package tree is temporarily copied from Flux master to keep unit tests which depend on newer
versions of Flux. Once Flux has been updated, this package should be removed and any clients of
this package referred to the official Flux package.

@@ -1,130 +0,0 @@
package edit

import (
	"fmt"

	"github.com/influxdata/flux/ast"
)

// `OptionFn` is a function that, provided with an `OptionStatement`, returns
// an `Expression` or an error. It is used by `Option` functions to edit
// AST's options statements.
type OptionFn func(opt *ast.OptionStatement) (ast.Expression, error)

// `Option` passes the `OptionStatement` in the AST rooted at `node` that has the
// specified identifier to `fn`.
// The function can have side effects on the option statement
// and/or return a non-nil `Expression` that is set as value for the option.
// If the value returned by the edit function is `nil` (or an error is returned) no new value is set
// for the option statement (but any, maybe partial, side effect is applied).
// `Option` returns whether it could find and edit the option (possibly with errors) or not.
func Option(node ast.Node, optionIdentifier string, fn OptionFn) (bool, error) {
	oe := &optionEditor{identifier: optionIdentifier, optionFn: fn, err: nil}
	ast.Walk(oe, node)

	if oe.err != nil {
		return oe.found, oe.err
	}

	return oe.found, nil
}

// OptionValueFn creates an `OptionFn` for setting the value of an `OptionStatement`.
func OptionValueFn(expr ast.Expression) OptionFn {
	return func(opt *ast.OptionStatement) (ast.Expression, error) {
		return expr, nil
	}
}

// OptionObjectFn creates an `OptionFn` for updating the values of an `OptionStatement` that has an
// `ObjectExpression` as value. Returns an error if the child of the option statement is not
// an object expression. If some key is not a property of the object it is added.
func OptionObjectFn(keyMap map[string]ast.Expression) OptionFn {
	return func(opt *ast.OptionStatement) (ast.Expression, error) {
		a, ok := opt.Assignment.(*ast.VariableAssignment)
		if !ok {
			return nil, fmt.Errorf("option assignment must be variable assignment")
		}
		obj, ok := a.Init.(*ast.ObjectExpression)
		if !ok {
			return nil, fmt.Errorf("value is %s, not an object expression", a.Init.Type())
		}

		// check that every specified property exists in the object
		found := make(map[string]bool, len(obj.Properties))
		for _, p := range obj.Properties {
			found[p.Key.Key()] = true
		}

		for k := range keyMap {
			if !found[k] {
				obj.Properties = append(obj.Properties, &ast.Property{
					Key:   &ast.Identifier{Name: k},
					Value: keyMap[k],
				})
			}
		}

		for _, p := range obj.Properties {
			exp, found := keyMap[p.Key.Key()]
			if found {
				p.Value = exp
			}
		}

		return nil, nil
	}
}

// optionEditor finds the `OptionStatement` with the specified `identifier` and updates its value.
// There shouldn't be more than one option statement with the same identifier
// in a valid query.
type optionEditor struct {
	identifier string
	optionFn   OptionFn
	err        error
	found      bool
}

func (v *optionEditor) Visit(node ast.Node) ast.Visitor {
	if os, ok := node.(*ast.OptionStatement); ok {
		switch a := os.Assignment.(type) {
		case *ast.VariableAssignment:
			if a.ID.Name == v.identifier {
				v.found = true

				newInit, err := v.optionFn(os)

				if err != nil {
					v.err = err
				} else if newInit != nil {
					a.Init = newInit
				}

				return nil
			}
		case *ast.MemberAssignment:
			id, ok := a.Member.Object.(*ast.Identifier)
			if ok {
				name := id.Name + "." + a.Member.Property.Key()
				if name == v.identifier {
					v.found = true

					newInit, err := v.optionFn(os)

					if err != nil {
						v.err = err
					} else if newInit != nil {
						a.Init = newInit
					}

					return nil
				}
			}
		}
	}

	return v
}

func (v *optionEditor) Done(node ast.Node) {}

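The deleted editor mirrors the upstream flux/ast/edit package that callers now import. A hedged sketch of the call pattern (the `now` identifier and the literal value are illustrative):

	// sketch: overwrite `option now = ...` in a parsed file.
	found, err := edit.Option(file, "now",
		edit.OptionValueFn(&ast.DateTimeLiteral{Value: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)}))
	if err != nil {
		return err
	}
	if !found {
		// no option statement named "now" in this AST
	}
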
@@ -1,109 +0,0 @@
package edit

import (
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/codes"
)

// GetOption finds and returns the init for the option's variable assignment
func GetOption(file *ast.File, name string) (ast.Expression, error) {
	for _, st := range file.Body {
		if val, ok := st.(*ast.OptionStatement); ok {
			assign := val.Assignment
			if va, ok := assign.(*ast.VariableAssignment); ok {
				if va.ID.Name == name {
					return va.Init, nil
				}
			}
		}
	}

	return nil, &flux.Error{
		Code: codes.Internal,
		Msg:  "Option not found",
	}
}

// SetOption replaces an existing option's init with the provided init or adds
// the option if it doesn't exist. The file AST is mutated in place.
func SetOption(file *ast.File, name string, expr ast.Expression) {
	// look for an existing option statement to replace
	for _, st := range file.Body {
		if val, ok := st.(*ast.OptionStatement); ok {
			assign := val.Assignment
			if va, ok := assign.(*ast.VariableAssignment); ok {
				if va.ID.Name == name {
					// replace the variable assignment's init
					va.Init = expr
					return
				}
			}
		}
	}
	// option was not found. prepend new option to body
	file.Body = append([]ast.Statement{&ast.OptionStatement{
		Assignment: &ast.VariableAssignment{
			ID:   &ast.Identifier{Name: name},
			Init: expr,
		},
	}}, file.Body...)
}

// DeleteOption removes an option if it exists. The file AST is mutated in place.
func DeleteOption(file *ast.File, name string) {
	for i, st := range file.Body {
		if val, ok := st.(*ast.OptionStatement); ok {
			assign := val.Assignment
			if va, ok := assign.(*ast.VariableAssignment); ok {
				if va.ID.Name == name {
					file.Body = append(file.Body[:i], file.Body[i+1:]...)
					return
				}
			}
		}
	}
}

// GetProperty finds and returns the AST node for the property value.
func GetProperty(obj *ast.ObjectExpression, key string) (ast.Expression, error) {
	for _, prop := range obj.Properties {
		if key == prop.Key.Key() {
			return prop.Value, nil
		}
	}
	return nil, &flux.Error{
		Code: codes.Internal,
		Msg:  "Property not found",
	}
}

// SetProperty replaces an existing property definition with the provided object expression or adds
// the property if it doesn't exist. The object expression AST is mutated in place.
func SetProperty(obj *ast.ObjectExpression, key string, value ast.Expression) {
	for _, prop := range obj.Properties {
		if key == prop.Key.Key() {
			prop.Value = value
			return
		}
	}

	obj.Properties = append(obj.Properties, &ast.Property{
		BaseNode: obj.BaseNode,
		Key:      &ast.Identifier{Name: key},
		Value:    value,
	})
}

// DeleteProperty removes a property from the object expression if it exists.
// The object expression AST is mutated in place.
func DeleteProperty(obj *ast.ObjectExpression, key string) {
	for i, prop := range obj.Properties {
		if key == prop.Key.Key() {
			obj.Properties = append(obj.Properties[:i], obj.Properties[i+1:]...)
			return
		}
	}
}

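SetOption's add-or-replace behavior is the useful contract here. A sketch of a call under assumed identifiers (the option name and duration value are illustrative, not from this patch):

	// sketch: ensure `option task = {every: 1h}` exists in the file.
	edit.SetOption(file, "task", &ast.ObjectExpression{
		Properties: []*ast.Property{{
			Key:   &ast.Identifier{Name: "every"},
			Value: &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: 1, Unit: "h"}}},
		}},
	})
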
@@ -1,52 +0,0 @@
package ast

import (
	"regexp"
	"time"

	"github.com/influxdata/flux/ast"
)

func IntegerLiteralFromValue(v int64) *ast.IntegerLiteral {
	return &ast.IntegerLiteral{Value: v}
}
func UnsignedIntegerLiteralFromValue(v uint64) *ast.UnsignedIntegerLiteral {
	return &ast.UnsignedIntegerLiteral{Value: v}
}
func FloatLiteralFromValue(v float64) *ast.FloatLiteral {
	return &ast.FloatLiteral{Value: v}
}
func StringLiteralFromValue(v string) *ast.StringLiteral {
	return &ast.StringLiteral{Value: v}
}
func BooleanLiteralFromValue(v bool) *ast.BooleanLiteral {
	return &ast.BooleanLiteral{Value: v}
}
func DateTimeLiteralFromValue(v time.Time) *ast.DateTimeLiteral {
	return &ast.DateTimeLiteral{Value: v}
}
func RegexpLiteralFromValue(v *regexp.Regexp) *ast.RegexpLiteral {
	return &ast.RegexpLiteral{Value: v}
}

func IntegerFromLiteral(lit *ast.IntegerLiteral) int64 {
	return lit.Value
}
func UnsignedIntegerFromLiteral(lit *ast.UnsignedIntegerLiteral) uint64 {
	return lit.Value
}
func FloatFromLiteral(lit *ast.FloatLiteral) float64 {
	return lit.Value
}
func StringFromLiteral(lit *ast.StringLiteral) string {
	return lit.Value
}
func BooleanFromLiteral(lit *ast.BooleanLiteral) bool {
	return lit.Value
}
func DateTimeFromLiteral(lit *ast.DateTimeLiteral) time.Time {
	return lit.Value
}
func RegexpFromLiteral(lit *ast.RegexpLiteral) *regexp.Regexp {
	return lit.Value
}

@@ -1,138 +0,0 @@
package table

import (
	"fmt"
	"strings"

	"github.com/andreyvit/diff"
	"github.com/influxdata/flux"
)

// Diff will perform a diff between two table iterators.
// This will sort the tables within the table iterators and produce
// a diff of the full output.
func Diff(want, got flux.TableIterator, opts ...DiffOption) string {
	if want == nil {
		want = Iterator{}
	}

	var wantS string
	if wantT, err := Sort(want); err != nil {
		wantS = fmt.Sprintf("table error: %s\n", err)
	} else {
		var sb strings.Builder
		if err := wantT.Do(func(table flux.Table) error {
			sb.WriteString(Stringify(table))
			return nil
		}); err != nil {
			_, _ = fmt.Fprintf(&sb, "table error: %s\n", err)
		}
		wantS = sb.String()
	}

	if got == nil {
		got = Iterator{}
	}

	var gotS string
	if gotT, err := Sort(got); err != nil {
		gotS = fmt.Sprintf("table error: %s\n", err)
	} else {
		var sb strings.Builder
		if err := gotT.Do(func(table flux.Table) error {
			sb.WriteString(Stringify(table))
			return nil
		}); err != nil {
			_, _ = fmt.Fprintf(&sb, "table error: %s\n", err)
		}
		gotS = sb.String()
	}

	differ := newDiffer(opts...)
	return differ.diff(wantS, gotS)
}

type differ struct {
	ctx *[2]int
}

func newDiffer(opts ...DiffOption) (d differ) {
	for _, opt := range diffDefaultOptions {
		opt.apply(&d)
	}
	for _, opt := range opts {
		opt.apply(&d)
	}
	return d
}

func (d differ) diff(want, got string) string {
	lines := diff.LineDiffAsLines(want, got)
	if d.ctx == nil {
		return strings.Join(lines, "\n")
	}

	difflines := make([]string, 0, len(lines))
OUTER:
	for {
		for i := 0; i < len(lines); i++ {
			if lines[i][0] == ' ' {
				continue
			}

			// This is the start of a diff section. Store this location.
			start := i - (*d.ctx)[0]
			if start < 0 {
				start = 0
			}

			// Find the end of this section.
			for ; i < len(lines); i++ {
				if lines[i][0] == ' ' {
					break
				}
			}

			// Look n points in the future and, if they are
			// not part of a diff or don't overrun the number
			// of lines, include them.
			stop := i

			for n := (*d.ctx)[1]; n > 0; n-- {
				if stop+1 >= len(lines) || lines[stop+1][0] != ' ' {
					break
				}
				stop++
			}

			difflines = append(difflines, lines[start:stop]...)
			lines = lines[stop:]
			continue OUTER
		}
		return strings.Join(difflines, "\n")
	}
}

type DiffOption interface {
	apply(*differ)
}

type diffOptionFn func(d *differ)

func (opt diffOptionFn) apply(d *differ) {
	opt(d)
}

var diffDefaultOptions = []DiffOption{
	DiffContext(3),
}

func DiffContext(n int) DiffOption {
	return diffOptionFn(func(d *differ) {
		if n < 0 {
			d.ctx = nil
			return
		}
		ctx := [2]int{n, n}
		d.ctx = &ctx
	})
}

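Diff is intended for test assertions; with the default context options, an empty result means the sorted, stringified iterators matched. A minimal sketch (want and got assumed to be flux.TableIterator values inside a test):

	// sketch: fail a test with a contextual line diff of the table output.
	if d := table.Diff(want, got, table.DiffContext(2)); d != "" {
		t.Fatalf("unexpected tables -want/+got:\n%s", d)
	}
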
@@ -1,14 +0,0 @@
package table

import "github.com/influxdata/flux"

type Iterator []flux.Table

func (t Iterator) Do(f func(flux.Table) error) error {
	for _, tbl := range t {
		if err := f(tbl); err != nil {
			return err
		}
	}
	return nil
}

@@ -1,32 +0,0 @@
package table

import (
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/execute"
)

// Sort will read a TableIterator and produce another TableIterator
// where the keys are sorted.
//
// This method will buffer all of the data since it needs to ensure
// all of the tables are read to avoid any deadlocks. Be careful
// using this method in performance sensitive areas.
func Sort(tables flux.TableIterator) (flux.TableIterator, error) {
	groups := execute.NewGroupLookup()
	if err := tables.Do(func(table flux.Table) error {
		buffered, err := execute.CopyTable(table)
		if err != nil {
			return err
		}
		groups.Set(buffered.Key(), buffered)
		return nil
	}); err != nil {
		return nil, err
	}

	var buffered []flux.Table
	groups.Range(func(_ flux.GroupKey, value interface{}) {
		buffered = append(buffered, value.(flux.Table))
	})
	return Iterator(buffered), nil
}

@@ -1,703 +0,0 @@
// Package static provides utilities for easily constructing static
// tables that are meant for tests.
//
// The primary type is Table which will be a mapping of columns to their data.
// The data is defined in a columnar format instead of a row-based one.
//
// The implementations in this package are not performant and are not meant
// to be used in production code. They are good enough for small datasets that
// are present in tests to ensure code correctness.
package static

import (
	"fmt"
	"time"

	stdarrow "github.com/apache/arrow/go/arrow"
	"github.com/apache/arrow/go/arrow/array"
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/arrow"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/values"
	"github.com/influxdata/influxdb/v2/pkg/flux/internal/errors"
	"github.com/influxdata/influxdb/v2/pkg/flux/internal/execute/table"
)

// Table is a statically constructed table.
// It is a mapping between column names and the column.
//
// This is not a performant section of code and it is primarily
// meant to make writing unit tests easy. Do not use in
// production code.
//
// The Table struct implements the TableIterator interface
// and not the Table interface. To retrieve a flux.Table compatible
// implementation, the Table() method can be used.
type Table []Column

// Do will produce the Table and then invoke the function
// on that flux.Table.
//
// If the produced Table is invalid, then this method
// will panic.
func (s Table) Do(f func(flux.Table) error) error {
	return f(s.Table())
}

func (s Table) Build(template *[]Column) []flux.Table {
	t := make(Table, 0, len(*template)+len(s))
	t = append(t, *template...)
	t = append(t, s...)
	return []flux.Table{t.Table()}
}

// Table will produce a flux.Table using the Column values
// that are part of this Table.
//
// If the Table produces an invalid buffer, then this method
// will panic.
func (s Table) Table() flux.Table {
	if len(s) == 0 {
		panic(errors.New(codes.Internal, "static table has no columns"))
	}

	key, cols := s.buildSchema()
	buffer := &arrow.TableBuffer{
		GroupKey: key,
		Columns:  cols,
	}

	// Determine the size by looking at the first non-key column.
	n := 0
	for _, c := range s {
		if c.IsKey() {
			continue
		}
		n = c.Len()
		break
	}

	// Construct each of the buffers.
	buffer.Values = make([]array.Interface, len(buffer.Columns))
	for i, c := range s {
		buffer.Values[i] = c.Make(n)
	}

	if err := buffer.Validate(); err != nil {
		panic(err)
	}
	return table.FromBuffer(buffer)
}

// buildSchema will construct the schema from the columns.
func (s Table) buildSchema() (flux.GroupKey, []flux.ColMeta) {
	var (
		keyCols []flux.ColMeta
		keyVals []values.Value
		cols    []flux.ColMeta
	)
	for _, c := range s {
		col := flux.ColMeta{Label: c.Label(), Type: c.Type()}
		if c.IsKey() {
			keyCols = append(keyCols, col)
			keyVals = append(keyVals, c.KeyValue())
		}
		cols = append(cols, col)
	}
	return execute.NewGroupKey(keyCols, keyVals), cols
}

// Column is the definition for how to construct a column for the table.
type Column interface {
	// Label returns the label associated with this column.
	Label() string

	// Type returns the column type for this column.
	Type() flux.ColType

	// Make will construct an array with the given length
	// if it is possible.
	Make(n int) array.Interface

	// Len will return the length of this column.
	// If no length is known, this will return -1.
	Len() int

	// IsKey will return true if this is part of the group key.
	IsKey() bool

	// KeyValue will return the key value if this column is part
	// of the group key.
	KeyValue() values.Value

	// TableBuilder allows this column to add itself to a template.
	TableBuilder
}

// IntKey will construct a group key with the integer type.
// The value can be an int, int64, or nil.
func IntKey(k string, v interface{}) KeyColumn {
	if iv, ok := mustIntValue(v); ok {
		return KeyColumn{k: k, v: iv, t: flux.TInt}
	}
	return KeyColumn{k: k, t: flux.TInt}
}

// UintKey will construct a group key with the unsigned type.
// The value can be a uint, uint64, int, int64, or nil.
func UintKey(k string, v interface{}) KeyColumn {
	if iv, ok := mustUintValue(v); ok {
		return KeyColumn{k: k, v: iv, t: flux.TUInt}
	}
	return KeyColumn{k: k, t: flux.TUInt}
}

// FloatKey will construct a group key with the float type.
// The value can be a float64, int, int64, or nil.
func FloatKey(k string, v interface{}) KeyColumn {
	if iv, ok := mustFloatValue(v); ok {
		return KeyColumn{k: k, v: iv, t: flux.TFloat}
	}
	return KeyColumn{k: k, t: flux.TFloat}
}

// StringKey will construct a group key with the string type.
// The value can be a string or nil.
func StringKey(k string, v interface{}) KeyColumn {
	if iv, ok := mustStringValue(v); ok {
		return KeyColumn{k: k, v: iv, t: flux.TString}
	}
	return KeyColumn{k: k, t: flux.TString}
}

// BooleanKey will construct a group key with the boolean type.
// The value can be a bool or nil.
func BooleanKey(k string, v interface{}) KeyColumn {
	if iv, ok := mustBooleanValue(v); ok {
		return KeyColumn{k: k, v: iv, t: flux.TBool}
	}
	return KeyColumn{k: k, t: flux.TBool}
}

// TimeKey will construct a group key with the given time using either a
// string or an integer. If an integer is used, then it is in seconds.
func TimeKey(k string, v interface{}) KeyColumn {
	if iv, _, ok := mustTimeValue(v, 0, time.Second); ok {
		return KeyColumn{k: k, v: execute.Time(iv), t: flux.TTime}
	}
	return KeyColumn{k: k, t: flux.TTime}
}

type KeyColumn struct {
	k string
	v interface{}
	t flux.ColType
}

func (s KeyColumn) Make(n int) array.Interface {
	return arrow.Repeat(s.KeyValue(), n, memory.DefaultAllocator)
}

func (s KeyColumn) Label() string          { return s.k }
func (s KeyColumn) Type() flux.ColType     { return s.t }
func (s KeyColumn) Len() int               { return -1 }
func (s KeyColumn) IsKey() bool            { return true }
func (s KeyColumn) KeyValue() values.Value { return values.New(s.v) }

func (s KeyColumn) Build(template *[]Column) []flux.Table {
	*template = append(*template, s)
	return nil
}

// Ints will construct an array of integers.
// Each value can be an int, int64, or nil.
func Ints(k string, v ...interface{}) Column {
	c := intColumn{
		column: column{k: k},
		v:      make([]int64, len(v)),
	}
	for i, iv := range v {
		val, ok := mustIntValue(iv)
		if !ok {
			if c.valid == nil {
				c.valid = make([]bool, len(v))
				for i := range c.valid {
					c.valid[i] = true
				}
			}
			c.valid[i] = false
		}
		c.v[i] = val
	}
	return c
}

type column struct {
	k     string
	valid []bool
}

func (s column) Label() string { return s.k }
func (s column) IsKey() bool   { return false }

type intColumn struct {
	column
	v []int64
}

func (s intColumn) Make(n int) array.Interface {
	b := array.NewInt64Builder(memory.DefaultAllocator)
	b.Resize(len(s.v))
	b.AppendValues(s.v, s.valid)
	return b.NewArray()
}

func (s intColumn) Type() flux.ColType     { return flux.TInt }
func (s intColumn) Len() int               { return len(s.v) }
func (s intColumn) KeyValue() values.Value { return values.InvalidValue }

func (s intColumn) Build(template *[]Column) []flux.Table {
	*template = append(*template, s)
	return nil
}

func mustIntValue(v interface{}) (int64, bool) {
	if v == nil {
		return 0, false
	}

	switch v := v.(type) {
	case int:
		return int64(v), true
	case int64:
		return v, true
	default:
		panic(fmt.Sprintf("unable to convert type %T to an int value", v))
	}
}

// Uints will construct an array of unsigned integers.
// Each value can be a uint, uint64, int, int64, or nil.
func Uints(k string, v ...interface{}) Column {
	c := uintColumn{
		column: column{k: k},
		v:      make([]uint64, len(v)),
	}
	for i, iv := range v {
		val, ok := mustUintValue(iv)
		if !ok {
			if c.valid == nil {
				c.valid = make([]bool, len(v))
				for i := range c.valid {
					c.valid[i] = true
				}
			}
			c.valid[i] = false
		}
		c.v[i] = val
	}
	return c
}

type uintColumn struct {
	column
	v []uint64
}

func (s uintColumn) Make(n int) array.Interface {
	b := array.NewUint64Builder(memory.DefaultAllocator)
	b.Resize(len(s.v))
	b.AppendValues(s.v, s.valid)
	return b.NewArray()
}

func (s uintColumn) Type() flux.ColType     { return flux.TUInt }
func (s uintColumn) Len() int               { return len(s.v) }
func (s uintColumn) KeyValue() values.Value { return values.InvalidValue }

func (s uintColumn) Build(template *[]Column) []flux.Table {
	*template = append(*template, s)
	return nil
}

func mustUintValue(v interface{}) (uint64, bool) {
	if v == nil {
		return 0, false
	}

	switch v := v.(type) {
	case int:
		return uint64(v), true
	case int64:
		return uint64(v), true
	case uint:
		return uint64(v), true
	case uint64:
		return v, true
	default:
		panic(fmt.Sprintf("unable to convert type %T to a uint value", v))
	}
}

// Floats will construct an array of floats.
// Each value can be a float64, int, int64, or nil.
func Floats(k string, v ...interface{}) Column {
	c := floatColumn{
		column: column{k: k},
		v:      make([]float64, len(v)),
	}
	for i, iv := range v {
		val, ok := mustFloatValue(iv)
		if !ok {
			if c.valid == nil {
				c.valid = make([]bool, len(v))
				for i := range c.valid {
					c.valid[i] = true
				}
			}
			c.valid[i] = false
		}
		c.v[i] = val
	}
	return c
}

type floatColumn struct {
	column
	v []float64
}

func (s floatColumn) Make(n int) array.Interface {
	b := array.NewFloat64Builder(memory.DefaultAllocator)
	b.Resize(len(s.v))
	b.AppendValues(s.v, s.valid)
	return b.NewArray()
}

func (s floatColumn) Type() flux.ColType     { return flux.TFloat }
func (s floatColumn) Len() int               { return len(s.v) }
func (s floatColumn) KeyValue() values.Value { return values.InvalidValue }

func (s floatColumn) Build(template *[]Column) []flux.Table {
	*template = append(*template, s)
	return nil
}

func mustFloatValue(v interface{}) (float64, bool) {
	if v == nil {
		return 0, false
	}

	switch v := v.(type) {
	case int:
		return float64(v), true
	case int64:
		return float64(v), true
	case float64:
		return v, true
	default:
		panic(fmt.Sprintf("unable to convert type %T to a float value", v))
	}
}

// Strings will construct an array of strings.
// Each value can be a string or nil.
func Strings(k string, v ...interface{}) Column {
	c := stringColumn{
		column: column{k: k},
		v:      make([]string, len(v)),
	}
	for i, iv := range v {
		val, ok := mustStringValue(iv)
		if !ok {
			if c.valid == nil {
				c.valid = make([]bool, len(v))
				for i := range c.valid {
					c.valid[i] = true
				}
			}
			c.valid[i] = false
		}
		c.v[i] = val
	}
	return c
}

type stringColumn struct {
	column
	v []string
}

func (s stringColumn) Make(n int) array.Interface {
	b := array.NewBinaryBuilder(memory.DefaultAllocator, stdarrow.BinaryTypes.String)
	b.Resize(len(s.v))
	b.AppendStringValues(s.v, s.valid)
	return b.NewArray()
}

func (s stringColumn) Type() flux.ColType     { return flux.TString }
func (s stringColumn) Len() int               { return len(s.v) }
func (s stringColumn) KeyValue() values.Value { return values.InvalidValue }

func (s stringColumn) Build(template *[]Column) []flux.Table {
	*template = append(*template, s)
	return nil
}

func mustStringValue(v interface{}) (string, bool) {
	if v == nil {
		return "", false
	}

	switch v := v.(type) {
	case string:
		return v, true
	default:
		panic(fmt.Sprintf("unable to convert type %T to a string value", v))
	}
}

// Booleans will construct an array of booleans.
// Each value can be a bool or nil.
func Booleans(k string, v ...interface{}) Column {
	c := booleanColumn{
		column: column{k: k},
		v:      make([]bool, len(v)),
	}
	for i, iv := range v {
		val, ok := mustBooleanValue(iv)
		if !ok {
			if c.valid == nil {
				c.valid = make([]bool, len(v))
				for i := range c.valid {
					c.valid[i] = true
				}
			}
			c.valid[i] = false
		}
		c.v[i] = val
	}
	return c
}

type booleanColumn struct {
	column
	v []bool
}

func (s booleanColumn) Make(n int) array.Interface {
	b := array.NewBooleanBuilder(memory.DefaultAllocator)
	b.Resize(len(s.v))
	b.AppendValues(s.v, s.valid)
	return b.NewArray()
}

func (s booleanColumn) Type() flux.ColType     { return flux.TBool }
func (s booleanColumn) Len() int               { return len(s.v) }
func (s booleanColumn) KeyValue() values.Value { return values.InvalidValue }

func (s booleanColumn) Build(template *[]Column) []flux.Table {
	*template = append(*template, s)
	return nil
}

func mustBooleanValue(v interface{}) (bool, bool) {
	if v == nil {
		return false, false
	}

	switch v := v.(type) {
	case bool:
		return v, true
	default:
		panic(fmt.Sprintf("unable to convert type %T to a boolean value", v))
	}
}

// Times will construct an array of times with the given time using either a
// string or an integer. If an integer is used, then it is in seconds.
//
// If strings and integers are mixed, the integers will be treated as offsets
// from the last string time that was used.
func Times(k string, v ...interface{}) Column {
	var offset int64
	c := timeColumn{
		column: column{k: k},
		v:      make([]int64, len(v)),
	}
	for i, iv := range v {
		val, abs, ok := mustTimeValue(iv, offset, time.Second)
		if !ok {
			if c.valid == nil {
				c.valid = make([]bool, len(v))
				for i := range c.valid {
					c.valid[i] = true
				}
			}
			c.valid[i] = false
		}
		if abs {
			offset = val
		}
		c.v[i] = val
	}
	return c
}

type timeColumn struct {
	column
	v []int64
}

func (s timeColumn) Make(n int) array.Interface {
	b := array.NewInt64Builder(memory.DefaultAllocator)
	b.Resize(len(s.v))
	b.AppendValues(s.v, s.valid)
	return b.NewArray()
}

func (s timeColumn) Type() flux.ColType     { return flux.TTime }
func (s timeColumn) Len() int               { return len(s.v) }
func (s timeColumn) KeyValue() values.Value { return values.InvalidValue }

func (s timeColumn) Build(template *[]Column) []flux.Table {
	*template = append(*template, s)
	return nil
}

// mustTimeValue will convert the interface into a time value.
// This must either be an int-like value or a string that can be
// parsed as a time in RFC3339 format.
//
// This will panic otherwise.
func mustTimeValue(v interface{}, offset int64, unit time.Duration) (t int64, abs, ok bool) {
	if v == nil {
		return 0, false, false
	}

	switch v := v.(type) {
	case int:
		return offset + int64(v)*int64(unit), false, true
	case int64:
		return offset + v*int64(unit), false, true
	case string:
		t, err := time.Parse(time.RFC3339, v)
		if err != nil {
			if t, err = time.Parse(time.RFC3339Nano, v); err != nil {
				panic(err)
			}
		}
		return t.UnixNano(), true, true
	default:
		panic(fmt.Sprintf("unable to convert type %T to a time value", v))
	}
}

// TableBuilder is used to construct a set of Tables.
type TableBuilder interface {
	// Build will construct a set of tables using the
	// template as input.
	//
	// The template is a pointer as a builder is allowed
	// to modify the template. For implementors, the
	// template pointer must be non-nil.
	Build(template *[]Column) []flux.Table
}

// TableGroup will construct a group of Tables
// that have common values. It includes any TableBuilder
// values.
type TableGroup []TableBuilder

func (t TableGroup) Do(f func(flux.Table) error) error {
	// Use an empty template.
	var template []Column
	tables := t.Build(&template)
	return table.Iterator(tables).Do(f)
}

// Build will construct Tables using the given template.
func (t TableGroup) Build(template *[]Column) []flux.Table {
	// Copy over the template.
	gtemplate := make([]Column, len(*template))
	copy(gtemplate, *template)

	var tables []flux.Table
	for _, tb := range t {
		tables = append(tables, tb.Build(&gtemplate)...)
	}
	return tables
}

// TableList will produce a Table using the template and
// each of the table builders.
//
// Changes to the template are not shared between each of the
// entries. If the TableBuilder does not produce tables,
// this will force a single Table to be created.
type TableList []TableBuilder

func (t TableList) Build(template *[]Column) []flux.Table {
	var tables []flux.Table
	for _, tb := range t {
		// Copy over the group template for each of these.
		gtemplate := make([]Column, len(*template), len(*template)+1)
		copy(gtemplate, *template)

		if ntables := tb.Build(&gtemplate); len(ntables) > 0 {
			tables = append(tables, ntables...)
		} else {
			tables = append(tables, Table(gtemplate).Table())
		}
	}
	return tables
}

// StringKeys creates a TableList with the given key values.
func StringKeys(k string, v ...interface{}) TableList {
	list := make(TableList, len(v))
	for i := range v {
		list[i] = StringKey(k, v[i])
	}
	return list
}

// TableMatrix will produce a set of Tables by producing the
// cross product of each of the TableBuilders with each other.
type TableMatrix []TableList

func (t TableMatrix) Build(template *[]Column) []flux.Table {
	if len(t) == 0 {
		return nil
	} else if len(t) == 1 {
		return t[0].Build(template)
	}

	// Split the TableList into their own distinct TableGroups
	// so we can produce a cross product of groups.
	builders := make([]TableGroup, len(t[0]))
	for i, b := range t[0] {
		builders[i] = append(builders[i], b)
	}

	for i := 1; i < len(t); i++ {
		product := make([]TableGroup, 0, len(builders)*len(t[i]))
		for _, bs := range t[i] {
			a := make([]TableGroup, len(builders))
			copy(a, builders)
			for j := range a {
				a[j] = append(a[j], bs)
			}
			product = append(product, a...)
		}
		builders = product
	}

	var tables []flux.Table
	for _, b := range builders {
		tables = append(tables, b.Build(template)...)
	}
	return tables
}

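A usage sketch of the builders above (column names and values are illustrative): one table keyed by _measurement, with second-precision time offsets from an anchor string and a null in the float column:

	tbl := static.Table{
		static.StringKey("_measurement", "cpu"),
		static.Times("_time", "2020-01-01T00:00:00Z", 10, 20),
		static.Floats("_value", 1.0, 2.0, nil),
	}
	err := tbl.Do(func(t flux.Table) error {
		return nil // inspect the produced flux.Table here
	})
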
@@ -1,151 +0,0 @@
package table

import (
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/semantic"
	"github.com/influxdata/flux/values"
)

// Stringify will read a table and turn it into a human-readable string.
func Stringify(table flux.Table) string {
	var sb strings.Builder
	stringifyKey(&sb, table)
	if err := table.Do(func(cr flux.ColReader) error {
		stringifyRows(&sb, cr)
		return nil
	}); err != nil {
		_, _ = fmt.Fprintf(&sb, "table error: %s\n", err)
	}
	return sb.String()
}

func getSortedIndices(key flux.GroupKey, cols []flux.ColMeta) ([]flux.ColMeta, []int) {
	indices := make([]int, len(cols))
	for i := range indices {
		indices[i] = i
	}
	sort.Slice(indices, func(i, j int) bool {
		ci, cj := cols[indices[i]], cols[indices[j]]
		if key.HasCol(ci.Label) && !key.HasCol(cj.Label) {
			return true
		} else if !key.HasCol(ci.Label) && key.HasCol(cj.Label) {
			return false
		}
		return ci.Label < cj.Label
	})
	return cols, indices
}

func stringifyKey(sb *strings.Builder, table flux.Table) {
	key := table.Key()
	cols, indices := getSortedIndices(table.Key(), table.Cols())

	sb.WriteString("# ")
	if len(cols) == 0 {
		sb.WriteString("(none)")
	} else {
		nkeys := 0
		for _, idx := range indices {
			c := cols[idx]
			kidx := execute.ColIdx(c.Label, key.Cols())
			if kidx < 0 {
				continue
			}

			if nkeys > 0 {
				sb.WriteString(",")
			}
			sb.WriteString(cols[idx].Label)
			sb.WriteString("=")

			v := key.Value(kidx)
			stringifyValue(sb, v)
			nkeys++
		}
	}
	sb.WriteString(" ")

	ncols := 0
	for _, idx := range indices {
		c := cols[idx]
		if key.HasCol(c.Label) {
			continue
		}

		if ncols > 0 {
			sb.WriteString(",")
		}
		sb.WriteString(cols[idx].Label)
		sb.WriteString("=")
		sb.WriteString(cols[idx].Type.String())
		ncols++
	}
	sb.WriteString("\n")
}

func stringifyRows(sb *strings.Builder, cr flux.ColReader) {
	key := cr.Key()
	cols, indices := getSortedIndices(cr.Key(), cr.Cols())

	for i, sz := 0, cr.Len(); i < sz; i++ {
		inKey := true
		for j, idx := range indices {
			c := cols[idx]
			if j > 0 {
				if inKey && !key.HasCol(c.Label) {
					sb.WriteString(" ")
					inKey = false
				} else {
					sb.WriteString(",")
				}
			} else if !key.HasCol(c.Label) {
				inKey = false
			}
			sb.WriteString(cols[idx].Label)
			sb.WriteString("=")

			v := execute.ValueForRow(cr, i, idx)
			stringifyValue(sb, v)
		}
		sb.WriteString("\n")
	}
}

func stringifyValue(sb *strings.Builder, v values.Value) {
	if v.IsNull() {
		sb.WriteString("!(nil)")
		return
	}

	switch v.Type().Nature() {
	case semantic.Int:
		_, _ = fmt.Fprintf(sb, "%di", v.Int())
	case semantic.UInt:
		_, _ = fmt.Fprintf(sb, "%du", v.UInt())
	case semantic.Float:
		_, _ = fmt.Fprintf(sb, "%.3f", v.Float())
	case semantic.String:
		sb.WriteString(v.Str())
	case semantic.Bool:
		if v.Bool() {
			sb.WriteString("true")
		} else {
			sb.WriteString("false")
		}
	case semantic.Time:
		ts := v.Time().Time()
		if ts.Nanosecond() > 0 {
			sb.WriteString(ts.Format(time.RFC3339Nano))
		} else {
			sb.WriteString(ts.Format(time.RFC3339))
		}
	default:
		sb.WriteString("!(invalid)")
	}
}

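For orientation, the shape the functions above emit: a "# " header listing group-key values followed by the remaining columns' types, then one line per row with key columns first. A hedged example inferred from the writer calls above (exact spacing not verified against real output):

	# _measurement=cpu _time=time,_value=float
	_measurement=cpu _time=2020-01-01T00:00:00Z,_value=1.000
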
@@ -1,92 +0,0 @@
package errors

import (
	"fmt"
	"strings"

	"github.com/influxdata/flux/codes"
)

// Error is the error struct of flux.
type Error struct {
	// Code is the code of the error as defined in the codes package.
	// This describes the type and category of the error. It is required.
	Code codes.Code

	// Msg contains a human-readable description and additional information
	// about the error itself. This is optional.
	Msg string

	// Err contains the error that was the cause of this error.
	// This is optional.
	Err error
}

// Error implements the error interface by outputting the Code and Err.
func (e *Error) Error() string {
	if e.Msg != "" && e.Err != nil {
		var b strings.Builder
		b.WriteString(e.Msg)
		b.WriteString(": ")
		b.WriteString(e.Err.Error())
		return b.String()
	} else if e.Msg != "" {
		return e.Msg
	} else if e.Err != nil {
		return e.Err.Error()
	}
	return e.Code.String()
}

// Unwrap will return the wrapped error.
func (e *Error) Unwrap() error {
	return e.Err
}

func New(code codes.Code, msg ...interface{}) error {
	return Wrap(nil, code, msg...)
}

func Newf(code codes.Code, fmtStr string, args ...interface{}) error {
	return Wrapf(nil, code, fmtStr, args...)
}

func Wrap(err error, code codes.Code, msg ...interface{}) error {
	var s string
	if len(msg) > 0 {
		s = fmt.Sprint(msg...)
	}
	return &Error{
		Code: code,
		Msg:  s,
		Err:  err,
	}
}

func Wrapf(err error, code codes.Code, format string, a ...interface{}) error {
	return &Error{
		Code: code,
		Msg:  fmt.Sprintf(format, a...),
		Err:  err,
	}
}

// Code returns the error code for the given error.
// If the error is not a flux.Error, this will return
// Unknown for the code. If the error is a flux.Error
// and its code is Inherit, then this will return the
// wrapped error's code.
func Code(err error) codes.Code {
	for {
		if ferr, ok := err.(*Error); ok {
			if ferr.Code != codes.Inherit {
				return ferr.Code
			} else if ferr.Err == nil {
				return codes.Unknown
			}
			err = ferr.Err
		} else {
			return codes.Unknown
		}
	}
}

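The Wrap/Code pair is the whole contract here: wrap a failure with an error class, then recover that class later regardless of nesting depth. A minimal sketch (the wrapped error and chosen code are illustrative):

	// sketch: classify a wrapped failure and branch on its code.
	err := errors.Wrap(io.ErrUnexpectedEOF, codes.Invalid, "decoding table buffer")
	if errors.Code(err) == codes.Invalid {
		// reject the request rather than retrying
	}
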
@@ -1,87 +0,0 @@
package table

import (
	"sync/atomic"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/influxdb/v2/pkg/flux/internal/errors"
)

// BufferedTable represents a table of buffered column readers.
type BufferedTable struct {
	used  int32
	empty bool

	GroupKey flux.GroupKey
	Columns  []flux.ColMeta
	Buffers  []flux.ColReader
}

// FromBuffer constructs a flux.Table from a single flux.ColReader.
func FromBuffer(cr flux.ColReader) flux.Table {
	return &BufferedTable{
		GroupKey: cr.Key(),
		Columns:  cr.Cols(),
		Buffers:  []flux.ColReader{cr},
	}
}

func (b *BufferedTable) Key() flux.GroupKey {
	return b.GroupKey
}

func (b *BufferedTable) Cols() []flux.ColMeta {
	return b.Columns
}

func (b *BufferedTable) Do(f func(flux.ColReader) error) error {
	if !atomic.CompareAndSwapInt32(&b.used, 0, 1) {
		return errors.New(codes.Internal, "table already read")
	}

	i := 0
	defer func() {
		for ; i < len(b.Buffers); i++ {
			b.Buffers[i].Release()
		}
	}()

	b.empty = true
	for ; i < len(b.Buffers); i++ {
		cr := b.Buffers[i]
		if cr.Len() > 0 {
			b.empty = false
		}
		if err := f(cr); err != nil {
			return err
		}
		cr.Release()
	}
	return nil
}

func (b *BufferedTable) Done() {
	if atomic.CompareAndSwapInt32(&b.used, 0, 1) {
		b.empty = b.isEmpty()
		for _, buf := range b.Buffers {
			buf.Release()
		}
		b.Buffers = nil
	}
}

func (b *BufferedTable) Empty() bool {
	if atomic.LoadInt32(&b.used) != 0 {
		return b.empty
	}
	return b.isEmpty()
}

func (b *BufferedTable) isEmpty() bool {
	for _, buf := range b.Buffers {
		if buf.Len() > 0 {
			return false
		}
	}
	return true
}

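FromBuffer adapts a single column reader into a one-shot table; the atomic guard makes a second read an explicit error rather than silent empty output. A sketch (cr assumed to be a live flux.ColReader):

	tbl := table.FromBuffer(cr)
	if err := tbl.Do(func(cr flux.ColReader) error { return nil }); err != nil {
		return err
	}
	// a second tbl.Do(...) would fail with "table already read"
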
@@ -1,5 +0,0 @@
package table

import "github.com/influxdata/influxdb/v2/pkg/flux/execute/table"

type Iterator = table.Iterator

@@ -17,10 +17,9 @@ import (
	"time"

	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/ast/edit"
	"github.com/influxdata/flux/parser"
	"github.com/influxdata/influxdb/v2"
	ast2 "github.com/influxdata/influxdb/v2/pkg/flux/ast"
	"github.com/influxdata/influxdb/v2/pkg/flux/ast/edit"
	"github.com/influxdata/influxdb/v2/pkg/jsonnet"
	"gopkg.in/yaml.v3"
)

@@ -1728,16 +1727,16 @@ func valFromExpr(p ast.Expression) interface{} {
		}
		return nil
	case *ast.DateTimeLiteral:
		return ast2.DateTimeFromLiteral(literal)
		return ast.DateTimeFromLiteral(literal)
	case *ast.FloatLiteral:
		return ast2.FloatFromLiteral(literal)
		return ast.FloatFromLiteral(literal)
	case *ast.IntegerLiteral:
		return ast2.IntegerFromLiteral(literal)
		return ast.IntegerFromLiteral(literal)
	case *ast.DurationLiteral:
		dur, _ := ast.DurationFrom(literal, time.Time{})
		return dur
	case *ast.StringLiteral:
		return ast2.StringFromLiteral(literal)
		return ast.StringFromLiteral(literal)
	case *ast.UnaryExpression:
		// a signed duration is represented by a UnaryExpression.
		// it is the only unary expression allowed.

@@ -10,14 +10,13 @@ import (
	"time"

	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/ast/edit"
	"github.com/influxdata/flux/parser"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/notification"
	icheck "github.com/influxdata/influxdb/v2/notification/check"
	"github.com/influxdata/influxdb/v2/notification/endpoint"
	"github.com/influxdata/influxdb/v2/notification/rule"
	ast2 "github.com/influxdata/influxdb/v2/pkg/flux/ast"
	"github.com/influxdata/influxdb/v2/pkg/flux/ast/edit"
)

type identity struct {

@@ -2314,7 +2313,7 @@ func convertRefToRefSummary(field string, ref *references) SummaryReference {

func astBoolFromIface(v interface{}) *ast.BooleanLiteral {
	b, _ := v.(bool)
	return ast2.BooleanLiteralFromValue(b)
	return ast.BooleanLiteralFromValue(b)
}

func astDurationFromIface(v interface{}) *ast.DurationLiteral {

@@ -2332,18 +2331,18 @@ func astDurationFromIface(v interface{}) *ast.DurationLiteral {

func astFloatFromIface(v interface{}) *ast.FloatLiteral {
	if i, ok := v.(int); ok {
		return ast2.FloatLiteralFromValue(float64(i))
		return ast.FloatLiteralFromValue(float64(i))
	}
	f, _ := v.(float64)
	return ast2.FloatLiteralFromValue(f)
	return ast.FloatLiteralFromValue(f)
}

func astIntegerFromIface(v interface{}) *ast.IntegerLiteral {
	if f, ok := v.(float64); ok {
		return ast2.IntegerLiteralFromValue(int64(f))
		return ast.IntegerLiteralFromValue(int64(f))
	}
	i, _ := v.(int64)
	return ast2.IntegerLiteralFromValue(i)
	return ast.IntegerLiteralFromValue(i)
}

func astNow() *ast.CallExpression {

@@ -2354,12 +2353,12 @@ func astNow() *ast.CallExpression {

func astStringFromIface(v interface{}) *ast.StringLiteral {
	s, _ := v.(string)
	return ast2.StringLiteralFromValue(s)
	return ast.StringLiteralFromValue(s)
}

func astTimeFromIface(v interface{}) *ast.DateTimeLiteral {
	if t, ok := v.(time.Time); ok {
		return ast2.DateTimeLiteralFromValue(t)
		return ast.DateTimeLiteralFromValue(t)
	}

	s, ok := v.(string)

@@ -149,6 +149,15 @@ func (b ProxyQueryServiceAsyncBridge) Query(ctx context.Context, w io.Writer, re
	if err != nil {
		return stats, tracing.LogError(span, err)
	}

	if results, err := q.ProfilerResults(); err != nil {
		return stats, tracing.LogError(span, err)
	} else if results != nil {
		_, err = encoder.Encode(w, results)
		if err != nil {
			return stats, tracing.LogError(span, err)
		}
	}
	return stats, nil
}

@@ -10,6 +10,7 @@ import (
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/csv"
	"github.com/influxdata/flux/execute/executetest"
	"github.com/influxdata/flux/metadata"
	"github.com/influxdata/influxdb/v2/query"
	"github.com/influxdata/influxdb/v2/query/mock"
)

@@ -26,7 +27,7 @@ func (w failWriter) Write(p []byte) (int, error) {

func TestProxyQueryServiceAsyncBridge_StatsOnClientDisconnect(t *testing.T) {
	q := mock.NewQuery()
	q.Metadata = flux.Metadata{
	q.Metadata = metadata.Metadata{
		"foo": []interface{}{"bar"},
	}
	r := executetest.NewResult([]*executetest.Table{

@@ -4,12 +4,12 @@
package builtin

import (
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/runtime"

	_ "github.com/influxdata/flux/stdlib"              // Import the stdlib
	_ "github.com/influxdata/influxdb/v2/query/stdlib" // Import the stdlib
)

func init() {
	flux.FinalizeBuiltIns()
	runtime.FinalizeBuiltIns()
}

@@ -1,20 +0,0 @@
package builtinlazy

import (
	"sync"

	"github.com/influxdata/flux"
	_ "github.com/influxdata/flux/stdlib"              // Import the stdlib
	_ "github.com/influxdata/influxdb/v2/query/stdlib" // Import the stdlib
)

var once sync.Once

// Initialize ensures all Flux builtins are configured and should be called
// prior to using the Flux runtime. Initialize is safe to call concurrently
// and is idempotent.
func Initialize() {
	once.Do(func() {
		flux.FinalizeBuiltIns()
	})
}

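With the lazy initializer deleted, finalization now happens in the builtin package's init (see the hunk above), so callers only need a blank import. A sketch:

	import (
		// side effect: runtime.FinalizeBuiltIns() runs once at program start
		_ "github.com/influxdata/influxdb/v2/query/builtin"
	)
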
@@ -26,10 +26,13 @@ import (

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/flux/execute/table"
	"github.com/influxdata/flux/lang"
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/runtime"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/errors"
	"github.com/influxdata/influxdb/v2/kit/feature"
	"github.com/influxdata/influxdb/v2/kit/prom"
	"github.com/influxdata/influxdb/v2/kit/tracing"
	influxlogger "github.com/influxdata/influxdb/v2/logger"

@@ -206,6 +209,10 @@ func (c *Controller) Query(ctx context.Context, req *query.Request) (flux.Query,
	for _, dep := range c.dependencies {
		ctx = dep.Inject(ctx)
	}
	// Add per-transformation spans if the feature flag is set.
	if feature.QueryTracing().Enabled(ctx) {
		ctx = flux.WithExperimentalTracingEnabled(ctx)
	}
	q, err := c.query(ctx, req.Compiler)
	if err != nil {
		return q, err

@@ -338,7 +345,7 @@ func (c *Controller) compileQuery(q *Query, compiler flux.Compiler) (err error)
		}
	}

	prog, err := compiler.Compile(ctx)
	prog, err := compiler.Compile(ctx, runtime.Default)
	if err != nil {
		return &flux.Error{
			Msg: "compilation failed",

@@ -547,6 +554,23 @@ type Query struct {
	alloc *memory.Allocator
}

func (q *Query) ProfilerResults() (flux.ResultIterator, error) {
	p := q.program.(*lang.AstProgram)
	if len(p.Profilers) == 0 {
		return nil, nil
	}
	tables := make([]flux.Table, 0)
	for _, profiler := range p.Profilers {
		if result, err := profiler.GetResult(q, q.alloc); err != nil {
			return nil, err
		} else {
			tables = append(tables, result)
		}
	}
	res := table.NewProfilerResult(tables...)
	return flux.NewSliceResultIterator([]flux.Result{&res}), nil
}

// ID reports an ephemeral unique ID for the query.
func (q *Query) ID() QueryID {
	return q.id

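ProfilerResults returns nil when no profilers were attached, so callers can check it unconditionally after the main results are drained, as the bridge hunk earlier does. A sketch of consuming the iterator directly (encoder-free, purely illustrative):

	if results, err := q.ProfilerResults(); err != nil {
		return err
	} else if results != nil {
		defer results.Release()
		for results.More() {
			_ = results.Next() // one flux.Result wrapping the profiler tables
		}
	}
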
@@ -20,10 +20,14 @@ import (
	"github.com/influxdata/flux/plan"
	"github.com/influxdata/flux/plan/plantest"
	"github.com/influxdata/flux/stdlib/universe"
	"github.com/influxdata/influxdb/v2/kit/feature"
	pmock "github.com/influxdata/influxdb/v2/mock"
	"github.com/influxdata/influxdb/v2/query"
	_ "github.com/influxdata/influxdb/v2/query/builtin"
	"github.com/influxdata/influxdb/v2/query/control"
	"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/mocktracer"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"go.uber.org/zap/zaptest"

@@ -1289,6 +1293,102 @@ func TestController_ReserveMemoryWithoutExceedingMax(t *testing.T) {
	validateUnusedMemory(t, reg, config)
}

func TestController_QueryTracing(t *testing.T) {
	// temporarily install a mock tracer to see which spans are created.
	oldTracer := opentracing.GlobalTracer()
	defer opentracing.SetGlobalTracer(oldTracer)
	mockTracer := mocktracer.New()
	opentracing.SetGlobalTracer(mockTracer)

	const memoryBytesQuotaPerQuery = 64
	config := config
	config.MemoryBytesQuotaPerQuery = memoryBytesQuotaPerQuery
	ctrl, err := control.New(config)
	if err != nil {
		t.Fatal(err)
	}
	defer shutdown(t, ctrl)

	flagger := pmock.NewFlagger(map[feature.Flag]interface{}{
		feature.QueryTracing(): true,
	})
	plainCtx := context.Background()
	withFlagger, err := feature.Annotate(plainCtx, flagger)
	if err != nil {
		t.Fatal(err)
	}
	tcs := []struct {
		name          string
		ctx           context.Context
		doNotWantSpan string
		wantSpan      string
	}{
		{
			name:          "feature flag off",
			ctx:           plainCtx,
			doNotWantSpan: "*executetest.AllocatingFromProcedureSpec",
		},
		{
			name:     "feature flag on",
			ctx:      withFlagger,
			wantSpan: "*executetest.AllocatingFromProcedureSpec",
		},
	}
	for _, tc := range tcs {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			mockTracer.Reset()

			compiler := &mock.Compiler{
				CompileFn: func(ctx context.Context) (flux.Program, error) {
					// Return a program that allocates some memory so the
					// transformation has work for a span to record.
					pts := plantest.PlanSpec{
						Nodes: []plan.Node{
							plan.CreatePhysicalNode("allocating-from-test", &executetest.AllocatingFromProcedureSpec{
								ByteCount: 16,
							}),
							plan.CreatePhysicalNode("yield", &universe.YieldProcedureSpec{Name: "_result"}),
						},
						Edges: [][2]int{
							{0, 1},
						},
						Resources: flux.ResourceManagement{
							ConcurrencyQuota: 1,
						},
					}

					ps := plantest.CreatePlanSpec(&pts)
					prog := &lang.Program{
						Logger:   zaptest.NewLogger(t),
						PlanSpec: ps,
					}

					return prog, nil
				},
			}

			// Depending on how the feature flag is set in the context,
			// we may or may not do query tracing here.
			q, err := ctrl.Query(tc.ctx, makeRequest(compiler))
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}

			consumeResults(t, q)
			gotSpans := make(map[string]struct{})
			for _, span := range mockTracer.FinishedSpans() {
				gotSpans[span.OperationName] = struct{}{}
			}
			if _, found := gotSpans[tc.doNotWantSpan]; tc.doNotWantSpan != "" && found {
				t.Fatalf("did not want to find span %q but it was there", tc.doNotWantSpan)
			}
			if _, found := gotSpans[tc.wantSpan]; tc.wantSpan != "" && !found {
				t.Fatalf("wanted to find span %q but it was not there", tc.wantSpan)
			}
		})
	}
}

func consumeResults(tb testing.TB, q flux.Query) {
	tb.Helper()
	for res := range q.Results() {

@@ -55,7 +55,6 @@ func (e *NoContentEncoder) Encode(w io.Writer, results flux.ResultIterator) (int
for results.More() {
if err := results.Next().Tables().Do(func(tbl flux.Table) error {
return tbl.Do(func(cr flux.ColReader) error {
cr.Release()
return nil
})
}); err != nil {
@@ -114,7 +113,6 @@ func (e *NoContentWithErrorEncoder) Encode(w io.Writer, results flux.ResultItera
for results.More() {
if err := results.Next().Tables().Do(func(tbl flux.Table) error {
return tbl.Do(func(cr flux.ColReader) error {
cr.Release()
return nil
})
}); err != nil {
@@ -4,11 +4,11 @@ package fluxlang
import (
"context"

"github.com/influxdata/flux"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/complete"
"github.com/influxdata/flux/interpreter"
"github.com/influxdata/flux/parser"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/flux/values"
"github.com/influxdata/influxdb/v2"
)

@@ -27,9 +27,9 @@ func (d defaultService) Parse(source string) (pkg *ast.Package, err error) {
}

func (d defaultService) EvalAST(ctx context.Context, astPkg *ast.Package) ([]interpreter.SideEffect, values.Scope, error) {
return flux.EvalAST(ctx, astPkg)
return runtime.EvalAST(ctx, astPkg)
}

func (d defaultService) Completer() complete.Completer {
return complete.NewCompleter(flux.Prelude())
return complete.NewCompleter(runtime.Prelude())
}
@@ -2,6 +2,7 @@ package influxql

import (
"context"
"encoding/json"
"time"

"github.com/influxdata/flux"

@@ -42,7 +43,7 @@ func NewCompiler(dbrpMappingSvc platform.DBRPMappingServiceV2) *Compiler {
}

// Compile transpiles the query into a Program.
func (c *Compiler) Compile(ctx context.Context) (flux.Program, error) {
func (c *Compiler) Compile(ctx context.Context, runtime flux.Runtime) (flux.Program, error) {
var now time.Time
if c.Now != nil {
now = *c.Now

@@ -64,7 +65,15 @@ func (c *Compiler) Compile(ctx context.Context) (flux.Program, error) {
return nil, err
}
compileOptions := lang.WithLogPlanOpts(c.logicalPlannerOptions...)
return lang.CompileAST(astPkg, now, compileOptions), nil
bs, err := json.Marshal(astPkg)
if err != nil {
return nil, err
}
hdl, err := runtime.JSONToHandle(bs)
if err != nil {
return nil, err
}
return lang.CompileAST(hdl, runtime, now, compileOptions), nil
}

func (c *Compiler) CompilerType() flux.CompilerType {
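Call sites now have to hand the compiler a flux.Runtime. A minimal sketch, assuming runtime.Default from github.com/influxdata/flux/runtime is the runtime in use (as elsewhere in this upgrade); the helper and its dbrpSvc parameter are hypothetical:

// compileInfluxQL is a hypothetical call site showing the new signature.
func compileInfluxQL(ctx context.Context, dbrpSvc platform.DBRPMappingServiceV2) (flux.Program, error) {
	c := influxql.NewCompiler(dbrpSvc)
	// The runtime supplies the stdlib types and the JSON-to-handle parser
	// that Compile now round-trips the transpiled AST through.
	return c.Compile(ctx, runtime.Default)
}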
@@ -131,6 +131,8 @@ var skipTests = map[string]string{
"SelectorMath_29": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_30": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_31": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"ands": "algo-w: https://github.com/influxdata/influxdb/issues/16811",
"ors": "algo-w: https://github.com/influxdata/influxdb/issues/16811",
}

var querier = fluxquerytest.NewQuerier()
@@ -21,15 +21,45 @@ type LoggingProxyQueryService struct {
queryLogger Logger
nowFunction func() time.Time
log *zap.Logger
cond func(ctx context.Context) bool

// If this is set then logging happens only if this key is present in the
// metadata.
requireMetadataKey string
}

func NewLoggingProxyQueryService(log *zap.Logger, queryLogger Logger, proxyQueryService ProxyQueryService) *LoggingProxyQueryService {
return &LoggingProxyQueryService{
// LoggingProxyQueryServiceOption provides a way to modify the
// behavior of LoggingProxyQueryService.
type LoggingProxyQueryServiceOption func(lpqs *LoggingProxyQueryService)

// ConditionalLogging returns a LoggingProxyQueryServiceOption
// that only logs if the passed in function returns true.
// Thus logging can be controlled by a request-scoped attribute, e.g., a feature flag.
func ConditionalLogging(cond func(context.Context) bool) LoggingProxyQueryServiceOption {
return func(lpqs *LoggingProxyQueryService) {
lpqs.cond = cond
}
}

func RequireMetadataKey(metadataKey string) LoggingProxyQueryServiceOption {
return func(lpqs *LoggingProxyQueryService) {
lpqs.requireMetadataKey = metadataKey
}
}

func NewLoggingProxyQueryService(log *zap.Logger, queryLogger Logger, proxyQueryService ProxyQueryService, opts ...LoggingProxyQueryServiceOption) *LoggingProxyQueryService {
lpqs := &LoggingProxyQueryService{
proxyQueryService: proxyQueryService,
queryLogger: queryLogger,
nowFunction: time.Now,
log: log,
}

for _, o := range opts {
o(lpqs)
}

return lpqs
}

func (s *LoggingProxyQueryService) SetNowFunctionForTesting(nowFunction func() time.Time) {
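A hedged usage sketch of the new functional options; queryLogger and proxySvc stand in for real dependencies, and the context key is hypothetical:

// Hypothetical wiring: log only flagged requests whose statistics report
// the "some-mock-metadata" key (the key used by the tests below).
lpqs := query.NewLoggingProxyQueryService(
	zap.NewNop(),
	queryLogger,
	proxySvc,
	query.ConditionalLogging(func(ctx context.Context) bool {
		return ctx.Value(ctxKeyLogQueries) != nil // request-scoped switch
	}),
	query.RequireMetadataKey("some-mock-metadata"),
)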
@@ -38,6 +68,12 @@ func (s *LoggingProxyQueryService) SetNowFunctionForTesting(nowFunction func() t

// Query executes and logs the query.
func (s *LoggingProxyQueryService) Query(ctx context.Context, w io.Writer, req *ProxyRequest) (stats flux.Statistics, err error) {
if s.cond != nil && !s.cond(ctx) {
// Logging is conditional, and we are not logging this request.
// Just invoke the wrapped service directly.
return s.proxyQueryService.Query(ctx, w, req)
}

span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()
@@ -50,6 +86,14 @@ func (s *LoggingProxyQueryService) Query(ctx context.Context, w io.Writer, req *
entry.Write(zap.Error(err))
}
}

// Enforce requireMetadataKey, if set.
if s.requireMetadataKey != "" {
if _, ok := stats.Metadata[s.requireMetadataKey]; !ok {
return
}
}

traceID, sampled, _ := tracing.InfoFromContext(ctx)
log := Log{
OrganizationID: req.Request.OrganizationID,
@@ -4,12 +4,14 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"testing"
"time"

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/flux"
"github.com/influxdata/flux/metadata"
platform "github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/mock"

@@ -34,6 +36,10 @@ var opts = []cmp.Option{
cmpopts.IgnoreUnexported(query.Request{}),
}

type contextKey string

const loggingCtxKey contextKey = "do-logging"

func TestLoggingProxyQueryService(t *testing.T) {
// Set a Jaeger in-memory tracer to get span information in the query log.
oldTracer := opentracing.GlobalTracer()
@@ -53,7 +59,9 @@ func TestLoggingProxyQueryService(t *testing.T) {
ExecuteDuration: time.Second,
Concurrency: 2,
MaxAllocated: 2048,
Metadata: make(metadata.Metadata),
}
wantStats.Metadata.Add("some-mock-metadata", 42)
wantBytes := 10
pqs := &mock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {
@@ -69,13 +77,6 @@ func TestLoggingProxyQueryService(t *testing.T) {
},
}

wantTime := time.Now()
lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs)
lpqs.SetNowFunctionForTesting(func() time.Time {
return wantTime
})

var buf bytes.Buffer
req := &query.ProxyRequest{
Request: query.Request{
Authorization: nil,
@@ -84,25 +85,98 @@ func TestLoggingProxyQueryService(t *testing.T) {
},
Dialect: nil,
}
stats, err := lpqs.Query(context.Background(), &buf, req)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(wantStats, stats, opts...) {
t.Errorf("unexpected query stats: -want/+got\n%s", cmp.Diff(wantStats, stats, opts...))
}
traceID := reporter.GetSpans()[0].Context().(jaeger.SpanContext).TraceID().String()
wantLogs := []query.Log{{
Time: wantTime,
OrganizationID: orgID,
TraceID: traceID,
Sampled: true,
Error: nil,
ProxyRequest: req,
ResponseSize: int64(wantBytes),
Statistics: wantStats,
}}
if !cmp.Equal(wantLogs, logs, opts...) {
t.Errorf("unexpected query logs: -want/+got\n%s", cmp.Diff(wantLogs, logs, opts...))
}

t.Run("log", func(t *testing.T) {
defer func() {
logs = nil
}()
wantTime := time.Now()
lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs)
lpqs.SetNowFunctionForTesting(func() time.Time {
return wantTime
})

var buf bytes.Buffer
stats, err := lpqs.Query(context.Background(), &buf, req)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(wantStats, stats, opts...) {
t.Errorf("unexpected query stats: -want/+got\n%s", cmp.Diff(wantStats, stats, opts...))
}
traceID := reporter.GetSpans()[0].Context().(jaeger.SpanContext).TraceID().String()
wantLogs := []query.Log{{
Time: wantTime,
OrganizationID: orgID,
TraceID: traceID,
Sampled: true,
Error: nil,
ProxyRequest: req,
ResponseSize: int64(wantBytes),
Statistics: wantStats,
}}
if !cmp.Equal(wantLogs, logs, opts...) {
t.Errorf("unexpected query logs: -want/+got\n%s", cmp.Diff(wantLogs, logs, opts...))
}
})

t.Run("conditional logging", func(t *testing.T) {
defer func() {
logs = nil
}()

condLog := query.ConditionalLogging(func(ctx context.Context) bool {
return ctx.Value(loggingCtxKey) != nil
})

lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, condLog)
_, err := lpqs.Query(context.Background(), ioutil.Discard, req)
if err != nil {
t.Fatal(err)
}

if len(logs) != 0 {
t.Fatal("expected query service not to log")
}

ctx := context.WithValue(context.Background(), loggingCtxKey, true)
_, err = lpqs.Query(ctx, ioutil.Discard, req)
if err != nil {
t.Fatal(err)
}

if len(logs) != 1 {
t.Fatal("expected query service to log")
}
})

t.Run("require metadata key", func(t *testing.T) {
defer func() {
logs = nil
}()

reqMeta1 := query.RequireMetadataKey("this-metadata-wont-be-found")
lpqs1 := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, reqMeta1)

_, err := lpqs1.Query(context.Background(), ioutil.Discard, req)
if err != nil {
t.Fatal(err)
}

if len(logs) != 0 {
t.Fatal("expected query service not to log")
}

reqMeta2 := query.RequireMetadataKey("some-mock-metadata")
lpqs2 := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, reqMeta2)

_, err = lpqs2.Query(context.Background(), ioutil.Discard, req)
if err != nil {
t.Fatal(err)
}

if len(logs) != 1 {
t.Fatal("expected query service to log")
}
})
}
@@ -6,6 +6,7 @@ import (
"sync"

"github.com/influxdata/flux"
"github.com/influxdata/flux/metadata"
"github.com/influxdata/influxdb/v2/kit/check"
"github.com/influxdata/influxdb/v2/query"
)

@@ -52,7 +53,7 @@ func (s *AsyncQueryService) Query(ctx context.Context, req *query.Request) (flux
// It contains controls to ensure that the flux.Query object is used correctly.
// Note: Query will only return one result, specified by calling the SetResults method.
type Query struct {
Metadata flux.Metadata
Metadata metadata.Metadata

results chan flux.Result
once sync.Once

@@ -66,7 +67,7 @@ var _ flux.Query = (*Query)(nil)
// NewQuery constructs a new asynchronous query.
func NewQuery() *Query {
return &Query{
Metadata: make(flux.Metadata),
Metadata: make(metadata.Metadata),
results: make(chan flux.Result, 1),
}
}
@@ -19,7 +19,7 @@ import (
"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/query"
itsdb "github.com/influxdata/influxdb/v2/v1/tsdb"
itsdb "github.com/influxdata/influxdb/v2/tsdb"
ipromql "github.com/influxdata/promql/v2"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
@@ -1,43 +1,28 @@
module github.com/influxdata/promqltests

go 1.13
go 1.12

require (
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
github.com/aws/aws-sdk-go v1.29.18 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/fatih/color v1.9.0 // indirect
github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a // indirect
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect
github.com/go-kit/kit v0.10.0 // indirect
github.com/gogo/protobuf v1.3.1 // indirect
github.com/google/go-cmp v0.4.0
github.com/google/uuid v1.1.1 // indirect
github.com/google/go-cmp v0.5.0
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/influxdata/flux v0.66.1
github.com/influxdata/flux v0.83.1
github.com/influxdata/influxdb/v2 v2.0.0-00010101000000-000000000000
github.com/influxdata/influxql v1.0.1 // indirect
github.com/influxdata/promql/v2 v2.12.0
github.com/kr/pretty v0.2.0 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/onsi/ginkgo v1.10.1 // indirect
github.com/onsi/gomega v1.7.0 // indirect
github.com/prometheus/client_golang v1.5.1 // indirect
github.com/prometheus/common v0.9.1
github.com/prometheus/prometheus v2.5.0+incompatible
github.com/prometheus/tsdb v0.10.0
github.com/spf13/afero v1.2.2 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/willf/bitset v1.1.10 // indirect
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 // indirect
golang.org/x/tools v0.0.0-20200305205014-bc073721adb6 // indirect
google.golang.org/api v0.20.0 // indirect
google.golang.org/genproto v0.0.0-20200305110556-506484158171 // indirect
google.golang.org/grpc v1.27.1 // indirect
gopkg.in/yaml.v2 v2.2.8 // indirect
)

replace github.com/influxdata/influxdb/v2 => ../../../../
@@ -1,8 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w=
cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=

@@ -113,6 +111,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5 h1:kS0dw4K730x7cxT+bVyTyYJZHuSoH7ofSr/Ijit56Qw=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5/go.mod h1:CDReaxg1cmLrtcasZy43l4EYPAknXLiQSrb7tLw5zXM=
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE=

@@ -218,6 +218,7 @@ github.com/go-chi/chi v4.1.0+incompatible h1:ETj3cggsVIY2Xao5ExCu6YhEh5MD6JTfcBz
github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=

@@ -235,6 +236,8 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=

@@ -389,8 +392,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6 h1:OtjKkeWDjUbyMi82C7XXy7Tvm2LXMwiBBXyFIGNPaGA=
github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og=
github.com/influxdata/flux v0.82.2 h1:VtoF8pbyoS+3QLQQmihSmV0Ly6g/A73x+3VBUp9t15g=
github.com/influxdata/flux v0.82.2/go.mod h1:sAAIEgQTlTpsXCUQ49ymoRsKqraPzIb7F3paT72/lE0=
github.com/influxdata/flux v0.83.1 h1:KdJ19S2bj0jZvhICdS8d54BHYCJNuq9h3A/HkIKOD6o=
github.com/influxdata/flux v0.83.1/go.mod h1:+6FzHdZdwYjEIa2iuQEJ92x+C2A8X1jI0qdpVT0DJfM=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=

@@ -729,6 +732,8 @@ github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS
github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.0.0 h1:J0TkWtiuYgtdlrkkrDLISYBQ92M+X5m4LrIIMKrbDTs=
github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=

@@ -736,6 +741,7 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3Ifn
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=

@@ -792,8 +798,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=

@@ -806,6 +812,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

@@ -826,8 +834,11 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -854,8 +865,8 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=

@@ -872,6 +883,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -899,6 +912,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@@ -909,8 +923,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -962,8 +976,8 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200305205014-bc073721adb6 h1:V/kH9fbTtfqZLJU7djyPh+n4yWxBZVU6H5npu6UeY54=
golang.org/x/tools v0.0.0-20200305205014-bc073721adb6/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a h1:kVMPw4f6EVqYdfGQTedjrpw1dbE2PEMfw4jwXsNdn9s=
golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -1000,8 +1014,6 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU=
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -364,8 +364,10 @@ func TestBuild(t *testing.T) {
want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{Bucket: "prometheus"},
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{
Bucket: influxdb.NameOrID{Name: "prometheus"},
},
},
{
ID: "where",

@@ -373,51 +375,55 @@ func TestBuild(t *testing.T) {
Fn: interpreter.ResolvedFunction{
Scope: nil,
Fn: &semantic.FunctionExpression{
Block: &semantic.FunctionBlock{
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Body: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Block: &semantic.Block{
Body: []semantic.Statement{
&semantic.ReturnStatement{
Argument: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "mode",
},
Right: &semantic.StringLiteral{
Value: "user",
},
},
Property: "mode",
},
Right: &semantic.StringLiteral{
Value: "user",
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "cpu",
},
Right: &semantic.StringLiteral{
Value: "cpu2",
},
},
},
},
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "cpu",
},
Right: &semantic.StringLiteral{
Value: "cpu2",
},
},
},
},
},
@@ -446,8 +452,10 @@ func TestBuild(t *testing.T) {
want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{Bucket: "prometheus"},
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{
Bucket: influxdb.NameOrID{Name: "prometheus"},
},
},
{
ID: flux.OperationID("range"),

@@ -461,34 +469,38 @@ func TestBuild(t *testing.T) {
Fn: interpreter.ResolvedFunction{
Scope: nil,
Fn: &semantic.FunctionExpression{
Block: &semantic.FunctionBlock{
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Body: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Block: &semantic.Block{
Body: []semantic.Statement{
&semantic.ReturnStatement{
Argument: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "mode",
},
Right: &semantic.StringLiteral{
Value: "user",
},
},
Property: "mode",
},
Right: &semantic.StringLiteral{
Value: "user",
},
},
},
@@ -517,8 +529,10 @@ func TestBuild(t *testing.T) {
want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{Bucket: "prometheus"},
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{
Bucket: influxdb.NameOrID{Name: "prometheus"},
},
},
{
ID: flux.OperationID("range"),

@@ -532,34 +546,38 @@ func TestBuild(t *testing.T) {
Fn: interpreter.ResolvedFunction{
Scope: nil,
Fn: &semantic.FunctionExpression{
Block: &semantic.FunctionBlock{
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Body: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Block: &semantic.Block{
Body: []semantic.Statement{
&semantic.ReturnStatement{
Argument: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "_measurement",
},
Right: &semantic.StringLiteral{
Value: "m0",
},
},
Property: "_measurement",
},
Right: &semantic.StringLiteral{
Value: "m0",
},
},
},
@@ -148,7 +148,7 @@ func (s *Selector) QuerySpec() (*flux.Spec, error) {
{
ID: "from", // TODO: Change this to a UUID
Spec: &influxdb.FromOpSpec{
Bucket: "prometheus",
Bucket: influxdb.NameOrID{Name: "prometheus"},
},
},
}

@@ -260,11 +260,15 @@ func NewWhereOperation(metricName string, labels []*LabelMatcher) (*flux.Operati
Fn: interpreter.ResolvedFunction{
Scope: nil,
Fn: &semantic.FunctionExpression{
Block: &semantic.FunctionBlock{
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Block: &semantic.Block{
Body: []semantic.Statement{
&semantic.ReturnStatement{
Argument: node,
},
},
Body: node,
},
},
},
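The same mechanical rewrite repeats through these fixtures; a condensed before/after sketch of the semantic-graph shape, where params and expr are placeholders and each shape compiles only against its own Flux version:

// Flux v0.66: the body is a bare expression hung off FunctionBlock.
var before = &semantic.FunctionExpression{
	Block: &semantic.FunctionBlock{
		Parameters: params,
		Body:       expr,
	},
}

// Flux v0.83: Parameters moves onto the expression itself and the body
// becomes a statement block ending in an explicit return.
var after = &semantic.FunctionExpression{
	Parameters: params,
	Block: &semantic.Block{
		Body: []semantic.Statement{
			&semantic.ReturnStatement{Argument: expr},
		},
	},
}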
@@ -1,62 +0,0 @@
package querytest

import (
"fmt"
"testing"

"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux"
platform "github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/query"
)

type BucketsAccessedTestCase struct {
Name string
Raw string
WantErr bool
WantReadBuckets *[]platform.BucketFilter
WantWriteBuckets *[]platform.BucketFilter
}

func BucketsAccessedTestHelper(t *testing.T, tc BucketsAccessedTestCase) {
t.Helper()

ast, err := flux.Parse(tc.Raw)
if err != nil {
t.Fatalf("could not parse flux: %v", err)
}

var gotReadBuckets, gotWriteBuckets []platform.BucketFilter
if tc.WantReadBuckets != nil || tc.WantWriteBuckets != nil {
gotReadBuckets, gotWriteBuckets, err = query.BucketsAccessed(ast, nil)
if err != nil {
t.Fatal(err)
}
}

if tc.WantReadBuckets != nil {
if diagnostic := verifyBuckets(*tc.WantReadBuckets, gotReadBuckets); diagnostic != "" {
t.Errorf("Could not verify read buckets: %v", diagnostic)
}
}

if tc.WantWriteBuckets != nil {
if diagnostic := verifyBuckets(*tc.WantWriteBuckets, gotWriteBuckets); diagnostic != "" {
t.Errorf("Could not verify write buckets: %v", diagnostic)
}
}
}

func verifyBuckets(wantBuckets, gotBuckets []platform.BucketFilter) string {
if len(wantBuckets) != len(gotBuckets) {
return fmt.Sprintf("Expected %v buckets but got %v", len(wantBuckets), len(gotBuckets))
}

for i, wantBucket := range wantBuckets {
if diagnostic := cmp.Diff(wantBucket, gotBuckets[i]); diagnostic != "" {
return fmt.Sprintf("Bucket mismatch: -want/+got:\n%v", diagnostic)
}
}

return ""
}
@@ -1,10 +1,12 @@
package querytest

import (
"context"

"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/stdlib/influxdata/influxdb"
v1 "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1"
"github.com/influxdata/influxdb/v2/query/influxql"
"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
)

// MakeFromInfluxJSONCompiler returns a compiler that replaces all From operations with FromJSON.

@@ -24,7 +26,7 @@ func (ReplaceFromRule) Pattern() plan.Pattern {
return plan.Pat(influxdb.FromKind)
}

func (r ReplaceFromRule) Rewrite(n plan.Node) (plan.Node, bool, error) {
func (r ReplaceFromRule) Rewrite(ctx context.Context, n plan.Node) (plan.Node, bool, error) {
if err := n.ReplaceSpec(&v1.FromInfluxJSONProcedureSpec{
File: r.Filename,
}); err != nil {
@@ -21,7 +21,7 @@ type compilerA struct {
A string `json:"a"`
}

func (c compilerA) Compile(ctx context.Context) (flux.Program, error) {
func (c compilerA) Compile(ctx context.Context, runtime flux.Runtime) (flux.Program, error) {
panic("not implemented")
}
@@ -1,49 +0,0 @@
package query

import (
"context"

"github.com/influxdata/flux"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/lang"
platform "github.com/influxdata/influxdb/v2"
)

// BucketAwareOperationSpec specifies an operation that reads or writes buckets
type BucketAwareOperationSpec interface {
BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter)
}

type constantSecretService struct{}

func (s constantSecretService) LoadSecret(ctx context.Context, k string) (string, error) {
return "", nil
}

func newDeps() flux.Dependencies {
deps := flux.NewDefaultDependencies()
deps.Deps.HTTPClient = nil
deps.Deps.URLValidator = nil
deps.Deps.SecretService = constantSecretService{}
return deps
}

// BucketsAccessed returns the set of buckets read and written by a query spec
func BucketsAccessed(ast *ast.Package, orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter, err error) {
ctx := newDeps().Inject(context.Background())
err = lang.WalkIR(ctx, ast, func(o *flux.Operation) error {
bucketAwareOpSpec, ok := o.Spec.(BucketAwareOperationSpec)
if ok {
opBucketsRead, opBucketsWritten := bucketAwareOpSpec.BucketsAccessed(orgID)
readBuckets = append(readBuckets, opBucketsRead...)
writeBuckets = append(writeBuckets, opBucketsWritten...)
}
return nil
})

if err != nil {
return nil, nil, err
}

return readBuckets, writeBuckets, nil
}
@@ -9,6 +9,7 @@ import (
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/stdlib/experimental"
platform "github.com/influxdata/influxdb/v2"

@@ -32,19 +33,8 @@ type ToOpSpec struct {
}

func init() {
toSignature := flux.FunctionSignature(
map[string]semantic.PolyType{
"bucket": semantic.String,
"bucketID": semantic.String,
"org": semantic.String,
"orgID": semantic.String,
"host": semantic.String,
"token": semantic.String,
},
[]string{},
)

flux.ReplacePackageValue("experimental", "to", flux.FunctionValueWithSideEffect("to", createToOpSpec, toSignature))
toSignature := runtime.MustLookupBuiltinType("experimental", "to")
runtime.ReplacePackageValue("experimental", "to", flux.MustValue(flux.FunctionValueWithSideEffect("to", createToOpSpec, toSignature)))
flux.RegisterOpSpec(ExperimentalToKind, func() flux.OperationSpec { return &ToOpSpec{} })
plan.RegisterProcedureSpecWithSideEffect(ExperimentalToKind, newToProcedure, ExperimentalToKind)
execute.RegisterTransformation(ExperimentalToKind, createToTransformation)
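For contributors porting similar registrations, the migration pattern in this init, in sketch form (names as in the hunk above):

// Before, the function signature was hand-assembled from semantic.PolyType
// values. After, it is looked up from the compiled stdlib; the lookup
// panics at startup if the package/name pair does not exist, which turns
// a signature drift into an immediate failure rather than a silent mismatch.
sig := runtime.MustLookupBuiltinType("experimental", "to")
fn := flux.MustValue(flux.FunctionValueWithSideEffect("to", createToOpSpec, sig))
runtime.ReplacePackageValue("experimental", "to", fn)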
@@ -185,9 +175,7 @@ func createToTransformation(id execute.DatasetID, mode execute.AccumulationMode,
// ToTransformation is the transformation for the `to` flux function.
type ToTransformation struct {
ctx context.Context
bucket string
bucketID platform.ID
org string
orgID platform.ID
d execute.Dataset
cache execute.TableBuilderCache

@@ -206,7 +194,6 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T
var err error

var orgID platform.ID
var org string
// Get organization name and ID
if spec.Spec.Org != "" {
oID, ok := deps.OrganizationLookup.Lookup(ctx, spec.Spec.Org)

@@ -214,7 +201,6 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T
return nil, fmt.Errorf("failed to look up organization %q", spec.Spec.Org)
}
orgID = oID
org = spec.Spec.Org
} else if spec.Spec.OrgID != "" {
if oid, err := platform.IDFromString(spec.Spec.OrgID); err != nil {
return nil, err

@@ -229,15 +215,8 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T
}
orgID = req.OrganizationID
}
if org == "" {
org = deps.OrganizationLookup.LookupName(ctx, orgID)
if org == "" {
return nil, fmt.Errorf("failed to look up organization name for ID %q", orgID.String())
}
}

var bucketID *platform.ID
var bucket string
// Get bucket name and ID
// User will have specified exactly one in the ToOpSpec.
if spec.Spec.Bucket != "" {

@@ -246,21 +225,14 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T
return nil, fmt.Errorf("failed to look up bucket %q in org %q", spec.Spec.Bucket, spec.Spec.Org)
}
bucketID = &bID
bucket = spec.Spec.Bucket
} else {
if bucketID, err = platform.IDFromString(spec.Spec.BucketID); err != nil {
return nil, err
}
bucket = deps.BucketLookup.LookupName(ctx, orgID, *bucketID)
if bucket == "" {
return nil, fmt.Errorf("failed to look up bucket with ID %q in org %q", bucketID, org)
}
}
return &ToTransformation{
ctx: ctx,
bucket: bucket,
bucketID: *bucketID,
org: org,
orgID: orgID,
d: d,
cache: cache,
@@ -313,6 +285,8 @@ type TablePointsMetadata struct {
MeasurementName string
// The tags in the table (final element is left as nil, to be replaced by field name)
Tags [][]byte
// The offset in tags where to store the field name
FieldKeyTagValueOffset int
// The column offset in the input table where the _time column is stored
TimestampOffset int
// The labels and offsets of all the fields in the table
@@ -428,15 +402,15 @@ func (t *ToTransformation) writeTable(ctx context.Context, tbl flux.Table) error
}

switch fieldVal.Type() {
case semantic.Float:
case semantic.BasicFloat:
fields[lao.Label] = fieldVal.Float()
case semantic.Int:
case semantic.BasicInt:
fields[lao.Label] = fieldVal.Int()
case semantic.UInt:
case semantic.BasicUint:
fields[lao.Label] = fieldVal.UInt()
case semantic.String:
case semantic.BasicString:
fields[lao.Label] = fieldVal.Str()
case semantic.Bool:
case semantic.BasicBool:
fields[lao.Label] = fieldVal.Bool()
default:
return fmt.Errorf("unsupported field type %v", fieldVal.Type())
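For reference when porting similar switches, a hedged sketch of the renamed monotype constants; only the cases shown in the hunk above are covered, and the helper is hypothetical:

// typeName illustrates the v0.83 constants; note BasicUint's casing,
// which differs from the old semantic.UInt.
func typeName(v values.Value) string {
	switch v.Type() {
	case semantic.BasicFloat: // was semantic.Float
		return "float"
	case semantic.BasicInt: // was semantic.Int
		return "int"
	case semantic.BasicUint: // was semantic.UInt
		return "uint"
	case semantic.BasicString: // was semantic.String
		return "string"
	case semantic.BasicBool: // was semantic.Bool
		return "bool"
	default:
		return "unknown"
	}
}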
@@ -3,7 +3,6 @@ package experimental_test
import (
"context"
"errors"
"fmt"
"testing"
"time"

@@ -17,7 +16,6 @@ import (
"github.com/influxdata/influxdb/v2/mock"
"github.com/influxdata/influxdb/v2/models"
_ "github.com/influxdata/influxdb/v2/query/builtin"
pquerytest "github.com/influxdata/influxdb/v2/query/querytest"
"github.com/influxdata/influxdb/v2/query/stdlib/experimental"
"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
)
@@ -35,9 +33,9 @@ from(bucket:"mydb")
Want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: "influxDBFrom0",
ID: "from0",
Spec: &influxdb.FromOpSpec{
Bucket: "mydb",
Bucket: influxdb.NameOrID{Name: "mydb"},
},
},
{

@@ -68,7 +66,7 @@ from(bucket:"mydb")
},
},
Edges: []flux.Edge{
{Parent: "influxDBFrom0", Child: "range1"},
{Parent: "from0", Child: "range1"},
{Parent: "range1", Child: "pivot2"},
{Parent: "pivot2", Child: "experimental-to3"},
},
@@ -84,53 +82,6 @@ from(bucket:"mydb")
}
}

func TestToOpSpec_BucketsAccessed(t *testing.T) {
bucketName := "my_bucket"
bucketIDString := "ddddccccbbbbaaaa"
bucketID, err := platform.IDFromString(bucketIDString)
if err != nil {
t.Fatal(err)
}
orgName := "my_org"
orgIDString := "aaaabbbbccccdddd"
orgID, err := platform.IDFromString(orgIDString)
if err != nil {
t.Fatal(err)
}
tests := []pquerytest.BucketsAccessedTestCase{
{
Name: "from() with bucket and to with org and bucket",
Raw: fmt.Sprintf(`import "experimental"
from(bucket:"%s")
|> experimental.to(bucket:"%s", org:"%s")`, bucketName, bucketName, orgName),
WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}},
WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, Org: &orgName}},
},
{
Name: "from() with bucket and to with orgID and bucket",
Raw: fmt.Sprintf(`import "experimental"
from(bucket:"%s") |> experimental.to(bucket:"%s", orgID:"%s")`, bucketName, bucketName, orgIDString),
WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}},
WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, OrganizationID: orgID}},
},
{
Name: "from() with bucket and to with orgID and bucketID",
Raw: fmt.Sprintf(`import "experimental"
from(bucket:"%s") |> experimental.to(bucketID:"%s", orgID:"%s")`, bucketName, bucketIDString, orgIDString),
WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}},
WantWriteBuckets: &[]platform.BucketFilter{{ID: bucketID, OrganizationID: orgID}},
},
}

for _, tc := range tests {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
pquerytest.BucketsAccessedTestHelper(t, tc)
})
}
}

func TestTo_Process(t *testing.T) {
oid, _ := mock.OrganizationLookup{}.Lookup(context.Background(), "my-org")
bid, _ := mock.BucketLookup{}.Lookup(context.Background(), oid, "my-bucket")
@@ -15,8 +15,23 @@ import (
"github.com/influxdata/influxdb/v2/query"
)

const BucketsKind = "influxdata/influxdb.localBuckets"

func init() {
execute.RegisterSource(influxdb.BucketsKind, createBucketsSource)
execute.RegisterSource(BucketsKind, createBucketsSource)
plan.RegisterPhysicalRules(LocalBucketsRule{})
}

type LocalBucketsProcedureSpec struct {
plan.DefaultCost
}

func (s *LocalBucketsProcedureSpec) Kind() plan.ProcedureKind {
return BucketsKind
}

func (s *LocalBucketsProcedureSpec) Copy() plan.ProcedureSpec {
return new(LocalBucketsProcedureSpec)
}

type BucketsDecoder struct {
@@ -99,7 +114,7 @@ func (bd *BucketsDecoder) Close() error {
}

func createBucketsSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
_, ok := prSpec.(*influxdb.BucketsProcedureSpec)
_, ok := prSpec.(*LocalBucketsProcedureSpec)
if !ok {
return nil, &flux.Error{
Code: codes.Internal,
@ -128,3 +143,27 @@ type AllBucketLookup interface {
|
|||
FindAllBuckets(ctx context.Context, orgID platform.ID) ([]*platform.Bucket, int)
|
||||
}
|
||||
type BucketDependencies AllBucketLookup
|
||||
|
||||
type LocalBucketsRule struct{}
|
||||
|
||||
func (rule LocalBucketsRule) Name() string {
|
||||
return "influxdata/influxdb.LocalBucketsRule"
|
||||
}
|
||||
|
||||
func (rule LocalBucketsRule) Pattern() plan.Pattern {
|
||||
return plan.Pat(influxdb.BucketsKind)
|
||||
}
|
||||
|
||||
func (rule LocalBucketsRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
|
||||
fromSpec := node.ProcedureSpec().(*influxdb.BucketsProcedureSpec)
|
||||
if fromSpec.Host != nil {
|
||||
return node, false, nil
|
||||
} else if fromSpec.Org != nil {
|
||||
return node, false, &flux.Error{
|
||||
Code: codes.Unimplemented,
|
||||
Msg: "buckets cannot list from a separate organization; please specify a host or remove the organization",
|
||||
}
|
||||
}
|
||||
|
||||
return plan.CreateLogicalNode("localBuckets", &LocalBucketsProcedureSpec{}), true, nil
|
||||
}
|
||||
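// Illustrative note (not part of the diff): with LocalBucketsRule registered,
// a logical buckets() node with neither a host nor an org set is rewritten
// into the local "localBuckets" source; a node with a host is left alone for
// the remote reader, and a node with only an org fails with
// codes.Unimplemented, exactly as the Rewrite above shows.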

@@ -26,6 +26,9 @@ func (d StorageDependencies) Inject(ctx context.Context) context.Context {
}

func GetStorageDependencies(ctx context.Context) StorageDependencies {
	if ctx.Value(dependenciesKey) == nil {
		return StorageDependencies{}
	}
	return ctx.Value(dependenciesKey).(StorageDependencies)
}

@@ -65,7 +68,7 @@ func (d Dependencies) PrometheusCollectors() []prometheus.Collector {
}

func NewDependencies(
	reader Reader,
	reader query.StorageReader,
	writer storage.PointsWriter,
	bucketSvc influxdb.BucketService,
	orgSvc influxdb.OrganizationService,

@@ -6,131 +6,32 @@ import (
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/flux/plan"
	"github.com/influxdata/flux/semantic"
	"github.com/influxdata/flux/stdlib/influxdata/influxdb"
	platform "github.com/influxdata/influxdb/v2"
)

const FromKind = "influxDBFrom"

type FromOpSpec struct {
	Bucket   string `json:"bucket,omitempty"`
	BucketID string `json:"bucketID,omitempty"`
type (
	NameOrID   = influxdb.NameOrID
	FromOpSpec = influxdb.FromOpSpec
)

type FromStorageProcedureSpec struct {
	Bucket influxdb.NameOrID
}

func init() {
	fromSignature := semantic.FunctionPolySignature{
		Parameters: map[string]semantic.PolyType{
			"bucket":   semantic.String,
			"bucketID": semantic.String,
		},
		Required: nil,
		Return:   flux.TableObjectType,
	}

	flux.ReplacePackageValue("influxdata/influxdb", influxdb.FromKind, flux.FunctionValue(FromKind, createFromOpSpec, fromSignature))
	flux.RegisterOpSpec(FromKind, newFromOp)
	plan.RegisterProcedureSpec(FromKind, newFromProcedure, FromKind)
}

func createFromOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) {
	spec := new(FromOpSpec)

	if bucket, ok, err := args.GetString("bucket"); err != nil {
		return nil, err
	} else if ok {
		spec.Bucket = bucket
	}

	if bucketID, ok, err := args.GetString("bucketID"); err != nil {
		return nil, err
	} else if ok {
		spec.BucketID = bucketID
	}

	if spec.Bucket == "" && spec.BucketID == "" {
		return nil, &flux.Error{
			Code: codes.Invalid,
			Msg:  "must specify one of bucket or bucketID",
		}
	}
	if spec.Bucket != "" && spec.BucketID != "" {
		return nil, &flux.Error{
			Code: codes.Invalid,
			Msg:  "must specify only one of bucket or bucketID",
		}
	}
	return spec, nil
}

func newFromOp() flux.OperationSpec {
	return new(FromOpSpec)
}

func (s *FromOpSpec) Kind() flux.OperationKind {
func (s *FromStorageProcedureSpec) Kind() plan.ProcedureKind {
	return FromKind
}

// BucketsAccessed makes FromOpSpec a query.BucketAwareOperationSpec
func (s *FromOpSpec) BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter) {
	bf := platform.BucketFilter{}
	if s.Bucket != "" {
		bf.Name = &s.Bucket
	}
	if orgID != nil {
		bf.OrganizationID = orgID
	}

	if len(s.BucketID) > 0 {
		if id, err := platform.IDFromString(s.BucketID); err != nil {
			invalidID := platform.InvalidID()
			bf.ID = &invalidID
		} else {
			bf.ID = id
		}
	}

	if bf.ID != nil || bf.Name != nil {
		readBuckets = append(readBuckets, bf)
	}
	return readBuckets, writeBuckets
}

type FromProcedureSpec struct {
	Bucket   string
	BucketID string
}

func newFromProcedure(qs flux.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) {
	spec, ok := qs.(*FromOpSpec)
	if !ok {
		return nil, &flux.Error{
			Code: codes.Internal,
			Msg:  fmt.Sprintf("invalid spec type %T", qs),
		}
	}

	return &FromProcedureSpec{
		Bucket:   spec.Bucket,
		BucketID: spec.BucketID,
	}, nil
}

func (s *FromProcedureSpec) Kind() plan.ProcedureKind {
	return FromKind
}

func (s *FromProcedureSpec) Copy() plan.ProcedureSpec {
	ns := new(FromProcedureSpec)

func (s *FromStorageProcedureSpec) Copy() plan.ProcedureSpec {
	ns := new(FromStorageProcedureSpec)
	ns.Bucket = s.Bucket
	ns.BucketID = s.BucketID

	return ns
}

func (s *FromProcedureSpec) PostPhysicalValidate(id plan.NodeID) error {
	// FromProcedureSpec is a logical operation representing any read
func (s *FromStorageProcedureSpec) PostPhysicalValidate(id plan.NodeID) error {
	// FromStorageProcedureSpec is a logical operation representing any read
	// from storage. However as a logical operation, it doesn't specify
	// how data is to be read from storage. It is the query planner's
	// job to determine the optimal read strategy and to convert this

@@ -142,10 +43,10 @@ func (s *FromProcedureSpec) PostPhysicalValidate(id plan.NodeID) error {
	// not support unbounded reads, and so this query must not be
	// validated.
	var bucket string
	if len(s.Bucket) > 0 {
		bucket = s.Bucket
	if s.Bucket.Name != "" {
		bucket = s.Bucket.Name
	} else {
		bucket = s.BucketID
		bucket = s.Bucket.ID
	}
	return &flux.Error{
		Code: codes.Invalid,

@@ -1,168 +1,23 @@
package influxdb_test

import (
	"fmt"
	"context"
	"testing"
	"time"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/plan"
	"github.com/influxdata/flux/plan/plantest"
	"github.com/influxdata/flux/querytest"
	"github.com/influxdata/flux/stdlib/influxdata/influxdb"
	"github.com/influxdata/flux/stdlib/universe"
	platform "github.com/influxdata/influxdb/v2"
	pquerytest "github.com/influxdata/influxdb/v2/query/querytest"
	"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
	qinfluxdb "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
)

func TestFrom_NewQuery(t *testing.T) {
	t.Skip()
	tests := []querytest.NewQueryTestCase{
		{
			Name:    "from no args",
			Raw:     `from()`,
			WantErr: true,
		},
		{
			Name:    "from conflicting args",
			Raw:     `from(bucket:"d", bucket:"b")`,
			WantErr: true,
		},
		{
			Name:    "from repeat arg",
			Raw:     `from(bucket:"telegraf", bucket:"oops")`,
			WantErr: true,
		},
		{
			Name:    "from",
			Raw:     `from(bucket:"telegraf", chicken:"what is this?")`,
			WantErr: true,
		},
		{
			Name:    "from bucket invalid ID",
			Raw:     `from(bucketID:"invalid")`,
			WantErr: true,
		},
		{
			Name: "from bucket ID",
			Raw:  `from(bucketID:"aaaabbbbccccdddd")`,
			Want: &flux.Spec{
				Operations: []*flux.Operation{
					{
						ID: "from0",
						Spec: &influxdb.FromOpSpec{
							BucketID: "aaaabbbbccccdddd",
						},
					},
				},
			},
		},
		{
			Name: "from with database",
			Raw:  `from(bucket:"mybucket") |> range(start:-4h, stop:-2h) |> sum()`,
			Want: &flux.Spec{
				Operations: []*flux.Operation{
					{
						ID: "from0",
						Spec: &influxdb.FromOpSpec{
							Bucket: "mybucket",
						},
					},
					{
						ID: "range1",
						Spec: &universe.RangeOpSpec{
							Start: flux.Time{
								Relative:   -4 * time.Hour,
								IsRelative: true,
							},
							Stop: flux.Time{
								Relative:   -2 * time.Hour,
								IsRelative: true,
							},
							TimeColumn:  "_time",
							StartColumn: "_start",
							StopColumn:  "_stop",
						},
					},
					{
						ID: "sum2",
						Spec: &universe.SumOpSpec{
							AggregateConfig: execute.DefaultAggregateConfig,
						},
					},
				},
				Edges: []flux.Edge{
					{Parent: "from0", Child: "range1"},
					{Parent: "range1", Child: "sum2"},
				},
			},
		},
	}
	for _, tc := range tests {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			t.Parallel()
			querytest.NewQueryTestHelper(t, tc)
		})
	}
}

func TestFromOperation_Marshaling(t *testing.T) {
	t.Skip()
	data := []byte(`{"id":"from","kind":"from","spec":{"bucket":"mybucket"}}`)
	op := &flux.Operation{
		ID: "from",
		Spec: &influxdb.FromOpSpec{
			Bucket: "mybucket",
		},
	}
	querytest.OperationMarshalingTestHelper(t, data, op)
}

func TestFromOpSpec_BucketsAccessed(t *testing.T) {
	bucketName := "my_bucket"
	bucketIDString := "aaaabbbbccccdddd"
	bucketID, err := platform.IDFromString(bucketIDString)
	if err != nil {
		t.Fatal(err)
	}
	invalidID := platform.InvalidID()
	tests := []pquerytest.BucketsAccessedTestCase{
		{
			Name:             "From with bucket",
			Raw:              fmt.Sprintf(`from(bucket:"%s")`, bucketName),
			WantReadBuckets:  &[]platform.BucketFilter{{Name: &bucketName}},
			WantWriteBuckets: &[]platform.BucketFilter{},
		},
		{
			Name:             "From with bucketID",
			Raw:              fmt.Sprintf(`from(bucketID:"%s")`, bucketID),
			WantReadBuckets:  &[]platform.BucketFilter{{ID: bucketID}},
			WantWriteBuckets: &[]platform.BucketFilter{},
		},
		{
			Name:             "From invalid bucketID",
			Raw:              `from(bucketID:"invalid")`,
			WantReadBuckets:  &[]platform.BucketFilter{{ID: &invalidID}},
			WantWriteBuckets: &[]platform.BucketFilter{},
		},
	}
	for _, tc := range tests {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			t.Parallel()
			pquerytest.BucketsAccessedTestHelper(t, tc)
		})
	}
}

func TestFromValidation(t *testing.T) {
	spec := plantest.PlanSpec{
		// from |> group (cannot query an infinite time range)
		Nodes: []plan.Node{
			plan.CreateLogicalNode("from", &influxdb.FromProcedureSpec{
				Bucket: "my-bucket",
				Bucket: influxdb.NameOrID{Name: "my-bucket"},
			}),
			plan.CreatePhysicalNode("group", &universe.GroupProcedureSpec{
				GroupMode: flux.GroupModeBy,

@@ -176,11 +31,12 @@ func TestFromValidation(t *testing.T) {

	ps := plantest.CreatePlanSpec(&spec)
	pp := plan.NewPhysicalPlanner(plan.OnlyPhysicalRules(
		influxdb.PushDownRangeRule{},
		influxdb.PushDownFilterRule{},
		influxdb.PushDownGroupRule{},
		qinfluxdb.FromStorageRule{},
		qinfluxdb.PushDownRangeRule{},
		qinfluxdb.PushDownFilterRule{},
		qinfluxdb.PushDownGroupRule{},
	))
	_, err := pp.Plan(ps)
	_, err := pp.Plan(context.Background(), ps)
	if err == nil {
		t.Error("Expected query with no call to range to fail physical planning")
	}

@@ -7,9 +7,9 @@ import (
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/flux/plan"
	"github.com/influxdata/flux/semantic"
	"github.com/influxdata/flux/values"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/storage/reads/datatypes"
)

const (

@@ -55,12 +55,10 @@ type ReadRangePhysSpec struct {
	Bucket   string
	BucketID string

	// FilterSet is set to true if there is a filter.
	FilterSet bool
	// Filter is the filter to use when calling into
	// storage. It must be possible to push down this
	// filter.
	Filter *semantic.FunctionExpression
	Filter *datatypes.Predicate

	Bounds flux.Bounds
}

@@ -69,19 +67,8 @@ func (s *ReadRangePhysSpec) Kind() plan.ProcedureKind {
	return ReadRangePhysKind
}
func (s *ReadRangePhysSpec) Copy() plan.ProcedureSpec {
	ns := new(ReadRangePhysSpec)

	ns.Bucket = s.Bucket
	ns.BucketID = s.BucketID

	ns.FilterSet = s.FilterSet
	if ns.FilterSet {
		ns.Filter = s.Filter.Copy().(*semantic.FunctionExpression)
	}

	ns.Bounds = s.Bounds

	return ns
	ns := *s
	return &ns
}

func (s *ReadRangePhysSpec) LookupBucketID(ctx context.Context, orgID influxdb.ID, buckets BucketLookup) (influxdb.ID, error) {

@@ -127,22 +114,29 @@ type ReadWindowAggregatePhysSpec struct {
	ReadRangePhysSpec

	WindowEvery int64
	Offset      int64
	Aggregates  []plan.ProcedureKind
	CreateEmpty bool
	TimeColumn  string
}

func (s *ReadWindowAggregatePhysSpec) PlanDetails() string {
	return fmt.Sprintf("every = %d, aggregates = %v", s.WindowEvery, s.Aggregates)
	return fmt.Sprintf("every = %d, aggregates = %v, createEmpty = %v, timeColumn = \"%s\"", s.WindowEvery, s.Aggregates, s.CreateEmpty, s.TimeColumn)
}

func (s *ReadWindowAggregatePhysSpec) Kind() plan.ProcedureKind {
	return ReadWindowAggregatePhysKind
}

func (s *ReadWindowAggregatePhysSpec) Copy() plan.ProcedureSpec {
	ns := new(ReadWindowAggregatePhysSpec)

	ns.ReadRangePhysSpec = *s.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec)
	ns.WindowEvery = s.WindowEvery
	ns.Offset = s.Offset
	ns.Aggregates = s.Aggregates
	ns.CreateEmpty = s.CreateEmpty
	ns.TimeColumn = s.TimeColumn

	return ns
}

@@ -1,23 +1,69 @@
package influxdb

import (
	"context"
	"math"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/plan"
	"github.com/influxdata/flux/semantic"
	"github.com/influxdata/flux/stdlib/influxdata/influxdb"
	"github.com/influxdata/flux/stdlib/universe"
	"github.com/influxdata/flux/values"
	"github.com/influxdata/influxdb/v2/kit/feature"
	"github.com/influxdata/influxdb/v2/query"
)

func init() {
	plan.RegisterPhysicalRules(
		FromStorageRule{},
		PushDownRangeRule{},
		PushDownFilterRule{},
		PushDownGroupRule{},
		PushDownReadTagKeysRule{},
		PushDownReadTagValuesRule{},
		// These rules can be re-enabled when https://github.com/influxdata/influxdb/issues/19561 is fixed
		// PushDownReadTagKeysRule{},
		// PushDownReadTagValuesRule{},
		SortedPivotRule{},
		PushDownWindowAggregateRule{},
		PushDownWindowAggregateByTimeRule{},
		PushDownBareAggregateRule{},
		GroupWindowAggregateTransposeRule{},
		PushDownGroupAggregateRule{},
		SwitchFillImplRule{},
		SwitchSchemaMutationImplRule{},
	)
	plan.RegisterLogicalRules(
		MergeFiltersRule{},
	)
}

type FromStorageRule struct{}

func (rule FromStorageRule) Name() string {
	return "influxdata/influxdb.FromStorageRule"
}

func (rule FromStorageRule) Pattern() plan.Pattern {
	return plan.Pat(influxdb.FromKind)
}

func (rule FromStorageRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
	fromSpec := node.ProcedureSpec().(*influxdb.FromProcedureSpec)
	if fromSpec.Host != nil {
		return node, false, nil
	} else if fromSpec.Org != nil {
		return node, false, &flux.Error{
			Code: codes.Unimplemented,
			Msg:  "reads from the storage engine cannot read from a separate organization; please specify a host or remove the organization",
		}
	}

	return plan.CreateLogicalNode("fromStorage", &FromStorageProcedureSpec{
		Bucket: fromSpec.Bucket,
	}), true, nil
}

// PushDownGroupRule pushes down a group operation to storage

@@ -31,7 +77,7 @@ func (rule PushDownGroupRule) Pattern() plan.Pattern {
	return plan.Pat(universe.GroupKind, plan.Pat(ReadRangePhysKind))
}

func (rule PushDownGroupRule) Rewrite(node plan.Node) (plan.Node, bool, error) {
func (rule PushDownGroupRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
	src := node.Predecessors()[0].ProcedureSpec().(*ReadRangePhysSpec)
	grp := node.ProcedureSpec().(*universe.GroupProcedureSpec)

@@ -71,14 +117,13 @@ func (rule PushDownRangeRule) Pattern() plan.Pattern {
}

// Rewrite converts 'from |> range' into 'ReadRange'
func (rule PushDownRangeRule) Rewrite(node plan.Node) (plan.Node, bool, error) {
func (rule PushDownRangeRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
	fromNode := node.Predecessors()[0]
	fromSpec := fromNode.ProcedureSpec().(*FromProcedureSpec)

	fromSpec := fromNode.ProcedureSpec().(*FromStorageProcedureSpec)
	rangeSpec := node.ProcedureSpec().(*universe.RangeProcedureSpec)
	return plan.CreatePhysicalNode("ReadRange", &ReadRangePhysSpec{
		Bucket:   fromSpec.Bucket,
		BucketID: fromSpec.BucketID,
		Bucket:   fromSpec.Bucket.Name,
		BucketID: fromSpec.Bucket.ID,
		Bounds:   rangeSpec.Bounds,
	}), true, nil
}
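// Illustrative note (not part of the diff): PushDownRangeRule collapses the
// logical pair
//
//	from(bucket: "example-bucket") |> range(start: -1h)
//
// into a single ReadRange node whose Bucket/BucketID come from the NameOrID on
// the from spec and whose Bounds come from the range call. The bucket name is
// invented for the example.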

@@ -96,7 +141,7 @@ func (PushDownFilterRule) Pattern() plan.Pattern {
	return plan.Pat(universe.FilterKind, plan.Pat(ReadRangePhysKind))
}

func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
func (PushDownFilterRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	filterSpec := pn.ProcedureSpec().(*universe.FilterProcedureSpec)
	fromNode := pn.Predecessors()[0]
	fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)

@@ -106,17 +151,17 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
		return pn, false, nil
	}

	bodyExpr, ok := filterSpec.Fn.Fn.Block.Body.(semantic.Expression)
	bodyExpr, ok := filterSpec.Fn.Fn.GetFunctionBodyExpression()
	if !ok {
		return pn, false, nil
	}

	if len(filterSpec.Fn.Fn.Block.Parameters.List) != 1 {
	if len(filterSpec.Fn.Fn.Parameters.List) != 1 {
		// I would expect that type checking would catch this, but just to be safe...
		return pn, false, nil
	}

	paramName := filterSpec.Fn.Fn.Block.Parameters.List[0].Key.Name
	paramName := filterSpec.Fn.Fn.Parameters.List[0].Key.Name

	pushable, notPushable, err := semantic.PartitionPredicates(bodyExpr, func(e semantic.Expression) (bool, error) {
		return isPushableExpr(paramName, e)

@@ -131,17 +176,26 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
	}
	pushable, _ = rewritePushableExpr(pushable)

	newFromSpec := fromSpec.Copy().(*ReadRangePhysSpec)
	if newFromSpec.FilterSet {
		newBody := semantic.ExprsToConjunction(newFromSpec.Filter.Block.Body.(semantic.Expression), pushable)
		newFromSpec.Filter.Block.Body = newBody
	} else {
		newFromSpec.FilterSet = true
		// NOTE: We loose the scope here, but that is ok because we can't push down the scope to storage.
		newFromSpec.Filter = filterSpec.Fn.Fn.Copy().(*semantic.FunctionExpression)
		newFromSpec.Filter.Block.Body = pushable
	// Convert the pushable expression to a storage predicate.
	predicate, err := ToStoragePredicate(pushable, paramName)
	if err != nil {
		return nil, false, err
	}

	// If the filter has already been set, then combine the existing predicate
	// with the new one.
	if fromSpec.Filter != nil {
		mergedPredicate, err := mergePredicates(ast.AndOperator, fromSpec.Filter, predicate)
		if err != nil {
			return nil, false, err
		}
		predicate = mergedPredicate
	}

	// Copy the specification and set the predicate.
	newFromSpec := fromSpec.Copy().(*ReadRangePhysSpec)
	newFromSpec.Filter = predicate

	if notPushable == nil {
		// All predicates could be pushed down, so eliminate the filter
		mergedNode, err := plan.MergeToPhysicalNode(pn, fromNode, newFromSpec)

@@ -157,7 +211,11 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
	}

	newFilterSpec := filterSpec.Copy().(*universe.FilterProcedureSpec)
	newFilterSpec.Fn.Fn.Block.Body = notPushable
	newFilterSpec.Fn.Fn.Block = &semantic.Block{
		Body: []semantic.Statement{
			&semantic.ReturnStatement{Argument: notPushable},
		},
	}
	if err := pn.ReplaceSpec(newFilterSpec); err != nil {
		return nil, false, err
	}
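// Illustrative note (not part of the diff): given a query such as
//
//	from(bucket: "b") |> range(start: -1h)
//	    |> filter(fn: (r) => r._measurement == "cpu")
//
// the partitioner above classifies the measurement comparison as pushable,
// converts it with ToStoragePredicate, and stores it on the ReadRange spec;
// clauses that isPushableExpr rejects remain behind in a rewritten filter
// node. The bucket and measurement names are invented for the example.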

@@ -183,11 +241,11 @@ func (rule PushDownReadTagKeysRule) Pattern() plan.Pattern {
			plan.Pat(ReadRangePhysKind))))
}

func (rule PushDownReadTagKeysRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
func (rule PushDownReadTagKeysRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	// Retrieve the nodes and specs for all of the predecessors.
	distinctSpec := pn.ProcedureSpec().(*universe.DistinctProcedureSpec)
	keepNode := pn.Predecessors()[0]
	keepSpec := keepNode.ProcedureSpec().(*universe.SchemaMutationProcedureSpec)
	keepSpec := asSchemaMutationProcedureSpec(keepNode.ProcedureSpec())
	keysNode := keepNode.Predecessors()[0]
	keysSpec := keysNode.ProcedureSpec().(*universe.KeysProcedureSpec)
	fromNode := keysNode.Predecessors()[0]

@@ -245,14 +303,14 @@ func (rule PushDownReadTagValuesRule) Pattern() plan.Pattern {
			plan.Pat(ReadRangePhysKind))))
}

func (rule PushDownReadTagValuesRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
func (rule PushDownReadTagValuesRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	// Retrieve the nodes and specs for all of the predecessors.
	distinctNode := pn
	distinctSpec := distinctNode.ProcedureSpec().(*universe.DistinctProcedureSpec)
	groupNode := distinctNode.Predecessors()[0]
	groupSpec := groupNode.ProcedureSpec().(*universe.GroupProcedureSpec)
	keepNode := groupNode.Predecessors()[0]
	keepSpec := keepNode.ProcedureSpec().(*universe.SchemaMutationProcedureSpec)
	keepSpec := asSchemaMutationProcedureSpec(keepNode.ProcedureSpec())
	fromNode := keepNode.Predecessors()[0]
	fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)

@@ -556,7 +614,7 @@ func (SortedPivotRule) Pattern() plan.Pattern {
	return plan.Pat(universe.PivotKind, plan.Pat(ReadRangePhysKind))
}

func (SortedPivotRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	pivotSpec := pn.ProcedureSpec().Copy().(*universe.PivotProcedureSpec)
	pivotSpec.IsSortedByFunc = func(cols []string, desc bool) bool {
		if desc {

@@ -595,3 +653,551 @@ func (SortedPivotRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
	}
	return pn, false, nil
}

//
// Push Down of window aggregates.
// ReadRangePhys |> window |> { min, max, mean, count, sum }
//
type PushDownWindowAggregateRule struct{}

func (PushDownWindowAggregateRule) Name() string {
	return "PushDownWindowAggregateRule"
}

var windowPushableAggs = []plan.ProcedureKind{
	universe.CountKind,
	universe.SumKind,
	universe.MinKind,
	universe.MaxKind,
	universe.MeanKind,
	universe.FirstKind,
	universe.LastKind,
}

func (rule PushDownWindowAggregateRule) Pattern() plan.Pattern {
	return plan.OneOf(windowPushableAggs,
		plan.Pat(universe.WindowKind, plan.Pat(ReadRangePhysKind)))
}

func canPushWindowedAggregate(ctx context.Context, fnNode plan.Node) bool {
	caps, ok := capabilities(ctx)
	if !ok {
		return false
	}
	// Check the aggregate function spec. Require the operation on _value
	// and check the feature flag associated with the aggregate function.
	switch fnNode.Kind() {
	case universe.MinKind:
		if !caps.HaveMin() {
			return false
		}
		minSpec := fnNode.ProcedureSpec().(*universe.MinProcedureSpec)
		if minSpec.Column != execute.DefaultValueColLabel {
			return false
		}
	case universe.MaxKind:
		if !caps.HaveMax() {
			return false
		}
		maxSpec := fnNode.ProcedureSpec().(*universe.MaxProcedureSpec)
		if maxSpec.Column != execute.DefaultValueColLabel {
			return false
		}
	case universe.MeanKind:
		if !feature.PushDownWindowAggregateMean().Enabled(ctx) || !caps.HaveMean() {
			return false
		}
		meanSpec := fnNode.ProcedureSpec().(*universe.MeanProcedureSpec)
		if len(meanSpec.Columns) != 1 || meanSpec.Columns[0] != execute.DefaultValueColLabel {
			return false
		}
	case universe.CountKind:
		if !caps.HaveCount() {
			return false
		}
		countSpec := fnNode.ProcedureSpec().(*universe.CountProcedureSpec)
		if len(countSpec.Columns) != 1 || countSpec.Columns[0] != execute.DefaultValueColLabel {
			return false
		}
	case universe.SumKind:
		if !caps.HaveSum() {
			return false
		}
		sumSpec := fnNode.ProcedureSpec().(*universe.SumProcedureSpec)
		if len(sumSpec.Columns) != 1 || sumSpec.Columns[0] != execute.DefaultValueColLabel {
			return false
		}
	case universe.FirstKind:
		if !caps.HaveFirst() {
			return false
		}
		firstSpec := fnNode.ProcedureSpec().(*universe.FirstProcedureSpec)
		if firstSpec.Column != execute.DefaultValueColLabel {
			return false
		}
	case universe.LastKind:
		if !caps.HaveLast() {
			return false
		}
		lastSpec := fnNode.ProcedureSpec().(*universe.LastProcedureSpec)
		if lastSpec.Column != execute.DefaultValueColLabel {
			return false
		}
	}
	return true
}

func isPushableWindow(windowSpec *universe.WindowProcedureSpec) bool {
	// every and period must be equal
	// every.months must be zero
	// every.isNegative must be false
	// offset.months must be zero
	// offset.isNegative must be false
	// timeColumn: must be "_time"
	// startColumn: must be "_start"
	// stopColumn: must be "_stop"
	// createEmpty: must be false
	window := windowSpec.Window
	return window.Every.Equal(window.Period) &&
		window.Every.Months() == 0 &&
		!window.Every.IsNegative() &&
		!window.Every.IsZero() &&
		window.Offset.Months() == 0 &&
		!window.Offset.IsNegative() &&
		windowSpec.TimeColumn == "_time" &&
		windowSpec.StartColumn == "_start" &&
		windowSpec.StopColumn == "_stop"
}
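// Illustrative note (not part of the diff): under the checks above,
//
//	window(every: 1m)   // pushable: every == period, no calendar months
//	window(every: 1mo)  // not pushable: months in `every`
//	window(every: -1m)  // not pushable: negative duration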

func capabilities(ctx context.Context) (query.WindowAggregateCapability, bool) {
	reader := GetStorageDependencies(ctx).FromDeps.Reader
	windowAggregateReader, ok := reader.(query.WindowAggregateReader)
	if !ok {
		return nil, false
	}
	caps := windowAggregateReader.GetWindowAggregateCapability(ctx)
	return caps, caps != nil
}

func (PushDownWindowAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	fnNode := pn
	if !canPushWindowedAggregate(ctx, fnNode) {
		return pn, false, nil
	}

	windowNode := fnNode.Predecessors()[0]
	windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec)
	fromNode := windowNode.Predecessors()[0]
	fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)

	if !isPushableWindow(windowSpec) {
		return pn, false, nil
	}

	if caps, ok := capabilities(ctx); !ok || windowSpec.Window.Offset.IsPositive() && !caps.HaveOffset() {
		return pn, false, nil
	}

	// Rule passes.
	return plan.CreatePhysicalNode("ReadWindowAggregate", &ReadWindowAggregatePhysSpec{
		ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec),
		Aggregates:        []plan.ProcedureKind{fnNode.Kind()},
		WindowEvery:       windowSpec.Window.Every.Nanoseconds(),
		Offset:            windowSpec.Window.Offset.Nanoseconds(),
		CreateEmpty:       windowSpec.CreateEmpty,
	}), true, nil
}
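// Illustrative note (not part of the diff): when the reader advertises the
// matching capability, a pipeline such as
//
//	from(bucket: "b") |> range(start: -1h) |> window(every: 1m) |> min()
//
// is rewritten into one ReadWindowAggregate node with WindowEvery set to one
// minute in nanoseconds and Aggregates = [min]. The bucket name is invented.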

// PushDownWindowAggregateByTimeRule will match the given pattern.
// ReadWindowAggregatePhys |> duplicate |> window(every: inf)
//
// If this pattern matches and the arguments to duplicate are
// matching time column names, it will set the time column on
// the spec.
type PushDownWindowAggregateByTimeRule struct{}

func (PushDownWindowAggregateByTimeRule) Name() string {
	return "PushDownWindowAggregateByTimeRule"
}

func (rule PushDownWindowAggregateByTimeRule) Pattern() plan.Pattern {
	return plan.Pat(universe.WindowKind,
		plan.Pat(universe.SchemaMutationKind,
			plan.Pat(ReadWindowAggregatePhysKind)))
}

func (PushDownWindowAggregateByTimeRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	windowNode := pn
	windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec)

	duplicateNode := windowNode.Predecessors()[0]
	duplicateSpec, duplicateSpecOk := func() (*universe.DuplicateOpSpec, bool) {
		s := asSchemaMutationProcedureSpec(duplicateNode.ProcedureSpec())
		if len(s.Mutations) != 1 {
			return nil, false
		}
		mutator, ok := s.Mutations[0].(*universe.DuplicateOpSpec)
		return mutator, ok
	}()
	if !duplicateSpecOk {
		return pn, false, nil
	}

	// The As field must be the default time value
	// and the column must be start or stop.
	if duplicateSpec.As != execute.DefaultTimeColLabel ||
		(duplicateSpec.Column != execute.DefaultStartColLabel && duplicateSpec.Column != execute.DefaultStopColLabel) {
		return pn, false, nil
	}

	// window(every: inf)
	if windowSpec.Window.Every != values.ConvertDuration(math.MaxInt64) ||
		windowSpec.Window.Every != windowSpec.Window.Period ||
		windowSpec.TimeColumn != execute.DefaultTimeColLabel ||
		windowSpec.StartColumn != execute.DefaultStartColLabel ||
		windowSpec.StopColumn != execute.DefaultStopColLabel ||
		windowSpec.CreateEmpty {
		return pn, false, nil
	}

	// Cannot rewrite if it was already rewritten.
	windowAggregateNode := duplicateNode.Predecessors()[0]
	windowAggregateSpec := windowAggregateNode.ProcedureSpec().(*ReadWindowAggregatePhysSpec)
	if windowAggregateSpec.TimeColumn != "" {
		return pn, false, nil
	}

	// Rule passes.
	windowAggregateSpec.TimeColumn = duplicateSpec.Column
	return plan.CreatePhysicalNode("ReadWindowAggregateByTime", windowAggregateSpec), true, nil
}
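// Illustrative note (not part of the diff): this is the shape that
// aggregateWindow() lowers to, e.g.
//
//	... |> window(every: 1m) |> min()
//	    |> duplicate(column: "_stop", as: "_time") |> window(every: inf)
//
// Once the head has become ReadWindowAggregate, the duplicate/window(every:
// inf) tail is absorbed by setting TimeColumn = "_stop" on that spec.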

// PushDownBareAggregateRule is a rule that allows pushing down of aggregates
// that are directly over a ReadRange source.
type PushDownBareAggregateRule struct{}

func (p PushDownBareAggregateRule) Name() string {
	return "PushDownBareAggregateRule"
}

func (p PushDownBareAggregateRule) Pattern() plan.Pattern {
	return plan.OneOf(windowPushableAggs,
		plan.Pat(ReadRangePhysKind))
}

func (p PushDownBareAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	fnNode := pn
	if !canPushWindowedAggregate(ctx, fnNode) {
		return pn, false, nil
	}

	fromNode := fnNode.Predecessors()[0]
	fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)

	return plan.CreatePhysicalNode("ReadWindowAggregate", &ReadWindowAggregatePhysSpec{
		ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec),
		Aggregates:        []plan.ProcedureKind{fnNode.Kind()},
		WindowEvery:       math.MaxInt64,
	}), true, nil
}
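// Illustrative note (not part of the diff): a bare aggregate with no window,
// e.g.
//
//	from(bucket: "b") |> range(start: -1h) |> sum()
//
// is treated as one window spanning the whole range, hence the
// WindowEvery = math.MaxInt64 above. The bucket name is invented.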

// GroupWindowAggregateTransposeRule will match the given pattern.
// ReadGroupPhys |> window |> { min, max, count, sum }
//
// This pattern will use the PushDownWindowAggregateRule to determine
// if the ReadWindowAggregatePhys operation is available before it will
// rewrite the above. This rewrites the above to:
//
// ReadWindowAggregatePhys |> group(columns: ["_start", "_stop", ...]) |> { min, max, sum }
//
// The count aggregate uses sum to merge the results.
type GroupWindowAggregateTransposeRule struct{}

func (p GroupWindowAggregateTransposeRule) Name() string {
	return "GroupWindowAggregateTransposeRule"
}

var windowMergeablePushAggs = []plan.ProcedureKind{
	universe.MinKind,
	universe.MaxKind,
	universe.CountKind,
	universe.SumKind,
}

func (p GroupWindowAggregateTransposeRule) Pattern() plan.Pattern {
	return plan.OneOf(windowMergeablePushAggs,
		plan.Pat(universe.WindowKind, plan.Pat(ReadGroupPhysKind)))
}

func (p GroupWindowAggregateTransposeRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	if !feature.GroupWindowAggregateTranspose().Enabled(ctx) {
		return pn, false, nil
	}

	fnNode := pn
	if !canPushWindowedAggregate(ctx, fnNode) {
		return pn, false, nil
	}

	windowNode := fnNode.Predecessors()[0]
	windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec)

	if !isPushableWindow(windowSpec) {
		return pn, false, nil
	}

	if caps, ok := capabilities(ctx); !ok || windowSpec.Window.Offset.IsPositive() && !caps.HaveOffset() {
		return pn, false, nil
	}

	fromNode := windowNode.Predecessors()[0]
	fromSpec := fromNode.ProcedureSpec().(*ReadGroupPhysSpec)

	// This only works with GroupModeBy. It is the case
	// that ReadGroup, which we depend on as a predecessor,
	// only works with GroupModeBy so it should be impossible
	// to fail this condition, but we add this here for extra
	// protection.
	if fromSpec.GroupMode != flux.GroupModeBy {
		return pn, false, nil
	}

	// Perform the rewrite by replacing each of the nodes.
	newFromNode := plan.CreatePhysicalNode("ReadWindowAggregate", &ReadWindowAggregatePhysSpec{
		ReadRangePhysSpec: *fromSpec.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec),
		Aggregates:        []plan.ProcedureKind{fnNode.Kind()},
		WindowEvery:       windowSpec.Window.Every.Nanoseconds(),
		Offset:            windowSpec.Window.Offset.Nanoseconds(),
		CreateEmpty:       windowSpec.CreateEmpty,
	})

	// Replace the window node with a group node first.
	groupKeys := make([]string, len(fromSpec.GroupKeys), len(fromSpec.GroupKeys)+2)
	copy(groupKeys, fromSpec.GroupKeys)
	if !execute.ContainsStr(groupKeys, execute.DefaultStartColLabel) {
		groupKeys = append(groupKeys, execute.DefaultStartColLabel)
	}
	if !execute.ContainsStr(groupKeys, execute.DefaultStopColLabel) {
		groupKeys = append(groupKeys, execute.DefaultStopColLabel)
	}
	newGroupNode := plan.CreatePhysicalNode("group", &universe.GroupProcedureSpec{
		GroupMode: flux.GroupModeBy,
		GroupKeys: groupKeys,
	})
	newFromNode.AddSuccessors(newGroupNode)
	newGroupNode.AddPredecessors(newFromNode)

	// Attach the existing function node to the new group node.
	fnNode.ClearPredecessors()
	newGroupNode.AddSuccessors(fnNode)
	fnNode.AddPredecessors(newGroupNode)

	// Replace the spec for the function if needed.
	switch spec := fnNode.ProcedureSpec().(type) {
	case *universe.CountProcedureSpec:
		newFnNode := plan.CreatePhysicalNode("sum", &universe.SumProcedureSpec{
			AggregateConfig: spec.AggregateConfig,
		})
		plan.ReplaceNode(fnNode, newFnNode)
		fnNode = newFnNode
	default:
		// No replacement required. The procedure is idempotent so
		// we can use it over and over again and get the same result.
	}
	return fnNode, true, nil
}
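// Illustrative note (not part of the diff): for a pipeline such as
//
//	... |> group(columns: ["host"]) |> window(every: 1m) |> count()
//
// the rewrite reads windowed counts from storage, regroups by the original
// keys plus "_start" and "_stop", and merges the partial results with sum(),
// since summing per-series counts yields the total count. "host" is an
// invented tag name.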

//
// Push Down of group aggregates.
// ReadGroupPhys |> { count }
//
type PushDownGroupAggregateRule struct{}

func (PushDownGroupAggregateRule) Name() string {
	return "PushDownGroupAggregateRule"
}

func (rule PushDownGroupAggregateRule) Pattern() plan.Pattern {
	return plan.OneOf(
		[]plan.ProcedureKind{
			universe.CountKind,
			universe.SumKind,
			universe.FirstKind,
			universe.LastKind,
			universe.MinKind,
			universe.MaxKind,
		},
		plan.Pat(ReadGroupPhysKind))
}

func (PushDownGroupAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	group := pn.Predecessors()[0].ProcedureSpec().(*ReadGroupPhysSpec)
	// Cannot push down multiple aggregates
	if len(group.AggregateMethod) > 0 {
		return pn, false, nil
	}

	if !canPushGroupedAggregate(ctx, pn) {
		return pn, false, nil
	}

	switch pn.Kind() {
	case universe.CountKind:
		// ReadGroup() -> count => ReadGroup(count)
		node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
			ReadRangePhysSpec: group.ReadRangePhysSpec,
			GroupMode:         group.GroupMode,
			GroupKeys:         group.GroupKeys,
			AggregateMethod:   universe.CountKind,
		})
		return node, true, nil
	case universe.SumKind:
		// ReadGroup() -> sum => ReadGroup(sum)
		node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
			ReadRangePhysSpec: group.ReadRangePhysSpec,
			GroupMode:         group.GroupMode,
			GroupKeys:         group.GroupKeys,
			AggregateMethod:   universe.SumKind,
		})
		return node, true, nil
	case universe.FirstKind:
		// ReadGroup() -> first => ReadGroup(first)
		node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
			ReadRangePhysSpec: group.ReadRangePhysSpec,
			GroupMode:         group.GroupMode,
			GroupKeys:         group.GroupKeys,
			AggregateMethod:   universe.FirstKind,
		})
		return node, true, nil
	case universe.LastKind:
		// ReadGroup() -> last => ReadGroup(last)
		node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
			ReadRangePhysSpec: group.ReadRangePhysSpec,
			GroupMode:         group.GroupMode,
			GroupKeys:         group.GroupKeys,
			AggregateMethod:   universe.LastKind,
		})
		return node, true, nil
	case universe.MinKind:
		// ReadGroup() -> min => ReadGroup(min)
		if feature.PushDownGroupAggregateMinMax().Enabled(ctx) {
			node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
				ReadRangePhysSpec: group.ReadRangePhysSpec,
				GroupMode:         group.GroupMode,
				GroupKeys:         group.GroupKeys,
				AggregateMethod:   universe.MinKind,
			})
			return node, true, nil
		}
	case universe.MaxKind:
		// ReadGroup() -> max => ReadGroup(max)
		if feature.PushDownGroupAggregateMinMax().Enabled(ctx) {
			node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
				ReadRangePhysSpec: group.ReadRangePhysSpec,
				GroupMode:         group.GroupMode,
				GroupKeys:         group.GroupKeys,
				AggregateMethod:   universe.MaxKind,
			})
			return node, true, nil
		}
	}
	return pn, false, nil
}
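// Illustrative note (not part of the diff): an aggregate applied directly to a
// grouped read, e.g.
//
//	from(bucket: "b") |> range(start: -1h) |> group(columns: ["host"]) |> count()
//
// collapses into a single ReadGroupAggregate node whose AggregateMethod is the
// aggregate's kind; min and max additionally require the
// PushDownGroupAggregateMinMax feature flag, per the switch above. Names are
// invented for the example.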

func canPushGroupedAggregate(ctx context.Context, pn plan.Node) bool {
	reader := GetStorageDependencies(ctx).FromDeps.Reader
	aggregator, ok := reader.(query.GroupAggregator)
	if !ok {
		return false
	}
	caps := aggregator.GetGroupCapability(ctx)
	if caps == nil {
		return false
	}
	switch pn.Kind() {
	case universe.CountKind:
		agg := pn.ProcedureSpec().(*universe.CountProcedureSpec)
		return caps.HaveCount() && len(agg.Columns) == 1 && agg.Columns[0] == execute.DefaultValueColLabel
	case universe.SumKind:
		agg := pn.ProcedureSpec().(*universe.SumProcedureSpec)
		return caps.HaveSum() && len(agg.Columns) == 1 && agg.Columns[0] == execute.DefaultValueColLabel
	case universe.FirstKind:
		agg := pn.ProcedureSpec().(*universe.FirstProcedureSpec)
		return caps.HaveFirst() && agg.Column == execute.DefaultValueColLabel
	case universe.LastKind:
		agg := pn.ProcedureSpec().(*universe.LastProcedureSpec)
		return caps.HaveLast() && agg.Column == execute.DefaultValueColLabel
	case universe.MaxKind:
		agg := pn.ProcedureSpec().(*universe.MaxProcedureSpec)
		return caps.HaveMax() && agg.Column == execute.DefaultValueColLabel
	case universe.MinKind:
		agg := pn.ProcedureSpec().(*universe.MinProcedureSpec)
		return caps.HaveMin() && agg.Column == execute.DefaultValueColLabel
	}
	return false
}

type SwitchFillImplRule struct{}

func (SwitchFillImplRule) Name() string {
	return "SwitchFillImplRule"
}

func (SwitchFillImplRule) Pattern() plan.Pattern {
	return plan.Pat(universe.FillKind, plan.Any())
}

func (r SwitchFillImplRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	if !feature.MemoryOptimizedFill().Enabled(ctx) {
		spec := pn.ProcedureSpec().Copy()
		universe.UseDeprecatedImpl(spec)
		if err := pn.ReplaceSpec(spec); err != nil {
			return nil, false, err
		}
	}
	return pn, false, nil
}

type SwitchSchemaMutationImplRule struct{}

func (SwitchSchemaMutationImplRule) Name() string {
	return "SwitchSchemaMutationImplRule"
}

func (SwitchSchemaMutationImplRule) Pattern() plan.Pattern {
	return plan.Pat(universe.SchemaMutationKind, plan.Any())
}

func (r SwitchSchemaMutationImplRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	spec, ok := pn.ProcedureSpec().(*universe.DualImplProcedureSpec)
	if !ok || spec.UseDeprecated {
		return pn, false, nil
	}

	spec.UseDeprecated = !feature.MemoryOptimizedSchemaMutation().Enabled(ctx)
	return pn, spec.UseDeprecated, nil
}

func asSchemaMutationProcedureSpec(spec plan.ProcedureSpec) *universe.SchemaMutationProcedureSpec {
	if s, ok := spec.(*universe.DualImplProcedureSpec); ok {
		spec = s.ProcedureSpec
	}
	return spec.(*universe.SchemaMutationProcedureSpec)
}

type MergeFiltersRule struct{}

func (MergeFiltersRule) Name() string {
	return universe.MergeFiltersRule{}.Name()
}

func (MergeFiltersRule) Pattern() plan.Pattern {
	return universe.MergeFiltersRule{}.Pattern()
}

func (r MergeFiltersRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
	if feature.MergedFiltersRule().Enabled(ctx) {
		return universe.MergeFiltersRule{}.Rewrite(ctx, pn)
	}
	return pn, false, nil
}

(File diff suppressed because it is too large)

@@ -9,8 +9,8 @@ import (
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/metadata"
	"github.com/influxdata/flux/plan"
	"github.com/influxdata/flux/semantic"
	platform "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/tracing"
	"github.com/influxdata/influxdb/v2/query"

@@ -20,6 +20,7 @@ import (
func init() {
	execute.RegisterSource(ReadRangePhysKind, createReadFilterSource)
	execute.RegisterSource(ReadGroupPhysKind, createReadGroupSource)
	execute.RegisterSource(ReadWindowAggregatePhysKind, createReadWindowAggregateSource)
	execute.RegisterSource(ReadTagKeysPhysKind, createReadTagKeysSource)
	execute.RegisterSource(ReadTagValuesPhysKind, createReadTagValuesSource)
}

@@ -46,7 +47,7 @@ func (s *Source) Run(ctx context.Context) {
	labelValues := s.m.getLabelValues(ctx, s.orgID, s.op)
	start := time.Now()
	var err error
	if flux.IsExperimentalTracingEnabled() {
	if flux.IsExperimentalTracingEnabled(ctx) {
		span, ctxWithSpan := tracing.StartSpanFromContextWithOperationName(ctx, "source-"+s.op)
		err = s.runner.run(ctxWithSpan)
		span.Finish()

@@ -63,14 +64,14 @@ func (s *Source) AddTransformation(t execute.Transformation) {
	s.ts = append(s.ts, t)
}

func (s *Source) Metadata() flux.Metadata {
	return flux.Metadata{
func (s *Source) Metadata() metadata.Metadata {
	return metadata.Metadata{
		"influxdb/scanned-bytes":  []interface{}{s.stats.ScannedBytes},
		"influxdb/scanned-values": []interface{}{s.stats.ScannedValues},
	}
}

func (s *Source) processTables(ctx context.Context, tables TableIterator, watermark execute.Time) error {
func (s *Source) processTables(ctx context.Context, tables query.TableIterator, watermark execute.Time) error {
	err := tables.Do(func(tbl flux.Table) error {
		return s.processTable(ctx, tbl)
	})

@@ -117,11 +118,11 @@ func (s *Source) processTable(ctx context.Context, tbl flux.Table) error {

type readFilterSource struct {
	Source
	reader Reader
	readSpec ReadFilterSpec
	reader   query.StorageReader
	readSpec query.ReadFilterSpec
}

func ReadFilterSource(id execute.DatasetID, r Reader, readSpec ReadFilterSpec, a execute.Administration) execute.Source {
func ReadFilterSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadFilterSpec, a execute.Administration) execute.Source {
	src := new(readFilterSource)

	src.id = id

@@ -181,18 +182,14 @@ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execut
		return nil, err
	}

	var filter *semantic.FunctionExpression
	if spec.FilterSet {
		filter = spec.Filter
	}
	return ReadFilterSource(
		id,
		deps.Reader,
		ReadFilterSpec{
		query.ReadFilterSpec{
			OrganizationID: orgID,
			BucketID:       bucketID,
			Bounds:         *bounds,
			Predicate:      filter,
			Predicate:      spec.Filter,
		},
		a,
	), nil

@@ -200,11 +197,11 @@ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execut

type readGroupSource struct {
	Source
	reader Reader
	readSpec ReadGroupSpec
	reader   query.StorageReader
	readSpec query.ReadGroupSpec
}

func ReadGroupSource(id execute.DatasetID, r Reader, readSpec ReadGroupSpec, a execute.Administration) execute.Source {
func ReadGroupSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadGroupSpec, a execute.Administration) execute.Source {
	src := new(readGroupSource)

	src.id = id

@@ -215,7 +212,7 @@ func ReadGroupSource(id execute.DatasetID, r Reader, readSpec ReadGroupSpec, a e

	src.m = GetStorageDependencies(a.Context()).FromDeps.Metrics
	src.orgID = readSpec.OrganizationID
	src.op = "readGroup"
	src.op = readSpec.Name()

	src.runner = src
	return src

@@ -258,21 +255,17 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute
		return nil, err
	}

	var filter *semantic.FunctionExpression
	if spec.FilterSet {
		filter = spec.Filter
	}
	return ReadGroupSource(
		id,
		deps.Reader,
		ReadGroupSpec{
			ReadFilterSpec: ReadFilterSpec{
		query.ReadGroupSpec{
			ReadFilterSpec: query.ReadFilterSpec{
				OrganizationID: orgID,
				BucketID:       bucketID,
				Bounds:         *bounds,
				Predicate:      filter,
				Predicate:      spec.Filter,
			},
			GroupMode: ToGroupMode(spec.GroupMode),
			GroupMode: query.ToGroupMode(spec.GroupMode),
			GroupKeys: spec.GroupKeys,
			AggregateMethod: spec.AggregateMethod,
		},

@@ -280,6 +273,93 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute
	), nil
}

type readWindowAggregateSource struct {
	Source
	reader   query.WindowAggregateReader
	readSpec query.ReadWindowAggregateSpec
}

func ReadWindowAggregateSource(id execute.DatasetID, r query.WindowAggregateReader, readSpec query.ReadWindowAggregateSpec, a execute.Administration) execute.Source {
	src := new(readWindowAggregateSource)

	src.id = id
	src.alloc = a.Allocator()

	src.reader = r
	src.readSpec = readSpec

	src.m = GetStorageDependencies(a.Context()).FromDeps.Metrics
	src.orgID = readSpec.OrganizationID
	src.op = readSpec.Name()

	src.runner = src
	return src
}

func (s *readWindowAggregateSource) run(ctx context.Context) error {
	stop := s.readSpec.Bounds.Stop
	tables, err := s.reader.ReadWindowAggregate(
		ctx,
		s.readSpec,
		s.alloc,
	)
	if err != nil {
		return err
	}
	return s.processTables(ctx, tables, stop)
}

func createReadWindowAggregateSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) {
	span, ctx := tracing.StartSpanFromContext(a.Context())
	defer span.Finish()

	spec := s.(*ReadWindowAggregatePhysSpec)

	bounds := a.StreamContext().Bounds()
	if bounds == nil {
		return nil, &flux.Error{
			Code: codes.Internal,
			Msg:  "nil bounds passed to from",
		}
	}

	deps := GetStorageDependencies(a.Context()).FromDeps
	reader := deps.Reader.(query.WindowAggregateReader)

	req := query.RequestFromContext(a.Context())
	if req == nil {
		return nil, &flux.Error{
			Code: codes.Internal,
			Msg:  "missing request on context",
		}
	}

	orgID := req.OrganizationID
	bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup)
	if err != nil {
		return nil, err
	}

	return ReadWindowAggregateSource(
		id,
		reader,
		query.ReadWindowAggregateSpec{
			ReadFilterSpec: query.ReadFilterSpec{
				OrganizationID: orgID,
				BucketID:       bucketID,
				Bounds:         *bounds,
				Predicate:      spec.Filter,
			},
			WindowEvery: spec.WindowEvery,
			Offset:      spec.Offset,
			Aggregates:  spec.Aggregates,
			CreateEmpty: spec.CreateEmpty,
			TimeColumn:  spec.TimeColumn,
		},
		a,
	), nil
}

func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
	span, ctx := tracing.StartSpanFromContext(a.Context())
	defer span.Finish()

@@ -297,21 +377,16 @@ func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID,
		return nil, err
	}

	var filter *semantic.FunctionExpression
	if spec.FilterSet {
		filter = spec.Filter
	}

	bounds := a.StreamContext().Bounds()
	return ReadTagKeysSource(
		dsid,
		deps.Reader,
		ReadTagKeysSpec{
			ReadFilterSpec: ReadFilterSpec{
		query.ReadTagKeysSpec{
			ReadFilterSpec: query.ReadFilterSpec{
				OrganizationID: orgID,
				BucketID:       bucketID,
				Bounds:         *bounds,
				Predicate:      filter,
				Predicate:      spec.Filter,
			},
		},
		a,

@@ -321,11 +396,11 @@ func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID,
type readTagKeysSource struct {
	Source

	reader Reader
	readSpec ReadTagKeysSpec
	reader   query.StorageReader
	readSpec query.ReadTagKeysSpec
}

func ReadTagKeysSource(id execute.DatasetID, r Reader, readSpec ReadTagKeysSpec, a execute.Administration) execute.Source {
func ReadTagKeysSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadTagKeysSpec, a execute.Administration) execute.Source {
	src := &readTagKeysSource{
		reader: r,
		readSpec: readSpec,

@@ -366,21 +441,16 @@ func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID
		return nil, err
	}

	var filter *semantic.FunctionExpression
	if spec.FilterSet {
		filter = spec.Filter
	}

	bounds := a.StreamContext().Bounds()
	return ReadTagValuesSource(
		dsid,
		deps.Reader,
		ReadTagValuesSpec{
			ReadFilterSpec: ReadFilterSpec{
		query.ReadTagValuesSpec{
			ReadFilterSpec: query.ReadFilterSpec{
				OrganizationID: orgID,
				BucketID:       bucketID,
				Bounds:         *bounds,
				Predicate:      filter,
				Predicate:      spec.Filter,
			},
			TagKey: spec.TagKey,
		},

@@ -391,11 +461,11 @@ func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID
type readTagValuesSource struct {
	Source

	reader Reader
	readSpec ReadTagValuesSpec
	reader   query.StorageReader
	readSpec query.ReadTagValuesSpec
}

func ReadTagValuesSource(id execute.DatasetID, r Reader, readSpec ReadTagValuesSpec, a execute.Administration) execute.Source {
func ReadTagValuesSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadTagValuesSpec, a execute.Administration) execute.Source {
	src := &readTagValuesSource{
		reader: r,
		readSpec: readSpec,

@@ -0,0 +1,10 @@
package influxdb

import (
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/plan"
)

func CreateReadWindowAggregateSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) {
	return createReadWindowAggregateSource(s, id, a)
}

@@ -5,13 +5,18 @@ import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/dependencies/dependenciestest"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/execute/executetest"
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/plan"
	"github.com/influxdata/flux/stdlib/universe"
	platform "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/prom/promtest"
	"github.com/influxdata/influxdb/v2/mock"
	"github.com/influxdata/influxdb/v2/query"
	"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
	"github.com/influxdata/influxdb/v2/tsdb/cursors"
	"github.com/influxdata/influxdb/v2/uuid"
@@ -32,19 +37,19 @@ func (mockTableIterator) Statistics() cursors.CursorStats {
type mockReader struct {
}

func (mockReader) ReadFilter(ctx context.Context, spec influxdb.ReadFilterSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) {
func (mockReader) ReadFilter(ctx context.Context, spec query.ReadFilterSpec, alloc *memory.Allocator) (query.TableIterator, error) {
	return &mockTableIterator{}, nil
}

func (mockReader) ReadGroup(ctx context.Context, spec influxdb.ReadGroupSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) {
func (mockReader) ReadGroup(ctx context.Context, spec query.ReadGroupSpec, alloc *memory.Allocator) (query.TableIterator, error) {
	return &mockTableIterator{}, nil
}

func (mockReader) ReadTagKeys(ctx context.Context, spec influxdb.ReadTagKeysSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) {
func (mockReader) ReadTagKeys(ctx context.Context, spec query.ReadTagKeysSpec, alloc *memory.Allocator) (query.TableIterator, error) {
	return &mockTableIterator{}, nil
}

func (mockReader) ReadTagValues(ctx context.Context, spec influxdb.ReadTagValuesSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) {
func (mockReader) ReadTagValues(ctx context.Context, spec query.ReadTagValuesSpec, alloc *memory.Allocator) (query.TableIterator, error) {
	return &mockTableIterator{}, nil
}

@@ -52,7 +57,8 @@ func (mockReader) Close() {
}

type mockAdministration struct {
	Ctx context.Context
	Ctx          context.Context
	StreamBounds *execute.Bounds
}

func (a mockAdministration) Context() context.Context {
@@ -63,8 +69,12 @@ func (mockAdministration) ResolveTime(qt flux.Time) execute.Time {
	return 0
}

func (mockAdministration) StreamContext() execute.StreamContext {
	return nil
func (a mockAdministration) StreamContext() execute.StreamContext {
	return a
}

func (a mockAdministration) Bounds() *execute.Bounds {
	return a.StreamBounds
}

func (mockAdministration) Allocator() *memory.Allocator {
@@ -110,7 +120,7 @@ func TestMetrics(t *testing.T) {
	rfs := influxdb.ReadFilterSource(
		execute.DatasetID(uuid.FromTime(time.Now())),
		&mockReader{},
		influxdb.ReadFilterSpec{
		query.ReadFilterSpec{
			OrganizationID: *orgID,
		},
		a,
@@ -129,3 +139,150 @@ func TestMetrics(t *testing.T) {
		t.Fatalf("expected sample count of %v, got %v", want, got)
	}
}

type TableIterator struct {
	Tables []*executetest.Table
}

func (t *TableIterator) Do(f func(flux.Table) error) error {
	for _, table := range t.Tables {
		if err := f(table); err != nil {
			return err
		}
	}
	return nil
}

func (t *TableIterator) Statistics() cursors.CursorStats {
	return cursors.CursorStats{}
}

func TestReadWindowAggregateSource(t *testing.T) {
	t.Skip("test panics in CI; issue: https://github.com/influxdata/influxdb/issues/17847")

	orgID, bucketID := platform.ID(1), platform.ID(2)
	executetest.RunSourceHelper(t,
		[]*executetest.Table{
			{
				ColMeta: []flux.ColMeta{
					{Label: "_time", Type: flux.TTime},
					{Label: "_measurement", Type: flux.TString},
					{Label: "_field", Type: flux.TString},
					{Label: "host", Type: flux.TString},
					{Label: "_value", Type: flux.TFloat},
				},
				KeyCols: []string{"_measurement", "_field", "host"},
				Data: [][]interface{}{
					{execute.Time(0), "cpu", "usage_user", "server01", 2.0},
					{execute.Time(10), "cpu", "usage_user", "server01", 1.5},
					{execute.Time(20), "cpu", "usage_user", "server01", 5.0},
				},
			},
			{
				ColMeta: []flux.ColMeta{
					{Label: "_time", Type: flux.TTime},
					{Label: "_measurement", Type: flux.TString},
					{Label: "_field", Type: flux.TString},
					{Label: "host", Type: flux.TString},
					{Label: "_value", Type: flux.TFloat},
				},
				KeyCols: []string{"_measurement", "_field", "host"},
				Data: [][]interface{}{
					{execute.Time(0), "cpu", "usage_system", "server01", 8.0},
					{execute.Time(10), "cpu", "usage_system", "server01", 3.0},
					{execute.Time(20), "cpu", "usage_system", "server01", 6.0},
				},
			},
		},
		nil,
		func(id execute.DatasetID) execute.Source {
			pspec := &influxdb.ReadWindowAggregatePhysSpec{
				ReadRangePhysSpec: influxdb.ReadRangePhysSpec{
					BucketID: bucketID.String(),
				},
				WindowEvery: 10,
				Aggregates: []plan.ProcedureKind{
					universe.SumKind,
				},
			}
			reader := &mock.WindowAggregateStoreReader{
				ReadWindowAggregateFn: func(ctx context.Context, spec query.ReadWindowAggregateSpec, alloc *memory.Allocator) (query.TableIterator, error) {
					if want, got := orgID, spec.OrganizationID; want != got {
						t.Errorf("unexpected organization id -want/+got:\n\t- %s\n\t+ %s", want, got)
					}
					if want, got := bucketID, spec.BucketID; want != got {
						t.Errorf("unexpected bucket id -want/+got:\n\t- %s\n\t+ %s", want, got)
					}
					if want, got := (execute.Bounds{Start: 0, Stop: 30}), spec.Bounds; want != got {
						t.Errorf("unexpected bounds -want/+got:\n%s", cmp.Diff(want, got))
					}
					if want, got := int64(10), spec.WindowEvery; want != got {
						t.Errorf("unexpected window every value -want/+got:\n\t- %d\n\t+ %d", want, got)
					}
					if want, got := []plan.ProcedureKind{universe.SumKind}, spec.Aggregates; !cmp.Equal(want, got) {
						t.Errorf("unexpected aggregates -want/+got:\n%s", cmp.Diff(want, got))
					}
					return &TableIterator{
						Tables: []*executetest.Table{
							{
								ColMeta: []flux.ColMeta{
									{Label: "_time", Type: flux.TTime},
									{Label: "_measurement", Type: flux.TString},
									{Label: "_field", Type: flux.TString},
									{Label: "host", Type: flux.TString},
									{Label: "_value", Type: flux.TFloat},
								},
								KeyCols: []string{"_measurement", "_field", "host"},
								Data: [][]interface{}{
									{execute.Time(0), "cpu", "usage_user", "server01", 2.0},
									{execute.Time(10), "cpu", "usage_user", "server01", 1.5},
									{execute.Time(20), "cpu", "usage_user", "server01", 5.0},
								},
							},
							{
								ColMeta: []flux.ColMeta{
									{Label: "_time", Type: flux.TTime},
									{Label: "_measurement", Type: flux.TString},
									{Label: "_field", Type: flux.TString},
									{Label: "host", Type: flux.TString},
									{Label: "_value", Type: flux.TFloat},
								},
								KeyCols: []string{"_measurement", "_field", "host"},
								Data: [][]interface{}{
									{execute.Time(0), "cpu", "usage_system", "server01", 8.0},
									{execute.Time(10), "cpu", "usage_system", "server01", 3.0},
									{execute.Time(20), "cpu", "usage_system", "server01", 6.0},
								},
							},
						},
					}, nil
				},
			}

			metrics := influxdb.NewMetrics(nil)
			deps := influxdb.StorageDependencies{
				FromDeps: influxdb.FromDependencies{
					Reader:  reader,
					Metrics: metrics,
				},
			}
			ctx := deps.Inject(context.Background())
			ctx = query.ContextWithRequest(ctx, &query.Request{
				OrganizationID: orgID,
			})
			a := mockAdministration{
				Ctx: ctx,
				StreamBounds: &execute.Bounds{
					Start: execute.Time(0),
					Stop:  execute.Time(30),
				},
			}

			s, err := influxdb.CreateReadWindowAggregateSource(pspec, id, a)
			if err != nil {
				t.Fatal(err)
			}
			return s
		},
	)
}

@@ -2,15 +2,10 @@ package influxdb

import (
	"context"
	"fmt"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/semantic"
	platform "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/prom"
	"github.com/influxdata/influxdb/v2/tsdb/cursors"
	"github.com/influxdata/influxdb/v2/query"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
)
@@ -22,16 +17,14 @@ type HostLookup interface {

type BucketLookup interface {
	Lookup(ctx context.Context, orgID platform.ID, name string) (platform.ID, bool)
	LookupName(ctx context.Context, orgID platform.ID, id platform.ID) string
}

type OrganizationLookup interface {
	Lookup(ctx context.Context, name string) (platform.ID, bool)
	LookupName(ctx context.Context, id platform.ID) string
}

type FromDependencies struct {
	Reader             Reader
	Reader             query.StorageReader
	BucketLookup       BucketLookup
	OrganizationLookup OrganizationLookup
	Metrics            *metrics
@@ -79,83 +72,3 @@ func (l StaticLookup) Watch() <-chan struct{} {
	// A nil channel always blocks, since hosts never change this is appropriate.
	return nil
}

type GroupMode int

const (
	// GroupModeNone merges all series into a single group.
	GroupModeNone GroupMode = iota
	// GroupModeBy produces a table for each unique value of the specified GroupKeys.
	GroupModeBy
)

// ToGroupMode accepts the group mode from Flux and produces the appropriate storage group mode.
func ToGroupMode(fluxMode flux.GroupMode) GroupMode {
	switch fluxMode {
	case flux.GroupModeNone:
		return GroupModeNone
	case flux.GroupModeBy:
		return GroupModeBy
	default:
		panic(fmt.Sprint("unknown group mode: ", fluxMode))
	}
}

type ReadFilterSpec struct {
	OrganizationID platform.ID
	BucketID       platform.ID

	Database        string
	RetentionPolicy string

	Bounds execute.Bounds

	Predicate *semantic.FunctionExpression
}

type ReadGroupSpec struct {
	ReadFilterSpec

	GroupMode GroupMode
	GroupKeys []string

	AggregateMethod string
}

type ReadTagKeysSpec struct {
	ReadFilterSpec
}

type ReadTagValuesSpec struct {
	ReadFilterSpec
	TagKey string
}

type Reader interface {
	ReadFilter(ctx context.Context, spec ReadFilterSpec, alloc *memory.Allocator) (TableIterator, error)
	ReadGroup(ctx context.Context, spec ReadGroupSpec, alloc *memory.Allocator) (TableIterator, error)

	ReadTagKeys(ctx context.Context, spec ReadTagKeysSpec, alloc *memory.Allocator) (TableIterator, error)
	ReadTagValues(ctx context.Context, spec ReadTagValuesSpec, alloc *memory.Allocator) (TableIterator, error)

	Close()
}

// TableIterator is a table iterator that also keeps track of cursor statistics from the storage engine.
type TableIterator interface {
	flux.TableIterator
	Statistics() cursors.CursorStats
}

type ReadWindowAggregateSpec struct {
	ReadFilterSpec
	// TODO(issue #17784): add attributes for the window aggregate spec.
}

// WindowAggregateReader implements the WindowAggregate capability.
type WindowAggregateReader interface {
	// HasWindowAggregateCapability will test if this Reader source supports the ReadWindowAggregate capability.
	HasWindowAggregateCapability(ctx context.Context) bool

	// ReadWindowAggregate will read a table using the WindowAggregate method.
	ReadWindowAggregate(ctx context.Context, spec ReadWindowAggregateSpec, alloc *memory.Allocator) (TableIterator, error)
}

@@ -1,4 +1,4 @@
package storageflux
package influxdb

import (
	"fmt"
@@ -10,12 +10,10 @@ import (
	"github.com/pkg/errors"
)

func toStoragePredicate(f *semantic.FunctionExpression) (*datatypes.Predicate, error) {
	if f.Block.Parameters == nil || len(f.Block.Parameters.List) != 1 {
		return nil, errors.New("storage predicate functions must have exactly one parameter")
	}

	root, err := toStoragePredicateHelper(f.Block.Body.(semantic.Expression), f.Block.Parameters.List[0].Key.Name)
// ToStoragePredicate will convert a FunctionExpression into a predicate that can be
// sent down to the storage layer.
func ToStoragePredicate(n semantic.Expression, objectName string) (*datatypes.Predicate, error) {
	root, err := toStoragePredicateHelper(n, objectName)
	if err != nil {
		return nil, err
	}
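
The exported ToStoragePredicate no longer inspects the function literal itself; callers unpack the body expression and parameter name first. A minimal hedged sketch of the new calling convention, assuming fn is a *semantic.FunctionExpression such as (r) => r._measurement == "cpu" and bodyExpr is its body as a semantic.Expression (both names are illustrative, not part of this diff):

	param := fn.Parameters.List[0].Key.Name
	pred, err := ToStoragePredicate(bodyExpr, param)
	if err != nil {
		return nil, err
	}
	// pred is a *datatypes.Predicate ready to attach to a storage read request.
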
@@ -25,6 +23,39 @@ func toStoragePredicate(f *semantic.FunctionExpression) (*datatypes.Predicate, e
	}, nil
}

func mergePredicates(op ast.LogicalOperatorKind, predicates ...*datatypes.Predicate) (*datatypes.Predicate, error) {
	if len(predicates) == 0 {
		return nil, errors.New("at least one predicate is needed")
	}

	var value datatypes.Node_Logical
	switch op {
	case ast.AndOperator:
		value = datatypes.LogicalAnd
	case ast.OrOperator:
		value = datatypes.LogicalOr
	default:
		return nil, fmt.Errorf("unknown logical operator %v", op)
	}

	// Nest the predicates backwards. This way we get a tree like this:
	// a AND (b AND c)
	root := predicates[len(predicates)-1].Root
	for i := len(predicates) - 2; i >= 0; i-- {
		root = &datatypes.Node{
			NodeType: datatypes.NodeTypeLogicalExpression,
			Value:    &datatypes.Node_Logical_{Logical: value},
			Children: []*datatypes.Node{
				predicates[i].Root,
				root,
			},
		}
	}
	return &datatypes.Predicate{
		Root: root,
	}, nil
}

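To make the nesting comment above concrete, a hedged usage sketch; p1, p2, and p3 stand for *datatypes.Predicate values already produced elsewhere, e.g. by ToStoragePredicate:

	// Merging three predicates with AND yields the right-nested tree
	// p1 AND (p2 AND p3).
	combined, err := mergePredicates(ast.AndOperator, p1, p2, p3)
	if err != nil {
		return nil, err
	}
	// combined.Root is a logical node whose children are p1.Root and the
	// node that in turn combines p2.Root and p3.Root.
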
func toStoragePredicateHelper(n semantic.Expression, objectName string) (*datatypes.Node, error) {
	switch n := n.(type) {
	case *semantic.LogicalExpression:

@@ -2,7 +2,6 @@ package influxdb

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"time"

@@ -13,6 +12,7 @@ import (
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/interpreter"
	"github.com/influxdata/flux/plan"
	"github.com/influxdata/flux/runtime"
	"github.com/influxdata/flux/semantic"
	"github.com/influxdata/flux/stdlib/influxdata/influxdb"
	"github.com/influxdata/flux/stdlib/kafka"
@@ -24,13 +24,17 @@ import (
	"github.com/influxdata/influxdb/v2/storage"
)

// ToKind is the kind for the `to` flux function
const ToKind = influxdb.ToKind
const (
	// ToKind is the kind for the `to` flux function
	ToKind = influxdb.ToKind

// TODO(jlapacik) remove this once we have execute.DefaultFieldColLabel
const defaultFieldColLabel = "_field"
const DefaultMeasurementColLabel = "_measurement"
const DefaultBufferSize = 1 << 14
	// TODO(jlapacik) remove this once we have execute.DefaultFieldColLabel
	defaultFieldColLabel       = "_field"
	DefaultMeasurementColLabel = "_measurement"
	DefaultBufferSize          = 1 << 14

	toOp = "influxdata/influxdb/to"
)

// ToOpSpec is the flux.OperationSpec for the `to` flux function.
type ToOpSpec struct {
@@ -47,29 +51,8 @@ type ToOpSpec struct {
}

func init() {
	toSignature := flux.FunctionSignature(
		map[string]semantic.PolyType{
			"bucket":            semantic.String,
			"bucketID":          semantic.String,
			"org":               semantic.String,
			"orgID":             semantic.String,
			"host":              semantic.String,
			"token":             semantic.String,
			"timeColumn":        semantic.String,
			"measurementColumn": semantic.String,
			"tagColumns":        semantic.Array,
			"fieldFn": semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
				Parameters: map[string]semantic.PolyType{
					"r": semantic.Tvar(1),
				},
				Required: semantic.LabelSet{"r"},
				Return:   semantic.Tvar(2),
			}),
		},
		[]string{},
	)

	flux.ReplacePackageValue("influxdata/influxdb", "to", flux.FunctionValueWithSideEffect(ToKind, createToOpSpec, toSignature))
	toSignature := runtime.MustLookupBuiltinType("influxdata/influxdb", ToKind)
	runtime.ReplacePackageValue("influxdata/influxdb", "to", flux.MustValue(flux.FunctionValueWithSideEffect(ToKind, createToOpSpec, toSignature)))
	flux.RegisterOpSpec(ToKind, func() flux.OperationSpec { return &ToOpSpec{} })
	plan.RegisterProcedureSpecWithSideEffect(ToKind, newToProcedure, ToKind)
	execute.RegisterTransformation(ToKind, createToTransformation)
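
The signature for `to` is now looked up from the compiled stdlib rather than assembled by hand in Go. A hedged sketch of the same registration pattern for some other builtin; "myFn" and createMyFnOpSpec are hypothetical names, not part of this change:

	sig := runtime.MustLookupBuiltinType("influxdata/influxdb", "myFn")
	runtime.ReplacePackageValue("influxdata/influxdb", "myFn",
		flux.MustValue(flux.FunctionValue("myFn", createMyFnOpSpec, sig)))
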
@@ -257,8 +240,15 @@ func createToTransformation(id execute.DatasetID, mode execute.AccumulationMode,
	}
	cache := execute.NewTableBuilderCache(a.Allocator())
	d := execute.NewDataset(id, mode, cache)
	deps := GetStorageDependencies(a.Context()).ToDeps
	t, err := NewToTransformation(a.Context(), d, cache, s, deps)
	deps := GetStorageDependencies(a.Context())
	if deps == (StorageDependencies{}) {
		return nil, nil, &flux.Error{
			Code: codes.Unimplemented,
			Msg:  "cannot return storage dependencies; storage dependencies are unimplemented",
		}
	}
	toDeps := deps.ToDeps
	t, err := NewToTransformation(a.Context(), d, cache, s, toDeps)
	if err != nil {
		return nil, nil, err
	}
@@ -287,13 +277,10 @@ func (t *ToTransformation) RetractTable(id execute.DatasetID, key flux.GroupKey)
// NewToTransformation returns a new *ToTransformation with the appropriate fields set.
func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.TableBuilderCache, toSpec *ToProcedureSpec, deps ToDependencies) (x *ToTransformation, err error) {
	var fn *execute.RowMapFn
	//var err error
	spec := toSpec.Spec
	var bucketID, orgID *platform.ID
	if spec.FieldFn.Fn != nil {
		if fn, err = execute.NewRowMapFn(spec.FieldFn.Fn, compiler.ToScope(spec.FieldFn.Scope)); err != nil {
			return nil, err
		}
		fn = execute.NewRowMapFn(spec.FieldFn.Fn, compiler.ToScope(spec.FieldFn.Scope))
	}
	// Get organization ID
	if spec.Org != "" {
@@ -313,7 +300,11 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T
		// No org or orgID provided as an arg, use the orgID from the context
		req := query.RequestFromContext(ctx)
		if req == nil {
			return nil, errors.New("missing request on context")
			return nil, &platform.Error{
				Code: platform.EInternal,
				Msg:  "missing request on context",
				Op:   toOp,
			}
		}
		orgID = &req.OrganizationID
	}
@@ -360,23 +351,26 @@ func (t *ToTransformation) Process(id execute.DatasetID, tbl flux.Table) error {
	if t.implicitTagColumns {

		// If no tag columns are specified, by default we exclude
		// _field and _value from being tag columns.
		// _field, _value and _measurement from being tag columns.
		excludeColumns := map[string]bool{
			execute.DefaultValueColLabel: true,
			defaultFieldColLabel:         true,
			DefaultMeasurementColLabel:   true,
		}

		// If a field function is specified then we exclude any column that
		// is referenced in the function expression from being a tag column.
		if t.spec.Spec.FieldFn.Fn != nil {
			recordParam := t.spec.Spec.FieldFn.Fn.Block.Parameters.List[0].Key.Name
			recordParam := t.spec.Spec.FieldFn.Fn.Parameters.List[0].Key.Name
			exprNode := t.spec.Spec.FieldFn.Fn
			colVisitor := newFieldFunctionVisitor(recordParam, tbl.Cols())

			// Walk the field function expression and record which columns
			// are referenced. None of these columns will be used as tag columns.
			semantic.Walk(colVisitor, exprNode)
			excludeColumns = colVisitor.captured
			for k, v := range colVisitor.captured {
				excludeColumns[k] = v
			}
		}

		addTagsFromTable(t.spec.Spec, tbl, excludeColumns)
@@ -471,13 +465,25 @@ type ToDependencies struct {
// Validate returns an error if any required field is unset.
func (d ToDependencies) Validate() error {
	if d.BucketLookup == nil {
		return errors.New("missing bucket lookup dependency")
		return &platform.Error{
			Code: platform.EInternal,
			Msg:  "missing bucket lookup dependency",
			Op:   toOp,
		}
	}
	if d.OrganizationLookup == nil {
		return errors.New("missing organization lookup dependency")
		return &platform.Error{
			Code: platform.EInternal,
			Msg:  "missing organization lookup dependency",
			Op:   toOp,
		}
	}
	if d.PointsWriter == nil {
		return errors.New("missing points writer dependency")
		return &platform.Error{
			Code: platform.EInternal,
			Msg:  "missing points writer dependency",
			Op:   toOp,
		}
	}
	return nil
}
@@ -540,8 +546,10 @@ func writeTable(ctx context.Context, t *ToTransformation, tbl flux.Table) (err e
	}

	// prepare field function if applicable and record the number of values to write per row
	var fn *execute.RowMapPreparedFn
	if spec.FieldFn.Fn != nil {
		if err = t.fn.Prepare(columns); err != nil {
		var err error
		if fn, err = t.fn.Prepare(columns); err != nil {
			return err
		}

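The local fn variable reflects a split in the row-map API: construction is now infallible, while Prepare returns a separate prepared function that is evaluated per row. A hedged sketch of the assumed lifecycle (fnExpr, scope, tbl, rowIdx, and colReader are illustrative names):

	mapFn := execute.NewRowMapFn(fnExpr, scope) // construct once; no error here
	prepared, err := mapFn.Prepare(tbl.Cols())  // bind to this table's columns
	if err != nil {
		return err
	}
	obj, err := prepared.Eval(ctx, rowIdx, colReader) // one row -> values.Object
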
@@ -580,7 +588,11 @@ func writeTable(ctx context.Context, t *ToTransformation, tbl flux.Table) (err e
			pointTime = valueTime.Time().Time()
		case isTag[j]:
			if col.Type != flux.TString {
				return errors.New("invalid type for tag column")
				return &platform.Error{
					Code: platform.EInvalid,
					Msg:  "invalid type for tag column",
					Op:   toOp,
				}
			}
			// TODO(docmerlin): instead of doing this sort of thing, it would be nice if we had a way that allocated a lot less.
			kv = append(kv, []byte(col.Label), er.Strings(j).Value(i))
@@ -602,11 +614,11 @@ func writeTable(ctx context.Context, t *ToTransformation, tbl flux.Table) (err e
		}

		var fieldValues values.Object
		if spec.FieldFn.Fn == nil {
		if fn == nil {
			if fieldValues, err = defaultFieldMapping(er, i); err != nil {
				return err
			}
		} else if fieldValues, err = t.fn.Eval(t.Ctx, i, er); err != nil {
		} else if fieldValues, err = fn.Eval(t.Ctx, i, er); err != nil {
			return err
		}

@@ -615,7 +627,7 @@ func writeTable(ctx context.Context, t *ToTransformation, tbl flux.Table) (err e
			fields[k] = nil
			return
		}
		switch v.Type() {
		switch v.Type().Nature() {
		case semantic.Float:
			fields[k] = v.Float()
		case semantic.Int:
@@ -680,10 +692,14 @@ func defaultFieldMapping(er flux.ColReader, row int) (values.Object, error) {
	}

	value := execute.ValueForRow(er, row, valueColumnIdx)

	fieldValueMapping := values.NewObject()
	field := execute.ValueForRow(er, row, fieldColumnIdx)
	props := []semantic.PropertyType{
		{
			Key:   []byte(field.Str()),
			Value: value.Type(),
		},
	}
	fieldValueMapping := values.NewObject(semantic.NewObjectType(props))
	fieldValueMapping.Set(field.Str(), value)

	return fieldValueMapping, nil
}

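values.NewObject in the upgraded Flux takes the object's type up front, which is why the property list is assembled before Set. A small sketch under that assumption (the field name and semantic.BasicFloat are illustrative):

	props := []semantic.PropertyType{
		{Key: []byte("temperature"), Value: semantic.BasicFloat},
	}
	obj := values.NewObject(semantic.NewObjectType(props))
	obj.Set("temperature", values.NewFloat(21.5))
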
@@ -2,24 +2,19 @@ package influxdb_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/dependencies/dependenciestest"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/execute/executetest"
	"github.com/influxdata/flux/interpreter"
	"github.com/influxdata/flux/querytest"
	"github.com/influxdata/flux/semantic"
	"github.com/influxdata/flux/values/valuestest"
	platform "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/mock"
	"github.com/influxdata/influxdb/v2/models"
	_ "github.com/influxdata/influxdb/v2/query/builtin"
	pquerytest "github.com/influxdata/influxdb/v2/query/querytest"
	"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
)

@@ -31,9 +26,9 @@ func TestTo_Query(t *testing.T) {
			Want: &flux.Spec{
				Operations: []*flux.Operation{
					{
						ID: "influxDBFrom0",
						ID: "from0",
						Spec: &influxdb.FromOpSpec{
							Bucket: "mydb",
							Bucket: influxdb.NameOrID{Name: "mydb"},
						},
					},
					{
@@ -46,35 +41,14 @@ func TestTo_Query(t *testing.T) {
						TimeColumn:        execute.DefaultTimeColLabel,
						MeasurementColumn: influxdb.DefaultMeasurementColLabel,
						FieldFn: interpreter.ResolvedFunction{
							Scope: valuestest.NowScope(),
							Fn: &semantic.FunctionExpression{
								Block: &semantic.FunctionBlock{
									Parameters: &semantic.FunctionParameters{
										List: []*semantic.FunctionParameter{
											{
												Key: &semantic.Identifier{Name: "r"},
											},
										},
									},
									Body: &semantic.ObjectExpression{
										Properties: []*semantic.Property{
											{
												Key: &semantic.Identifier{Name: "col"},
												Value: &semantic.MemberExpression{
													Object:   &semantic.IdentifierExpression{Name: "r"},
													Property: "col",
												},
											},
										},
									},
								},
							},
							Scope: valuestest.Scope(),
							Fn:    executetest.FunctionExpression(t, `(r) => ({col: r.col})`),
						},
					},
				},
			},
			Edges: []flux.Edge{
				{Parent: "influxDBFrom0", Child: "to1"},
				{Parent: "from0", Child: "to1"},
			},
		},
	},

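Every hand-built semantic AST literal in these tests collapses into executetest.FunctionExpression, which compiles a Flux source string on behalf of the test and fails it on a compile error. A hedged sketch of the pattern (the lambda body is illustrative):

	fn := executetest.FunctionExpression(t, `(r) => ({col: r.col})`)
	resolved := interpreter.ResolvedFunction{
		Scope: valuestest.Scope(),
		Fn:    fn,
	}
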
@@ -88,49 +62,6 @@ func TestTo_Query(t *testing.T) {
	}
}

func TestToOpSpec_BucketsAccessed(t *testing.T) {
	bucketName := "my_bucket"
	bucketIDString := "ddddccccbbbbaaaa"
	bucketID, err := platform.IDFromString(bucketIDString)
	if err != nil {
		t.Fatal(err)
	}
	orgName := "my_org"
	orgIDString := "aaaabbbbccccdddd"
	orgID, err := platform.IDFromString(orgIDString)
	if err != nil {
		t.Fatal(err)
	}
	tests := []pquerytest.BucketsAccessedTestCase{
		{
			Name:             "from() with bucket and to with org and bucket",
			Raw:              fmt.Sprintf(`from(bucket:"%s") |> to(bucket:"%s", org:"%s")`, bucketName, bucketName, orgName),
			WantReadBuckets:  &[]platform.BucketFilter{{Name: &bucketName}},
			WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, Org: &orgName}},
		},
		{
			Name:             "from() with bucket and to with orgID and bucket",
			Raw:              fmt.Sprintf(`from(bucket:"%s") |> to(bucket:"%s", orgID:"%s")`, bucketName, bucketName, orgIDString),
			WantReadBuckets:  &[]platform.BucketFilter{{Name: &bucketName}},
			WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, OrganizationID: orgID}},
		},
		{
			Name:             "from() with bucket and to with orgID and bucketID",
			Raw:              fmt.Sprintf(`from(bucket:"%s") |> to(bucketID:"%s", orgID:"%s")`, bucketName, bucketIDString, orgIDString),
			WantReadBuckets:  &[]platform.BucketFilter{{Name: &bucketName}},
			WantWriteBuckets: &[]platform.BucketFilter{{ID: bucketID, OrganizationID: orgID}},
		},
	}

	for _, tc := range tests {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			t.Parallel()
			pquerytest.BucketsAccessedTestHelper(t, tc)
		})
	}
}

func TestTo_Process(t *testing.T) {
	type wanted struct {
		result *mock.PointsWriter
@@ -413,29 +344,8 @@ m,tag1=c,tag2=ee _value=4 41`),
					TimeColumn:        "_time",
					MeasurementColumn: "_measurement",
					FieldFn: interpreter.ResolvedFunction{
						Scope: valuestest.NowScope(),
						Fn: &semantic.FunctionExpression{
							Block: &semantic.FunctionBlock{
								Parameters: &semantic.FunctionParameters{
									List: []*semantic.FunctionParameter{
										{
											Key: &semantic.Identifier{Name: "r"},
										},
									},
								},
								Body: &semantic.ObjectExpression{
									Properties: []*semantic.Property{
										{
											Key: &semantic.Identifier{Name: "temperature"},
											Value: &semantic.MemberExpression{
												Object:   &semantic.IdentifierExpression{Name: "r"},
												Property: "temperature",
											},
										},
									},
								},
							},
						},
						Scope: valuestest.Scope(),
						Fn:    executetest.FunctionExpression(t, `(r) => ({temperature: r.temperature})`),
					},
				},
			},
@@ -486,74 +396,29 @@ c temperature=4 41`),
					TimeColumn:        "_time",
					MeasurementColumn: "tag",
					FieldFn: interpreter.ResolvedFunction{
						Scope: valuestest.NowScope(),
						Fn: &semantic.FunctionExpression{
							Block: &semantic.FunctionBlock{
								Parameters: &semantic.FunctionParameters{
									List: []*semantic.FunctionParameter{
										{
											Key: &semantic.Identifier{Name: "r"},
										},
									},
								},
								Body: &semantic.ObjectExpression{
									Properties: []*semantic.Property{
										{
											Key: &semantic.Identifier{Name: "day"},
											Value: &semantic.MemberExpression{
												Object:   &semantic.IdentifierExpression{Name: "r"},
												Property: "day",
											},
										},
										{
											Key: &semantic.Identifier{Name: "temperature"},
											Value: &semantic.MemberExpression{
												Object:   &semantic.IdentifierExpression{Name: "r"},
												Property: "temperature",
											},
										},
										{
											Key: &semantic.Identifier{Name: "humidity"},
											Value: &semantic.MemberExpression{
												Object:   &semantic.IdentifierExpression{Name: "r"},
												Property: "humidity",
											},
										},
										{
											Key: &semantic.Identifier{Name: "ratio"},
											Value: &semantic.BinaryExpression{
												Operator: ast.DivisionOperator,
												Left: &semantic.MemberExpression{
													Object:   &semantic.IdentifierExpression{Name: "r"},
													Property: "temperature",
												},
												Right: &semantic.MemberExpression{
													Object:   &semantic.IdentifierExpression{Name: "r"},
													Property: "humidity",
												},
											},
										},
									},
								},
							},
						},
						Scope: valuestest.Scope(),
						Fn:    executetest.FunctionExpression(t, `(r) => ({day: r.day, temperature: r.temperature, humidity: r.humidity, ratio: r.temperature / r.humidity})`),
					},
				},
			},
			data: []flux.Table{executetest.MustCopyTable(&executetest.Table{
				ColMeta: []flux.ColMeta{
					{Label: "_measurement", Type: flux.TString},
					{Label: "_field", Type: flux.TString},
					{Label: "_time", Type: flux.TTime},
					{Label: "day", Type: flux.TString},
					{Label: "tag", Type: flux.TString},
					{Label: "temperature", Type: flux.TFloat},
					{Label: "humidity", Type: flux.TFloat},
					{Label: "_value", Type: flux.TString},
				},
				KeyCols: []string{"_measurement", "_field"},
				Data: [][]interface{}{
					{execute.Time(11), "Monday", "a", 2.0, 1.0},
					{execute.Time(21), "Tuesday", "a", 2.0, 2.0},
					{execute.Time(21), "Wednesday", "b", 1.0, 4.0},
					{execute.Time(31), "Thursday", "a", 3.0, 3.0},
					{execute.Time(41), "Friday", "c", 4.0, 5.0},
					{"m", "f", execute.Time(11), "Monday", "a", 2.0, 1.0, "bogus"},
					{"m", "f", execute.Time(21), "Tuesday", "a", 2.0, 2.0, "bogus"},
					{"m", "f", execute.Time(21), "Wednesday", "b", 1.0, 4.0, "bogus"},
					{"m", "f", execute.Time(31), "Thursday", "a", 3.0, 3.0, "bogus"},
					{"m", "f", execute.Time(41), "Friday", "c", 4.0, 5.0, "bogus"},
				},
			})},
			want: wanted{
@@ -566,18 +431,22 @@ c day="Friday",humidity=5,ratio=0.8,temperature=4 41`),
			},
			tables: []*executetest.Table{{
				ColMeta: []flux.ColMeta{
					{Label: "_measurement", Type: flux.TString},
					{Label: "_field", Type: flux.TString},
					{Label: "_time", Type: flux.TTime},
					{Label: "day", Type: flux.TString},
					{Label: "tag", Type: flux.TString},
					{Label: "temperature", Type: flux.TFloat},
					{Label: "humidity", Type: flux.TFloat},
					{Label: "_value", Type: flux.TString},
				},
				KeyCols: []string{"_measurement", "_field"},
				Data: [][]interface{}{
					{execute.Time(11), "Monday", "a", 2.0, 1.0},
					{execute.Time(21), "Tuesday", "a", 2.0, 2.0},
					{execute.Time(21), "Wednesday", "b", 1.0, 4.0},
					{execute.Time(31), "Thursday", "a", 3.0, 3.0},
					{execute.Time(41), "Friday", "c", 4.0, 5.0},
					{"m", "f", execute.Time(11), "Monday", "a", 2.0, 1.0, "bogus"},
					{"m", "f", execute.Time(21), "Tuesday", "a", 2.0, 2.0, "bogus"},
					{"m", "f", execute.Time(21), "Wednesday", "b", 1.0, 4.0, "bogus"},
					{"m", "f", execute.Time(31), "Thursday", "a", 3.0, 3.0, "bogus"},
					{"m", "f", execute.Time(41), "Friday", "c", 4.0, 5.0, "bogus"},
				},
			}},
		},
@@ -592,36 +461,8 @@ c day="Friday",humidity=5,ratio=0.8,temperature=4 41`),
					MeasurementColumn: "tag1",
					TagColumns:        []string{"tag2"},
					FieldFn: interpreter.ResolvedFunction{
						Scope: valuestest.NowScope(),
						Fn: &semantic.FunctionExpression{
							Block: &semantic.FunctionBlock{
								Parameters: &semantic.FunctionParameters{
									List: []*semantic.FunctionParameter{
										{
											Key: &semantic.Identifier{Name: "r"},
										},
									},
								},
								Body: &semantic.ObjectExpression{
									Properties: []*semantic.Property{
										{
											Key: &semantic.Identifier{Name: "temperature"},
											Value: &semantic.MemberExpression{
												Object:   &semantic.IdentifierExpression{Name: "r"},
												Property: "temperature",
											},
										},
										{
											Key: &semantic.Identifier{Name: "humidity"},
											Value: &semantic.MemberExpression{
												Object:   &semantic.IdentifierExpression{Name: "r"},
												Property: "humidity",
											},
										},
									},
								},
							},
						},
						Scope: valuestest.Scope(),
						Fn:    executetest.FunctionExpression(t, `(r) => ({temperature: r.temperature, humidity: r.humidity})`),
					},
				},
			},

@@ -6,6 +6,7 @@ import (
	"time"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/plan"
@@ -16,54 +17,24 @@ import (
	"github.com/pkg/errors"
)

const DatabasesKind = v1.DatabasesKind

type DatabasesOpSpec struct {
}

func init() {
	flux.ReplacePackageValue("influxdata/influxdb/v1", DatabasesKind, flux.FunctionValue(DatabasesKind, createDatabasesOpSpec, v1.DatabasesSignature))
	flux.RegisterOpSpec(DatabasesKind, newDatabasesOp)
	plan.RegisterProcedureSpec(DatabasesKind, newDatabasesProcedure, DatabasesKind)
}

func createDatabasesOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) {
	spec := new(DatabasesOpSpec)
	return spec, nil
}

func newDatabasesOp() flux.OperationSpec {
	return new(DatabasesOpSpec)
}

func (s *DatabasesOpSpec) Kind() flux.OperationKind {
	return DatabasesKind
}

type DatabasesProcedureSpec struct {
	plan.DefaultCost
}

func newDatabasesProcedure(qs flux.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) {
	_, ok := qs.(*DatabasesOpSpec)
	if !ok {
		return nil, fmt.Errorf("invalid spec type %T", qs)
	}

	return &DatabasesProcedureSpec{}, nil
}

func (s *DatabasesProcedureSpec) Kind() plan.ProcedureKind {
	return DatabasesKind
}

func (s *DatabasesProcedureSpec) Copy() plan.ProcedureSpec {
	ns := new(DatabasesProcedureSpec)
	return ns
}
const DatabasesKind = "influxdata/influxdb/v1.localDatabases"

func init() {
	execute.RegisterSource(DatabasesKind, createDatabasesSource)
	plan.RegisterPhysicalRules(LocalDatabasesRule{})
}

type LocalDatabasesProcedureSpec struct {
	plan.DefaultCost
}

func (s *LocalDatabasesProcedureSpec) Kind() plan.ProcedureKind {
	return DatabasesKind
}

func (s *LocalDatabasesProcedureSpec) Copy() plan.ProcedureSpec {
	ns := new(LocalDatabasesProcedureSpec)
	return ns
}

type DatabasesDecoder struct {
@@ -177,7 +148,7 @@ func (bd *DatabasesDecoder) Close() error {
}

func createDatabasesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
	_, ok := prSpec.(*DatabasesProcedureSpec)
	_, ok := prSpec.(*LocalDatabasesProcedureSpec)
	if !ok {
		return nil, fmt.Errorf("invalid spec type %T", prSpec)
	}
@@ -219,3 +190,27 @@ func (d DatabasesDependencies) Validate() error {
	}
	return nil
}

type LocalDatabasesRule struct{}

func (rule LocalDatabasesRule) Name() string {
	return "influxdata/influxdb.LocalDatabasesRule"
}

func (rule LocalDatabasesRule) Pattern() plan.Pattern {
	return plan.Pat(v1.DatabasesKind)
}

func (rule LocalDatabasesRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
	fromSpec := node.ProcedureSpec().(*v1.DatabasesProcedureSpec)
	if fromSpec.Host != nil {
		return node, false, nil
	} else if fromSpec.Org != nil {
		return node, false, &flux.Error{
			Code: codes.Unimplemented,
			Msg:  "buckets cannot list from a separate organization; please specify a host or remove the organization",
		}
	}

	return plan.CreateLogicalNode("localDatabases", &LocalDatabasesProcedureSpec{}), true, nil
}
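
The rule rewrites the stdlib's v1.databases() plan node into a local source at planning time. A hedged sketch of a single rewrite, assuming a logical node that carries a *v1.DatabasesProcedureSpec with no Host or Org set:

	node := plan.CreateLogicalNode("databases", &v1.DatabasesProcedureSpec{})
	rewritten, changed, err := LocalDatabasesRule{}.Rewrite(context.Background(), node)
	// Expected under that assumption: err == nil, changed == true, and the
	// rewritten node's spec is *LocalDatabasesProcedureSpec, which
	// createDatabasesSource accepts.
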
@@ -1,25 +1,25 @@
//lint:file-ignore U1000 ignore these flagger-related dead code issues until we can circle back
package testing_test

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"strings"
	"testing"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/lang"
	"github.com/influxdata/flux/parser"
	"github.com/influxdata/flux/runtime"
	"github.com/influxdata/flux/stdlib"
	"github.com/influxdata/influxdb/v2/kit/feature"
	"github.com/influxdata/influxdb/v2/kit/feature/override"

	platform "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
	influxdbcontext "github.com/influxdata/influxdb/v2/context"
	"github.com/influxdata/influxdb/v2/kit/feature"
	"github.com/influxdata/influxdb/v2/kit/feature/override"
	"github.com/influxdata/influxdb/v2/mock"
	"github.com/influxdata/influxdb/v2/query"
	_ "github.com/influxdata/influxdb/v2/query/stdlib"
@@ -77,11 +77,10 @@ func (f Flagger) Flags(ctx context.Context, _f ...feature.Flag) (map[string]inte
var ctx = influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(true, nil))

func init() {
	flux.FinalizeBuiltIns()
	runtime.FinalizeBuiltIns()
}

func TestFluxEndToEnd(t *testing.T) {
	t.Skip("Skipping per https://github.com/influxdata/influxdb/issues/19299")
	runEndToEnd(t, stdlib.FluxTestPackages)
}
func BenchmarkFluxEndToEnd(b *testing.B) {
@@ -110,6 +109,8 @@ func runEndToEnd(t *testing.T, pkgs []*ast.Package) {
			if reason, ok := itesting.FluxEndToEndSkipList[pkg.Path][name]; ok {
				t.Skip(reason)
			}

			flagger.SetActiveTestCase(pkg.Path, name)
			testFlux(t, l, file)
		})
	}
@@ -153,12 +154,15 @@ func makeTestPackage(file *ast.File) *ast.Package {
var optionsSource = `
import "testing"
import c "csv"
import "experimental"

// Options bucket and org are defined dynamically per test

option testing.loadStorage = (csv) => {
	c.from(csv: csv) |> to(bucket: bucket, org: org)
	return from(bucket: bucket)
	return experimental.chain(
		first: c.from(csv: csv) |> to(bucket: bucket, org: org),
		second: from(bucket:bucket)
	)
}
`
var optionsAST *ast.File
@@ -173,8 +177,6 @@ func init() {

func testFlux(t testing.TB, l *launcher.TestLauncher, file *ast.File) {

	// Query server to ensure write persists.

	b := &platform.Bucket{
		OrgID: l.Org.ID,
		Name:  t.Name(),
@@ -206,70 +208,32 @@ func testFlux(t testing.TB, l *launcher.TestLauncher, file *ast.File) {
	pkg := makeTestPackage(file)
	pkg.Files = append(pkg.Files, options)

	// Add testing.inspect call to ensure the data is loaded
	// Use testing.inspect call to get all of diff, want, and got
	inspectCalls := stdlib.TestingInspectCalls(pkg)
	pkg.Files = append(pkg.Files, inspectCalls)

	req := &query.Request{
		OrganizationID: l.Org.ID,
		Compiler:       lang.ASTCompiler{AST: pkg},
	}
	if r, err := l.FluxQueryService().Query(ctx, req); err != nil {
		t.Fatal(err)
	} else {
		for r.More() {
			v := r.Next()
			if err := v.Tables().Do(func(tbl flux.Table) error {
				return tbl.Do(func(reader flux.ColReader) error {
					return nil
				})
			}); err != nil {
				t.Error(err)
			}
		}
	}

	// quirk: our execution engine doesn't guarantee the order of execution for disconnected DAGS
	// so that our function-with-side effects call to `to` may run _after_ the test instead of before.
	// running twice makes sure that `to` happens at least once before we run the test.
	// this time we use a call to `run` so that the assertion error is triggered
	runCalls := stdlib.TestingRunCalls(pkg)
	pkg.Files[len(pkg.Files)-1] = runCalls
	r, err := l.FluxQueryService().Query(ctx, req)
	bs, err := json.Marshal(pkg)
	if err != nil {
		t.Fatal(err)
	}

	for r.More() {
		v := r.Next()
		if err := v.Tables().Do(func(tbl flux.Table) error {
			return tbl.Do(func(reader flux.ColReader) error {
				return nil
			})
		}); err != nil {
			t.Error(err)
		}
	req := &query.Request{
		OrganizationID: l.Org.ID,
		Compiler:       lang.ASTCompiler{AST: bs},
	}
	if err := r.Err(); err != nil {
		t.Error(err)
	// Replace the testing.run calls with testing.inspect calls.
	pkg.Files[len(pkg.Files)-1] = inspectCalls
	r, err := l.FluxQueryService().Query(ctx, req)
	if err != nil {
		t.Fatal(err)
	}
	var out bytes.Buffer
	defer func() {
		if t.Failed() {
			scanner := bufio.NewScanner(&out)
			for scanner.Scan() {
				t.Log(scanner.Text())
			}
		}
	}()

	if r, err := l.FluxQueryService().Query(ctx, req); err != nil {
		t.Fatal(err)
	} else {
		results := make(map[string]*bytes.Buffer)

		for r.More() {
			v := r.Next()
			err := execute.FormatResult(&out, v)

			if _, ok := results[v.Name()]; !ok {
				results[v.Name()] = &bytes.Buffer{}
			}
			err := execute.FormatResult(results[v.Name()], v)
			if err != nil {
				t.Error(err)
			}
@@ -277,5 +241,22 @@ func testFlux(t testing.TB, l *launcher.TestLauncher, file *ast.File) {
		if err := r.Err(); err != nil {
			t.Error(err)
		}

		logFormatted := func(name string, results map[string]*bytes.Buffer) {
			if _, ok := results[name]; ok {
				scanner := bufio.NewScanner(results[name])
				for scanner.Scan() {
					t.Log(scanner.Text())
				}
			} else {
				t.Log("table ", name, " not present in results")
			}
		}
		if _, ok := results["diff"]; ok {
			t.Error("diff table was not empty")
			logFormatted("diff", results)
			logFormatted("want", results)
			logFormatted("got", results)
		}
	}
}

@@ -51,6 +51,7 @@ var FluxEndToEndSkipList = map[string]map[string]string{
		"integral_columns":    "unbounded test",
		"map":                 "unbounded test",
		"join_missing_on_col": "unbounded test",
		"join_use_previous":   "unbounded test (https://github.com/influxdata/flux/issues/2996)",
		"rowfn_with_import":   "unbounded test",

		// the following tests have a difference between the CSV-decoded input table, and the storage-retrieved version of that table
@@ -90,6 +91,11 @@ var FluxEndToEndSkipList = map[string]map[string]string{
		"to_uint": "dateTime conversion issue: https://github.com/influxdata/influxdb/issues/14575",

		"holt_winters_panic": "Expected output is an empty table which breaks the testing framework (https://github.com/influxdata/influxdb/issues/14749)",
		"map_nulls":          "to cannot write null values",

		"range_stop": "pushed down range stop no longer exclusive https://github.com/influxdata/influxdb/issues/19564",

		"to_time": "Flaky test https://github.com/influxdata/influxdb/issues/19577",
	},
	"experimental": {
		"set": "Reason TBD",
@@ -137,7 +143,9 @@ var FluxEndToEndSkipList = map[string]map[string]string{
		"join": "unbounded test",
	},
	"testing/chronograf": {
		"buckets": "unbounded test",
		"buckets":                 "unbounded test",
		"aggregate_window_count":  "flakey test: https://github.com/influxdata/influxdb/issues/18463",
		"aggregate_window_median": "failing with \"field type conflict\": https://github.com/influxdata/influxdb/issues/19565",
	},
	"testing/kapacitor": {
		"fill_default": "unknown field type for f1",
@@ -147,14 +155,32 @@ var FluxEndToEndSkipList = map[string]map[string]string{
		"partition_strings_splitN": "pandas. map does not correctly handled returned arrays (https://github.com/influxdata/flux/issues/1387)",
	},
	"testing/promql": {
		"emptyTable": "tests a source",
		"year":       "flakey test: https://github.com/influxdata/influxdb/issues/15667",
		"emptyTable":                    "tests a source",
		"year":                          "flakey test: https://github.com/influxdata/influxdb/issues/15667",
		"extrapolatedRate_counter_rate": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
		"extrapolatedRate_nocounter":    "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
		"extrapolatedRate_norate":       "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
		"linearRegression_nopredict":    "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
		"linearRegression_predict":      "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
	},
	"testing/influxql": {
		"cumulative_sum": "invalid test data requires loadStorage to be overridden. See https://github.com/influxdata/flux/issues/3145",
		"elapsed":        "failing since split with Flux upgrade: https://github.com/influxdata/influxdb/issues/19568",
	},
}

type PerTestFeatureFlagMap = map[string]map[string]map[string]string

var FluxEndToEndFeatureFlags = PerTestFeatureFlagMap{}
var FluxEndToEndFeatureFlags = PerTestFeatureFlagMap{
	"planner": {
		"bare_mean_push": {
			"pushDownWindowAggregateMean": "true",
		},
		"window_mean_push": {
			"pushDownWindowAggregateMean": "true",
		},
		"merge_filters": {
			"mergeFilterRule": "true",
		},
	},
}

@@ -11,7 +11,7 @@ import (
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/values"
	"github.com/influxdata/influxdb/v2/models"
	"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
	"github.com/influxdata/influxdb/v2/query"
	storage "github.com/influxdata/influxdb/v2/storage/reads"
	"github.com/influxdata/influxdb/v2/storage/reads/datatypes"
	"github.com/influxdata/influxdb/v2/tsdb/cursors"
@@ -55,11 +55,11 @@ type storeReader struct {
}

// NewReader returns a new storageflux reader
func NewReader(s storage.Store) influxdb.Reader {
func NewReader(s storage.Store) query.StorageReader {
	return &storeReader{s: s}
}

func (r *storeReader) ReadFilter(ctx context.Context, spec influxdb.ReadFilterSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) {
func (r *storeReader) ReadFilter(ctx context.Context, spec query.ReadFilterSpec, alloc *memory.Allocator) (query.TableIterator, error) {
	return &filterIterator{
		ctx:   ctx,
		s:     r.s,
@@ -69,7 +69,7 @@ func (r *storeReader) ReadFilter(ctx context.Context, spec influxdb.ReadFilterSp
	}, nil
}

func (r *storeReader) ReadGroup(ctx context.Context, spec influxdb.ReadGroupSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) {
func (r *storeReader) ReadGroup(ctx context.Context, spec query.ReadGroupSpec, alloc *memory.Allocator) (query.TableIterator, error) {
	return &groupIterator{
		ctx:   ctx,
		s:     r.s,
@@ -79,42 +79,24 @@ func (r *storeReader) ReadGroup(ctx context.Context, spec influxdb.ReadGroupSpec
	}, nil
}

func (r *storeReader) ReadTagKeys(ctx context.Context, spec influxdb.ReadTagKeysSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) {
	var predicate *datatypes.Predicate
	if spec.Predicate != nil {
		p, err := toStoragePredicate(spec.Predicate)
		if err != nil {
			return nil, err
		}
		predicate = p
	}

func (r *storeReader) ReadTagKeys(ctx context.Context, spec query.ReadTagKeysSpec, alloc *memory.Allocator) (query.TableIterator, error) {
	return &tagKeysIterator{
		ctx:       ctx,
		bounds:    spec.Bounds,
		s:         r.s,
		readSpec:  spec,
		predicate: predicate,
		predicate: spec.Predicate,
		alloc:     alloc,
	}, nil
}

func (r *storeReader) ReadTagValues(ctx context.Context, spec influxdb.ReadTagValuesSpec, alloc *memory.Allocator) (influxdb.TableIterator, error) {
	var predicate *datatypes.Predicate
	if spec.Predicate != nil {
		p, err := toStoragePredicate(spec.Predicate)
		if err != nil {
			return nil, err
		}
		predicate = p
	}

func (r *storeReader) ReadTagValues(ctx context.Context, spec query.ReadTagValuesSpec, alloc *memory.Allocator) (query.TableIterator, error) {
	return &tagValuesIterator{
		ctx:       ctx,
		bounds:    spec.Bounds,
		s:         r.s,
		readSpec:  spec,
		predicate: predicate,
		predicate: spec.Predicate,
		alloc:     alloc,
	}, nil
}
@@ -124,7 +106,7 @@ func (r *storeReader) Close() {}
type filterIterator struct {
	ctx   context.Context
	s     storage.Store
	spec  influxdb.ReadFilterSpec
	spec  query.ReadFilterSpec
	stats cursors.CursorStats
	cache *tagsCache
	alloc *memory.Allocator
@@ -144,18 +126,9 @@ func (fi *filterIterator) Do(f func(flux.Table) error) error {
		return err
	}

	var predicate *datatypes.Predicate
	if fi.spec.Predicate != nil {
		p, err := toStoragePredicate(fi.spec.Predicate)
		if err != nil {
			return err
		}
		predicate = p
	}

	var req datatypes.ReadFilterRequest
	req.ReadSource = any
	req.Predicate = predicate
	req.Predicate = fi.spec.Predicate
	req.Range.Start = int64(fi.spec.Bounds.Start)
	req.Range.End = int64(fi.spec.Bounds.Stop)

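Underlying this hunk is a type change: the spec's Predicate field now appears to be a storage-level *datatypes.Predicate built upstream (e.g. via ToStoragePredicate), so the iterator forwards it verbatim instead of converting a Flux function. A hedged sketch of building such a spec (orgID, bucketID, bounds, and pred are illustrative):

	spec := query.ReadFilterSpec{
		OrganizationID: orgID,
		BucketID:       bucketID,
		Bounds:         bounds,
		Predicate:      pred, // forwarded as-is into datatypes.ReadFilterRequest
	}
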
@@ -248,7 +221,7 @@ READ:
type groupIterator struct {
	ctx   context.Context
	s     storage.Store
	spec  influxdb.ReadGroupSpec
	spec  query.ReadGroupSpec
	stats cursors.CursorStats
	cache *tagsCache
	alloc *memory.Allocator
@@ -268,18 +241,9 @@ func (gi *groupIterator) Do(f func(flux.Table) error) error {
		return err
	}

	var predicate *datatypes.Predicate
	if gi.spec.Predicate != nil {
		p, err := toStoragePredicate(gi.spec.Predicate)
		if err != nil {
			return err
		}
		predicate = p
	}

	var req datatypes.ReadGroupRequest
	req.ReadSource = any
	req.Predicate = predicate
	req.Predicate = gi.spec.Predicate
	req.Range.Start = int64(gi.spec.Bounds.Start)
	req.Range.End = int64(gi.spec.Bounds.Stop)

@@ -402,11 +366,11 @@ func determineAggregateMethod(agg string) (datatypes.Aggregate_AggregateType, er
	return 0, fmt.Errorf("unknown aggregate type %q", agg)
}

func convertGroupMode(m influxdb.GroupMode) datatypes.ReadGroupRequest_Group {
func convertGroupMode(m query.GroupMode) datatypes.ReadGroupRequest_Group {
	switch m {
	case influxdb.GroupModeNone:
	case query.GroupModeNone:
		return datatypes.GroupNone
	case influxdb.GroupModeBy:
	case query.GroupModeBy:
		return datatypes.GroupBy
	}
	panic(fmt.Sprint("invalid group mode: ", m))
@@ -501,7 +465,7 @@ func determineTableColsForGroup(tagKeys [][]byte, typ flux.ColType) ([]flux.ColM
	return cols, defs
}

func groupKeyForGroup(kv [][]byte, spec *influxdb.ReadGroupSpec, bnds execute.Bounds) flux.GroupKey {
func groupKeyForGroup(kv [][]byte, spec *query.ReadGroupSpec, bnds execute.Bounds) flux.GroupKey {
	cols := make([]flux.ColMeta, 2, len(spec.GroupKeys)+2)
	vs := make([]values.Value, 2, len(spec.GroupKeys)+2)
	cols[startColIdx] = flux.ColMeta{
@@ -531,7 +495,7 @@ type tagKeysIterator struct {
	ctx       context.Context
	bounds    execute.Bounds
	s         storage.Store
	readSpec  influxdb.ReadTagKeysSpec
	readSpec  query.ReadTagKeysSpec
	predicate *datatypes.Predicate
	alloc     *memory.Allocator
}
@@ -614,7 +578,7 @@ type tagValuesIterator struct {
	ctx       context.Context
	bounds    execute.Bounds
	s         storage.Store
	readSpec  influxdb.ReadTagValuesSpec
	readSpec  query.ReadTagValuesSpec
	predicate *datatypes.Predicate
	alloc     *memory.Allocator
}

task.go
@@ -9,8 +9,8 @@ import (
	"time"

	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/ast/edit"
	"github.com/influxdata/influxdb/v2/kit/feature"
	"github.com/influxdata/influxdb/v2/pkg/flux/ast/edit"
	"github.com/influxdata/influxdb/v2/task/options"
)

@@ -2,12 +2,15 @@ package executor
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"sync"
 	"time"
 
 	"github.com/influxdata/flux"
+	"github.com/influxdata/flux/ast"
 	"github.com/influxdata/flux/lang"
+	"github.com/influxdata/flux/runtime"
 	"github.com/influxdata/influxdb/v2"
 	icontext "github.com/influxdata/influxdb/v2/context"
 	"github.com/influxdata/influxdb/v2/kit/feature"
@@ -21,6 +24,8 @@ import (
 const (
 	maxPromises       = 1000
 	defaultMaxWorkers = 100
+
+	lastSuccessOption = "tasks.lastSuccessTime"
 )
 
 var _ scheduler.Executor = (*Executor)(nil)
@@ -69,7 +74,31 @@ func WithMaxWorkers(n int) executorOption {
 
 // CompilerBuilderFunc is a function that yields a new flux.Compiler. The
 // context.Context provided can be assumed to be an authorized context.
-type CompilerBuilderFunc func(ctx context.Context, query string, now time.Time) (flux.Compiler, error)
+type CompilerBuilderFunc func(ctx context.Context, query string, ts CompilerBuilderTimestamps) (flux.Compiler, error)
+
+// CompilerBuilderTimestamps contains timestamps which should be provided along
+// with a Task query.
+type CompilerBuilderTimestamps struct {
+	Now           time.Time
+	LatestSuccess time.Time
+}
+
+func (ts CompilerBuilderTimestamps) Extern() *ast.File {
+	var body []ast.Statement
+
+	if !ts.LatestSuccess.IsZero() {
+		body = append(body, &ast.OptionStatement{
+			Assignment: &ast.VariableAssignment{
+				ID: &ast.Identifier{Name: lastSuccessOption},
+				Init: &ast.DateTimeLiteral{
+					Value: ts.LatestSuccess,
+				},
+			},
+		})
+	}
+
+	return &ast.File{Body: body}
+}
+
 // WithSystemCompilerBuilder is an Executor option that configures a
 // CompilerBuilderFunc to be used when compiling queries for System Tasks.
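To make the new compile-time hook concrete, here is a minimal standalone sketch of CompilerBuilderTimestamps.Extern in use. The struct and method mirror the diff above (with the lastSuccessOption constant inlined); main and its sample timestamps are invented for illustration:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/influxdata/flux/ast"
)

// Reproduced from the diff so the sketch compiles standalone.
type CompilerBuilderTimestamps struct {
	Now           time.Time
	LatestSuccess time.Time
}

// Extern builds an AST file carrying the tasks.lastSuccessTime option,
// or an empty body when there is no previous successful run.
func (ts CompilerBuilderTimestamps) Extern() *ast.File {
	var body []ast.Statement
	if !ts.LatestSuccess.IsZero() {
		body = append(body, &ast.OptionStatement{
			Assignment: &ast.VariableAssignment{
				ID:   &ast.Identifier{Name: "tasks.lastSuccessTime"}, // lastSuccessOption
				Init: &ast.DateTimeLiteral{Value: ts.LatestSuccess},
			},
		})
	}
	return &ast.File{Body: body}
}

func main() {
	// Sample timestamps; in the executor these come from the run and task.
	ts := CompilerBuilderTimestamps{
		Now:           time.Now(),
		LatestSuccess: time.Date(2020, 9, 1, 0, 0, 0, 0, time.UTC),
	}
	// The marshaled extern is what lands in the Extern field of
	// lang.ASTCompiler / lang.FluxCompiler below.
	externBytes, err := json.Marshal(ts.Extern())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(externBytes))
}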
@@ -415,8 +444,6 @@ func (w *worker) start(p *promise) {
 }
 
 func (w *worker) finish(p *promise, rs influxdb.RunStatus, err error) {
-
-	// trace
 	span, ctx := tracing.StartSpanFromContext(p.ctx)
 	defer span.Finish()
 
@@ -470,7 +497,10 @@ func (w *worker) executeQuery(p *promise) {
 	if p.task.Type != influxdb.TaskSystemType {
 		buildCompiler = w.nonSystemBuildCompiler
 	}
-	compiler, err := buildCompiler(ctx, p.task.Flux, p.run.ScheduledFor)
+	compiler, err := buildCompiler(ctx, p.task.Flux, CompilerBuilderTimestamps{
+		Now:           p.run.ScheduledFor,
+		LatestSuccess: p.task.LatestSuccess,
+	})
 	if err != nil {
 		w.finish(p, influxdb.RunFail, influxdb.ErrFluxParseError(err))
 		return
@@ -591,21 +621,45 @@ func exhaustResultIterators(res flux.Result) error {
 }
 
 // NewASTCompiler parses a Flux query string into an AST representation.
-func NewASTCompiler(_ context.Context, query string, now time.Time) (flux.Compiler, error) {
-	pkg, err := flux.Parse(query)
+func NewASTCompiler(ctx context.Context, query string, ts CompilerBuilderTimestamps) (flux.Compiler, error) {
+	pkg, err := runtime.ParseToJSON(query)
 	if err != nil {
 		return nil, err
 	}
+	var externBytes []byte
+	if feature.InjectLatestSuccessTime().Enabled(ctx) {
+		extern := ts.Extern()
+		if len(extern.Body) > 0 {
+			var err error
+			externBytes, err = json.Marshal(extern)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
 	return lang.ASTCompiler{
-		AST: pkg,
-		Now: now,
+		AST:    pkg,
+		Now:    ts.Now,
+		Extern: externBytes,
 	}, nil
 }
 
 // NewFluxCompiler wraps a Flux query string in a raw-query representation.
-func NewFluxCompiler(_ context.Context, query string, _ time.Time) (flux.Compiler, error) {
+func NewFluxCompiler(ctx context.Context, query string, ts CompilerBuilderTimestamps) (flux.Compiler, error) {
+	var externBytes []byte
+	if feature.InjectLatestSuccessTime().Enabled(ctx) {
+		extern := ts.Extern()
+		if len(extern.Body) > 0 {
+			var err error
+			externBytes, err = json.Marshal(extern)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
 	return lang.FluxCompiler{
-		Query: query,
+		Query:  query,
+		Extern: externBytes,
 		// TODO(brett): This mitigates an immediate problem where
 		// Checks/Notifications breaks when sending Now, and system Tasks do not
 		// break when sending Now. We are currently sending C+N through using
@@ -616,7 +670,13 @@ func NewFluxCompiler(_ context.Context, query string, _ time.Time) (flux.Compile
 		// we are able to locate the root cause and use Flux Compiler for all
 		// Task types.
 		//
-		// This should be removed once we diagnose the problem.
+		// It turns out this is due to the exclusive nature of the stop time in
+		// Flux "from" and that we weren't including the left-hand boundary of
+		// the range check for notifications. We're shipping a fix soon in
+		//
+		// https://github.com/influxdata/influxdb/pull/19392
+		//
+		// Once this has merged, we can send Now again.
 		//
 		// Now: now,
 	}, nil
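For orientation, a minimal standalone sketch of what NewFluxCompiler ultimately constructs once the feature-flag gate and extern marshaling are stripped away (the bucket and query text are invented, and Now is deliberately left unset, per the TODO above):

package main

import (
	"fmt"

	"github.com/influxdata/flux/lang"
)

func main() {
	// NewFluxCompiler-style construction, minus the feature-flag gate:
	// wrap the raw query text and attach the (possibly empty) extern bytes.
	c := lang.FluxCompiler{
		Query:  `from(bucket: "b") |> range(start: -1h)`,
		Extern: nil, // JSON-encoded *ast.File when a lastSuccessTime exists
	}
	fmt.Printf("compiler wraps %d bytes of Flux source\n", len(c.Query))
}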
@@ -12,6 +12,7 @@ import (
 	"github.com/influxdata/flux/execute"
 	"github.com/influxdata/flux/lang"
 	"github.com/influxdata/flux/memory"
+	"github.com/influxdata/flux/runtime"
 	"github.com/influxdata/flux/values"
 	"github.com/influxdata/influxdb/v2"
 	"github.com/influxdata/influxdb/v2/kv"
@@ -31,7 +32,7 @@ type fakeQueryService struct {
 var _ query.AsyncQueryService = (*fakeQueryService)(nil)
 
 func makeAST(q string) lang.ASTCompiler {
-	pkg, err := flux.Parse(q)
+	pkg, err := runtime.ParseToJSON(q)
 	if err != nil {
 		panic(err)
 	}
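A standalone sketch of the replacement parse path (query text invented): runtime.ParseToJSON parses Flux source straight to the JSON encoding of its AST, which is the shape lang.ASTCompiler's AST field accepts, so makeAST no longer needs flux.Parse plus a separate serialization step.

package main

import (
	"fmt"

	"github.com/influxdata/flux/lang"
	"github.com/influxdata/flux/runtime"
)

func main() {
	// Parse straight to the JSON encoding of the AST.
	astJSON, err := runtime.ParseToJSON(`from(bucket: "b") |> range(start: -5m)`)
	if err != nil {
		panic(err)
	}
	// Hand the bytes directly to an AST compiler, as makeAST does above.
	c := lang.ASTCompiler{AST: astJSON}
	fmt.Printf("parsed %d bytes of AST JSON\n", len(c.AST))
}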
@@ -170,10 +171,11 @@ type fakeQuery struct {
 
 var _ flux.Query = (*fakeQuery)(nil)
 
-func (q *fakeQuery) Done()                       {}
-func (q *fakeQuery) Cancel()                     { close(q.results) }
-func (q *fakeQuery) Statistics() flux.Statistics { return flux.Statistics{} }
-func (q *fakeQuery) Results() <-chan flux.Result { return q.results }
+func (q *fakeQuery) Done()                                         {}
+func (q *fakeQuery) Cancel()                                       { close(q.results) }
+func (q *fakeQuery) Statistics() flux.Statistics                   { return flux.Statistics{} }
+func (q *fakeQuery) Results() <-chan flux.Result                   { return q.results }
+func (q *fakeQuery) ProfilerResults() (flux.ResultIterator, error) { return nil, nil }
 
 func (q *fakeQuery) Err() error {
 	if q.ctxErr != nil {
@@ -11,11 +11,10 @@ import (
 	"github.com/influxdata/cron"
 	"github.com/influxdata/flux"
 	"github.com/influxdata/flux/ast"
+	"github.com/influxdata/flux/ast/edit"
 	"github.com/influxdata/flux/interpreter"
 	"github.com/influxdata/flux/semantic"
 	"github.com/influxdata/flux/values"
-	ast2 "github.com/influxdata/influxdb/v2/pkg/flux/ast"
-	"github.com/influxdata/influxdb/v2/pkg/flux/ast/edit"
 	"github.com/influxdata/influxdb/v2/pkg/pointer"
 )
 
@@ -293,7 +292,7 @@ func extractNameOption(opts *Options, objExpr *ast.ObjectExpression) error {
 	if !ok {
 		return errParseTaskOptionField(optName)
 	}
-	opts.Name = ast2.StringFromLiteral(nameStr)
+	opts.Name = ast.StringFromLiteral(nameStr)
 
 	return nil
 }
@@ -313,7 +312,7 @@ func extractScheduleOptions(opts *Options, objExpr *ast.ObjectExpression) error
 		if !ok {
 			return errParseTaskOptionField(optCron)
 		}
-		opts.Cron = ast2.StringFromLiteral(cronExprStr)
+		opts.Cron = ast.StringFromLiteral(cronExprStr)
 	}
 
 	if everyErr == nil {
@@ -359,7 +358,7 @@ func extractConcurrencyOption(opts *Options, objExpr *ast.ObjectExpression) erro
 	if !ok {
 		return errParseTaskOptionField(optConcurrency)
 	}
-	val := ast2.IntegerFromLiteral(concurInt)
+	val := ast.IntegerFromLiteral(concurInt)
 	opts.Concurrency = &val
 
 	return nil
@@ -375,7 +374,7 @@ func extractRetryOption(opts *Options, objExpr *ast.ObjectExpression) error {
 	if !ok {
 		return errParseTaskOptionField(optRetry)
 	}
-	val := ast2.IntegerFromLiteral(retryInt)
+	val := ast.IntegerFromLiteral(retryInt)
 	opts.Retry = &val
 
 	return nil
@@ -39,7 +39,7 @@ from(bucket: "${name}"{rightarrow}
 
     cy.getByTestID('notification-error').should(
       'contain',
-      'error calling function "to": missing required keyword argument "bucketID"'
+      'error calling function "to" @12:8-12:26: missing required keyword argument "bucketID"'
     )
   })
 