Merge branch 'master' into feat/18733
commit 13cb2927bb
@@ -553,31 +553,31 @@ workflows:
            branches:
              ignore: /.*/
            tags:
-             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/
+             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/
      - golint:
          filters:
            branches:
              ignore: /.*/
            tags:
-             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/
+             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/
      - lint-feature-flags:
          filters:
            branches:
              ignore: /.*/
            tags:
-             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/
+             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/
      - jstest:
          filters:
            branches:
              ignore: /.*/
            tags:
-             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/
+             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/
      - jslint:
          filters:
            branches:
              ignore: /.*/
            tags:
-             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/
+             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/
      - release:
          requires:
            - gotest

@@ -589,4 +589,4 @@ workflows:
            branches:
              ignore: /.*/
            tags:
-             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(alpha|beta)\.[0-9]+$/
+             only: /^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$/
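Both hunks above widen the release tag filter so that `rc` pre-release tags trigger the tag-gated jobs alongside `alpha` and `beta`. A quick sanity check of the new pattern in Go (the sample tag values are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The updated CircleCI tag filter from the hunks above.
	re := regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+-(rc|alpha|beta)\.[0-9]+$`)
	for _, tag := range []string{"v2.0.0-rc.1", "v2.0.0-beta.16", "v2.0.0"} {
		// rc and beta pre-release tags match; final release tags do not.
		fmt.Printf("%-15s %v\n", tag, re.MatchString(tag))
	}
}
```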
@@ -12,5 +12,5 @@ http/swagger.yml @influxdata/monitoring-team
/pkger/ @influxdata/tools-team

# Storage code
-/storage/ @influxdata/storage-team
-/tsdb/ @influxdata/storage-team
+#/storage/ @influxdata/storage-team
+#/tsdb/ @influxdata/storage-team
@@ -7,6 +7,11 @@ builds:
    goarch:
      - amd64
      - arm64
      - arm
    goarm:
      - 5
      - 6

    main: ./cmd/influx/
    flags:
      - -tags={{if and (eq .Os "linux") (eq .Arch "amd64")}}osusergo,netgo,static_build{{end}}

@@ -19,6 +24,7 @@ builds:
    ldflags:
      - -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} {{if and (eq .Os "linux") (eq .Arch "amd64")}}-extldflags "-fno-PIC -static -Wl,-z,stack-size=8388608"{{end}}
    binary: influx

  - id: influxd
    goos:
      - linux

@@ -26,6 +32,11 @@ builds:
    goarch:
      - amd64
      - arm64
      - arm
    goarm:
      - 5
      - 6

    main: ./cmd/influxd/
    flags:
      - -tags=assets{{if and (eq .Os "linux") (eq .Arch "amd64")}},osusergo,netgo,static_build{{end}}

@@ -41,6 +52,35 @@ builds:
    hooks:
      pre: make generate

nfpms:
  -
    id: "influxdata"
    builds: ["influx", "influxd"]
    formats:
      - deb
      - rpm
    bindir: /usr/bin
    files:
      "scripts/init.sh": "/usr/lib/influxdb/scripts/init.sh"
      "scripts/influxdb.service": "/usr/lib/influxdb/scripts/influxdb.service"
      "scripts/logrotate": "/etc/logrotate.d/influxdb"
    scripts:
      preinstall: "scripts/pre-install.sh"
      postinstall: "scripts/post-install.sh"
      postremove: "scripts/post-uninstall.sh"
    overrides:
      rpm:
        replacements:
          amd64: x86_64
        file_name_template: "influxdb-{{ .Version }}.{{ .Arch }}{{if .Arm}}{{ if eq .Arm \"5\" }}el{{end}}{{ end }}{{if .Arm}}{{ if eq .Arm \"6\" }}hf{{end}}{{ end }}"
      deb:
        file_name_template: "influxdb_{{ .Version }}_{{ .Arch }}{{if .Arm}}{{ if eq .Arm \"5\" }}el{{end}}{{ end }}{{if .Arm}}{{ if eq .Arm \"6\" }}hf{{end}}{{ end }}"
    vendor: InfluxData
    homepage: https://influxdata.com
    maintainer: support@influxdb.com
    description: Distributed time-series database.
    license: Proprietary

archives:
  - id: influxdb_client
    builds: ["influx"]

@@ -49,7 +89,7 @@ archives:
    format_overrides:
      - goos: windows
        format: zip
-   name_template: "influxdb_client_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
+   name_template: "influxdb_client_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{if .Arm}}{{ if eq .Arm \"5\" }}el{{end}}{{ end }}{{if .Arm}}{{ if eq .Arm \"6\" }}hf{{end}}{{ end }}"
    files:
      - LICENSE
      - README.md

@@ -59,7 +99,7 @@ archives:
    format_overrides:
      - goos: windows
        format: zip
-   name_template: "influxdb_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
+   name_template: "influxdb-{{ .Version }}_{{ .Os }}_{{ .Arch }}{{if .Arm}}{{ if eq .Arm \"5\" }}el{{end}}{{ end }}{{if .Arm}}{{ if eq .Arm \"6\" }}hf{{end}}{{ end }}"
    files:
      - LICENSE
      - README.md

@@ -70,6 +110,10 @@ blobs:
    region: "us-east-1"
    folder: "influxdb/releases/"

checksum:
  name_template: "influxdb_{{ .Version }}.sha256"
  algorithm: sha256

dockers:
  - goos: linux
    goarch: amd64
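The rpm/deb `file_name_template` values introduced above are goreleaser name templates, i.e. Go text/template strings; the nested `{{if .Arm}}` blocks append `el` for ARMv5 and `hf` for ARMv6 builds. A minimal sketch that evaluates the same template logic (the `ctx` struct and sample values are stand-ins for goreleaser's real template context):

```go
package main

import (
	"os"
	"text/template"
)

// ctx mimics only the template fields the name template reads.
type ctx struct{ Version, Arch, Arm string }

func main() {
	tmpl := template.Must(template.New("rpm").Parse(
		`influxdb-{{ .Version }}.{{ .Arch }}` +
			`{{if .Arm}}{{ if eq .Arm "5" }}el{{end}}{{ end }}` +
			`{{if .Arm}}{{ if eq .Arm "6" }}hf{{end}}{{ end }}` + "\n"))
	for _, c := range []ctx{
		{"2.0.0", "x86_64", ""}, // influxdb-2.0.0.x86_64
		{"2.0.0", "armv5", "5"}, // influxdb-2.0.0.armv5el
		{"2.0.0", "armv6", "6"}, // influxdb-2.0.0.armv6hf
	} {
		tmpl.Execute(os.Stdout, c)
	}
}
```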
@@ -25,6 +25,7 @@ need to update any InfluxDB CLI config profiles with the new port number.

### Features

1. [18779](https://github.com/influxdata/influxdb/pull/18779): Add new processing options and enhancements to influx write.
1. [19246](https://github.com/influxdata/influxdb/pull/19246): Redesign load data page to increase discovery and ease of use
1. [19334](https://github.com/influxdata/influxdb/pull/19334): Add --active-config flag to influx to set config for single command
1. [19219](https://github.com/influxdata/influxdb/pull/19219): List buckets via the API now supports after (ID) parameter as an alternative to offset.

@@ -37,7 +38,9 @@ need to update any InfluxDB CLI config profiles with the new port number.

### Bug Fixes

1. [19331](https://github.com/influxdata/influxdb/pull/19331): Add description to auth influx command outputs.
-1. [19392](https://github.com/influxdata/influxdb/pull/19392) Include the edge of the boundary we are observing.
+1. [19392](https://github.com/influxdata/influxdb/pull/19392): Include the edge of the boundary we are observing.
1. [19453](https://github.com/influxdata/influxdb/pull/19453): Warn about duplicate tag names during influx write csv.
1. [19466](https://github.com/influxdata/influxdb/pull/19466): Do not override existing line part in group annotation.

## v2.0.0-beta.16 [2020-08-07]
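Entry 19219 above replaces offset-based paging with seek-style pagination for bucket listings: the client passes the last bucket ID it saw as `after`. A sketch of the request shape (the endpoint path and parameter name follow the changelog entry; the base URL is an assumption):

```go
package main

import (
	"fmt"
	"net/url"
)

// listBucketsURL builds one page of a seek-paginated bucket listing.
// Seeking by ID avoids the O(n) skip cost that offset paging pays on
// every page.
func listBucketsURL(base, afterID string, limit int) string {
	q := url.Values{}
	if afterID != "" {
		q.Set("after", afterID) // ID of the last bucket on the previous page
	}
	q.Set("limit", fmt.Sprint(limit))
	return base + "/api/v2/buckets?" + q.Encode()
}

func main() {
	fmt.Println(listBucketsURL("http://localhost:8086", "0000000000000001", 20))
}
```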
Makefile

@@ -162,7 +162,7 @@ build: all

goreleaser:
	curl -sfL -o goreleaser-install https://install.goreleaser.com/github.com/goreleaser/goreleaser.sh
-	sh goreleaser-install v0.135.0
+	sh goreleaser-install v0.142.0
	go build -o $(GOPATH)/bin/pkg-config github.com/influxdata/pkg-config
	install xcc.sh $(GOPATH)/bin/xcc
@@ -191,7 +191,7 @@ func TestService_handlePostAuthorization(t *testing.T) {
	httprouter.Params{
		{
			Key: "userID",
-			Value: string(tt.args.session.UserID),
+			Value: fmt.Sprintf("%d", tt.args.session.UserID),
		},
	}))
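The fix above (and the matching `UpdateTarget` fix further down) addresses a classic Go pitfall: converting an integer to `string` produces the UTF-8 encoding of that code point, not the decimal digits. A minimal illustration:

```go
package main

import "fmt"

func main() {
	type ID uint64 // stand-in for an influxdb.ID-style integer type
	id := ID(65)

	fmt.Println(string(rune(id)))      // "A"  -> rune conversion, rarely what you want
	fmt.Println(fmt.Sprintf("%d", id)) // "65" -> the decimal form the handler expects
}
```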
@@ -90,11 +90,6 @@ func authorize(ctx context.Context, a influxdb.Action, rt influxdb.ResourceType,
}

func authorizeReadSystemBucket(ctx context.Context, bid, oid influxdb.ID) (influxdb.Authorizer, influxdb.Permission, error) {
-	// HACK: remove once system buckets are migrated away from hard coded values
-	if !oid.Valid() && (bid == influxdb.TasksSystemBucketID || bid == influxdb.MonitoringSystemBucketID) {
-		a, _ := icontext.GetAuthorizer(ctx)
-		return a, influxdb.Permission{}, nil
-	}
	return AuthorizeReadOrg(ctx, oid)
}
@@ -8,11 +8,6 @@ import (
)

const (
-	// TasksSystemBucketID is the fixed ID for our tasks system bucket
-	TasksSystemBucketID = ID(10)
-	// MonitoringSystemBucketID is the fixed ID for our monitoring system bucket
-	MonitoringSystemBucketID = ID(11)
-
	// BucketTypeUser is a user created bucket
	BucketTypeUser = BucketType(0)
	// BucketTypeSystem is an internally created bucket that cannot be deleted/renamed.
@@ -105,7 +105,7 @@ func fluxQueryF(cmd *cobra.Command, args []string) error {
		"query": q,
		"type": "flux",
		"dialect": map[string]interface{}{
-			"annotations": []string{"datatype", "group", "default"},
+			"annotations": []string{"group", "datatype", "default"},
			"delimiter": ",",
			"header": true,
		},
@@ -2,6 +2,7 @@ package main

import (
	"context"
	"encoding/csv"
	"fmt"
	"io"
	"log"

@@ -38,6 +39,7 @@ type writeFlagsType struct {
	SkipHeader int
	IgnoreDataTypeInColumnName bool
	Encoding string
	ErrorsFile string
}

var writeFlags writeFlagsType

@@ -86,6 +88,7 @@ func cmdWrite(f *globalFlags, opt genericCLIOpts) *cobra.Command {
	cmd.PersistentFlags().BoolVar(&writeFlags.IgnoreDataTypeInColumnName, "xIgnoreDataTypeInColumnName", false, "Ignores dataType which could be specified after ':' in column name")
	cmd.PersistentFlags().MarkHidden("xIgnoreDataTypeInColumnName") // should be used only upon explicit advice
	cmd.PersistentFlags().StringVar(&writeFlags.Encoding, "encoding", "UTF-8", "Character encoding of input files or stdin")
	cmd.PersistentFlags().StringVar(&writeFlags.ErrorsFile, "errors-file", "", "The path to the file to write rejected rows to")

	cmdDryRun := opt.newCmd("dryrun", fluxWriteDryrunF, false)
	cmdDryRun.Args = cobra.MaximumNArgs(1)

@@ -204,6 +207,27 @@ func (writeFlags *writeFlagsType) createLineReader(ctx context.Context, cmd *cob
		}
	}

	// create writer for errors-file, if supplied
	var errorsFile *csv.Writer
	var rowSkippedListener func(*csv2lp.CsvToLineReader, error, []string)
	if writeFlags.ErrorsFile != "" {
		writer, err := os.Create(writeFlags.ErrorsFile)
		if err != nil {
			return nil, csv2lp.MultiCloser(closers...), fmt.Errorf("failed to create %q: %v", writeFlags.ErrorsFile, err)
		}
		closers = append(closers, writer)
		errorsFile = csv.NewWriter(writer)
		rowSkippedListener = func(source *csv2lp.CsvToLineReader, lineError error, row []string) {
			log.Println(lineError)
			errorsFile.Comma = source.Comma()
			errorsFile.Write([]string{fmt.Sprintf("# error : %v", lineError)})
			if err := errorsFile.Write(row); err != nil {
				log.Printf("Unable to write to error-file: %v\n", err)
			}
			errorsFile.Flush() // flush is required
		}
	}

	// concatenate readers
	r := io.MultiReader(readers...)
	if writeFlags.Format == inputFormatCsv {

@@ -213,6 +237,7 @@ func (writeFlags *writeFlagsType) createLineReader(ctx context.Context, cmd *cob
	csvReader.Table.IgnoreDataTypeInColumnName(writeFlags.IgnoreDataTypeInColumnName)
	// change LineNumber to report file/stdin line numbers properly
	csvReader.LineNumber = writeFlags.SkipHeader - len(writeFlags.Headers)
	csvReader.RowSkipped = rowSkippedListener
	r = csvReader
}
return r, csv2lp.MultiCloser(closers...), nil
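The new `--errors-file` option follows a small pattern: every rejected CSV row is logged, then appended to a separate CSV file behind a `# error` comment line, using the same delimiter as the input. A self-contained sketch of that pattern using only the standard library (the function signature is simplified from the csv2lp `RowSkipped` hook above):

```go
package main

import (
	"encoding/csv"
	"fmt"
	"log"
	"os"
)

// writeRejectedRow appends one rejected row to the errors file in the
// shape used above: a "# error" comment describing the failure, then
// the offending row itself.
func writeRejectedRow(w *csv.Writer, comma rune, rowErr error, row []string) {
	log.Println(rowErr)
	w.Comma = comma // mirror the input's delimiter
	w.Write([]string{fmt.Sprintf("# error : %v", rowErr)})
	if err := w.Write(row); err != nil {
		log.Printf("unable to write to errors file: %v", err)
	}
	w.Flush() // flush each row so partial output survives an early exit
}

func main() {
	f, err := os.Create("rejected.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	writeRejectedRow(csv.NewWriter(f), ',',
		fmt.Errorf("line 3: column 'a': '1.1' cannot fit into long data type"),
		[]string{"m", "1.1"})
}
```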
@@ -56,6 +56,7 @@ func readLines(reader io.Reader) []string {

func createTempFile(suffix string, contents []byte) string {
	file, err := ioutil.TempFile("", "influx_writeTest*."+suffix)
	file.Close() // Close immediately, since we need only a file name
	if err != nil {
		log.Fatal(err)
		return "unknown.file"

@@ -545,3 +546,19 @@ func Test_fluxWriteF(t *testing.T) {
		require.Equal(t, "stdin3 i=stdin1,j=stdin2,k=stdin4", strings.Trim(string(lineData), "\n"))
	})
}

// Test_writeFlags_errorsFile tests that rejected rows are written to errors file
func Test_writeFlags_errorsFile(t *testing.T) {
	defer removeTempFiles()
	errorsFile := createTempFile("errors", []byte{})
	stdInContents := "_measurement,a|long:strict\nm,1\nm,1.1"
	out := bytes.Buffer{}
	command := cmdWrite(&globalFlags{}, genericCLIOpts{in: strings.NewReader(stdInContents), w: bufio.NewWriter(&out)})
	command.SetArgs([]string{"dryrun", "--format", "csv", "--errors-file", errorsFile})
	err := command.Execute()
	require.Nil(t, err)
	require.Equal(t, "m a=1i", strings.Trim(out.String(), "\n"))
	errorLines, err := ioutil.ReadFile(errorsFile)
	require.Nil(t, err)
	require.Equal(t, "# error : line 3: column 'a': '1.1' cannot fit into long data type\nm,1.1", strings.Trim(string(errorLines), "\n"))
}
@@ -6,12 +6,12 @@ import (
	"errors"
	"fmt"
	"io"
	"math"
	"net"
	nethttp "net/http"
	_ "net/http/pprof" // needed to add pprof to our binary.
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"time"

@@ -48,7 +48,6 @@ import (
	"github.com/influxdata/influxdb/v2/pkger"
	infprom "github.com/influxdata/influxdb/v2/prometheus"
	"github.com/influxdata/influxdb/v2/query"
-	"github.com/influxdata/influxdb/v2/query/builtinlazy"
	"github.com/influxdata/influxdb/v2/query/control"
	"github.com/influxdata/influxdb/v2/query/fluxlang"
	"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"

@@ -91,6 +90,8 @@ const (
	LogTracing = "log"
	// JaegerTracing enables tracing via the Jaeger client library
	JaegerTracing = "jaeger"
	// Max Integer
	MaxInt = 1<<uint(strconv.IntSize-1) - 1
)

func NewInfluxdCommand(ctx context.Context, subCommands ...*cobra.Command) *cobra.Command {

@@ -135,8 +136,6 @@ func cmdRunE(ctx context.Context, l *Launcher) func() error {
	// exit with SIGINT and SIGTERM
	ctx = signals.WithStandardSignals(ctx)

-	builtinlazy.Initialize()
-
	if err := l.run(ctx); err != nil {
		return err
	} else if !l.Running() {

@@ -352,7 +351,7 @@ func launcherOpts(l *Launcher) []cli.Opt {
	{
		DestP: &l.memoryBytesQuotaPerQuery,
		Flag: "query-memory-bytes",
-		Default: math.MaxInt64,
+		Default: MaxInt,
		Desc: "maximum number of bytes a query is allowed to use at any given time. This must be greater or equal to query-initial-memory-bytes",
	},
	{
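`MaxInt` replaces the hard-coded `math.MaxInt64` default so the query memory quota tracks the platform's native `int` width (`strconv.IntSize` is 32 or 64). A quick check of the arithmetic:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// 1<<(IntSize-1) - 1 is the largest value a signed int of that width holds.
	const MaxInt = 1<<uint(strconv.IntSize-1) - 1

	fmt.Println(strconv.IntSize)         // 64 on 64-bit platforms
	fmt.Println(MaxInt == math.MaxInt64) // true there; on 32-bit it equals math.MaxInt32
}
```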
@@ -19,7 +19,10 @@ import (
	"github.com/influxdata/flux/csv"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/execute/executetest"
	"github.com/influxdata/flux/execute/table"
	"github.com/influxdata/flux/lang"
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/runtime"
	"github.com/influxdata/flux/values"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"

@@ -27,7 +30,6 @@ import (
	"github.com/influxdata/influxdb/v2/kit/feature"
	"github.com/influxdata/influxdb/v2/kit/prom"
	"github.com/influxdata/influxdb/v2/mock"
-	"github.com/influxdata/influxdb/v2/pkg/flux/execute/table"
	"github.com/influxdata/influxdb/v2/query"
)

@@ -221,7 +223,7 @@ func queryPoints(ctx context.Context, t *testing.T, l *launcher.TestLauncher, op
	if d.verbose {
		t.Logf("query:\n%s", qs)
	}
-	pkg, err := flux.Parse(qs)
+	pkg, err := runtime.ParseToJSON(qs)
	if err != nil {
		t.Fatal(err)
	}

@@ -751,6 +753,193 @@ from(bucket: "%s")
	}
}

type TestQueryProfiler struct {
	start int64
}

func (s TestQueryProfiler) Name() string {
	return fmt.Sprintf("query%d", s.start)
}

func (s TestQueryProfiler) GetResult(q flux.Query, alloc *memory.Allocator) (flux.Table, error) {
	groupKey := execute.NewGroupKey(
		[]flux.ColMeta{
			{
				Label: "_measurement",
				Type:  flux.TString,
			},
		},
		[]values.Value{
			values.NewString(fmt.Sprintf("profiler/query%d", s.start)),
		},
	)
	b := execute.NewColListTableBuilder(groupKey, alloc)
	colMeta := []flux.ColMeta{
		{
			Label: "_measurement",
			Type:  flux.TString,
		},
		{
			Label: "TotalDuration",
			Type:  flux.TInt,
		},
		{
			Label: "CompileDuration",
			Type:  flux.TInt,
		},
		{
			Label: "QueueDuration",
			Type:  flux.TInt,
		},
		{
			Label: "PlanDuration",
			Type:  flux.TInt,
		},
		{
			Label: "RequeueDuration",
			Type:  flux.TInt,
		},
		{
			Label: "ExecuteDuration",
			Type:  flux.TInt,
		},
		{
			Label: "Concurrency",
			Type:  flux.TInt,
		},
		{
			Label: "MaxAllocated",
			Type:  flux.TInt,
		},
		{
			Label: "TotalAllocated",
			Type:  flux.TInt,
		},
		{
			Label: "RuntimeErrors",
			Type:  flux.TString,
		},
		{
			Label: "influxdb/scanned-bytes",
			Type:  flux.TInt,
		},
		{
			Label: "influxdb/scanned-values",
			Type:  flux.TInt,
		},
		{
			Label: "flux/query-plan",
			Type:  flux.TString,
		},
	}
	colData := []interface{}{
		fmt.Sprintf("profiler/query%d", s.start),
		s.start,
		s.start + 1,
		s.start + 2,
		s.start + 3,
		s.start + 4,
		s.start + 5,
		s.start + 6,
		s.start + 7,
		s.start + 8,
		"error1\nerror2",
		s.start + 9,
		s.start + 10,
		"query plan",
	}
	for _, col := range colMeta {
		if _, err := b.AddCol(col); err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(colData); i++ {
		if intValue, ok := colData[i].(int64); ok {
			b.AppendInt(i, intValue)
		} else {
			b.AppendString(i, colData[i].(string))
		}
	}
	tbl, err := b.Table()
	if err != nil {
		return nil, err
	}
	return tbl, nil
}

func TestFluxProfiler(t *testing.T) {
	testcases := []struct {
		name  string
		data  []string
		query string
		want  string
	}{
		{
			name: "range last single point start time",
			data: []string{
				"m,tag=a f=1i 1",
			},
			query: `
option profiler.enabledProfilers = ["query0", "query100", "query100", "NonExistentProfiler"]
from(bucket: v.bucket)
	|> range(start: 1970-01-01T00:00:00.000000001Z, stop: 1970-01-01T01:00:00Z)
	|> last()
`,
			want: `
#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string
#group,false,false,true,true,false,false,true,true,true
#default,_result,,,,,,,,
,result,table,_start,_stop,_time,_value,_field,_measurement,tag
,,0,1970-01-01T00:00:00.000000001Z,1970-01-01T01:00:00Z,1970-01-01T00:00:00.000000001Z,1,f,m,a

#datatype,string,long,string,long,long,long,long,long,long,long,long,long,string,string,long,long
#group,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false
#default,_profiler,,,,,,,,,,,,,,,
,result,table,_measurement,TotalDuration,CompileDuration,QueueDuration,PlanDuration,RequeueDuration,ExecuteDuration,Concurrency,MaxAllocated,TotalAllocated,RuntimeErrors,flux/query-plan,influxdb/scanned-bytes,influxdb/scanned-values
,,0,profiler/query0,0,1,2,3,4,5,6,7,8,"error1
error2","query plan",9,10
,,1,profiler/query100,100,101,102,103,104,105,106,107,108,"error1
error2","query plan",109,110
`,
		},
	}
	execute.RegisterProfilers(&TestQueryProfiler{}, &TestQueryProfiler{start: 100})
	for _, tc := range testcases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			l := launcher.RunTestLauncherOrFail(t, ctx, nil)

			l.SetupOrFail(t)
			defer l.ShutdownOrFail(t, ctx)

			l.WritePointsOrFail(t, strings.Join(tc.data, "\n"))

			queryStr := "import \"profiler\"\nv = {bucket: " + "\"" + l.Bucket.Name + "\"" + "}\n" + tc.query
			req := &query.Request{
				Authorization:  l.Auth,
				OrganizationID: l.Org.ID,
				Compiler: lang.FluxCompiler{
					Query: queryStr,
				},
			}
			if got, err := l.FluxQueryService().Query(ctx, req); err != nil {
				t.Error(err)
			} else {
				dec := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{})
				want, err := dec.Decode(ioutil.NopCloser(strings.NewReader(tc.want)))
				if err != nil {
					t.Fatal(err)
				}
				defer want.Release()

				if err := executetest.EqualResultIterators(want, got); err != nil {
					t.Fatal(err)
				}
			}
		})
	}
}

func TestQueryPushDowns(t *testing.T) {
	t.Skip("Not supported yet")
	testcases := []struct {
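`TestFluxProfiler` above exercises Flux's pluggable profiler hook: any value registered through `execute.RegisterProfilers` whose `Name()` appears in the `profiler.enabledProfilers` option has its `GetResult` table appended to the response under the `_profiler` result. The distilled shape, with `GetResult` stubbed out (a sketch only; see `TestQueryProfiler` above for a full table-building implementation):

```go
package main

import (
	"errors"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/memory"
)

type noopProfiler struct{}

// Name is matched against the Flux option:
//   import "profiler"
//   option profiler.enabledProfilers = ["noop"]
func (noopProfiler) Name() string { return "noop" }

// GetResult would normally build a flux.Table describing the query run;
// it is stubbed here to keep the registration shape in focus.
func (noopProfiler) GetResult(q flux.Query, alloc *memory.Allocator) (flux.Table, error) {
	return nil, errors.New("noop profiler builds no table")
}

func init() {
	execute.RegisterProfilers(noopProfiler{})
}

func main() {}
```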
@@ -5,11 +5,14 @@ import (
	"io/ioutil"
	nethttp "net/http"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
	"github.com/influxdata/influxdb/v2/http"
	"github.com/influxdata/influxdb/v2/pkg/testing/assert"
	"github.com/stretchr/testify/require"
)

func TestStorage_WriteAndQuery(t *testing.T) {

@@ -152,3 +155,18 @@ func TestLauncher_BucketDelete(t *testing.T) {
		t.Fatalf("after bucket delete got %d, exp %d", got, exp)
	}
}

func TestLauncher_UpdateRetentionPolicy(t *testing.T) {
	l := launcher.RunTestLauncherOrFail(t, ctx, nil)
	l.SetupOrFail(t)
	defer l.ShutdownOrFail(t, ctx)

	bucket, err := l.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{ID: &l.Bucket.ID})
	require.NoError(t, err)
	require.NotNil(t, bucket)

	newRetentionPeriod := 1 * time.Hour
	bucket, err = l.BucketService(t).UpdateBucket(ctx, bucket.ID, influxdb.BucketUpdate{RetentionPeriod: &newRetentionPeriod})
	require.NoError(t, err)
	assert.Equal(t, bucket.RetentionPeriod, newRetentionPeriod)
}
@@ -7,10 +7,10 @@ import (
	"os"
	"time"

-	"github.com/influxdata/flux"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
+	"github.com/influxdata/influxdb/v2/cmd/influxd/upgrade"
	_ "github.com/influxdata/influxdb/v2/query/builtin"
	_ "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
	_ "github.com/influxdata/influxdb/v2/tsdb/index/tsi1"
	"github.com/spf13/cobra"

@@ -43,11 +43,6 @@ func main() {
		},
	)

-	// TODO: this should be removed in the future: https://github.com/influxdata/influxdb/issues/16220
-	if os.Getenv("QUERY_TRACING") == "1" {
-		flux.EnableExperimentalTracing()
-	}
-
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
dashboard.go

@@ -706,160 +706,178 @@ func (u ViewUpdate) MarshalJSON() ([]byte, error) {

// LinePlusSingleStatProperties represents options for line plus single stat view in Chronograf
type LinePlusSingleStatProperties struct {
-	Queries []DashboardQuery `json:"queries"`
-	Axes map[string]Axis `json:"axes"`
-	Type string `json:"type"`
-	Legend Legend `json:"legend"`
-	ViewColors []ViewColor `json:"colors"`
-	Prefix string `json:"prefix"`
-	Suffix string `json:"suffix"`
-	DecimalPlaces DecimalPlaces `json:"decimalPlaces"`
-	Note string `json:"note"`
-	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
-	XColumn string `json:"xColumn"`
-	YColumn string `json:"yColumn"`
-	ShadeBelow bool `json:"shadeBelow"`
-	Position string `json:"position"`
-	TimeFormat string `json:"timeFormat"`
-	HoverDimension string `json:"hoverDimension"`
+	Queries []DashboardQuery `json:"queries"`
+	Axes map[string]Axis `json:"axes"`
+	Type string `json:"type"`
+	Legend Legend `json:"legend"`
+	ViewColors []ViewColor `json:"colors"`
+	Prefix string `json:"prefix"`
+	Suffix string `json:"suffix"`
+	DecimalPlaces DecimalPlaces `json:"decimalPlaces"`
+	Note string `json:"note"`
+	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	XColumn string `json:"xColumn"`
+	YColumn string `json:"yColumn"`
+	ShadeBelow bool `json:"shadeBelow"`
+	Position string `json:"position"`
+	TimeFormat string `json:"timeFormat"`
+	HoverDimension string `json:"hoverDimension"`
+	LegendOpacity float64 `json:"legendOpacity"`
+	LegendOrientationThreshold int `json:"legendOrientationThreshold"`
}

// XYViewProperties represents options for line, bar, step, or stacked view in Chronograf
type XYViewProperties struct {
-	Queries []DashboardQuery `json:"queries"`
-	Axes map[string]Axis `json:"axes"`
-	Type string `json:"type"`
-	Legend Legend `json:"legend"`
-	Geom string `json:"geom"` // Either "line", "step", "stacked", or "bar"
-	ViewColors []ViewColor `json:"colors"`
-	Note string `json:"note"`
-	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
-	XColumn string `json:"xColumn"`
-	YColumn string `json:"yColumn"`
-	ShadeBelow bool `json:"shadeBelow"`
-	Position string `json:"position"`
-	TimeFormat string `json:"timeFormat"`
-	HoverDimension string `json:"hoverDimension"`
+	Queries []DashboardQuery `json:"queries"`
+	Axes map[string]Axis `json:"axes"`
+	Type string `json:"type"`
+	Legend Legend `json:"legend"`
+	Geom string `json:"geom"` // Either "line", "step", "stacked", or "bar"
+	ViewColors []ViewColor `json:"colors"`
+	Note string `json:"note"`
+	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	XColumn string `json:"xColumn"`
+	YColumn string `json:"yColumn"`
+	ShadeBelow bool `json:"shadeBelow"`
+	Position string `json:"position"`
+	TimeFormat string `json:"timeFormat"`
+	HoverDimension string `json:"hoverDimension"`
+	LegendOpacity float64 `json:"legendOpacity"`
+	LegendOrientationThreshold int `json:"legendOrientationThreshold"`
}

// BandViewProperties represents options for the band view
type BandViewProperties struct {
-	Queries []DashboardQuery `json:"queries"`
-	Axes map[string]Axis `json:"axes"`
-	Type string `json:"type"`
-	Legend Legend `json:"legend"`
-	Geom string `json:"geom"`
-	ViewColors []ViewColor `json:"colors"`
-	Note string `json:"note"`
-	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
-	TimeFormat string `json:"timeFormat"`
-	HoverDimension string `json:"hoverDimension"`
-	XColumn string `json:"xColumn"`
-	YColumn string `json:"yColumn"`
-	UpperColumn string `json:"upperColumn"`
-	MainColumn string `json:"mainColumn"`
-	LowerColumn string `json:"lowerColumn"`
+	Queries []DashboardQuery `json:"queries"`
+	Axes map[string]Axis `json:"axes"`
+	Type string `json:"type"`
+	Legend Legend `json:"legend"`
+	Geom string `json:"geom"`
+	ViewColors []ViewColor `json:"colors"`
+	Note string `json:"note"`
+	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	TimeFormat string `json:"timeFormat"`
+	HoverDimension string `json:"hoverDimension"`
+	XColumn string `json:"xColumn"`
+	YColumn string `json:"yColumn"`
+	UpperColumn string `json:"upperColumn"`
+	MainColumn string `json:"mainColumn"`
+	LowerColumn string `json:"lowerColumn"`
+	LegendOpacity float64 `json:"legendOpacity"`
+	LegendOrientationThreshold int `json:"legendOrientationThreshold"`
}

// CheckViewProperties represents options for a view representing a check
type CheckViewProperties struct {
-	Type string `json:"type"`
-	CheckID string `json:"checkID"`
-	Queries []DashboardQuery `json:"queries"`
-	ViewColors []string `json:"colors"`
+	Type string `json:"type"`
+	CheckID string `json:"checkID"`
+	Queries []DashboardQuery `json:"queries"`
+	ViewColors []string `json:"colors"`
+	LegendOpacity float64 `json:"legendOpacity"`
+	LegendOrientationThreshold int `json:"legendOrientationThreshold"`
}

// SingleStatViewProperties represents options for single stat view in Chronograf
type SingleStatViewProperties struct {
-	Type string `json:"type"`
-	Queries []DashboardQuery `json:"queries"`
-	Prefix string `json:"prefix"`
-	TickPrefix string `json:"tickPrefix"`
-	Suffix string `json:"suffix"`
-	TickSuffix string `json:"tickSuffix"`
-	ViewColors []ViewColor `json:"colors"`
-	DecimalPlaces DecimalPlaces `json:"decimalPlaces"`
-	Note string `json:"note"`
-	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	Type string `json:"type"`
+	Queries []DashboardQuery `json:"queries"`
+	Prefix string `json:"prefix"`
+	TickPrefix string `json:"tickPrefix"`
+	Suffix string `json:"suffix"`
+	TickSuffix string `json:"tickSuffix"`
+	ViewColors []ViewColor `json:"colors"`
+	DecimalPlaces DecimalPlaces `json:"decimalPlaces"`
+	Note string `json:"note"`
+	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	LegendOpacity float64 `json:"legendOpacity"`
+	LegendOrientationThreshold int `json:"legendOrientationThreshold"`
}

// HistogramViewProperties represents options for histogram view in Chronograf
type HistogramViewProperties struct {
-	Type string `json:"type"`
-	Queries []DashboardQuery `json:"queries"`
-	ViewColors []ViewColor `json:"colors"`
-	XColumn string `json:"xColumn"`
-	FillColumns []string `json:"fillColumns"`
-	XDomain []float64 `json:"xDomain,omitempty"`
-	XAxisLabel string `json:"xAxisLabel"`
-	Position string `json:"position"`
-	BinCount int `json:"binCount"`
-	Note string `json:"note"`
-	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	Type string `json:"type"`
+	Queries []DashboardQuery `json:"queries"`
+	ViewColors []ViewColor `json:"colors"`
+	XColumn string `json:"xColumn"`
+	FillColumns []string `json:"fillColumns"`
+	XDomain []float64 `json:"xDomain,omitempty"`
+	XAxisLabel string `json:"xAxisLabel"`
+	Position string `json:"position"`
+	BinCount int `json:"binCount"`
+	Note string `json:"note"`
+	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	LegendOpacity float64 `json:"legendOpacity"`
+	LegendOrientationThreshold int `json:"legendOrientationThreshold"`
}

// HeatmapViewProperties represents options for heatmap view in Chronograf
type HeatmapViewProperties struct {
-	Type string `json:"type"`
-	Queries []DashboardQuery `json:"queries"`
-	ViewColors []string `json:"colors"`
-	BinSize int32 `json:"binSize"`
-	XColumn string `json:"xColumn"`
-	YColumn string `json:"yColumn"`
-	XDomain []float64 `json:"xDomain,omitempty"`
-	YDomain []float64 `json:"yDomain,omitempty"`
-	XAxisLabel string `json:"xAxisLabel"`
-	YAxisLabel string `json:"yAxisLabel"`
-	XPrefix string `json:"xPrefix"`
-	XSuffix string `json:"xSuffix"`
-	YPrefix string `json:"yPrefix"`
-	YSuffix string `json:"ySuffix"`
-	Note string `json:"note"`
-	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
-	TimeFormat string `json:"timeFormat"`
+	Type string `json:"type"`
+	Queries []DashboardQuery `json:"queries"`
+	ViewColors []string `json:"colors"`
+	BinSize int32 `json:"binSize"`
+	XColumn string `json:"xColumn"`
+	YColumn string `json:"yColumn"`
+	XDomain []float64 `json:"xDomain,omitempty"`
+	YDomain []float64 `json:"yDomain,omitempty"`
+	XAxisLabel string `json:"xAxisLabel"`
+	YAxisLabel string `json:"yAxisLabel"`
+	XPrefix string `json:"xPrefix"`
+	XSuffix string `json:"xSuffix"`
+	YPrefix string `json:"yPrefix"`
+	YSuffix string `json:"ySuffix"`
+	Note string `json:"note"`
+	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	TimeFormat string `json:"timeFormat"`
+	LegendOpacity float64 `json:"legendOpacity"`
+	LegendOrientationThreshold int `json:"legendOrientationThreshold"`
}

// ScatterViewProperties represents options for scatter view in Chronograf
type ScatterViewProperties struct {
-	Type string `json:"type"`
-	Queries []DashboardQuery `json:"queries"`
-	ViewColors []string `json:"colors"`
-	FillColumns []string `json:"fillColumns"`
-	SymbolColumns []string `json:"symbolColumns"`
-	XColumn string `json:"xColumn"`
-	YColumn string `json:"yColumn"`
-	XDomain []float64 `json:"xDomain,omitempty"`
-	YDomain []float64 `json:"yDomain,omitempty"`
-	XAxisLabel string `json:"xAxisLabel"`
-	YAxisLabel string `json:"yAxisLabel"`
-	XPrefix string `json:"xPrefix"`
-	XSuffix string `json:"xSuffix"`
-	YPrefix string `json:"yPrefix"`
-	YSuffix string `json:"ySuffix"`
-	Note string `json:"note"`
-	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
-	TimeFormat string `json:"timeFormat"`
+	Type string `json:"type"`
+	Queries []DashboardQuery `json:"queries"`
+	ViewColors []string `json:"colors"`
+	FillColumns []string `json:"fillColumns"`
+	SymbolColumns []string `json:"symbolColumns"`
+	XColumn string `json:"xColumn"`
+	YColumn string `json:"yColumn"`
+	XDomain []float64 `json:"xDomain,omitempty"`
+	YDomain []float64 `json:"yDomain,omitempty"`
+	XAxisLabel string `json:"xAxisLabel"`
+	YAxisLabel string `json:"yAxisLabel"`
+	XPrefix string `json:"xPrefix"`
+	XSuffix string `json:"xSuffix"`
+	YPrefix string `json:"yPrefix"`
+	YSuffix string `json:"ySuffix"`
+	Note string `json:"note"`
+	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	TimeFormat string `json:"timeFormat"`
+	LegendOpacity float64 `json:"legendOpacity"`
+	LegendOrientationThreshold int `json:"legendOrientationThreshold"`
}

// MosaicViewProperties represents options for mosaic view in Chronograf
type MosaicViewProperties struct {
-	Type string `json:"type"`
-	Queries []DashboardQuery `json:"queries"`
-	ViewColors []string `json:"colors"`
-	FillColumns []string `json:"fillColumns"`
-	XColumn string `json:"xColumn"`
-	YSeriesColumns []string `json:"ySeriesColumns"`
-	XDomain []float64 `json:"xDomain,omitempty"`
-	YDomain []float64 `json:"yDomain,omitempty"`
-	XAxisLabel string `json:"xAxisLabel"`
-	YAxisLabel string `json:"yAxisLabel"`
-	XPrefix string `json:"xPrefix"`
-	XSuffix string `json:"xSuffix"`
-	YPrefix string `json:"yPrefix"`
-	YSuffix string `json:"ySuffix"`
-	Note string `json:"note"`
-	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
-	TimeFormat string `json:"timeFormat"`
+	Type string `json:"type"`
+	Queries []DashboardQuery `json:"queries"`
+	ViewColors []string `json:"colors"`
+	FillColumns []string `json:"fillColumns"`
+	XColumn string `json:"xColumn"`
+	YSeriesColumns []string `json:"ySeriesColumns"`
+	XDomain []float64 `json:"xDomain,omitempty"`
+	YDomain []float64 `json:"yDomain,omitempty"`
+	XAxisLabel string `json:"xAxisLabel"`
+	YAxisLabel string `json:"yAxisLabel"`
+	XPrefix string `json:"xPrefix"`
+	XSuffix string `json:"xSuffix"`
+	YPrefix string `json:"yPrefix"`
+	YSuffix string `json:"ySuffix"`
+	Note string `json:"note"`
+	ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
+	TimeFormat string `json:"timeFormat"`
+	LegendOpacity float64 `json:"legendOpacity"`
+	LegendOrientationThreshold int `json:"legendOrientationThreshold"`
}

// GaugeViewProperties represents options for gauge view in Chronograf
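Every view-properties struct above gains the same pair of fields, `LegendOpacity` and `LegendOrientationThreshold`, serialized without `omitempty`, so their zero values always appear in view JSON (which is exactly what the updated golden test below asserts). A reduced sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// viewProps is a trimmed stand-in for the view-properties structs above,
// keeping only the newly added legend fields.
type viewProps struct {
	Type                       string  `json:"type"`
	LegendOpacity              float64 `json:"legendOpacity"`
	LegendOrientationThreshold int     `json:"legendOrientationThreshold"`
}

func main() {
	// Without omitempty the zero values are still emitted, which is why
	// the golden JSON below gains "legendOpacity": 0 entries.
	out, _ := json.Marshal(viewProps{Type: "xy"})
	fmt.Println(string(out)) // {"type":"xy","legendOpacity":0,"legendOrientationThreshold":0}
}
```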
@@ -51,9 +51,11 @@ func TestView_MarshalJSON(t *testing.T) {
    "xColumn": "",
    "yColumn": "",
    "shadeBelow": false,
-   "position": "",
-   "timeFormat": "",
-   "hoverDimension": ""
+   "position": "",
+   "timeFormat": "",
+   "hoverDimension": "",
+   "legendOpacity": 0,
+   "legendOrientationThreshold": 0
  }
}
`,
flags.yml

@@ -83,6 +83,13 @@
  contact: Query Team
  lifetime: temporary

- name: Query Tracing
  description: Turn on query tracing for queries that are sampled
  key: queryTracing
  default: false
  contact: Query Team
  lifetime: permanent

- name: Simple Task Options Extraction
  description: Simplified task options extraction to avoid undefined functions when saving tasks
  key: simpleTaskOptionsExtraction

@@ -133,6 +140,12 @@
  default: false
  contact: Compute Team

- name: Inject Latest Success Time
  description: Inject the latest successful task run timestamp into a Task query extern when executing.
  key: injectLatestSuccessTime
  default: false
  contact: Compute Team

- name: Enforce Organization Dashboard Limits
  description: Enforces the default limit params for the dashboards api when orgs are set
  key: enforceOrgDashboardLimits
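The `queryTracing` flag defined above supersedes the `QUERY_TRACING=1` environment check deleted from main.go earlier in this diff. flags.yml feeds code generation in kit/feature, which emits one accessor per key; the sketch below imitates that generated shape with hypothetical names rather than quoting the real generated file:

```go
package main

import (
	"context"
	"fmt"
)

// boolFlag imitates a generated feature flag: a key plus the default
// that applies when no flagger supplies an override.
type boolFlag struct {
	key string
	def bool
}

func (f boolFlag) Enabled(ctx context.Context) bool {
	// A real flagger middleware would resolve overrides; a context
	// value stands in for it here (sketch only).
	if v, ok := ctx.Value(f.key).(bool); ok {
		return v
	}
	return f.def
}

var queryTracing = boolFlag{key: "queryTracing", def: false}

func main() {
	fmt.Println(queryTracing.Enabled(context.Background())) // false: the default
	ctx := context.WithValue(context.Background(), queryTracing.key, true)
	fmt.Println(queryTracing.Enabled(ctx)) // true: overridden
}
```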
@@ -266,7 +266,7 @@ func (s *mockStorage) UpdateTarget(ctx context.Context, update *influxdb.Scraper
	defer s.Unlock()

	for k, v := range s.Targets {
-		if v.ID.String() == string(update.ID) {
+		if v.ID.String() == update.ID.String() {
			s.Targets[k] = *update
			break
		}
go.mod

@@ -3,9 +3,7 @@ module github.com/influxdata/influxdb/v2
go 1.13

require (
	cloud.google.com/go/bigtable v1.3.0 // indirect
	github.com/BurntSushi/toml v0.3.1
	github.com/DATA-DOG/go-sqlmock v1.4.1 // indirect
	github.com/NYTimes/gziphandler v1.0.1
	github.com/RoaringBitmap/roaring v0.4.16
	github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883

@@ -31,7 +29,6 @@ require (
	github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 // indirect
	github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 // indirect
	github.com/go-chi/chi v4.1.0+incompatible
	github.com/go-sql-driver/mysql v1.5.0 // indirect
	github.com/go-stack/stack v1.8.0
	github.com/gogo/protobuf v1.3.1
	github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021

@@ -51,7 +48,7 @@ require (
	github.com/hashicorp/vault/api v1.0.2
	github.com/imdario/mergo v0.3.9 // indirect
	github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6
-	github.com/influxdata/flux v0.66.1
+	github.com/influxdata/flux v0.83.1
	github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69
	github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6
	github.com/influxdata/pkg-config v0.2.3

@@ -102,15 +99,14 @@ require (
	github.com/yudai/pp v2.0.1+incompatible // indirect
	go.uber.org/multierr v1.5.0
	go.uber.org/zap v1.14.1
-	golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37
-	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
-	golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
+	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
+	golang.org/x/net v0.0.0-20200625001655-4c5254603344
	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
-	golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4
+	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
+	golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
	golang.org/x/text v0.3.2
	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
-	golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f
+	golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a
	google.golang.org/api v0.17.0
	google.golang.org/grpc v1.27.1
	gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect
go.sum

@@ -2,8 +2,6 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w=
cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=

@@ -29,10 +27,34 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest v0.10.1 h1:uaB8A32IZU9YKs9v50+/LWIWTDHJk2vlGzbfd7FfESI=
github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM=
github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=

@@ -68,6 +90,8 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.29.16 h1:Gbtod7Y4W/Ai7wPtesdvgGVTkFN8JxAaGouRLlcQfQs=
github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3 h1:wOysYcIdqv3WnvwqFFzrYCFALPED7qkUGaLXu359GSc=
github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3/go.mod h1:UMqtWQTnOe4byzwe7Zhwh8f8s+36uszN51sJrSIZlTE=
github.com/benbjohnson/tmpl v1.0.0 h1:T5QPGJD0W6JJxyEEAlVnX3co/IkUrfHen1/42nlgAHo=

@@ -81,12 +105,15 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5 h1:kS0dw4K730x7cxT+bVyTyYJZHuSoH7ofSr/Ijit56Qw=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5/go.mod h1:CDReaxg1cmLrtcasZy43l4EYPAknXLiQSrb7tLw5zXM=
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE=
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e/go.mod h1:errmMKH8tTB49UR2A8C8DPYkyudelsYJwJFaZHQ6ik8=
github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=

@@ -116,11 +143,15 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg=
github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU=
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.7.3-0.20180815000130-e05b657120a6/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=

@@ -164,6 +195,7 @@ github.com/go-chi/chi v4.1.0+incompatible h1:ETj3cggsVIY2Xao5ExCu6YhEh5MD6JTfcBz
github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=

@@ -171,12 +203,13 @@ github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=

@@ -185,6 +218,8 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021 h1:HYV500jCgk+IC68L5sWrLFIWMpaUFfXXpJSAb7XOoBk=
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=

@@ -318,8 +353,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6 h1:OtjKkeWDjUbyMi82C7XXy7Tvm2LXMwiBBXyFIGNPaGA=
github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og=
-github.com/influxdata/flux v0.66.1 h1:d98L5k9mmP7bU7d2zAx6C3dCe5B8/PEa1wkWzZAE+Ok=
-github.com/influxdata/flux v0.66.1/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
+github.com/influxdata/flux v0.83.1 h1:KdJ19S2bj0jZvhICdS8d54BHYCJNuq9h3A/HkIKOD6o=
+github.com/influxdata/flux v0.83.1/go.mod h1:+6FzHdZdwYjEIa2iuQEJ92x+C2A8X1jI0qdpVT0DJfM=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6 h1:CFx+pP90q/qg3spoiZjf8donE4WpAdjeJfPOcoNqkWo=

@@ -336,6 +371,8 @@ github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaF
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=

@@ -464,6 +501,8 @@ github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=

@@ -537,6 +576,8 @@ github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbd
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/snowflakedb/gosnowflake v1.3.4 h1:Gyoi6g4lMHsilEwW9+KV+bgYkJTgf5pVfvL7Utus920=
github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=

@@ -585,6 +626,10 @@ github.com/tylerb/graceful v1.2.15 h1:B0x01Y8fsJpogzZTkDg6BDi6eMf03s01lEKGdrv83o
github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II=
github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo=
github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg=
github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU=
github.com/uber/athenadriver v1.1.4 h1:k6k0RBeXjR7oZ8NO557MsRw3eX1cc/9B0GNx+W9eHiQ=
github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E=
github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY=
github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=

@@ -603,6 +648,7 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3Ifn
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=

@@ -624,6 +670,7 @@ go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=

@@ -634,6 +681,7 @@ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
|
||||
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
|
@ -642,13 +690,15 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72
|
|||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
|
||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
|
@ -662,6 +712,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -683,8 +735,11 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG
|
|||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -712,6 +767,8 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjut
|
|||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
|
@ -729,6 +786,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha
|
|||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -758,6 +817,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/
|
|||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -766,6 +826,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -820,6 +882,8 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 h1:DFtSed2q3HtNuVazwVDZ4nS
|
|||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f h1:haxFuLhmPh0vRpVv5MeXoGyfCB39/Ohsq7A68h65qAg=
|
||||
golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a h1:kVMPw4f6EVqYdfGQTedjrpw1dbE2PEMfw4jwXsNdn9s=
|
||||
golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -858,8 +922,6 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn
|
|||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU=
|
||||
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
|
@ -934,6 +996,7 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
istio.io/api v0.0.0-20190515205759-982e5c3888c6/go.mod h1:hhLFQmpHia8zgaM37vb2ml9iS5NfNfqZGRt1pS9aVEo=
|
||||
|
|
|
@@ -448,24 +448,26 @@ func TestService_handleGetDashboard(t *testing.T) {
   "x": 1,
   "y": 2,
   "w": 3,
-  "h": 4,
-  "name": "the cell name",
-  "properties": {
-    "shape": "chronograf-v2",
-    "axes": null,
-    "colors": null,
-    "geom": "",
-    "legend": {},
-    "position": "",
-    "note": "",
-    "queries": null,
-    "shadeBelow": false,
-    "hoverDimension": "",
-    "showNoteWhenEmpty": false,
-    "timeFormat": "",
-    "type": "xy",
-    "xColumn": "",
-    "yColumn": ""
+  "h": 4,
+  "name": "the cell name",
+  "properties": {
+    "shape": "chronograf-v2",
+    "axes": null,
+    "colors": null,
+    "geom": "",
+    "legend": {},
+    "position": "",
+    "note": "",
+    "queries": null,
+    "shadeBelow": false,
+    "hoverDimension": "",
+    "showNoteWhenEmpty": false,
+    "timeFormat": "",
+    "type": "xy",
+    "xColumn": "",
+    "yColumn": "",
+    "legendOpacity": 0,
+    "legendOrientationThreshold": 0
   },
   "links": {
     "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550",
@@ -983,7 +985,9 @@ func TestService_handlePostDashboard(t *testing.T) {
   "type": "",
   "xColumn": "",
   "yColumn": "",
-  "type": "xy"
+  "type": "xy",
+  "legendOpacity": 0,
+  "legendOrientationThreshold": 0
   },
   "links": {
     "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550",
@@ -18,7 +18,6 @@ import (
 	"github.com/influxdata/flux/ast"
 	"github.com/influxdata/flux/csv"
 	"github.com/influxdata/flux/lang"
-	"github.com/influxdata/flux/repl"
 	"github.com/influxdata/influxdb/v2"
 	"github.com/influxdata/influxdb/v2/jsonweb"
 	"github.com/influxdata/influxdb/v2/query"
@@ -32,11 +31,10 @@ type QueryRequest struct {
 	Query string `json:"query"`

 	// Flux fields
-	Extern  *ast.File    `json:"extern,omitempty"`
-	Spec    *flux.Spec   `json:"spec,omitempty"`
-	AST     *ast.Package `json:"ast,omitempty"`
-	Dialect QueryDialect `json:"dialect"`
-	Now     time.Time    `json:"now"`
+	Extern  json.RawMessage `json:"extern,omitempty"`
+	AST     json.RawMessage `json:"ast,omitempty"`
+	Dialect QueryDialect    `json:"dialect"`
+	Now     time.Time       `json:"now"`

 	// InfluxQL fields
 	Bucket string `json:"bucket,omitempty"`
@@ -271,19 +269,13 @@ func (r QueryRequest) proxyRequest(now func() time.Time) (*query.ProxyRequest, e
 			Query: r.Query,
 		}
 	}
-	} else if r.AST != nil {
+	} else if len(r.AST) > 0 {
 		c := lang.ASTCompiler{
-			AST: r.AST,
-			Now: n,
-		}
-		if r.Extern != nil {
-			c.PrependFile(r.Extern)
+			Extern: r.Extern,
+			AST:    r.AST,
+			Now:    n,
 		}
 		compiler = c
-	} else if r.Spec != nil {
-		compiler = repl.Compiler{
-			Spec: r.Spec,
-		}
 	}

 	delimiter, _ := utf8.DecodeRuneInString(r.Dialect.Delimiter)
@@ -245,7 +245,7 @@ func TestFluxHandler_postFluxAST(t *testing.T) {
 	name: "get ast from()",
 	w:    httptest.NewRecorder(),
 	r:    httptest.NewRequest("POST", "/api/v2/query/ast", bytes.NewBufferString(`{"query": "from()"}`)),
-	want: `{"ast":{"type":"Package","package":"main","files":[{"type":"File","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"metadata":"parser-type=go","package":null,"imports":null,"body":[{"type":"ExpressionStatement","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"expression":{"type":"CallExpression","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"callee":{"type":"Identifier","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":5},"source":"from"},"name":"from"}}}]}]}}
+	want: `{"ast":{"type":"Package","package":"main","files":[{"type":"File","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"metadata":"parser-type=rust","package":null,"imports":null,"body":[{"type":"ExpressionStatement","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"expression":{"type":"CallExpression","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"callee":{"type":"Identifier","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":5},"source":"from"},"name":"from"}}}]}]}}
 `,
 	status: http.StatusOK,
 },
@@ -3,6 +3,7 @@ package http

 import (
 	"bytes"
 	"context"
+	"encoding/json"
 	"net/http"
 	"net/http/httptest"
 	"reflect"
@@ -33,7 +34,7 @@ var cmpOptions = cmp.Options{
 func TestQueryRequest_WithDefaults(t *testing.T) {
 	type fields struct {
 		Spec    *flux.Spec
-		AST     *ast.Package
+		AST     json.RawMessage
 		Query   string
 		Type    string
 		Dialect QueryDialect
@@ -59,7 +60,6 @@ func TestQueryRequest_WithDefaults(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			r := QueryRequest{
-				Spec:  tt.fields.Spec,
 				AST:   tt.fields.AST,
 				Query: tt.fields.Query,
 				Type:  tt.fields.Type,
@@ -75,9 +75,8 @@ func TestQueryRequest_WithDefaults(t *testing.T) {

 func TestQueryRequest_Validate(t *testing.T) {
 	type fields struct {
-		Extern  *ast.File
-		Spec    *flux.Spec
-		AST     *ast.Package
+		Extern  json.RawMessage
+		AST     json.RawMessage
 		Query   string
 		Type    string
 		Dialect QueryDialect
@@ -95,19 +94,6 @@ func TestQueryRequest_Validate(t *testing.T) {
 			},
 			wantErr: true,
 		},
-		{
-			name: "query cannot have both extern and spec",
-			fields: fields{
-				Extern: &ast.File{},
-				Spec:   &flux.Spec{},
-				Type:   "flux",
-				Dialect: QueryDialect{
-					Delimiter:      ",",
-					DateTimeFormat: "RFC3339",
-				},
-			},
-			wantErr: true,
-		},
 		{
 			name: "requires flux type",
 			fields: fields{
@@ -189,7 +175,6 @@ func TestQueryRequest_Validate(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			r := QueryRequest{
 				Extern: tt.fields.Extern,
-				Spec:   tt.fields.Spec,
 				AST:    tt.fields.AST,
 				Query:  tt.fields.Query,
 				Type:   tt.fields.Type,
@@ -205,9 +190,9 @@ func TestQueryRequest_Validate(t *testing.T) {

 func TestQueryRequest_proxyRequest(t *testing.T) {
 	type fields struct {
-		Extern  *ast.File
+		Extern  json.RawMessage
 		Spec    *flux.Spec
-		AST     *ast.Package
+		AST     json.RawMessage
 		Query   string
 		Type    string
 		Dialect QueryDialect
@@ -258,7 +243,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 		{
 			name: "valid AST",
 			fields: fields{
-				AST:  &ast.Package{},
+				AST:  mustMarshal(&ast.Package{}),
 				Type: "flux",
 				Dialect: QueryDialect{
 					Delimiter: ",",
@@ -271,7 +256,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 			want: &query.ProxyRequest{
 				Request: query.Request{
 					Compiler: lang.ASTCompiler{
-						AST: &ast.Package{},
+						AST: mustMarshal(&ast.Package{}),
 						Now: time.Unix(1, 1),
 					},
 				},
@@ -286,7 +271,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 		{
 			name: "valid AST with calculated now",
 			fields: fields{
-				AST:  &ast.Package{},
+				AST:  mustMarshal(&ast.Package{}),
 				Type: "flux",
 				Dialect: QueryDialect{
 					Delimiter: ",",
@@ -298,7 +283,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 			want: &query.ProxyRequest{
 				Request: query.Request{
 					Compiler: lang.ASTCompiler{
-						AST: &ast.Package{},
+						AST: mustMarshal(&ast.Package{}),
 						Now: time.Unix(2, 2),
 					},
 				},
@@ -313,7 +298,7 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 		{
 			name: "valid AST with extern",
 			fields: fields{
-				Extern: &ast.File{
+				Extern: mustMarshal(&ast.File{
 					Body: []ast.Statement{
 						&ast.OptionStatement{
 							Assignment: &ast.VariableAssignment{
@@ -322,8 +307,8 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 							},
 						},
 					},
-				},
-				AST:  &ast.Package{},
+				}),
+				AST:  mustMarshal(&ast.Package{}),
 				Type: "flux",
 				Dialect: QueryDialect{
 					Delimiter: ",",
@@ -335,20 +320,17 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 			want: &query.ProxyRequest{
 				Request: query.Request{
 					Compiler: lang.ASTCompiler{
-						AST: &ast.Package{
-							Files: []*ast.File{
-								{
-									Body: []ast.Statement{
-										&ast.OptionStatement{
-											Assignment: &ast.VariableAssignment{
-												ID:   &ast.Identifier{Name: "x"},
-												Init: &ast.IntegerLiteral{Value: 0},
-											},
-										},
-									},
-								},
-							},
-						},
+						Extern: mustMarshal(&ast.File{
+							Body: []ast.Statement{
+								&ast.OptionStatement{
+									Assignment: &ast.VariableAssignment{
+										ID:   &ast.Identifier{Name: "x"},
+										Init: &ast.IntegerLiteral{Value: 0},
+									},
+								},
+							},
+						}),
+						AST: mustMarshal(&ast.Package{}),
 						Now: time.Unix(1, 1),
 					},
 				},
@@ -365,7 +347,6 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			r := QueryRequest{
 				Extern: tt.fields.Extern,
-				Spec:   tt.fields.Spec,
 				AST:    tt.fields.AST,
 				Query:  tt.fields.Query,
 				Type:   tt.fields.Type,
@@ -385,6 +366,14 @@ func TestQueryRequest_proxyRequest(t *testing.T) {
 	}
 }

+func mustMarshal(p ast.Node) []byte {
+	bs, err := json.Marshal(p)
+	if err != nil {
+		panic(err)
+	}
+	return bs
+}
+
 func Test_decodeQueryRequest(t *testing.T) {
 	type args struct {
 		ctx context.Context
@@ -481,6 +470,25 @@ func Test_decodeQueryRequest(t *testing.T) {
 }

 func Test_decodeProxyQueryRequest(t *testing.T) {
+	externJSON := `{
+		"type": "File",
+		"body": [
+			{
+				"type": "OptionStatement",
+				"assignment": {
+					"type": "VariableAssignment",
+					"id": {
+						"type": "Identifier",
+						"name": "x"
+					},
+					"init": {
+						"type": "IntegerLiteral",
+						"value": "0"
+					}
+				}
+			}
+		]
+	}`
 	type args struct {
 		ctx context.Context
 		r   *http.Request
@@ -525,25 +533,7 @@ func Test_decodeProxyQueryRequest(t *testing.T) {
 			args: args{
 				r: httptest.NewRequest("POST", "/", bytes.NewBufferString(`
 {
-	"extern": {
-		"type": "File",
-		"body": [
-			{
-				"type": "OptionStatement",
-				"assignment": {
-					"type": "VariableAssignment",
-					"id": {
-						"type": "Identifier",
-						"name": "x"
-					},
-					"init": {
-						"type": "IntegerLiteral",
-						"value": "0"
-					}
-				}
-			}
-		]
-	},
+	"extern": `+externJSON+`,
 	"query": "from(bucket: \"mybucket\")"
 }
 `)),
@@ -559,17 +549,8 @@ func Test_decodeProxyQueryRequest(t *testing.T) {
 				Request: query.Request{
 					OrganizationID: func() platform.ID { s, _ := platform.IDFromString("deadbeefdeadbeef"); return *s }(),
 					Compiler: lang.FluxCompiler{
-						Extern: &ast.File{
-							Body: []ast.Statement{
-								&ast.OptionStatement{
-									Assignment: &ast.VariableAssignment{
-										ID:   &ast.Identifier{Name: "x"},
-										Init: &ast.IntegerLiteral{Value: 0},
-									},
-								},
-							},
-						},
-						Query: `from(bucket: "mybucket")`,
+						Extern: []byte(externJSON),
+						Query:  `from(bucket: "mybucket")`,
 					},
 				},
 				Dialect: &csv.Dialect{
@@ -629,3 +610,59 @@ func Test_decodeProxyQueryRequest(t *testing.T) {
 		})
 	}
 }
+
+func TestProxyRequestToQueryRequest_Compilers(t *testing.T) {
+	tests := []struct {
+		name string
+		pr   query.ProxyRequest
+		want QueryRequest
+	}{
+		{
+			name: "flux compiler copied",
+			pr: query.ProxyRequest{
+				Dialect: &query.NoContentDialect{},
+				Request: query.Request{
+					Compiler: lang.FluxCompiler{
+						Query: `howdy`,
+						Now:   time.Unix(45, 45),
+					},
+				},
+			},
+			want: QueryRequest{
+				Type:            "flux",
+				Query:           `howdy`,
+				PreferNoContent: true,
+				Now:             time.Unix(45, 45),
+			},
+		},
+		{
+			name: "AST compiler copied",
+			pr: query.ProxyRequest{
+				Dialect: &query.NoContentDialect{},
+				Request: query.Request{
+					Compiler: lang.ASTCompiler{
+						Now: time.Unix(45, 45),
+						AST: mustMarshal(&ast.Package{}),
+					},
+				},
+			},
+			want: QueryRequest{
+				Type:            "flux",
+				PreferNoContent: true,
+				AST:             mustMarshal(&ast.Package{}),
+				Now:             time.Unix(45, 45),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := QueryRequestFromProxyRequest(&tt.pr)
+			if err != nil {
+				t.Error(err)
+			} else if !reflect.DeepEqual(*got, tt.want) {
+				t.Errorf("QueryRequestFromProxyRequest = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
@@ -7024,15 +7024,15 @@ components:
       maxLength: 1
       minLength: 1
     annotations:
-      description: Https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns
+      description: https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns
       type: array
-      uniqueItems: true
       items:
         type: string
         enum:
           - "group"
           - "datatype"
           - "default"
+      uniqueItems: true
     commentPrefix:
       description: Character prefixed to comment strings
       type: string
@@ -7128,7 +7128,7 @@ components:
       description: ID of org that authorization is scoped to.
     permissions:
       type: array
-      minLength: 1
+      minItems: 1
       description: List of permissions for an auth. An auth must have at least one Permission.
       items:
        $ref: "#/components/schemas/Permission"
@@ -8990,6 +8990,11 @@ components:
          enum: [overlaid, stacked]
        geom:
          $ref: "#/components/schemas/XYGeom"
+       legendOpacity:
+         type: number
+         format: float
+       legendOrientationThreshold:
+         type: integer
    XYGeom:
      type: string
      enum: [line, step, stacked, bar, monotoneX]
@@ -9047,6 +9052,11 @@ components:
          enum: [auto, x, y, xy]
        geom:
          $ref: "#/components/schemas/XYGeom"
+       legendOpacity:
+         type: number
+         format: float
+       legendOrientationThreshold:
+         type: integer
    LinePlusSingleStatProperties:
      type: object
      required:
@@ -9107,6 +9117,11 @@ components:
          type: string
        decimalPlaces:
          $ref: "#/components/schemas/DecimalPlaces"
+       legendOpacity:
+         type: number
+         format: float
+       legendOrientationThreshold:
+         type: integer
    MosaicViewProperties:
      type: object
      required:
@@ -9182,6 +9197,11 @@ components:
          type: string
        ySuffix:
          type: string
+       legendOpacity:
+         type: number
+         format: float
+       legendOrientationThreshold:
+         type: integer
    ScatterViewProperties:
      type: object
      required:
@@ -9260,6 +9280,11 @@ components:
          type: string
        ySuffix:
          type: string
+       legendOpacity:
+         type: number
+         format: float
+       legendOrientationThreshold:
+         type: integer
    HeatmapViewProperties:
      type: object
      required:
@@ -9331,6 +9356,11 @@ components:
          type: string
        binSize:
          type: number
+       legendOpacity:
+         type: number
+         format: float
+       legendOrientationThreshold:
+         type: integer
    SingleStatViewProperties:
      type: object
      required:
@@ -9379,6 +9409,11 @@ components:
          $ref: "#/components/schemas/Legend"
        decimalPlaces:
          $ref: "#/components/schemas/DecimalPlaces"
+       legendOpacity:
+         type: number
+         format: float
+       legendOrientationThreshold:
+         type: integer
    HistogramViewProperties:
      type: object
      required:
@@ -9433,6 +9468,11 @@ components:
          enum: [overlaid, stacked]
        binCount:
          type: integer
+       legendOpacity:
+         type: number
+         format: float
+       legendOrientationThreshold:
+         type: integer
    GaugeViewProperties:
      type: object
      required:
@@ -9595,6 +9635,11 @@ components:
          type: array
          items:
            $ref: "#/components/schemas/DashboardColor"
+       legendOpacity:
+         type: number
+         format: float
+       legendOrientationThreshold:
+         type: integer
    Axes:
      description: The viewport for a View's visualizations
      type: object
@@ -10888,7 +10933,7 @@ components:
      example: { "color": "ffb3b3", "description": "this is a description" }
    LabelCreateRequest:
      type: object
-     required: [orgID]
+     required: [orgID, name]
      properties:
        orgID:
          type: string
@@ -311,15 +311,7 @@ LOOP:
 		e.Metrics.Requests.WithLabelValues(statusLabel).Inc()

-		// Check if the query was interrupted during an uninterruptible statement.
-		interrupted := false
-		select {
-		case <-ctx.Done():
-			interrupted = true
-		default:
-			// Query has not been interrupted.
-		}
-
-		if interrupted {
+		if err := ctx.Err(); err != nil {
 			statusLabel = control.LabelInterruptedErr
 			e.Metrics.Requests.WithLabelValues(statusLabel).Inc()
 			break
@@ -142,6 +142,20 @@ func MemoryOptimizedSchemaMutation() BoolFlag {
 	return memoryOptimizedSchemaMutation
 }

+var queryTracing = MakeBoolFlag(
+	"Query Tracing",
+	"queryTracing",
+	"Query Team",
+	false,
+	Permanent,
+	false,
+)
+
+// QueryTracing - Turn on query tracing for queries that are sampled
+func QueryTracing() BoolFlag {
+	return queryTracing
+}
+
 var simpleTaskOptionsExtraction = MakeBoolFlag(
 	"Simple Task Options Extraction",
 	"simpleTaskOptionsExtraction",
@@ -240,6 +254,20 @@ func OrgOnlyMemberList() BoolFlag {
 	return orgOnlyMemberList
 }

+var injectLatestSuccessTime = MakeBoolFlag(
+	"Inject Latest Success Time",
+	"injectLatestSuccessTime",
+	"Compute Team",
+	false,
+	Temporary,
+	false,
+)
+
+// InjectLatestSuccessTime - Inject the latest successful task run timestamp into a Task query extern when executing.
+func InjectLatestSuccessTime() BoolFlag {
+	return injectLatestSuccessTime
+}
+
 var enforceOrgDashboardLimits = MakeBoolFlag(
 	"Enforce Organization Dashboard Limits",
 	"enforceOrgDashboardLimits",
@@ -265,6 +293,7 @@ var all = []Flag{
 	newLabels,
 	memoryOptimizedFill,
 	memoryOptimizedSchemaMutation,
+	queryTracing,
 	simpleTaskOptionsExtraction,
 	mergeFiltersRule,
 	bandPlotType,
@@ -272,6 +301,7 @@ var all = []Flag{
 	notebooks,
 	pushDownGroupAggregateMinMax,
 	orgOnlyMemberList,
+	injectLatestSuccessTime,
 	enforceOrgDashboardLimits,
 }

@@ -286,6 +316,7 @@ var byKey = map[string]Flag{
 	"newLabels":                     newLabels,
 	"memoryOptimizedFill":           memoryOptimizedFill,
 	"memoryOptimizedSchemaMutation": memoryOptimizedSchemaMutation,
+	"queryTracing":                  queryTracing,
 	"simpleTaskOptionsExtraction":   simpleTaskOptionsExtraction,
 	"mergeFiltersRule":              mergeFiltersRule,
 	"bandPlotType":                  bandPlotType,
@@ -293,5 +324,6 @@ var byKey = map[string]Flag{
 	"notebooks":                     notebooks,
 	"pushDownGroupAggregateMinMax":  pushDownGroupAggregateMinMax,
 	"orgOnlyMemberList":             orgOnlyMemberList,
+	"injectLatestSuccessTime":       injectLatestSuccessTime,
 	"enforceOrgDashboardLimits":     enforceOrgDashboardLimits,
 }
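For orientation, a flag defined this way is usually consulted at request time through its generated accessor. A minimal sketch, assuming the kit/feature package keeps its usual `Enabled(ctx)` helper (the import path and signature are inferred from the repo layout, not shown in this diff):

```go
package main

import (
	"context"

	"github.com/influxdata/influxdb/v2/kit/feature"
)

func handleQuery(ctx context.Context) {
	// Enabled consults the Flagger carried in ctx and falls back to the
	// flag's default value (false for queryTracing) when none is set.
	if feature.QueryTracing().Enabled(ctx) {
		// emit a trace for this sampled query
	}
}
```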
kv/bucket.go

@@ -179,30 +179,9 @@ func (s *Service) findBucketByName(ctx context.Context, tx Tx, orgID influxdb.ID

 	buf, err := idx.Get(key)
 	if IsNotFound(err) {
-		switch n {
-		case influxdb.TasksSystemBucketName:
-			return &influxdb.Bucket{
-				ID:              influxdb.TasksSystemBucketID,
-				Type:            influxdb.BucketTypeSystem,
-				Name:            influxdb.TasksSystemBucketName,
-				RetentionPeriod: influxdb.TasksSystemBucketRetention,
-				Description:     "System bucket for task logs",
-				OrgID:           orgID,
-			}, nil
-		case influxdb.MonitoringSystemBucketName:
-			return &influxdb.Bucket{
-				ID:              influxdb.MonitoringSystemBucketID,
-				Type:            influxdb.BucketTypeSystem,
-				Name:            influxdb.MonitoringSystemBucketName,
-				RetentionPeriod: influxdb.MonitoringSystemBucketRetention,
-				Description:     "System bucket for monitoring logs",
-				OrgID:           orgID,
-			}, nil
-		default:
-			return nil, &influxdb.Error{
-				Code: influxdb.ENotFound,
-				Msg:  fmt.Sprintf("bucket %q not found", n),
-			}
-		}
+		return nil, &influxdb.Error{
+			Code: influxdb.ENotFound,
+			Msg:  fmt.Sprintf("bucket %q not found", n),
+		}
 	}

@@ -343,6 +322,10 @@ func (s *Service) FindBuckets(ctx context.Context, filter influxdb.BucketFilter,
 		return nil
 	})

+	if err != nil {
+		return nil, 0, err
+	}
+
 	// Don't append system buckets if Name is set. Users who don't have real
 	// system buckets won't get mocked buckets if they query for a bucket by name
 	// without the orgID, but this is a vanishing small number of users and has
@@ -351,40 +334,6 @@ func (s *Service) FindBuckets(ctx context.Context, filter influxdb.BucketFilter,
 		return bs, len(bs), nil
 	}

-	needsSystemBuckets := true
-	for _, b := range bs {
-		if b.Type == influxdb.BucketTypeSystem {
-			needsSystemBuckets = false
-			break
-		}
-	}
-
-	if needsSystemBuckets {
-		tb := &influxdb.Bucket{
-			ID:              influxdb.TasksSystemBucketID,
-			Type:            influxdb.BucketTypeSystem,
-			Name:            influxdb.TasksSystemBucketName,
-			RetentionPeriod: influxdb.TasksSystemBucketRetention,
-			Description:     "System bucket for task logs",
-		}
-
-		bs = append(bs, tb)
-
-		mb := &influxdb.Bucket{
-			ID:              influxdb.MonitoringSystemBucketID,
-			Type:            influxdb.BucketTypeSystem,
-			Name:            influxdb.MonitoringSystemBucketName,
-			RetentionPeriod: influxdb.MonitoringSystemBucketRetention,
-			Description:     "System bucket for monitoring logs",
-		}
-
-		bs = append(bs, mb)
-	}
-
-	if err != nil {
-		return nil, 0, err
-	}
-
 	return bs, len(bs), nil
 }
@@ -249,10 +249,8 @@ func WalkCursor(ctx context.Context, cursor ForwardCursor, visit VisitFunc) (err
 		return err
 	}

-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	default:
+	if err := ctx.Err(); err != nil {
+		return err
 	}
 }
@@ -40,7 +40,7 @@ func NewBucketService() *BucketService {
 	FindBucketByIDFn: func(context.Context, platform.ID) (*platform.Bucket, error) { return nil, nil },
 	FindBucketByNameFn: func(context.Context, platform.ID, string) (*platform.Bucket, error) {
 		return &platform.Bucket{
-			ID:              platform.TasksSystemBucketID,
+			ID:              platform.ID(10),
 			Type:            platform.BucketTypeSystem,
 			Name:            "_tasks",
 			RetentionPeriod: time.Hour * 24 * 3,
@@ -2,12 +2,15 @@ package endpoint_test

 import (
 	"encoding/json"
+	"fmt"
 	"net/http"
+	"net/url"
 	"testing"
 	"time"

 	"github.com/google/go-cmp/cmp"
 	"github.com/influxdata/influxdb/v2"
+	"github.com/influxdata/influxdb/v2/kit/errors"
 	"github.com/influxdata/influxdb/v2/mock"
 	"github.com/influxdata/influxdb/v2/notification/endpoint"
 	influxTesting "github.com/influxdata/influxdb/v2/testing"
@@ -28,9 +31,10 @@ var goodBase = endpoint.Base{

 func TestValidEndpoint(t *testing.T) {
 	cases := []struct {
-		name string
-		src  influxdb.NotificationEndpoint
-		err  error
+		name  string
+		src   influxdb.NotificationEndpoint
+		err   error
+		errFn func(*testing.T) error
 	}{
 		{
 			name: "invalid endpoint id",
@@ -102,9 +106,16 @@ func TestValidEndpoint(t *testing.T) {
 				Base: goodBase,
 				URL:  "posts://er:{DEf1=ghi@:5432/db?ssl",
 			},
-			err: &influxdb.Error{
-				Code: influxdb.EInvalid,
-				Msg:  "slack endpoint URL is invalid: parse posts://er:{DEf1=ghi@:5432/db?ssl: net/url: invalid userinfo",
+			errFn: func(t *testing.T) error {
+				err := url.Error{
+					Op:  "parse",
+					URL: "posts://er:{DEf1=ghi@:5432/db?ssl",
+					Err: errors.New("net/url: invalid userinfo"),
+				}
+				return &influxdb.Error{
+					Code: influxdb.EInvalid,
+					Msg:  fmt.Sprintf("slack endpoint URL is invalid: %s", err.Error()),
+				}
 			},
 		},
 		{
@@ -186,7 +197,13 @@ func TestValidEndpoint(t *testing.T) {
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
 			got := c.src.Valid()
-			influxTesting.ErrorsEqual(t, got, c.err)
+			var exp error
+			if c.errFn != nil {
+				exp = c.errFn(t)
+			} else {
+				exp = c.err
+			}
+			influxTesting.ErrorsEqual(t, got, exp)
 		})
 	}
 }
@@ -139,6 +139,11 @@ Existing [data types](https://v2.docs.influxdata.com/v2.0/reference/syntax/annot
- `#constant` annotation adds a constant column to the data, so you can set measurement, time, field or tag of every row you import
  - the format of a constant annotation row is `#constant,datatype,name,value`; it contains a supported datatype, a column name, and a constant value
  - _column name_ can be omitted for _dateTime_ or _measurement_ columns, so the annotation can be simply `#constant,measurement,cpu`
- `#concat` annotation adds a new column that is concatenated from existing columns according to a template
  - the format of a concat annotation row is `#concat,datatype,name,template`; it contains a supported datatype, a column name, and a template value
  - the `template` is a string with `${columnName}` placeholders, in which the placeholders are replaced by values of existing columns
  - for example: `#concat,string,fullName,${firstName} ${lastName}`; a fuller end-to-end example follows this list
  - _column name_ can be omitted for _dateTime_ or _measurement_ columns
- `#timezone` annotation specifies the time zone of the data using an offset, which is either `+hhmm` or `-hhmm` or `Local` to use the local/computer time zone. Examples: _#timezone,+0100_ _#timezone -0500_ _#timezone Local_
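To see how these annotations combine, here is a small end-to-end sketch (the `firstName`/`lastName` columns are invented for the example):

```
#constant,measurement,users
#concat,string,fullName,${firstName} ${lastName}
#datatype,string,string,dateTime:RFC3339
firstName,lastName,time
John,Doe,2020-01-01T00:00:00Z
```

Each data row is written to the `users` measurement with `firstName`, `lastName` and the computed `fullName` as string fields, roughly: `users firstName="John",lastName="Doe",fullName="John Doe" 1577836800000000000`.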

#### Data type with data format

@@ -158,6 +163,9 @@ All data types can include the format that is used to parse column data. It is t
- note that you have to quote column delimiters whenever they appear in a CSV column value, for example:
  - `#constant,"double:,.",myColumn,"1.234,011"`
- `long:format` and `unsignedLong:format` support the same format as `double`, but everything after and including a fraction character is ignored
  - the format can be prepended with `strict` to fail when a fraction digit is present, for example:
    - `1000.000` is `1000` when parsed as `long`, but fails when parsed as `long:strict`
    - `1_000,000` is `1000` when parsed as `long:,_`, but fails when parsed as `long:strict,_`
- `boolean:truthy:falsy`
  - `truthy` and `falsy` are comma-separated lists of values, they can be empty to assume all values as truthy/falsy; for example `boolean:sí,yes,ja,oui,ano,да:no,nein,non,ne,нет`
  - a `boolean` data type (without the format) parses column values that start with any of _tTyY1_ as `true` values, _fFnN0_ as `false` values and fails on other values; the sketch below combines several of these formats
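A short input that exercises several of these formats together (column names are invented for the example):

```
#constant,measurement,example
#datatype,dateTime:number,"double:,.","long:,_","boolean:sí:no"
time,price,amount,verified
1568020001000000000,"1.234,011",1_000,sí
```

Under the rules above this parses to roughly `example price=1234.011,amount=1000i,verified=true 1568020001000000000`.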

@@ -17,7 +17,21 @@ type CsvLineError struct {
 }

 func (e CsvLineError) Error() string {
-	return fmt.Sprintf("line %d: %v", e.Line, e.Err)
+	if e.Line > 0 {
+		return fmt.Sprintf("line %d: %v", e.Line, e.Err)
+	}
+	return fmt.Sprintf("%v", e.Err)
 }

+// CreateRowColumnError wraps an existing error to add line and column coordinates
+func CreateRowColumnError(line int, columnLabel string, err error) CsvLineError {
+	return CsvLineError{
+		Line: line,
+		Err: CsvColumnError{
+			Column: columnLabel,
+			Err:    err,
+		},
+	}
+}
+
 // CsvToLineReader represents state of transformation from CSV data to line protocol reader
@@ -34,6 +48,8 @@ type CsvToLineReader struct {
 	dataRowAdded bool
 	// log CSV data errors to stderr and continue with CSV processing
 	skipRowOnError bool
+	// RowSkipped is called when a row is skipped because of data parsing error
+	RowSkipped func(source *CsvToLineReader, lineError error, row []string)

 	// reader results
 	buffer []byte
@@ -54,6 +70,11 @@ func (state *CsvToLineReader) SkipRowOnError(val bool) *CsvToLineReader {
 	return state
 }

+// Comma returns a field delimiter used in an input CSV file
+func (state *CsvToLineReader) Comma() rune {
+	return state.csv.Comma
+}
+
 // Read implements io.Reader that returns protocol lines
 func (state *CsvToLineReader) Read(p []byte) (n int, err error) {
 	// state1: finished
@@ -98,13 +119,17 @@ func (state *CsvToLineReader) Read(p []byte) (n int, err error) {
 	if state.Table.AddRow(row) {
 		var err error
 		state.lineBuffer = state.lineBuffer[:0] // reuse line buffer
-		state.lineBuffer, err = state.Table.AppendLine(state.lineBuffer, row)
+		state.lineBuffer, err = state.Table.AppendLine(state.lineBuffer, row, state.LineNumber)
 		if !state.dataRowAdded && state.logTableDataColumns {
 			log.Println(state.Table.DataColumnsInfo())
 		}
 		state.dataRowAdded = true
 		if err != nil {
 			lineError := CsvLineError{state.LineNumber, err}
+			if state.RowSkipped != nil {
+				state.RowSkipped(state, lineError, row)
+				continue
+			}
 			if state.skipRowOnError {
 				log.Println(lineError)
 				continue
@@ -14,8 +14,8 @@ import (
 	"github.com/stretchr/testify/require"
 )

-// Test_CsvToLineProtocol tests conversion of annotated CSV data to line protocol data
-func Test_CsvToLineProtocol(t *testing.T) {
+// Test_CsvToLineProtocol_variousBufferSize tests conversion of annotated CSV data to line protocol data on various buffer sizes
+func Test_CsvToLineProtocol_variousBufferSize(t *testing.T) {
 	var tests = []struct {
 		name string
 		csv  string
@@ -117,6 +117,68 @@ func Test_CsvToLineProtocol(t *testing.T) {
 	}
 }

+// Test_CsvToLineProtocol_samples tests conversion of annotated CSV data to line protocol data
+func Test_CsvToLineProtocol_samples(t *testing.T) {
+	var tests = []struct {
+		name  string
+		csv   string
+		lines string
+		err   string
+	}{
+		{
+			"queryResult_19452", // https://github.com/influxdata/influxdb/issues/19452
+			"#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string\n" +
+				"#group,false,false,true,true,false,false,true,true,true\n" +
+				"#default,_result,,,,,,,,\n" +
+				",result,table,_start,_stop,_time,_value,_field,_measurement,host\n" +
+				",,0,2020-08-26T22:59:23.598653Z,2020-08-26T23:00:23.598653Z,2020-08-26T22:59:30Z,15075651584,active,mem,ip-192-168-86-25.ec2.internal\n",
+			"mem,host=ip-192-168-86-25.ec2.internal active=15075651584i 1598482770000000000\n",
+			"", // no error
+		},
+		{
+			"queryResult_19452_group_first", // issue 19452, but with group annotation first
+			"#group,false,false,true,true,false,false,true,true,true\n" +
+				"#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string\n" +
+				"#default,_result,,,,,,,,\n" +
+				",result,table,_start,_stop,_time,_value,_field,_measurement,host\n" +
+				",,0,2020-08-26T22:59:23.598653Z,2020-08-26T23:00:23.598653Z,2020-08-26T22:59:30Z,15075651584,active,mem,ip-192-168-86-25.ec2.internal\n",
+			"mem,host=ip-192-168-86-25.ec2.internal active=15075651584i 1598482770000000000\n",
+			"", // no error
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			reader := CsvToLineProtocol(strings.NewReader(test.csv))
+			buffer := make([]byte, 100)
+			lines := make([]byte, 0, 100)
+			for {
+				n, err := reader.Read(buffer)
+				if err != nil {
+					if err == io.EOF {
+						break
+					}
+					if test.err != "" {
+						// fmt.Println(err)
+						if err := err.Error(); !strings.Contains(err, test.err) {
+							require.Equal(t, err, test.err)
+						}
+						return
+					}
+					require.Nil(t, err.Error())
+					break
+				}
+				lines = append(lines, buffer[:n]...)
+			}
+			if test.err == "" {
+				require.Equal(t, test.lines, string(lines))
+			} else {
+				require.Fail(t, "error message with '"+test.err+"' expected")
+			}
+		})
+	}
+}
+
 // Test_CsvToLineProtocol_LogTableColumns checks correct logging of table columns
 func Test_CsvToLineProtocol_LogTableColumns(t *testing.T) {
 	var buf bytes.Buffer
@@ -204,6 +266,68 @@ func Test_CsvToLineProtocol_SkipRowOnError(t *testing.T) {
 	require.Equal(t, messages, 2)
 }

+// Test_CsvToLineProtocol_RowSkipped tests that error rows are reported to configured RowSkipped listener
+func Test_CsvToLineProtocol_RowSkipped(t *testing.T) {
+	var buf bytes.Buffer
+	log.SetOutput(&buf)
+	oldFlags := log.Flags()
+	log.SetFlags(0)
+	defer func() {
+		log.SetOutput(os.Stderr)
+		log.SetFlags(oldFlags)
+	}()
+
+	type ActualArguments = struct {
+		src *CsvToLineReader
+		err error
+		row []string
+	}
+	type ExpectedArguments = struct {
+		errorString string
+		row         []string
+	}
+
+	csv := "sep=;\n_measurement;a|long:strict\n;1\ncpu;2.1\ncpu;3a\n"
+	calledArgs := []ActualArguments{}
+	expectedArgs := []ExpectedArguments{
+		{
+			"line 3: column '_measurement': no measurement supplied",
+			[]string{"", "1"},
+		},
+		{
+			"line 4: column 'a': '2.1' cannot fit into long data type",
+			[]string{"cpu", "2.1"},
+		},
+		{
+			"line 5: column 'a': strconv.ParseInt:",
+			[]string{"cpu", "3a"},
+		},
+	}
+
+	reader := CsvToLineProtocol(strings.NewReader(csv)).SkipRowOnError(true)
+	reader.RowSkipped = func(src *CsvToLineReader, err error, _row []string) {
+		// make a copy of _row
+		row := make([]string, len(_row))
+		copy(row, _row)
+		// remember for comparison
+		calledArgs = append(calledArgs, ActualArguments{
+			src, err, row,
+		})
+	}
+	// read all the data
+	ioutil.ReadAll(reader)
+
+	out := buf.String()
+	require.Empty(t, out, "No log messages expected because RowSkipped handler is set")
+
+	require.Len(t, calledArgs, 3)
+	for i, expected := range expectedArgs {
+		require.Equal(t, reader, calledArgs[i].src)
+		require.Contains(t, calledArgs[i].err.Error(), expected.errorString)
+		require.Equal(t, expected.row, calledArgs[i].row)
+	}
+}
+
 // Test_CsvLineError tests CsvLineError error format
 func Test_CsvLineError(t *testing.T) {
 	var tests = []struct {
@@ -218,6 +342,10 @@ func Test_CsvLineError(t *testing.T) {
 		CsvLineError{Line: 2, Err: CsvColumnError{"a", errors.New("cause")}},
 		"line 2: column 'a': cause",
 	},
+	{
+		CsvLineError{Line: -1, Err: CsvColumnError{"a", errors.New("cause")}},
+		"column 'a': cause",
+	},
 }
 for _, test := range tests {
 	require.Equal(t, test.value, test.err.Error())
@@ -2,6 +2,7 @@ package csv2lp

 import (
+	"fmt"
 	"log"
 	"regexp"
 	"strconv"
 	"strings"
@@ -33,9 +34,8 @@ func (a annotationComment) matches(comment string) bool {
 	return strings.HasPrefix(strings.ToLower(comment), a.prefix)
 }

-// constantSetupTable setups the supplied CSV table from #constant annotation
-func constantSetupTable(table *CsvTable, row []string) error {
-	// adds a virtual column with constant value to all data rows
+func createConstantOrConcatColumn(table *CsvTable, row []string, annotationName string) CsvTableColumn {
+	// adds a virtual column with constant value to all data rows
 	// supported types of constant annotation rows are:
 	// 1. "#constant,datatype,label,defaultValue"
 	// 2. "#constant,measurement,value"
@@ -72,17 +72,61 @@ func constantSetupTable(table *CsvTable, row []string) error {
 	if col.DefaultValue == "" && col.Label != "" {
 		// type 2,3,5,6
 		col.DefaultValue = col.Label
-		col.Label = "#constant " + col.DataType
+		col.Label = annotationName + " " + col.DataType
 	} else if col.Label == "" {
-		// setup a label if no label is supplied fo focused error messages
-		col.Label = "#constant " + col.DataType
+		// setup a label if no label is supplied for focused error messages
+		col.Label = annotationName + " " + col.DataType
 	}
 }
-	// add a virtual column to the table
+	return col
+}
+
+// constantSetupTable setups the supplied CSV table from #constant annotation
+func constantSetupTable(table *CsvTable, row []string) error {
+	col := createConstantOrConcatColumn(table, row, "#constant")
+	// add a virtual column to the table
 	table.extraColumns = append(table.extraColumns, &col)
 	return nil
 }

+// computedReplacer is used to replace value in computed columns
+var computedReplacer *regexp.Regexp = regexp.MustCompile(`\$\{[^}]+\}`)
+
+// concatSetupTable setups the supplied CSV table from #concat annotation
+func concatSetupTable(table *CsvTable, row []string) error {
+	col := createConstantOrConcatColumn(table, row, "#concat")
+	template := col.DefaultValue
+	col.ComputeValue = func(row []string) string {
+		return computedReplacer.ReplaceAllStringFunc(template, func(text string) string {
+			columnLabel := text[2 : len(text)-1] // ${columnLabel}
+			if placeholderColumn := table.Column(columnLabel); placeholderColumn != nil {
+				return placeholderColumn.Value(row)
+			}
+			log.Printf("WARNING: column %s: column '%s' cannot be replaced, no such column available", col.Label, columnLabel)
+			return ""
+		})
+	}
+	// add a virtual column to the table
+	table.extraColumns = append(table.extraColumns, &col)
+	// add validator to report error when no placeholder column is available
+	table.validators = append(table.validators, func(table *CsvTable) error {
+		placeholders := computedReplacer.FindAllString(template, len(template))
+		for _, placeholder := range placeholders {
+			columnLabel := placeholder[2 : len(placeholder)-1] // ${columnLabel}
+			if placeholderColumn := table.Column(columnLabel); placeholderColumn == nil {
+				return CsvColumnError{
+					Column: col.Label,
+					Err: fmt.Errorf("'%s' references an unknown column '%s', available columns are: %v",
+						template, columnLabel, strings.Join(table.ColumnLabels(), ",")),
+				}
+			}
+		}
+		return nil
+	})
+	return nil
+}
+
 // supportedAnnotations contains all supported CSV annotations comments
 var supportedAnnotations = []annotationComment{
 	{
@@ -91,7 +135,10 @@ var supportedAnnotations = []annotationComment{
 	setupColumn: func(column *CsvTableColumn, value string) {
 		// standard flux query result annotation
 		if strings.HasSuffix(value, "true") {
-			column.LinePart = linePartTag
+			// setup column's line part unless it is already set (#19452)
+			if column.LinePart == 0 {
+				column.LinePart = linePartTag
+			}
 		}
 	},
 },
@@ -131,6 +178,10 @@ var supportedAnnotations = []annotationComment{
 		return nil
 	},
 },
+{
+	prefix:     "#concat",
+	setupTable: concatSetupTable,
+},
 }

 // ignoreLeadingComment returns a value without '#anyComment ' prefix
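As a usage sketch tying the reader and the new `#concat` annotation together (the import path is assumed from the repository layout, and the printed output is approximate):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/influxdata/influxdb/v2/pkg/csv2lp"
)

func main() {
	csv := "#constant,measurement,users\n" +
		"#concat,string,fullName,${firstName} ${lastName}\n" +
		"#datatype,string,string,dateTime:RFC3339\n" +
		"firstName,lastName,time\n" +
		"John,Doe,2020-01-01T00:00:00Z\n"

	// SkipRowOnError(true) logs and skips bad rows; setting RowSkipped
	// replaces that logging with a callback, as the tests below exercise.
	reader := csv2lp.CsvToLineProtocol(strings.NewReader(csv)).SkipRowOnError(true)
	reader.RowSkipped = func(_ *csv2lp.CsvToLineReader, err error, row []string) {
		fmt.Println("skipped row:", err, row)
	}

	lines, _ := ioutil.ReadAll(reader)
	fmt.Print(string(lines)) // users firstName="John",lastName="Doe",fullName="John Doe" ...
}
```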
@ -1,7 +1,10 @@
|
|||
package csv2lp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -140,6 +143,93 @@ func Test_ConstantAnnotation(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// Test_ConcatAnnotation tests #concat annotation
func Test_ConcatAnnotation(t *testing.T) {
	subject := annotation("#concat")
	require.True(t, subject.matches("#Concat"))
	require.True(t, subject.isTableAnnotation())
	var tests = []struct {
		value          []string
		expectLabel    string
		expectValue    string
		expectLinePart int
	}{
		// all possible specifications
		{[]string{"#concat "}, "", "", 0}, // means literally nothing
		{[]string{"#concat measurement", "a"}, "_", "a", linePartMeasurement},
		{[]string{"#concat measurement", "a", "b"}, "_", "b", linePartMeasurement},
		{[]string{"#concat measurement", "a", ""}, "_", "a", linePartMeasurement},
		{[]string{"#concat tag", "tgName", "tgValue"}, "tgName", "tgValue", linePartTag},
		{[]string{"#concat", "tag", "tgName", "tgValue"}, "tgName", "tgValue", linePartTag},
		{[]string{"#concat field", "fName", "fVal"}, "fName", "fVal", linePartField},
		{[]string{"#concat", "field", "fName", "fVal"}, "fName", "fVal", linePartField},
		{[]string{"dateTime", "1"}, "_", "1", linePartTime},
		{[]string{"dateTime", "1", "2"}, "_", "2", linePartTime},
		{[]string{"dateTime", "", "2"}, "_", "2", linePartTime},
		{[]string{"dateTime", "3", ""}, "_", "3", linePartTime},
		{[]string{"long", "fN", "fV"}, "fN", "fV", 0},
		// concat values
		{[]string{"string", "fN", "$-${b}-${a}"}, "fN", "$-2-1", 0},
	}
	exampleRow := []string{"1", "2"}
	for i, test := range tests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			table := &CsvTable{columns: []*CsvTableColumn{
				{Label: "a", Index: 0},
				{Label: "b", Index: 1},
			}}
			subject.setupTable(table, test.value)
			// validator
			require.Equal(t, 1, len(table.validators))
			require.Equal(t, table.validators[0](table), nil)
			// columns
			require.Equal(t, 1, len(table.extraColumns))
			col := table.extraColumns[0]
			require.Equal(t, test.expectLinePart, col.LinePart)
			require.Greater(t, 0, col.Index)
			if test.expectLabel != "_" {
				require.Equal(t, test.expectLabel, col.Label)
			} else {
				require.NotEqual(t, "", col.Label)
			}
			require.Equal(t, test.expectValue, col.Value(exampleRow))
		})
	}
	t.Run("concat template references unknown column", func(t *testing.T) {
		var buf bytes.Buffer
		log.SetOutput(&buf)
		oldFlags := log.Flags()
		log.SetFlags(0)
		oldPrefix := log.Prefix()
		prefix := "::PREFIX::"
		log.SetPrefix(prefix)
		defer func() {
			log.SetOutput(os.Stderr)
			log.SetFlags(oldFlags)
			log.SetPrefix(oldPrefix)
		}()

		table := &CsvTable{columns: []*CsvTableColumn{
			{Label: "x", Index: 0},
		}}
		subject.setupTable(table, []string{"string", "fN", "a${y}-${x}z"})
		require.Equal(t, 1, len(table.validators))
		require.NotNil(t, table.validators[0](table))
		require.Equal(t,
			"column 'fN': 'a${y}-${x}z' references an unknown column 'y', available columns are: x",
			table.validators[0](table).Error())
		// columns
		require.Equal(t, 1, len(table.extraColumns))
		col := table.extraColumns[0]
		require.Greater(t, 0, col.Index)
		require.Equal(t, "a-1z", col.Value(exampleRow))
		// a warning is printed to console
		require.Equal(t,
			"::PREFIX::WARNING: column fN: column 'y' cannot be replaced, no such column available",
			strings.TrimSpace(buf.String()))
	})
}

// Test_TimeZoneAnnotation tests #timezone annotation
func Test_TimeZoneAnnotation(t *testing.T) {
	subject := annotation("#timezone")

@@ -46,7 +46,9 @@ type CsvTableColumn struct {
	// TimeZone of dateTime column, applied when parsing dateTime DataType
	TimeZone *time.Location
	// ParseF is an optional function used to convert column's string value to interface{}
	ParseF func(string) (interface{}, error)
	ParseF func(value string) (interface{}, error)
	// ComputeValue is an optional function used to compute column value out of row data
	ComputeValue func(row []string) string

	// escapedLabel contains escaped label that can be directly used in line protocol
	escapedLabel string

@@ -63,6 +65,9 @@ func (c *CsvTableColumn) LineLabel() string {
// Value returns the value of the column for the supplied row
func (c *CsvTableColumn) Value(row []string) string {
	if c.Index < 0 || c.Index >= len(row) {
		if c.ComputeValue != nil {
			return c.ComputeValue(row)
		}
		return c.DefaultValue
	}
	val := row[c.Index]

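// Illustrative sketch (not part of the change): a virtual column (Index < 0)
// resolves through ComputeValue and falls back to DefaultValue.
//
//	c := CsvTableColumn{Index: -1, DefaultValue: "fallback"}
//	_ = c.Value([]string{"1"}) // "fallback"
//	c.ComputeValue = func(row []string) string { return row[0] + "!" }
//	_ = c.Value([]string{"1"}) // "1!"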
@@ -126,9 +131,18 @@ func (c *CsvTableColumn) setupDataType(columnValue string) {
	// setup column data type
	c.DataType = columnValue

	// setup custom parsing of bool data type
	// setup custom parsing
	if c.DataType == boolDatatype && c.DataFormat != "" {
		c.ParseF = createBoolParseFn(c.DataFormat)
		return
	}
	if c.DataType == longDatatype && strings.HasPrefix(c.DataFormat, "strict") {
		c.ParseF = createStrictLongParseFn(c.DataFormat[6:])
		return
	}
	if c.DataType == uLongDatatype && strings.HasPrefix(c.DataFormat, "strict") {
		c.ParseF = createStrictUnsignedLongParseFn(c.DataFormat[6:])
		return
	}
}

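// Illustrative sketch (not part of the change, behavior inferred from the
// tests later in this diff): a boolean data format names the truthy and falsy
// values separated by ':'. Assuming the hypothetical format "y:n":
//
//	parse := createBoolParseFn("y:n")
//	v, _ := parse("y") // true
//	v, _ = parse("n")  // false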
@@ -163,6 +177,8 @@ type CsvTable struct {
	ignoreDataTypeInColumnName bool
	// timeZone of dateTime column(s), applied when parsing dateTime value without a time zone specified
	timeZone *time.Location
	// validators validate table structure right before processing data rows
	validators []func(*CsvTable) error

	/* cached columns are initialized before reading the data rows using the computeLineProtocolColumns fn */
	// cachedMeasurement is a required column that reads (line protocol) measurement

@@ -193,6 +209,7 @@ func (t *CsvTable) DataColumnsInfo() string {
		return "<nil>"
	}
	var builder = strings.Builder{}
	t.computeLineProtocolColumns() // ensure that cached columns are initialized
	builder.WriteString(fmt.Sprintf("CsvTable{ dataColumns: %d constantColumns: %d\n", len(t.columns), len(t.extraColumns)))
	builder.WriteString(fmt.Sprintf(" measurement: %+v\n", t.cachedMeasurement))
	for _, col := range t.cachedTags {

@@ -232,7 +249,7 @@ func (t *CsvTable) AddRow(row []string) bool {
	// detect data row or table header row
	if len(row[0]) == 0 || row[0][0] != '#' {
		if !t.readTableData {
			// row must be a header row now
			// expect a header row
			t.lpColumnsValid = false // line protocol columns change
			if t.partBits == 0 {
				// create columns since no column annotations were processed

@@ -328,6 +345,8 @@ func (t *CsvTable) recomputeLineProtocolColumns() {
	t.cachedFieldValue = nil
	t.cachedTags = nil
	t.cachedFields = nil
	// collect unique tag names (#19453)
	var tags = make(map[string]*CsvTableColumn)

	// having a _field column indicates fields without a line type are ignored
	defaultIsField := t.Column(labelFieldName) == nil

@@ -353,8 +372,11 @@ func (t *CsvTable) recomputeLineProtocolColumns() {
		case col.Label == labelFieldValue:
			t.cachedFieldValue = col
		case col.LinePart == linePartTag:
			if val, found := tags[col.Label]; found {
				log.Printf("WARNING: ignoring duplicate tag '%s' at column index %d, using column at index %d\n", col.Label, val.Index, col.Index)
			}
			col.escapedLabel = escapeTag(col.Label)
			t.cachedTags = append(t.cachedTags, col)
			tags[col.Label] = col
		case col.LinePart == linePartField:
			col.escapedLabel = escapeTag(col.Label)
			t.cachedFields = append(t.cachedFields, col)

@@ -365,8 +387,12 @@ func (t *CsvTable) recomputeLineProtocolColumns() {
			}
		}
	}
	// line protocol requires sorted tags
	if t.cachedTags != nil && len(t.cachedTags) > 0 {
	// line protocol requires sorted unique tags
	if len(tags) > 0 {
		t.cachedTags = make([]*CsvTableColumn, 0, len(tags))
		for _, v := range tags {
			t.cachedTags = append(t.cachedTags, v)
		}
		sort.Slice(t.cachedTags, func(i, j int) bool {
			return t.cachedTags[i].Label < t.cachedTags[j].Label
		})

@@ -382,7 +408,7 @@ func (t *CsvTable) recomputeLineProtocolColumns() {
// CreateLine produces a protocol line out of the supplied row or returns an error
func (t *CsvTable) CreateLine(row []string) (line string, err error) {
	buffer := make([]byte, 100)[:0]
	buffer, err = t.AppendLine(buffer, row)
	buffer, err = t.AppendLine(buffer, row, -1)
	if err != nil {
		return "", err
	}

@@ -390,7 +416,7 @@ func (t *CsvTable) CreateLine(row []string) (line string, err error) {
}

// AppendLine appends a protocol line to the supplied buffer using a CSV row and returns the appended buffer or an error if any
func (t *CsvTable) AppendLine(buffer []byte, row []string) ([]byte, error) {
func (t *CsvTable) AppendLine(buffer []byte, row []string, lineNumber int) ([]byte, error) {
	if t.computeLineProtocolColumns() {
		// validate column data types
		if t.cachedFieldValue != nil && !IsTypeSupported(t.cachedFieldValue.DataType) {

@@ -407,6 +433,11 @@ func (t *CsvTable) AppendLine(buffer []byte, row []string) ([]byte, error) {
				}
			}
		}
		for _, v := range t.validators {
			if err := v(t); err != nil {
				return buffer, err
			}
		}
	}

	if t.cachedMeasurement == nil {

@@ -438,7 +469,7 @@ func (t *CsvTable) AppendLine(buffer []byte, row []string) ([]byte, error) {
		buffer = append(buffer, escapeTag(field)...)
		buffer = append(buffer, '=')
		var err error
		buffer, err = appendConverted(buffer, value, t.cachedFieldValue)
		buffer, err = appendConverted(buffer, value, t.cachedFieldValue, lineNumber)
		if err != nil {
			return buffer, CsvColumnError{
				t.cachedFieldName.Label,

@@ -459,7 +490,7 @@ func (t *CsvTable) AppendLine(buffer []byte, row []string) ([]byte, error) {
		buffer = append(buffer, field.LineLabel()...)
		buffer = append(buffer, '=')
		var err error
		buffer, err = appendConverted(buffer, value, field)
		buffer, err = appendConverted(buffer, value, field, lineNumber)
		if err != nil {
			return buffer, CsvColumnError{
				field.Label,

@@ -482,7 +513,7 @@ func (t *CsvTable) AppendLine(buffer []byte, row []string) ([]byte, error) {
	}
	buffer = append(buffer, ' ')
	var err error
	buffer, err = appendConverted(buffer, timeVal, t.cachedTime)
	buffer, err = appendConverted(buffer, timeVal, t.cachedTime, lineNumber)
	if err != nil {
		return buffer, CsvColumnError{
			t.cachedTime.Label,

@@ -509,6 +540,15 @@ func (t *CsvTable) Columns() []*CsvTableColumn {
	return t.columns
}

// ColumnLabels returns the available column labels
func (t *CsvTable) ColumnLabels() []string {
	labels := make([]string, len(t.columns))
	for i, col := range t.columns {
		labels[i] = col.Label
	}
	return labels
}

// Measurement returns the measurement column or nil
func (t *CsvTable) Measurement() *CsvTableColumn {
	t.computeLineProtocolColumns()

@@ -104,6 +104,7 @@ func Test_CsvTable_FluxQueryResult(t *testing.T) {
			require.Equal(t, table.Tags()[0].Label, "cpu")
			require.Equal(t, table.Tags()[1].Label, "host")
			require.Equal(t, len(table.Fields()), 0)
			require.Contains(t, table.ColumnLabels(), "_measurement")
		}
	}
}

@@ -255,6 +256,15 @@ func Test_CsvTableProcessing(t *testing.T) {
			"#default cpu,yes,0,1\n#datatype ,tag,,\n_measurement,test,col1,_time\n,,,",
			"cpu,test=yes col1=0 1",
		},
		{
			"no duplicate tags", // duplicate tags are ignored, the last column wins, https://github.com/influxdata/influxdb/issues/19453
			"#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string,string,string,string,string,string,string,string\n" +
				"#group,true,true,false,false,false,false,true,true,true,true,true,true,true,true,true,true\n" +
				"#default,_result,,,,,,,,,,,,,,,\n" +
				",result,table,_start,_stop,_time,_value,_field,_measurement,env,host,hostname,nodename,org,result,table,url\n" +
				",,0,2020-08-26T23:10:54.023607624Z,2020-08-26T23:15:54.023607624Z,2020-08-26T23:11:00Z,0,0.001,something,host,pod,node,host,,success,role,http://127.0.0.1:8099/metrics\n",
			"something,env=host,host=pod,hostname=node,nodename=host,result=success,table=role,url=http://127.0.0.1:8099/metrics 0.001=0 1598483460000000000",
		},
	}

	for _, test := range tests {

@@ -323,46 +333,108 @@ func Test_ConstantAnnotations(t *testing.T) {
	}
}

// Test_ConcatAnnotations tests processing of concat annotations
func Test_ConcatAnnotations(t *testing.T) {
	var tests = []struct {
		name string
		csv  string
		line string
	}{
		{
			"measurement_1",
			"#concat measurement,cpu\n" +
				"a,b\n" +
				"1,1",
			"cpu a=1,b=1",
		},
		{
			"measurement_2",
			"#concat,measurement,${a}${b}\n" +
				"#constant,tag,cpu,cpu1\n" +
				"#constant,long,of,0\n" +
				"#constant,dateTime,,2\n" +
				"a,b\n" +
				"1,1",
			"11,cpu=cpu1 a=1,b=1,of=0i 2",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			rows := readCsv(t, test.csv)
			table := CsvTable{}
			var lines []string
			for _, row := range rows {
				rowProcessed := table.AddRow(row)
				if rowProcessed {
					line, err := table.CreateLine(row)
					if err != nil && test.line != "" {
						require.Nil(t, err.Error())
					}
					lines = append(lines, line)
				}
			}
			require.Equal(t, []string{test.line}, lines)
		})
	}
}

// Test_DataTypeInColumnName tests specification of column data type in the header row
func Test_DataTypeInColumnName(t *testing.T) {
	var tests = []struct {
		csv                        string
		line                       string
		ignoreDataTypeInColumnName bool
		error                      string
	}{
		{
			"m|measurement,b|boolean:x:,c|boolean:x:|x\n" +
			csv: "m|measurement,b|boolean:x:,c|boolean:x:|x\n" +
				"cpu,,",
			`cpu c=true`,
			false,
			line: `cpu c=true`,
		},
		{
			"m|measurement,a|boolean,b|boolean:0:1,c|boolean:x:,d|boolean:x:\n" +
			csv: "m|measurement,a|boolean,b|boolean:0:1,c|boolean:x:,d|boolean:x:\n" +
				"cpu,1,1,x,y",
			`cpu a=true,b=false,c=true,d=false`,
			false,
			line: `cpu a=true,b=false,c=true,d=false`,
		},
		{
			"#constant measurement,cpu\n" +
			csv: "#constant measurement,cpu\n" +
				"a|long,b|string\n" +
				"1,1",
			`cpu a=1i,b="1"`,
			false,
			line: `cpu a=1i,b="1"`,
		},
		{
			"#constant measurement,cpu\n" +
			csv: "#constant measurement,cpu\n" +
				"a|long,b|string\n" +
				"1,1",
			`cpu a|long=1,b|string=1`,
			true,
			line:                       `cpu a|long=1,b|string=1`,
			ignoreDataTypeInColumnName: true,
		},
		{
			"#constant measurement,cpu\n" +
			csv: "#constant measurement,cpu\n" +
				"#datatype long,string\n" +
				"a|long,b|string\n" +
				"1,1",
			`cpu a|long=1i,b|string="1"`,
			true,
			line:                       `cpu a|long=1i,b|string="1"`,
			ignoreDataTypeInColumnName: true,
		},
		{
			csv: "#constant measurement,cpu\n" +
				"a|long:strict: ,b|unsignedLong:strict: \n" +
				"1 2,1 2",
			line: `cpu a=12i,b=12u`,
		},
		{
			csv: "#constant measurement,cpu\n" +
				"a|long:strict\n" +
				"1.1,1",
			error: "column 'a': '1.1' cannot fit into long data type",
		},
		{
			csv: "#constant measurement,cpu\n" +
				"a|unsignedLong:strict\n" +
				"1.1,1",
			error: "column 'a': '1.1' cannot fit into unsignedLong data type",
		},
	}

@@ -376,8 +448,12 @@ func Test_DataTypeInColumnName(t *testing.T) {
			rowProcessed := table.AddRow(row)
			if rowProcessed {
				line, err := table.CreateLine(row)
				if err != nil && test.line != "" {
					require.Nil(t, err.Error())
				if err != nil {
					if test.error == "" {
						require.Nil(t, err.Error())
					} else {
						require.Equal(t, test.error, err.Error())
					}
				}
				lines = append(lines, line)
			}

@@ -425,6 +501,10 @@ func Test_CsvTable_dataErrors(t *testing.T) {
			"error_no_measurement_data",
			"_measurement,col1\n,2",
		},
		{
			"error_derived_column_missing reference",
			"#concat string,d,${col1}${col2}\n_measurement,col1\nm,2",
		},
	}

	for _, test := range tests {

@@ -5,6 +5,7 @@ import (
	"errors"
	"fmt"
	"io"
	"log"
	"math"
	"strconv"
	"strings"

@@ -82,15 +83,16 @@ func escapeString(val string) string {
	return val
}

// normalizeNumberString normalizes the supplied value with the help of the format supplied.
// normalizeNumberString normalizes the supplied value according to the supplied format.
// This normalization is intended to convert number strings of different locales to a strconv-parseable value.
//
// The format's first character is a fraction delimiter character. Next characters in the format
// are simply removed; they are typically used to visually separate groups in large numbers.
// The removeFaction parameter controls whether the returned value can contain also the fraction part.
// The removeFraction parameter controls whether the returned value may also contain the fraction part.
// An empty format means ". \n\t\r_"
//
// For example, to get a strconv-parseable float from a Spanish value '3.494.826.157,123', use format ",." .
func normalizeNumberString(value string, format string, removeFraction bool) string {
func normalizeNumberString(value string, format string, removeFraction bool) (normalized string, truncated bool) {
	if len(format) == 0 {
		format = ". \n\t\r_"
	}

@@ -110,20 +112,20 @@ func normalizeNumberString(value string, format string, removeFraction bool) string {
		}
		if c == fractionRune {
			if removeFraction {
				break ForAllCharacters
				return retVal.String(), true
			}
			retVal.WriteByte('.')
		} else {
			retVal.WriteRune(c)
			continue
		}
		retVal.WriteRune(c)
	}

	return retVal.String()
	return retVal.String(), false
}
	return value
	return value, false
}

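// Illustrative sketch (not part of the change): locale normalization using the
// Spanish example from the doc comment above.
//
//	normalized, truncated := normalizeNumberString("3.494.826.157,123", ",.", false)
//	// normalized == "3494826157.123", truncated == false
//	normalized, truncated = normalizeNumberString("3.494.826.157,123", ",.", true)
//	// normalized == "3494826157", truncated == true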
func toTypedValue(val string, column *CsvTableColumn) (interface{}, error) {
func toTypedValue(val string, column *CsvTableColumn, lineNumber int) (interface{}, error) {
	dataType := column.DataType
	dataFormat := column.DataFormat
	if column.ParseF != nil {

@@ -159,7 +161,8 @@ func toTypedValue(val string, column *CsvTableColumn) (interface{}, error) {
	case durationDatatype:
		return time.ParseDuration(val)
	case doubleDatatype:
		return strconv.ParseFloat(normalizeNumberString(val, dataFormat, false), 64)
		normalized, _ := normalizeNumberString(val, dataFormat, false)
		return strconv.ParseFloat(normalized, 64)
	case boolDatatype:
		switch {
		case len(val) == 0:

@@ -172,9 +175,21 @@ func toTypedValue(val string, column *CsvTableColumn) (interface{}, error) {
			return nil, errors.New("Unsupported boolean value '" + val + "', first character is expected to be 't','f','0','1','y','n'")
		}
	case longDatatype:
		return strconv.ParseInt(normalizeNumberString(val, dataFormat, true), 10, 64)
		normalized, truncated := normalizeNumberString(val, dataFormat, true)
		if truncated {
			error := CreateRowColumnError(lineNumber, column.Label,
				fmt.Errorf("'%s' truncated to '%s' to fit into long data type", val, normalized))
			log.Printf("WARNING: %v\n", error)
		}
		return strconv.ParseInt(normalized, 10, 64)
	case uLongDatatype:
		return strconv.ParseUint(normalizeNumberString(val, dataFormat, true), 10, 64)
		normalized, truncated := normalizeNumberString(val, dataFormat, true)
		if truncated {
			error := CreateRowColumnError(lineNumber, column.Label,
				fmt.Errorf("'%s' truncated to '%s' to fit into unsignedLong data type", val, normalized))
			log.Printf("WARNING: %v\n", error)
		}
		return strconv.ParseUint(normalized, 10, 64)
	case base64BinaryDataType:
		return base64.StdEncoding.DecodeString(val)
	default:

@@ -230,11 +245,11 @@ func appendProtocolValue(buffer []byte, value interface{}) ([]byte, error) {
	}
}

func appendConverted(buffer []byte, val string, column *CsvTableColumn) ([]byte, error) {
func appendConverted(buffer []byte, val string, column *CsvTableColumn, lineNumber int) ([]byte, error) {
	if len(column.DataType) == 0 { // keep the value as it is
		return append(buffer, val...), nil
	}
	typedVal, err := toTypedValue(val, column)
	typedVal, err := toTypedValue(val, column, lineNumber)
	if err != nil {
		return buffer, err
	}

@@ -302,3 +317,25 @@ func createBoolParseFn(format string) func(string) (interface{}, error) {
		return nil, fmt.Errorf("unsupported boolean value: %s must be one of %v or one of %v", val, truthy, falsy)
	}
}

// createStrictLongParseFn returns a function that converts a string value to long and also fails when a fraction digit is detected
func createStrictLongParseFn(dataFormat string) func(string) (interface{}, error) {
	return func(val string) (interface{}, error) {
		normalized, truncated := normalizeNumberString(val, dataFormat, true)
		if truncated {
			return 0, fmt.Errorf("'%s' cannot fit into long data type", val)
		}
		return strconv.ParseInt(normalized, 10, 64)
	}
}

// createStrictUnsignedLongParseFn returns a function that converts a string value to unsigned long and fails when a fraction digit is detected
func createStrictUnsignedLongParseFn(dataFormat string) func(string) (interface{}, error) {
	return func(val string) (interface{}, error) {
		normalized, truncated := normalizeNumberString(val, dataFormat, true)
		if truncated {
			return 0, fmt.Errorf("'%s' cannot fit into unsignedLong data type", val)
		}
		return strconv.ParseUint(normalized, 10, 64)
	}
}

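// Illustrative sketch (not part of the change): the strict variants reject
// fractional input instead of silently truncating it. An empty data format
// falls back to ". \n\t\r_".
//
//	parse := createStrictLongParseFn("")
//	v, err := parse("1 000") // v == int64(1000): spaces are removed by normalization
//	_, err = parse("1.1")    // error: '1.1' cannot fit into long data type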
@@ -6,6 +6,7 @@ import (
	"io/ioutil"
	"log"
	"math"
	"os"
	"strings"
	"testing"
	"time"

@@ -112,9 +113,9 @@ func Test_ToTypedValue(t *testing.T) {

	for i, test := range tests {
		t.Run(fmt.Sprint(i)+" "+test.value, func(t *testing.T) {
			column := &CsvTableColumn{}
			column := &CsvTableColumn{Label: "test"}
			column.setupDataType(test.dataType)
			val, err := toTypedValue(test.value, column)
			val, err := toTypedValue(test.value, column, 1)
			if err != nil && test.expect != nil {
				require.Nil(t, err.Error())
			}

@@ -143,7 +144,7 @@ func Test_ToTypedValue_dateTimeCustomTimeZone(t *testing.T) {
			column := &CsvTableColumn{}
			column.TimeZone = tz
			column.setupDataType(test.dataType)
			val, err := toTypedValue(test.value, column)
			val, err := toTypedValue(test.value, column, 1)
			if err != nil && test.expect != nil {
				require.Nil(t, err.Error())
			}

@@ -210,9 +211,9 @@ func Test_AppendConverted(t *testing.T) {

	for i, test := range tests {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			column := &CsvTableColumn{}
			column := &CsvTableColumn{Label: "test"}
			column.setupDataType(test.dataType)
			val, err := appendConverted(nil, test.value, column)
			val, err := appendConverted(nil, test.value, column, 1)
			if err != nil && test.expect != "" {
				require.Nil(t, err.Error())
			}

@@ -246,18 +247,34 @@ func Test_NormalizeNumberString(t *testing.T) {
		format         string
		removeFraction bool
		expect         string
		truncated      bool
	}{
		{"123", "", true, "123"},
		{"123", ".", true, "123"},
		{"123.456", ".", true, "123"},
		{"123.456", ".", false, "123.456"},
		{"1 2.3,456", ",. ", false, "123.456"},
		{" 1 2\t3.456 \r\n", "", false, "123.456"},
		{"123", "", true, "123", false},
		{"123", ".", true, "123", false},
		{"123.456", ".", true, "123", true},
		{"123.456", ".", false, "123.456", false},
		{"1 2.3,456", ",. ", false, "123.456", false},
		{" 1 2\t3.456 \r\n", "", false, "123.456", false},
	}

	for i, test := range tests {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			require.Equal(t, test.expect, normalizeNumberString(test.value, test.format, test.removeFraction))
			// customize logging to check warnings
			var buf bytes.Buffer
			log.SetOutput(&buf)
			oldFlags := log.Flags()
			log.SetFlags(0)
			oldPrefix := log.Prefix()
			prefix := "::PREFIX::"
			log.SetPrefix(prefix)
			defer func() {
				log.SetOutput(os.Stderr)
				log.SetFlags(oldFlags)
				log.SetPrefix(oldPrefix)
			}()
			normalized, truncated := normalizeNumberString(test.value, test.format, test.removeFraction)
			require.Equal(t, test.expect, normalized)
			require.Equal(t, test.truncated, truncated)
		})
	}
}

@@ -1,5 +0,0 @@
# Flux master packages

This package tree is temporarily copied from Flux master to keep unit tests which depend on newer
versions of Flux. Once Flux has been updated, this package should be removed and any clients of
this package referred to the official Flux package.

@@ -1,130 +0,0 @@
package edit

import (
	"fmt"

	"github.com/influxdata/flux/ast"
)

// `OptionFn` is a function that, provided with an `OptionStatement`, returns
// an `Expression` or an error. It is used by `Option` functions to edit
// an AST's option statements.
type OptionFn func(opt *ast.OptionStatement) (ast.Expression, error)

// `Option` passes the `OptionStatement` in the AST rooted at `node` that has the
// specified identifier to `fn`.
// The function can have side effects on the option statement
// and/or return a non-nil `Expression` that is set as the value for the option.
// If the value returned by the edit function is `nil` (or an error is returned) no new value is set
// for the option statement (but any, maybe partial, side effect is applied).
// `Option` returns whether it could find and edit the option (possibly with errors) or not.
func Option(node ast.Node, optionIdentifier string, fn OptionFn) (bool, error) {
	oe := &optionEditor{identifier: optionIdentifier, optionFn: fn, err: nil}
	ast.Walk(oe, node)

	if oe.err != nil {
		return oe.found, oe.err
	}

	return oe.found, nil
}

// OptionValueFn creates an `OptionFn` for setting the value of an `OptionStatement`.
func OptionValueFn(expr ast.Expression) OptionFn {
	return func(opt *ast.OptionStatement) (ast.Expression, error) {
		return expr, nil
	}
}

// OptionObjectFn creates an `OptionFn` for updating the values of an `OptionStatement` that has an
// `ObjectExpression` as value. It returns an error if the child of the option statement is not
// an object expression. If some key is not a property of the object, it is added.
func OptionObjectFn(keyMap map[string]ast.Expression) OptionFn {
	return func(opt *ast.OptionStatement) (ast.Expression, error) {
		a, ok := opt.Assignment.(*ast.VariableAssignment)
		if !ok {
			return nil, fmt.Errorf("option assignment must be a variable assignment")
		}
		obj, ok := a.Init.(*ast.ObjectExpression)
		if !ok {
			return nil, fmt.Errorf("value is %s, not an object expression", a.Init.Type())
		}

		// check that every specified property exists in the object
		found := make(map[string]bool, len(obj.Properties))
		for _, p := range obj.Properties {
			found[p.Key.Key()] = true
		}

		for k := range keyMap {
			if !found[k] {
				obj.Properties = append(obj.Properties, &ast.Property{
					Key:   &ast.Identifier{Name: k},
					Value: keyMap[k],
				})
			}
		}

		for _, p := range obj.Properties {
			exp, found := keyMap[p.Key.Key()]
			if found {
				p.Value = exp
			}
		}

		return nil, nil
	}
}

// optionEditor finds the `OptionStatement` with the specified `identifier` and updates its value.
// There shouldn't be more than one option statement with the same identifier
// in a valid query.
type optionEditor struct {
	identifier string
	optionFn   OptionFn
	err        error
	found      bool
}

func (v *optionEditor) Visit(node ast.Node) ast.Visitor {
	if os, ok := node.(*ast.OptionStatement); ok {
		switch a := os.Assignment.(type) {
		case *ast.VariableAssignment:
			if a.ID.Name == v.identifier {
				v.found = true

				newInit, err := v.optionFn(os)

				if err != nil {
					v.err = err
				} else if newInit != nil {
					a.Init = newInit
				}

				return nil
			}
		case *ast.MemberAssignment:
			id, ok := a.Member.Object.(*ast.Identifier)
			if ok {
				name := id.Name + "." + a.Member.Property.Key()
				if name == v.identifier {
					v.found = true

					newInit, err := v.optionFn(os)

					if err != nil {
						v.err = err
					} else if newInit != nil {
						a.Init = newInit
					}

					return nil
				}
			}
		}
	}

	return v
}

func (v *optionEditor) Done(node ast.Node) {}

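// Illustrative sketch (not part of the change): editing a Flux option through
// the API above. Assumes a parsed *ast.File; the option name and duration
// value are made up for demonstration.
//
//	_, err := Option(file, "task", OptionObjectFn(map[string]ast.Expression{
//		"every": &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: 30, Unit: "m"}}},
//	}))
//	// on success, the task option's `every` property now reads 30m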
@@ -1,109 +0,0 @@
package edit

import (
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/codes"
)

// GetOption finds and returns the init for the option's variable assignment
func GetOption(file *ast.File, name string) (ast.Expression, error) {
	for _, st := range file.Body {
		if val, ok := st.(*ast.OptionStatement); ok {
			assign := val.Assignment
			if va, ok := assign.(*ast.VariableAssignment); ok {
				if va.ID.Name == name {
					if ok {
						return va.Init, nil
					}
				}
			}
		}
	}

	return nil, &flux.Error{
		Code: codes.Internal,
		Msg:  "Option not found",
	}
}

// SetOption replaces an existing option's init with the provided init or adds
// the option if it doesn't exist. The file AST is mutated in place.
func SetOption(file *ast.File, name string, expr ast.Expression) {
	// check for the correct file
	for _, st := range file.Body {
		if val, ok := st.(*ast.OptionStatement); ok {
			assign := val.Assignment
			if va, ok := assign.(*ast.VariableAssignment); ok {
				if va.ID.Name == name {
					// replace the variable assignment's init
					va.Init = expr
					return
				}
			}
		}
	}
	// option was not found. prepend new option to body
	file.Body = append([]ast.Statement{&ast.OptionStatement{
		Assignment: &ast.VariableAssignment{
			ID:   &ast.Identifier{Name: name},
			Init: expr,
		},
	}}, file.Body...)
}

// DeleteOption removes an option if it exists. The file AST is mutated in place.
func DeleteOption(file *ast.File, name string) {
	for i, st := range file.Body {
		if val, ok := st.(*ast.OptionStatement); ok {
			assign := val.Assignment
			if va, ok := assign.(*ast.VariableAssignment); ok {
				if va.ID.Name == name {
					file.Body = append(file.Body[:i], file.Body[i+1:]...)
					return
				}
			}
		}
	}
}

// GetProperty finds and returns the AST node for the property value.
func GetProperty(obj *ast.ObjectExpression, key string) (ast.Expression, error) {
	for _, prop := range obj.Properties {
		if key == prop.Key.Key() {
			return prop.Value, nil
		}
	}
	return nil, &flux.Error{
		Code: codes.Internal,
		Msg:  "Property not found",
	}
}

// SetProperty replaces an existing property definition with the provided object expression or adds
// the property if it doesn't exist. The object expression AST is mutated in place.
func SetProperty(obj *ast.ObjectExpression, key string, value ast.Expression) {
	for _, prop := range obj.Properties {
		if key == prop.Key.Key() {
			prop.Value = value
			return
		}
	}

	obj.Properties = append(obj.Properties, &ast.Property{
		BaseNode: obj.BaseNode,
		Key:      &ast.Identifier{Name: key},
		Value:    value,
	})
}

// DeleteProperty removes a property from the object expression if it exists.
// The object expression AST is mutated in place.
func DeleteProperty(obj *ast.ObjectExpression, key string) {
	for i, prop := range obj.Properties {
		if key == prop.Key.Key() {
			obj.Properties = append(obj.Properties[:i], obj.Properties[i+1:]...)
			return
		}
	}
}

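// Illustrative sketch (not part of the change): SetOption rewrites an existing
// option's init in place and prepends a new option statement otherwise. The
// option name and expression are made up for demonstration.
//
//	SetOption(file, "now", &ast.Identifier{Name: "mockedNow"})
//	expr, err := GetOption(file, "now") // expr is the identifier set above, err == nil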
@@ -1,52 +0,0 @@
package ast

import (
	"regexp"
	"time"

	"github.com/influxdata/flux/ast"
)

func IntegerLiteralFromValue(v int64) *ast.IntegerLiteral {
	return &ast.IntegerLiteral{Value: v}
}
func UnsignedIntegerLiteralFromValue(v uint64) *ast.UnsignedIntegerLiteral {
	return &ast.UnsignedIntegerLiteral{Value: v}
}
func FloatLiteralFromValue(v float64) *ast.FloatLiteral {
	return &ast.FloatLiteral{Value: v}
}
func StringLiteralFromValue(v string) *ast.StringLiteral {
	return &ast.StringLiteral{Value: v}
}
func BooleanLiteralFromValue(v bool) *ast.BooleanLiteral {
	return &ast.BooleanLiteral{Value: v}
}
func DateTimeLiteralFromValue(v time.Time) *ast.DateTimeLiteral {
	return &ast.DateTimeLiteral{Value: v}
}
func RegexpLiteralFromValue(v *regexp.Regexp) *ast.RegexpLiteral {
	return &ast.RegexpLiteral{Value: v}
}

func IntegerFromLiteral(lit *ast.IntegerLiteral) int64 {
	return lit.Value
}
func UnsignedIntegerFromLiteral(lit *ast.UnsignedIntegerLiteral) uint64 {
	return lit.Value
}
func FloatFromLiteral(lit *ast.FloatLiteral) float64 {
	return lit.Value
}
func StringFromLiteral(lit *ast.StringLiteral) string {
	return lit.Value
}
func BooleanFromLiteral(lit *ast.BooleanLiteral) bool {
	return lit.Value
}
func DateTimeFromLiteral(lit *ast.DateTimeLiteral) time.Time {
	return lit.Value
}
func RegexpFromLiteral(lit *ast.RegexpLiteral) *regexp.Regexp {
	return lit.Value
}

@@ -1,138 +0,0 @@
package table

import (
	"fmt"
	"strings"

	"github.com/andreyvit/diff"
	"github.com/influxdata/flux"
)

// Diff will perform a diff between two table iterators.
// This will sort the tables within the table iterators and produce
// a diff of the full output.
func Diff(want, got flux.TableIterator, opts ...DiffOption) string {
	if want == nil {
		want = Iterator{}
	}

	var wantS string
	if wantT, err := Sort(want); err != nil {
		wantS = fmt.Sprintf("table error: %s\n", err)
	} else {
		var sb strings.Builder
		if err := wantT.Do(func(table flux.Table) error {
			sb.WriteString(Stringify(table))
			return nil
		}); err != nil {
			_, _ = fmt.Fprintf(&sb, "table error: %s\n", err)
		}
		wantS = sb.String()
	}

	if got == nil {
		got = Iterator{}
	}

	var gotS string
	if gotT, err := Sort(got); err != nil {
		gotS = fmt.Sprintf("table error: %s\n", err)
	} else {
		var sb strings.Builder
		if err := gotT.Do(func(table flux.Table) error {
			sb.WriteString(Stringify(table))
			return nil
		}); err != nil {
			_, _ = fmt.Fprintf(&sb, "table error: %s\n", err)
		}
		gotS = sb.String()
	}

	differ := newDiffer(opts...)
	return differ.diff(wantS, gotS)
}

type differ struct {
	ctx *[2]int
}

func newDiffer(opts ...DiffOption) (d differ) {
	for _, opt := range diffDefaultOptions {
		opt.apply(&d)
	}
	for _, opt := range opts {
		opt.apply(&d)
	}
	return d
}

func (d differ) diff(want, got string) string {
	lines := diff.LineDiffAsLines(want, got)
	if d.ctx == nil {
		return strings.Join(lines, "\n")
	}

	difflines := make([]string, 0, len(lines))
OUTER:
	for {
		for i := 0; i < len(lines); i++ {
			if lines[i][0] == ' ' {
				continue
			}

			// This is the start of a diff section. Store this location.
			start := i - (*d.ctx)[0]
			if start < 0 {
				start = 0
			}

			// Find the end of this section.
			for ; i < len(lines); i++ {
				if lines[i][0] == ' ' {
					break
				}
			}

			// Look n points in the future and, if they are
			// not part of a diff or don't overrun the number
			// of lines, include them.
			stop := i

			for n := (*d.ctx)[1]; n > 0; n-- {
				if stop+1 >= len(lines) || lines[stop+1][0] != ' ' {
					break
				}
				stop++
			}

			difflines = append(difflines, lines[start:stop]...)
			lines = lines[stop:]
			continue OUTER
		}
		return strings.Join(difflines, "\n")
	}
}

type DiffOption interface {
	apply(*differ)
}

type diffOptionFn func(d *differ)

func (opt diffOptionFn) apply(d *differ) {
	opt(d)
}

var diffDefaultOptions = []DiffOption{
	DiffContext(3),
}

func DiffContext(n int) DiffOption {
	return diffOptionFn(func(d *differ) {
		if n < 0 {
			d.ctx = nil
			return
		}
		ctx := [2]int{n, n}
		d.ctx = &ctx
	})
}

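// Illustrative sketch (not part of the change): comparing table iterators in a
// test, with one line of context around each differing region. `want` and
// `got` are hypothetical flux.TableIterator values.
//
//	if d := Diff(want, got, DiffContext(1)); d != "" {
//		t.Errorf("unexpected tables:\n%s", d)
//	}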
@@ -1,14 +0,0 @@
package table

import "github.com/influxdata/flux"

type Iterator []flux.Table

func (t Iterator) Do(f func(flux.Table) error) error {
	for _, tbl := range t {
		if err := f(tbl); err != nil {
			return err
		}
	}
	return nil
}

@@ -1,32 +0,0 @@
package table

import (
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/execute"
)

// Sort will read a TableIterator and produce another TableIterator
// where the keys are sorted.
//
// This method will buffer all of the data since it needs to ensure
// all of the tables are read to avoid any deadlocks. Be careful
// using this method in performance sensitive areas.
func Sort(tables flux.TableIterator) (flux.TableIterator, error) {
	groups := execute.NewGroupLookup()
	if err := tables.Do(func(table flux.Table) error {
		buffered, err := execute.CopyTable(table)
		if err != nil {
			return err
		}
		groups.Set(buffered.Key(), buffered)
		return nil
	}); err != nil {
		return nil, err
	}

	var buffered []flux.Table
	groups.Range(func(_ flux.GroupKey, value interface{}) {
		buffered = append(buffered, value.(flux.Table))
	})
	return Iterator(buffered), nil
}

@@ -1,703 +0,0 @@
// Package static provides utilities for easily constructing static
// tables that are meant for tests.
//
// The primary type is Table which will be a mapping of columns to their data.
// The data is defined in a columnar format instead of a row-based one.
//
// The implementations in this package are not performant and are not meant
// to be used in production code. They are good enough for small datasets that
// are present in tests to ensure code correctness.
package static

import (
	"fmt"
	"time"

	stdarrow "github.com/apache/arrow/go/arrow"
	"github.com/apache/arrow/go/arrow/array"
	"github.com/influxdata/flux"
	"github.com/influxdata/flux/arrow"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/memory"
	"github.com/influxdata/flux/values"
	"github.com/influxdata/influxdb/v2/pkg/flux/internal/errors"
	"github.com/influxdata/influxdb/v2/pkg/flux/internal/execute/table"
)

// Table is a statically constructed table.
// It is a mapping between column names and the column.
//
// This is not a performant section of code and it is primarily
// meant to make writing unit tests easy. Do not use in
// production code.
//
// The Table struct implements the TableIterator interface
// and not the Table interface. To retrieve a flux.Table compatible
// implementation, the Table() method can be used.
type Table []Column

// Do will produce the Table and then invoke the function
// on that flux.Table.
//
// If the produced Table is invalid, then this method
// will panic.
func (s Table) Do(f func(flux.Table) error) error {
	return f(s.Table())
}

|
||||
func (s Table) Build(template *[]Column) []flux.Table {
|
||||
t := make(Table, 0, len(*template)+len(s))
|
||||
t = append(t, *template...)
|
||||
t = append(t, s...)
|
||||
return []flux.Table{t.Table()}
|
||||
}
|
||||
|
||||
// Table will produce a flux.Table using the Column values
|
||||
// that are part of this Table.
|
||||
//
|
||||
// If the Table produces an invalid buffer, then this method
|
||||
// will panic.
|
||||
func (s Table) Table() flux.Table {
|
||||
if len(s) == 0 {
|
||||
panic(errors.New(codes.Internal, "static table has no columns"))
|
||||
}
|
||||
|
||||
key, cols := s.buildSchema()
|
||||
buffer := &arrow.TableBuffer{
|
||||
GroupKey: key,
|
||||
Columns: cols,
|
||||
}
|
||||
|
||||
// Determine the size by looking at the first non-key column.
|
||||
n := 0
|
||||
for _, c := range s {
|
||||
if c.IsKey() {
|
||||
continue
|
||||
}
|
||||
n = c.Len()
|
||||
break
|
||||
}
|
||||
|
||||
// Construct each of the buffers.
|
||||
buffer.Values = make([]array.Interface, len(buffer.Columns))
|
||||
for i, c := range s {
|
||||
buffer.Values[i] = c.Make(n)
|
||||
}
|
||||
|
||||
if err := buffer.Validate(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return table.FromBuffer(buffer)
|
||||
}
|
||||
|
||||
// buildSchema will construct the schema from the columns.
|
||||
func (s Table) buildSchema() (flux.GroupKey, []flux.ColMeta) {
|
||||
var (
|
||||
keyCols []flux.ColMeta
|
||||
keyVals []values.Value
|
||||
cols []flux.ColMeta
|
||||
)
|
||||
for _, c := range s {
|
||||
col := flux.ColMeta{Label: c.Label(), Type: c.Type()}
|
||||
if c.IsKey() {
|
||||
keyCols = append(keyCols, col)
|
||||
keyVals = append(keyVals, c.KeyValue())
|
||||
}
|
||||
cols = append(cols, col)
|
||||
}
|
||||
return execute.NewGroupKey(keyCols, keyVals), cols
|
||||
}
|
||||
|
||||
// Column is the definition for how to construct a column for the table.
|
||||
type Column interface {
|
||||
// Label returns the label associated with this column.
|
||||
Label() string
|
||||
|
||||
// Type returns the column type for this column.
|
||||
Type() flux.ColType
|
||||
|
||||
// Make will construct an array with the given length
|
||||
// if it is possible.
|
||||
Make(n int) array.Interface
|
||||
|
||||
// Len will return the length of this column.
|
||||
// If no length is known, this will return -1.
|
||||
Len() int
|
||||
|
||||
// IsKey will return true if this is part of the group key.
|
||||
IsKey() bool
|
||||
|
||||
// KeyValue will return the key value if this column is part
|
||||
// of the group key.
|
||||
KeyValue() values.Value
|
||||
|
||||
// TableBuilder allows this column to add itself to a template.
|
||||
TableBuilder
|
||||
}
|
||||
|
||||
// IntKey will construct a group key with the integer type.
|
||||
// The value can be an int, int64, or nil.
|
||||
func IntKey(k string, v interface{}) KeyColumn {
|
||||
if iv, ok := mustIntValue(v); ok {
|
||||
return KeyColumn{k: k, v: iv, t: flux.TInt}
|
||||
}
|
||||
return KeyColumn{k: k, t: flux.TInt}
|
||||
}
|
||||
|
||||
// UintKey will construct a group key with the unsigned type.
|
||||
// The value can be a uint, uint64, int, int64, or nil.
|
||||
func UintKey(k string, v interface{}) KeyColumn {
|
||||
if iv, ok := mustUintValue(v); ok {
|
||||
return KeyColumn{k: k, v: iv, t: flux.TUInt}
|
||||
}
|
||||
return KeyColumn{k: k, t: flux.TUInt}
|
||||
}
|
||||
|
||||
// FloatKey will construct a group key with the float type.
|
||||
// The value can be a float64, int, int64, or nil.
|
||||
func FloatKey(k string, v interface{}) KeyColumn {
|
||||
if iv, ok := mustFloatValue(v); ok {
|
||||
return KeyColumn{k: k, v: iv, t: flux.TFloat}
|
||||
}
|
||||
return KeyColumn{k: k, t: flux.TFloat}
|
||||
}
|
||||
|
||||
// StringKey will construct a group key with the string type.
|
||||
// The value can be a string or nil.
|
||||
func StringKey(k string, v interface{}) KeyColumn {
|
||||
if iv, ok := mustStringValue(v); ok {
|
||||
return KeyColumn{k: k, v: iv, t: flux.TString}
|
||||
}
|
||||
return KeyColumn{k: k, t: flux.TString}
|
||||
}
|
||||
|
||||
// BooleanKey will construct a group key with the boolean type.
|
||||
// The value can be a bool or nil.
|
||||
func BooleanKey(k string, v interface{}) KeyColumn {
|
||||
if iv, ok := mustBooleanValue(v); ok {
|
||||
return KeyColumn{k: k, v: iv, t: flux.TBool}
|
||||
}
|
||||
return KeyColumn{k: k, t: flux.TBool}
|
||||
}
|
||||
|
||||
// TimeKey will construct a group key with the given time using either a
|
||||
// string or an integer. If an integer is used, then it is in seconds.
|
||||
func TimeKey(k string, v interface{}) KeyColumn {
|
||||
if iv, _, ok := mustTimeValue(v, 0, time.Second); ok {
|
||||
return KeyColumn{k: k, v: execute.Time(iv), t: flux.TTime}
|
||||
}
|
||||
return KeyColumn{k: k, t: flux.TTime}
|
||||
}
|
||||
|
||||
type KeyColumn struct {
|
||||
k string
|
||||
v interface{}
|
||||
t flux.ColType
|
||||
}
|
||||
|
||||
func (s KeyColumn) Make(n int) array.Interface {
|
||||
return arrow.Repeat(s.KeyValue(), n, memory.DefaultAllocator)
|
||||
}
|
||||
|
||||
func (s KeyColumn) Label() string { return s.k }
|
||||
func (s KeyColumn) Type() flux.ColType { return s.t }
|
||||
func (s KeyColumn) Len() int { return -1 }
|
||||
func (s KeyColumn) IsKey() bool { return true }
|
||||
func (s KeyColumn) KeyValue() values.Value { return values.New(s.v) }
|
||||
|
||||
func (s KeyColumn) Build(template *[]Column) []flux.Table {
|
||||
*template = append(*template, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ints will construct an array of integers.
|
||||
// Each value can be an int, int64, or nil.
|
||||
func Ints(k string, v ...interface{}) Column {
|
||||
c := intColumn{
|
||||
column: column{k: k},
|
||||
v: make([]int64, len(v)),
|
||||
}
|
||||
for i, iv := range v {
|
||||
val, ok := mustIntValue(iv)
|
||||
if !ok {
|
||||
if c.valid == nil {
|
||||
c.valid = make([]bool, len(v))
|
||||
for i := range c.valid {
|
||||
c.valid[i] = true
|
||||
}
|
||||
}
|
||||
c.valid[i] = false
|
||||
}
|
||||
c.v[i] = val
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
type column struct {
|
||||
k string
|
||||
valid []bool
|
||||
}
|
||||
|
||||
func (s column) Label() string { return s.k }
|
||||
func (s column) IsKey() bool { return false }
|
||||
|
||||
type intColumn struct {
|
||||
column
|
||||
v []int64
|
||||
}
|
||||
|
||||
func (s intColumn) Make(n int) array.Interface {
|
||||
b := array.NewInt64Builder(memory.DefaultAllocator)
|
||||
b.Resize(len(s.v))
|
||||
b.AppendValues(s.v, s.valid)
|
||||
return b.NewArray()
|
||||
}
|
||||
|
||||
func (s intColumn) Type() flux.ColType { return flux.TInt }
|
||||
func (s intColumn) Len() int { return len(s.v) }
|
||||
func (s intColumn) KeyValue() values.Value { return values.InvalidValue }
|
||||
|
||||
func (s intColumn) Build(template *[]Column) []flux.Table {
|
||||
*template = append(*template, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func mustIntValue(v interface{}) (int64, bool) {
|
||||
if v == nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case int:
|
||||
return int64(v), true
|
||||
case int64:
|
||||
return v, true
|
||||
default:
|
||||
panic(fmt.Sprintf("unable to convert type %T to an int value", v))
|
||||
}
|
||||
}
|
||||
|
||||
// Uints will construct an array of unsigned integers.
|
||||
// Each value can be a uint, uint64, int, int64, or nil.
|
||||
func Uints(k string, v ...interface{}) Column {
|
||||
c := uintColumn{
|
||||
column: column{k: k},
|
||||
v: make([]uint64, len(v)),
|
||||
}
|
||||
for i, iv := range v {
|
||||
val, ok := mustUintValue(iv)
|
||||
if !ok {
|
||||
if c.valid == nil {
|
||||
c.valid = make([]bool, len(v))
|
||||
for i := range c.valid {
|
||||
c.valid[i] = true
|
||||
}
|
||||
}
|
||||
c.valid[i] = false
|
||||
}
|
||||
c.v[i] = val
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
type uintColumn struct {
|
||||
column
|
||||
v []uint64
|
||||
}
|
||||
|
||||
func (s uintColumn) Make(n int) array.Interface {
|
||||
b := array.NewUint64Builder(memory.DefaultAllocator)
|
||||
b.Resize(len(s.v))
|
||||
b.AppendValues(s.v, s.valid)
|
||||
return b.NewArray()
|
||||
}
|
||||
|
||||
func (s uintColumn) Type() flux.ColType { return flux.TUInt }
|
||||
func (s uintColumn) Len() int { return len(s.v) }
|
||||
func (s uintColumn) KeyValue() values.Value { return values.InvalidValue }
|
||||
|
||||
func (s uintColumn) Build(template *[]Column) []flux.Table {
|
||||
*template = append(*template, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func mustUintValue(v interface{}) (uint64, bool) {
|
||||
if v == nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case int:
|
||||
return uint64(v), true
|
||||
case int64:
|
||||
return uint64(v), true
|
||||
case uint:
|
||||
return uint64(v), true
|
||||
case uint64:
|
||||
return v, true
|
||||
default:
|
||||
panic(fmt.Sprintf("unable to convert type %T to a uint value", v))
|
||||
}
|
||||
}
|
||||
|
||||
// Floats will construct an array of floats.
|
||||
// Each value can be a float64, int, int64, or nil.
|
||||
func Floats(k string, v ...interface{}) Column {
|
||||
c := floatColumn{
|
||||
column: column{k: k},
|
||||
v: make([]float64, len(v)),
|
||||
}
|
||||
for i, iv := range v {
|
||||
val, ok := mustFloatValue(iv)
|
||||
if !ok {
|
||||
if c.valid == nil {
|
||||
c.valid = make([]bool, len(v))
|
||||
for i := range c.valid {
|
||||
c.valid[i] = true
|
||||
}
|
||||
}
|
||||
c.valid[i] = false
|
||||
}
|
||||
c.v[i] = val
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
type floatColumn struct {
|
||||
column
|
||||
v []float64
|
||||
}
|
||||
|
||||
func (s floatColumn) Make(n int) array.Interface {
|
||||
b := array.NewFloat64Builder(memory.DefaultAllocator)
|
||||
b.Resize(len(s.v))
|
||||
b.AppendValues(s.v, s.valid)
|
||||
return b.NewArray()
|
||||
}
|
||||
|
||||
func (s floatColumn) Type() flux.ColType { return flux.TFloat }
|
||||
func (s floatColumn) Len() int { return len(s.v) }
|
||||
func (s floatColumn) KeyValue() values.Value { return values.InvalidValue }
|
||||
|
||||
func (s floatColumn) Build(template *[]Column) []flux.Table {
|
||||
*template = append(*template, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func mustFloatValue(v interface{}) (float64, bool) {
|
||||
if v == nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case int:
|
||||
return float64(v), true
|
||||
case int64:
|
||||
return float64(v), true
|
||||
case float64:
|
||||
return v, true
|
||||
default:
|
||||
panic(fmt.Sprintf("unable to convert type %T to a float value", v))
|
||||
}
|
||||
}
|
||||
|
||||
// Strings will construct an array of strings.
|
||||
// Each value can be a string or nil.
|
||||
func Strings(k string, v ...interface{}) Column {
|
||||
c := stringColumn{
|
||||
column: column{k: k},
|
||||
v: make([]string, len(v)),
|
||||
}
|
||||
for i, iv := range v {
|
||||
val, ok := mustStringValue(iv)
|
||||
if !ok {
|
||||
if c.valid == nil {
|
||||
c.valid = make([]bool, len(v))
|
||||
for i := range c.valid {
|
||||
c.valid[i] = true
|
||||
}
|
||||
}
|
||||
c.valid[i] = false
|
||||
}
|
||||
c.v[i] = val
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
type stringColumn struct {
|
||||
column
|
||||
v []string
|
||||
}
|
||||
|
||||
func (s stringColumn) Make(n int) array.Interface {
|
||||
b := array.NewBinaryBuilder(memory.DefaultAllocator, stdarrow.BinaryTypes.String)
|
||||
b.Resize(len(s.v))
|
||||
b.AppendStringValues(s.v, s.valid)
|
||||
return b.NewArray()
|
||||
}
|
||||
|
||||
func (s stringColumn) Type() flux.ColType { return flux.TString }
|
||||
func (s stringColumn) Len() int { return len(s.v) }
|
||||
func (s stringColumn) KeyValue() values.Value { return values.InvalidValue }
|
||||
|
||||
func (s stringColumn) Build(template *[]Column) []flux.Table {
|
||||
*template = append(*template, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func mustStringValue(v interface{}) (string, bool) {
|
||||
if v == nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
return v, true
|
||||
default:
|
||||
panic(fmt.Sprintf("unable to convert type %T to a string value", v))
|
||||
}
|
||||
}
|
||||
|
||||
// Booleans will construct an array of booleans.
|
||||
// Each value can be a bool or nil.
|
||||
func Booleans(k string, v ...interface{}) Column {
|
||||
c := booleanColumn{
|
||||
column: column{k: k},
|
||||
v: make([]bool, len(v)),
|
||||
}
|
||||
for i, iv := range v {
|
||||
val, ok := mustBooleanValue(iv)
|
||||
if !ok {
|
||||
if c.valid == nil {
|
||||
c.valid = make([]bool, len(v))
|
||||
for i := range c.valid {
|
||||
c.valid[i] = true
|
||||
}
|
||||
}
|
||||
c.valid[i] = false
|
||||
}
|
||||
c.v[i] = val
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
type booleanColumn struct {
|
||||
column
|
||||
v []bool
|
||||
}
|
||||
|
||||
func (s booleanColumn) Make(n int) array.Interface {
|
||||
b := array.NewBooleanBuilder(memory.DefaultAllocator)
|
||||
b.Resize(len(s.v))
|
||||
b.AppendValues(s.v, s.valid)
|
||||
return b.NewArray()
|
||||
}
|
||||
|
||||
func (s booleanColumn) Type() flux.ColType { return flux.TBool }
|
||||
func (s booleanColumn) Len() int { return len(s.v) }
|
||||
func (s booleanColumn) KeyValue() values.Value { return values.InvalidValue }
|
||||
|
||||
func (s booleanColumn) Build(template *[]Column) []flux.Table {
|
||||
*template = append(*template, s)
|
||||
return nil
|
||||
}

func mustBooleanValue(v interface{}) (bool, bool) {
	if v == nil {
		return false, false
	}

	switch v := v.(type) {
	case bool:
		return v, true
	default:
		panic(fmt.Sprintf("unable to convert type %T to a boolean value", v))
	}
}

// Times will construct an array of times with the given time using either a
// string or an integer. If an integer is used, then it is in seconds.
//
// If strings and integers are mixed, the integers will be treated as offsets
// from the last string time that was used.
func Times(k string, v ...interface{}) Column {
	var offset int64
	c := timeColumn{
		column: column{k: k},
		v:      make([]int64, len(v)),
	}
	for i, iv := range v {
		val, abs, ok := mustTimeValue(iv, offset, time.Second)
		if !ok {
			if c.valid == nil {
				c.valid = make([]bool, len(v))
				for i := range c.valid {
					c.valid[i] = true
				}
			}
			c.valid[i] = false
		}
		if abs {
			offset = val
		}
		c.v[i] = val
	}
	return c
}
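
// A minimal sketch of the mixed form (illustrative only): a string sets an
// absolute anchor and later integers are offsets in seconds from it, since
// Times passes time.Second as the unit to mustTimeValue.
//
//	col := Times("_time", "2020-01-01T00:00:00Z", 10, 20)
//	// yields the anchor, anchor+10s, and anchor+20s as nanosecond timestamps.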

type timeColumn struct {
	column
	v []int64
}

func (s timeColumn) Make(n int) array.Interface {
	b := array.NewInt64Builder(memory.DefaultAllocator)
	b.Resize(len(s.v))
	b.AppendValues(s.v, s.valid)
	return b.NewArray()
}

func (s timeColumn) Type() flux.ColType     { return flux.TTime }
func (s timeColumn) Len() int               { return len(s.v) }
func (s timeColumn) KeyValue() values.Value { return values.InvalidValue }

func (s timeColumn) Build(template *[]Column) []flux.Table {
	*template = append(*template, s)
	return nil
}

// mustTimeValue will convert the interface into a time value.
// This must either be an int-like value or a string that can be
// parsed as a time in RFC3339 format.
//
// This will panic otherwise.
func mustTimeValue(v interface{}, offset int64, unit time.Duration) (t int64, abs, ok bool) {
	if v == nil {
		return 0, false, false
	}

	switch v := v.(type) {
	case int:
		return offset + int64(v)*int64(unit), false, true
	case int64:
		return offset + v*int64(unit), false, true
	case string:
		t, err := time.Parse(time.RFC3339, v)
		if err != nil {
			if t, err = time.Parse(time.RFC3339Nano, v); err != nil {
				panic(err)
			}
		}
		return t.UnixNano(), true, true
	default:
		panic(fmt.Sprintf("unable to convert type %T to a time value", v))
	}
}

// TableBuilder is used to construct a set of Tables.
type TableBuilder interface {
	// Build will construct a set of tables using the
	// template as input.
	//
	// The template is a pointer as a builder is allowed
	// to modify the template. For implementors, the
	// template pointer must be non-nil.
	Build(template *[]Column) []flux.Table
}

// TableGroup will construct a group of Tables
// that have common values. It includes any TableBuilder
// values.
type TableGroup []TableBuilder

func (t TableGroup) Do(f func(flux.Table) error) error {
	// Use an empty template.
	var template []Column
	tables := t.Build(&template)
	return table.Iterator(tables).Do(f)
}
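
// A minimal composition sketch (illustrative only), using the StringKey and
// Times helpers from this package:
//
//	tg := TableGroup{
//		StringKey("t0", "a"),
//		Times("_time", "2020-01-01T00:00:00Z", 10),
//	}
//	err := tg.Do(func(tbl flux.Table) error {
//		// every produced table carries the t0=a group key
//		return nil
//	})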

// Build will construct Tables using the given template.
func (t TableGroup) Build(template *[]Column) []flux.Table {
	// Copy over the template.
	gtemplate := make([]Column, len(*template))
	copy(gtemplate, *template)

	var tables []flux.Table
	for _, tb := range t {
		tables = append(tables, tb.Build(&gtemplate)...)
	}
	return tables
}

// TableList will produce a Table using the template and
// each of the table builders.
//
// Changes to the template are not shared between each of the
// entries. If the TableBuilder does not produce tables,
// this will force a single Table to be created.
type TableList []TableBuilder

func (t TableList) Build(template *[]Column) []flux.Table {
	var tables []flux.Table
	for _, tb := range t {
		// Copy over the group template for each of these.
		gtemplate := make([]Column, len(*template), len(*template)+1)
		copy(gtemplate, *template)

		if ntables := tb.Build(&gtemplate); len(ntables) > 0 {
			tables = append(tables, ntables...)
		} else {
			tables = append(tables, Table(gtemplate).Table())
		}
	}
	return tables
}
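
// Sketch of the forcing behavior (illustrative only): a key-only builder
// returns no tables from Build, so TableList materializes one table per entry
// from the accumulated template.
//
//	list := TableList{StringKey("t0", "a"), StringKey("t0", "b")}
//	// list.Build(&template) yields two tables, keyed t0=a and t0=b.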

// StringKeys creates a TableList with the given key values.
func StringKeys(k string, v ...interface{}) TableList {
	list := make(TableList, len(v))
	for i := range v {
		list[i] = StringKey(k, v[i])
	}
	return list
}
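
// Equivalent shorthand for the two-entry list above (illustrative only):
//
//	list := StringKeys("t0", "a", "b")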

// TableMatrix will produce a set of Tables by producing the
// cross product of each of the TableBuilders with each other.
type TableMatrix []TableList

func (t TableMatrix) Build(template *[]Column) []flux.Table {
	if len(t) == 0 {
		return nil
	} else if len(t) == 1 {
		return t[0].Build(template)
	}

	// Split the TableList into their own distinct TableGroups
	// so we can produce a cross product of groups.
	builders := make([]TableGroup, len(t[0]))
	for i, b := range t[0] {
		builders[i] = append(builders[i], b)
	}

	for i := 1; i < len(t); i++ {
		product := make([]TableGroup, 0, len(builders)*len(t[i]))
		for _, bs := range t[i] {
			a := make([]TableGroup, len(builders))
			copy(a, builders)
			for j := range a {
				a[j] = append(a[j], bs)
			}
			product = append(product, a...)
		}
		builders = product
	}

	var tables []flux.Table
	for _, b := range builders {
		tables = append(tables, b.Build(template)...)
	}
	return tables
}
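
// Cross-product sketch (illustrative only): lists of sizes 2 and 3 expand to
// 2*3 = 6 table groups, one per key combination.
//
//	m := TableMatrix{
//		StringKeys("t0", "a", "b"),
//		StringKeys("t1", "x", "y", "z"),
//	}
//	// m.Build(&template) builds tables for (a,x), (a,y), ..., (b,z).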
@@ -1,151 +0,0 @@
package table

import (
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/semantic"
	"github.com/influxdata/flux/values"
)

// Stringify will read a table and turn it into a human-readable string.
func Stringify(table flux.Table) string {
	var sb strings.Builder
	stringifyKey(&sb, table)
	if err := table.Do(func(cr flux.ColReader) error {
		stringifyRows(&sb, cr)
		return nil
	}); err != nil {
		_, _ = fmt.Fprintf(&sb, "table error: %s\n", err)
	}
	return sb.String()
}
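
// Output shape sketch (illustrative only): the group key is written on a
// "# " header line with non-key columns shown as label=type, followed by one
// line per row formatted by stringifyValue (ints as %di, floats to three
// decimals).
//
//	// # t0=a _time=time,_value=float
//	// t0=a _time=2020-01-01T00:00:00Z,_value=1.500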

func getSortedIndices(key flux.GroupKey, cols []flux.ColMeta) ([]flux.ColMeta, []int) {
	indices := make([]int, len(cols))
	for i := range indices {
		indices[i] = i
	}
	sort.Slice(indices, func(i, j int) bool {
		ci, cj := cols[indices[i]], cols[indices[j]]
		if key.HasCol(ci.Label) && !key.HasCol(cj.Label) {
			return true
		} else if !key.HasCol(ci.Label) && key.HasCol(cj.Label) {
			return false
		}
		return ci.Label < cj.Label
	})
	return cols, indices
}

func stringifyKey(sb *strings.Builder, table flux.Table) {
	key := table.Key()
	cols, indices := getSortedIndices(table.Key(), table.Cols())

	sb.WriteString("# ")
	if len(cols) == 0 {
		sb.WriteString("(none)")
	} else {
		nkeys := 0
		for _, idx := range indices {
			c := cols[idx]
			kidx := execute.ColIdx(c.Label, key.Cols())
			if kidx < 0 {
				continue
			}

			if nkeys > 0 {
				sb.WriteString(",")
			}
			sb.WriteString(cols[idx].Label)
			sb.WriteString("=")

			v := key.Value(kidx)
			stringifyValue(sb, v)
			nkeys++
		}
	}
	sb.WriteString(" ")

	ncols := 0
	for _, idx := range indices {
		c := cols[idx]
		if key.HasCol(c.Label) {
			continue
		}

		if ncols > 0 {
			sb.WriteString(",")
		}
		sb.WriteString(cols[idx].Label)
		sb.WriteString("=")
		sb.WriteString(cols[idx].Type.String())
		ncols++
	}
	sb.WriteString("\n")
}

func stringifyRows(sb *strings.Builder, cr flux.ColReader) {
	key := cr.Key()
	cols, indices := getSortedIndices(cr.Key(), cr.Cols())

	for i, sz := 0, cr.Len(); i < sz; i++ {
		inKey := true
		for j, idx := range indices {
			c := cols[idx]
			if j > 0 {
				if inKey && !key.HasCol(c.Label) {
					sb.WriteString(" ")
					inKey = false
				} else {
					sb.WriteString(",")
				}
			} else if !key.HasCol(c.Label) {
				inKey = false
			}
			sb.WriteString(cols[idx].Label)
			sb.WriteString("=")

			v := execute.ValueForRow(cr, i, idx)
			stringifyValue(sb, v)
		}
		sb.WriteString("\n")
	}
}

func stringifyValue(sb *strings.Builder, v values.Value) {
	if v.IsNull() {
		sb.WriteString("!(nil)")
		return
	}

	switch v.Type().Nature() {
	case semantic.Int:
		_, _ = fmt.Fprintf(sb, "%di", v.Int())
	case semantic.UInt:
		_, _ = fmt.Fprintf(sb, "%du", v.UInt())
	case semantic.Float:
		_, _ = fmt.Fprintf(sb, "%.3f", v.Float())
	case semantic.String:
		sb.WriteString(v.Str())
	case semantic.Bool:
		if v.Bool() {
			sb.WriteString("true")
		} else {
			sb.WriteString("false")
		}
	case semantic.Time:
		ts := v.Time().Time()
		if ts.Nanosecond() > 0 {
			sb.WriteString(ts.Format(time.RFC3339Nano))
		} else {
			sb.WriteString(ts.Format(time.RFC3339))
		}
	default:
		sb.WriteString("!(invalid)")
	}
}
@@ -1,92 +0,0 @@
package errors

import (
	"fmt"
	"strings"

	"github.com/influxdata/flux/codes"
)

// Error is the error struct of flux.
type Error struct {
	// Code is the code of the error as defined in the codes package.
	// This describes the type and category of the error. It is required.
	Code codes.Code

	// Msg contains a human-readable description and additional information
	// about the error itself. This is optional.
	Msg string

	// Err contains the error that was the cause of this error.
	// This is optional.
	Err error
}

// Error implements the error interface by outputting the Code and Err.
func (e *Error) Error() string {
	if e.Msg != "" && e.Err != nil {
		var b strings.Builder
		b.WriteString(e.Msg)
		b.WriteString(": ")
		b.WriteString(e.Err.Error())
		return b.String()
	} else if e.Msg != "" {
		return e.Msg
	} else if e.Err != nil {
		return e.Err.Error()
	}
	return e.Code.String()
}

// Unwrap will return the wrapped error.
func (e *Error) Unwrap() error {
	return e.Err
}

func New(code codes.Code, msg ...interface{}) error {
	return Wrap(nil, code, msg...)
}

func Newf(code codes.Code, fmtStr string, args ...interface{}) error {
	return Wrapf(nil, code, fmtStr, args...)
}

func Wrap(err error, code codes.Code, msg ...interface{}) error {
	var s string
	if len(msg) > 0 {
		s = fmt.Sprint(msg...)
	}
	return &Error{
		Code: code,
		Msg:  s,
		Err:  err,
	}
}

func Wrapf(err error, code codes.Code, format string, a ...interface{}) error {
	return &Error{
		Code: code,
		Msg:  fmt.Sprintf(format, a...),
		Err:  err,
	}
}

// Code returns the error code for the given error.
// If the error is not a flux.Error, this will return
// Unknown for the code. If the error is a flux.Error
// and its code is Inherit, then this will return the
// wrapped error's code.
func Code(err error) codes.Code {
	for {
		if ferr, ok := err.(*Error); ok {
			if ferr.Code != codes.Inherit {
				return ferr.Code
			} else if ferr.Err == nil {
				return codes.Unknown
			}
			err = ferr.Err
		} else {
			return codes.Unknown
		}
	}
}
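
// Wrapping sketch (illustrative only): Code walks the chain, skipping Inherit
// codes until it finds a concrete one.
//
//	base := New(codes.NotFound, "bucket not found")
//	wrapped := Wrap(base, codes.Inherit, "loading dashboard")
//	// Code(wrapped) == codes.NotFound
//	// wrapped.Error() == "loading dashboard: bucket not found"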
@@ -1,87 +0,0 @@
package table

import (
	"sync/atomic"

	"github.com/influxdata/flux"
	"github.com/influxdata/flux/codes"
	"github.com/influxdata/influxdb/v2/pkg/flux/internal/errors"
)

// BufferedTable represents a table of buffered column readers.
type BufferedTable struct {
	used     int32
	empty    bool
	GroupKey flux.GroupKey
	Columns  []flux.ColMeta
	Buffers  []flux.ColReader
}

// FromBuffer constructs a flux.Table from a single flux.ColReader.
func FromBuffer(cr flux.ColReader) flux.Table {
	return &BufferedTable{
		GroupKey: cr.Key(),
		Columns:  cr.Cols(),
		Buffers:  []flux.ColReader{cr},
	}
}

func (b *BufferedTable) Key() flux.GroupKey {
	return b.GroupKey
}

func (b *BufferedTable) Cols() []flux.ColMeta {
	return b.Columns
}

func (b *BufferedTable) Do(f func(flux.ColReader) error) error {
	if !atomic.CompareAndSwapInt32(&b.used, 0, 1) {
		return errors.New(codes.Internal, "table already read")
	}

	i := 0
	defer func() {
		for ; i < len(b.Buffers); i++ {
			b.Buffers[i].Release()
		}
	}()

	b.empty = true
	for ; i < len(b.Buffers); i++ {
		cr := b.Buffers[i]
		if cr.Len() > 0 {
			b.empty = false
		}
		if err := f(cr); err != nil {
			return err
		}
		cr.Release()
	}
	return nil
}

func (b *BufferedTable) Done() {
	if atomic.CompareAndSwapInt32(&b.used, 0, 1) {
		b.empty = b.isEmpty()
		for _, buf := range b.Buffers {
			buf.Release()
		}
		b.Buffers = nil
	}
}

func (b *BufferedTable) Empty() bool {
	if atomic.LoadInt32(&b.used) != 0 {
		return b.empty
	}
	return b.isEmpty()
}

func (b *BufferedTable) isEmpty() bool {
	for _, buf := range b.Buffers {
		if buf.Len() > 0 {
			return false
		}
	}
	return true
}
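
// Single-use sketch (illustrative only): the atomic used flag makes the table
// one-shot, so a second read attempt fails with codes.Internal.
//
//	tbl := FromBuffer(cr)
//	_ = tbl.Do(process)    // consumes and releases the buffers
//	err := tbl.Do(process) // errors.Code(err) == codes.Internal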
@@ -1,5 +0,0 @@
package table

import "github.com/influxdata/influxdb/v2/pkg/flux/execute/table"

type Iterator = table.Iterator
@@ -762,6 +762,8 @@ func convertCellView(cell influxdb.Cell) chart {
		ch.Note = p.Note
		ch.NoteOnEmpty = p.ShowNoteWhenEmpty
		ch.BinSize = int(p.BinSize)
		ch.LegendOpacity = float64(p.LegendOpacity)
		ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold)
	case influxdb.HistogramViewProperties:
		ch.Kind = chartKindHistogram
		ch.Queries = convertQueries(p.Queries)
@@ -773,6 +775,8 @@ func convertCellView(cell influxdb.Cell) chart {
		ch.NoteOnEmpty = p.ShowNoteWhenEmpty
		ch.BinCount = p.BinCount
		ch.Position = p.Position
		ch.LegendOpacity = float64(p.LegendOpacity)
		ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold)
	case influxdb.MarkdownViewProperties:
		ch.Kind = chartKindMarkdown
		ch.Note = p.Note
@@ -786,11 +790,15 @@ func convertCellView(cell influxdb.Cell) chart {
		ch.XCol = p.XColumn
		ch.YCol = p.YColumn
		ch.Position = p.Position
		ch.LegendOpacity = float64(p.LegendOpacity)
		ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold)
	case influxdb.SingleStatViewProperties:
		setCommon(chartKindSingleStat, p.ViewColors, p.DecimalPlaces, p.Queries)
		setNoteFixes(p.Note, p.ShowNoteWhenEmpty, p.Prefix, p.Suffix)
		ch.TickPrefix = p.TickPrefix
		ch.TickSuffix = p.TickSuffix
		ch.LegendOpacity = float64(p.LegendOpacity)
		ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold)
	case influxdb.MosaicViewProperties:
		ch.Kind = chartKindMosaic
		ch.Queries = convertQueries(p.Queries)
@@ -803,6 +811,8 @@ func convertCellView(cell influxdb.Cell) chart {
		}
		ch.Note = p.Note
		ch.NoteOnEmpty = p.ShowNoteWhenEmpty
		ch.LegendOpacity = float64(p.LegendOpacity)
		ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold)
	case influxdb.ScatterViewProperties:
		ch.Kind = chartKindScatter
		ch.Queries = convertQueries(p.Queries)
@@ -815,6 +825,8 @@ func convertCellView(cell influxdb.Cell) chart {
		}
		ch.Note = p.Note
		ch.NoteOnEmpty = p.ShowNoteWhenEmpty
		ch.LegendOpacity = float64(p.LegendOpacity)
		ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold)
	case influxdb.TableViewProperties:
		setCommon(chartKindTable, p.ViewColors, p.DecimalPlaces, p.Queries)
		setNoteFixes(p.Note, p.ShowNoteWhenEmpty, "", "")
@@ -844,6 +856,8 @@ func convertCellView(cell influxdb.Cell) chart {
		ch.UpperColumn = p.UpperColumn
		ch.MainColumn = p.MainColumn
		ch.LowerColumn = p.LowerColumn
		ch.LegendOpacity = float64(p.LegendOpacity)
		ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold)
	case influxdb.XYViewProperties:
		setCommon(chartKindXY, p.ViewColors, influxdb.DecimalPlaces{}, p.Queries)
		setNoteFixes(p.Note, p.ShowNoteWhenEmpty, "", "")
@@ -855,6 +869,8 @@ func convertCellView(cell influxdb.Cell) chart {
		ch.XCol = p.XColumn
		ch.YCol = p.YColumn
		ch.Position = p.Position
		ch.LegendOpacity = float64(p.LegendOpacity)
		ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold)
	}

	sort.Slice(ch.Axes, func(i, j int) bool {
@@ -958,10 +974,15 @@ func convertChartToResource(ch chart) Resource {
	})

	assignNonZeroInts(r, map[string]int{
		fieldChartXPos:     ch.XPos,
		fieldChartYPos:     ch.YPos,
		fieldChartBinCount: ch.BinCount,
		fieldChartBinSize:  ch.BinSize,
		fieldChartXPos:                       ch.XPos,
		fieldChartYPos:                       ch.YPos,
		fieldChartBinCount:                   ch.BinCount,
		fieldChartBinSize:                    ch.BinSize,
		fieldChartLegendOrientationThreshold: ch.LegendOrientationThreshold,
	})

	assignNonZeroFloats(r, map[string]float64{
		fieldChartLegendOpacity: ch.LegendOpacity,
	})

	return r
@@ -1273,6 +1294,14 @@ func assignNonZeroInts(r Resource, m map[string]int) {
	}
}

func assignNonZeroFloats(r Resource, m map[string]float64) {
	for k, v := range m {
		if v != 0 {
			r[k] = v
		}
	}
}
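
// Zero-skipping sketch (illustrative only, assuming Resource's map form):
// only non-zero values are exported, so an unset opacity is omitted from the
// generated template resource.
//
//	r := Resource{}
//	assignNonZeroFloats(r, map[string]float64{
//		fieldChartLegendOpacity: 1.0,
//		"unsetKey":              0, // hypothetical key; dropped because zero
//	})
//	// r now contains only fieldChartLegendOpacity.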

func assignNonZeroStrings(r Resource, m map[string]string) {
	for k, v := range m {
		if v != "" {
@@ -17,10 +17,9 @@ import (
	"time"

	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/ast/edit"
	"github.com/influxdata/flux/parser"
	"github.com/influxdata/influxdb/v2"
	ast2 "github.com/influxdata/influxdb/v2/pkg/flux/ast"
	"github.com/influxdata/influxdb/v2/pkg/flux/ast/edit"
	"github.com/influxdata/influxdb/v2/pkg/jsonnet"
	"gopkg.in/yaml.v3"
)
@@ -1444,32 +1443,34 @@ func (p *Template) parseChart(dashMetaName string, chartIdx int, r Resource) (*c
	}

	c := chart{
		Kind:           ck,
		Name:           r.Name(),
		BinSize:        r.intShort(fieldChartBinSize),
		BinCount:       r.intShort(fieldChartBinCount),
		Geom:           r.stringShort(fieldChartGeom),
		Height:         r.intShort(fieldChartHeight),
		Note:           r.stringShort(fieldChartNote),
		NoteOnEmpty:    r.boolShort(fieldChartNoteOnEmpty),
		Position:       r.stringShort(fieldChartPosition),
		Prefix:         r.stringShort(fieldPrefix),
		Shade:          r.boolShort(fieldChartShade),
		HoverDimension: r.stringShort(fieldChartHoverDimension),
		Suffix:         r.stringShort(fieldSuffix),
		TickPrefix:     r.stringShort(fieldChartTickPrefix),
		TickSuffix:     r.stringShort(fieldChartTickSuffix),
		TimeFormat:     r.stringShort(fieldChartTimeFormat),
		Width:          r.intShort(fieldChartWidth),
		XCol:           r.stringShort(fieldChartXCol),
		YCol:           r.stringShort(fieldChartYCol),
		XPos:           r.intShort(fieldChartXPos),
		YPos:           r.intShort(fieldChartYPos),
		FillColumns:    r.slcStr(fieldChartFillColumns),
		YSeriesColumns: r.slcStr(fieldChartYSeriesColumns),
		UpperColumn:    r.stringShort(fieldChartUpperColumn),
		MainColumn:     r.stringShort(fieldChartMainColumn),
		LowerColumn:    r.stringShort(fieldChartLowerColumn),
		Kind:                       ck,
		Name:                       r.Name(),
		BinSize:                    r.intShort(fieldChartBinSize),
		BinCount:                   r.intShort(fieldChartBinCount),
		Geom:                       r.stringShort(fieldChartGeom),
		Height:                     r.intShort(fieldChartHeight),
		Note:                       r.stringShort(fieldChartNote),
		NoteOnEmpty:                r.boolShort(fieldChartNoteOnEmpty),
		Position:                   r.stringShort(fieldChartPosition),
		Prefix:                     r.stringShort(fieldPrefix),
		Shade:                      r.boolShort(fieldChartShade),
		HoverDimension:             r.stringShort(fieldChartHoverDimension),
		Suffix:                     r.stringShort(fieldSuffix),
		TickPrefix:                 r.stringShort(fieldChartTickPrefix),
		TickSuffix:                 r.stringShort(fieldChartTickSuffix),
		TimeFormat:                 r.stringShort(fieldChartTimeFormat),
		Width:                      r.intShort(fieldChartWidth),
		XCol:                       r.stringShort(fieldChartXCol),
		YCol:                       r.stringShort(fieldChartYCol),
		XPos:                       r.intShort(fieldChartXPos),
		YPos:                       r.intShort(fieldChartYPos),
		FillColumns:                r.slcStr(fieldChartFillColumns),
		YSeriesColumns:             r.slcStr(fieldChartYSeriesColumns),
		UpperColumn:                r.stringShort(fieldChartUpperColumn),
		MainColumn:                 r.stringShort(fieldChartMainColumn),
		LowerColumn:                r.stringShort(fieldChartLowerColumn),
		LegendOpacity:              r.float64Short(fieldChartLegendOpacity),
		LegendOrientationThreshold: r.intShort(fieldChartLegendOrientationThreshold),
	}

	if presLeg, ok := r[fieldChartLegend].(legend); ok {
@@ -1729,16 +1730,16 @@ func valFromExpr(p ast.Expression) interface{} {
		}
		return nil
	case *ast.DateTimeLiteral:
		return ast2.DateTimeFromLiteral(literal)
		return ast.DateTimeFromLiteral(literal)
	case *ast.FloatLiteral:
		return ast2.FloatFromLiteral(literal)
		return ast.FloatFromLiteral(literal)
	case *ast.IntegerLiteral:
		return ast2.IntegerFromLiteral(literal)
		return ast.IntegerFromLiteral(literal)
	case *ast.DurationLiteral:
		dur, _ := ast.DurationFrom(literal, time.Time{})
		return dur
	case *ast.StringLiteral:
		return ast2.StringFromLiteral(literal)
		return ast.StringFromLiteral(literal)
	case *ast.UnaryExpression:
		// a signed duration is represented by a UnaryExpression.
		// it is the only unary expression allowed.
@@ -10,14 +10,13 @@ import (
	"time"

	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/ast/edit"
	"github.com/influxdata/flux/parser"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/notification"
	icheck "github.com/influxdata/influxdb/v2/notification/check"
	"github.com/influxdata/influxdb/v2/notification/endpoint"
	"github.com/influxdata/influxdb/v2/notification/rule"
	ast2 "github.com/influxdata/influxdb/v2/pkg/flux/ast"
	"github.com/influxdata/influxdb/v2/pkg/flux/ast/edit"
)

type identity struct {
@@ -532,70 +531,74 @@ func (d *dashboard) valid() []validationErr {
}

const (
	fieldChartAxes           = "axes"
	fieldChartBinCount       = "binCount"
	fieldChartBinSize        = "binSize"
	fieldChartColors         = "colors"
	fieldChartDecimalPlaces  = "decimalPlaces"
	fieldChartDomain         = "domain"
	fieldChartFillColumns    = "fillColumns"
	fieldChartGeom           = "geom"
	fieldChartHeight         = "height"
	fieldChartLegend         = "legend"
	fieldChartNote           = "note"
	fieldChartNoteOnEmpty    = "noteOnEmpty"
	fieldChartPosition       = "position"
	fieldChartQueries        = "queries"
	fieldChartShade          = "shade"
	fieldChartHoverDimension = "hoverDimension"
	fieldChartFieldOptions   = "fieldOptions"
	fieldChartTableOptions   = "tableOptions"
	fieldChartTickPrefix     = "tickPrefix"
	fieldChartTickSuffix     = "tickSuffix"
	fieldChartTimeFormat     = "timeFormat"
	fieldChartYSeriesColumns = "ySeriesColumns"
	fieldChartUpperColumn    = "upperColumn"
	fieldChartMainColumn     = "mainColumn"
	fieldChartLowerColumn    = "lowerColumn"
	fieldChartWidth          = "width"
	fieldChartXCol           = "xCol"
	fieldChartXPos           = "xPos"
	fieldChartYCol           = "yCol"
	fieldChartYPos           = "yPos"
	fieldChartAxes                       = "axes"
	fieldChartBinCount                   = "binCount"
	fieldChartBinSize                    = "binSize"
	fieldChartColors                     = "colors"
	fieldChartDecimalPlaces              = "decimalPlaces"
	fieldChartDomain                     = "domain"
	fieldChartFillColumns                = "fillColumns"
	fieldChartGeom                       = "geom"
	fieldChartHeight                     = "height"
	fieldChartLegend                     = "legend"
	fieldChartNote                       = "note"
	fieldChartNoteOnEmpty                = "noteOnEmpty"
	fieldChartPosition                   = "position"
	fieldChartQueries                    = "queries"
	fieldChartShade                      = "shade"
	fieldChartHoverDimension             = "hoverDimension"
	fieldChartFieldOptions               = "fieldOptions"
	fieldChartTableOptions               = "tableOptions"
	fieldChartTickPrefix                 = "tickPrefix"
	fieldChartTickSuffix                 = "tickSuffix"
	fieldChartTimeFormat                 = "timeFormat"
	fieldChartYSeriesColumns             = "ySeriesColumns"
	fieldChartUpperColumn                = "upperColumn"
	fieldChartMainColumn                 = "mainColumn"
	fieldChartLowerColumn                = "lowerColumn"
	fieldChartWidth                      = "width"
	fieldChartXCol                       = "xCol"
	fieldChartXPos                       = "xPos"
	fieldChartYCol                       = "yCol"
	fieldChartYPos                       = "yPos"
	fieldChartLegendOpacity              = "legendOpacity"
	fieldChartLegendOrientationThreshold = "legendOrientationThreshold"
)

type chart struct {
	Kind            chartKind
	Name            string
	Prefix          string
	TickPrefix      string
	Suffix          string
	TickSuffix      string
	Note            string
	NoteOnEmpty     bool
	DecimalPlaces   int
	EnforceDecimals bool
	Shade           bool
	HoverDimension  string
	Legend          legend
	Colors          colors
	Queries         queries
	Axes            axes
	Geom            string
	YSeriesColumns  []string
	XCol, YCol      string
	UpperColumn     string
	MainColumn      string
	LowerColumn     string
	XPos, YPos      int
	Height, Width   int
	BinSize         int
	BinCount        int
	Position        string
	FieldOptions    []fieldOption
	FillColumns     []string
	TableOptions    tableOptions
	TimeFormat      string
	Kind                       chartKind
	Name                       string
	Prefix                     string
	TickPrefix                 string
	Suffix                     string
	TickSuffix                 string
	Note                       string
	NoteOnEmpty                bool
	DecimalPlaces              int
	EnforceDecimals            bool
	Shade                      bool
	HoverDimension             string
	Legend                     legend
	Colors                     colors
	Queries                    queries
	Axes                       axes
	Geom                       string
	YSeriesColumns             []string
	XCol, YCol                 string
	UpperColumn                string
	MainColumn                 string
	LowerColumn                string
	XPos, YPos                 int
	Height, Width              int
	BinSize                    int
	BinCount                   int
	Position                   string
	FieldOptions               []fieldOption
	FillColumns                []string
	TableOptions               tableOptions
	TimeFormat                 string
	LegendOpacity              float64
	LegendOrientationThreshold int
}

func (c *chart) properties() influxdb.ViewProperties {
@@ -618,37 +621,41 @@ func (c *chart) properties() influxdb.ViewProperties {
		}
	case chartKindHeatMap:
		return influxdb.HeatmapViewProperties{
			Type:              influxdb.ViewPropertyTypeHeatMap,
			Queries:           c.Queries.influxDashQueries(),
			ViewColors:        c.Colors.strings(),
			BinSize:           int32(c.BinSize),
			XColumn:           c.XCol,
			YColumn:           c.YCol,
			XDomain:           c.Axes.get("x").Domain,
			YDomain:           c.Axes.get("y").Domain,
			XPrefix:           c.Axes.get("x").Prefix,
			YPrefix:           c.Axes.get("y").Prefix,
			XSuffix:           c.Axes.get("x").Suffix,
			YSuffix:           c.Axes.get("y").Suffix,
			XAxisLabel:        c.Axes.get("x").Label,
			YAxisLabel:        c.Axes.get("y").Label,
			Note:              c.Note,
			ShowNoteWhenEmpty: c.NoteOnEmpty,
			TimeFormat:        c.TimeFormat,
			Type:                       influxdb.ViewPropertyTypeHeatMap,
			Queries:                    c.Queries.influxDashQueries(),
			ViewColors:                 c.Colors.strings(),
			BinSize:                    int32(c.BinSize),
			XColumn:                    c.XCol,
			YColumn:                    c.YCol,
			XDomain:                    c.Axes.get("x").Domain,
			YDomain:                    c.Axes.get("y").Domain,
			XPrefix:                    c.Axes.get("x").Prefix,
			YPrefix:                    c.Axes.get("y").Prefix,
			XSuffix:                    c.Axes.get("x").Suffix,
			YSuffix:                    c.Axes.get("y").Suffix,
			XAxisLabel:                 c.Axes.get("x").Label,
			YAxisLabel:                 c.Axes.get("y").Label,
			Note:                       c.Note,
			ShowNoteWhenEmpty:          c.NoteOnEmpty,
			TimeFormat:                 c.TimeFormat,
			LegendOpacity:              float64(c.LegendOpacity),
			LegendOrientationThreshold: int(c.LegendOrientationThreshold),
		}
	case chartKindHistogram:
		return influxdb.HistogramViewProperties{
			Type:              influxdb.ViewPropertyTypeHistogram,
			Queries:           c.Queries.influxDashQueries(),
			ViewColors:        c.Colors.influxViewColors(),
			FillColumns:       c.FillColumns,
			XColumn:           c.XCol,
			XDomain:           c.Axes.get("x").Domain,
			XAxisLabel:        c.Axes.get("x").Label,
			Position:          c.Position,
			BinCount:          c.BinCount,
			Note:              c.Note,
			ShowNoteWhenEmpty: c.NoteOnEmpty,
			Type:                       influxdb.ViewPropertyTypeHistogram,
			Queries:                    c.Queries.influxDashQueries(),
			ViewColors:                 c.Colors.influxViewColors(),
			FillColumns:                c.FillColumns,
			XColumn:                    c.XCol,
			XDomain:                    c.Axes.get("x").Domain,
			XAxisLabel:                 c.Axes.get("x").Label,
			Position:                   c.Position,
			BinCount:                   c.BinCount,
			Note:                       c.Note,
			ShowNoteWhenEmpty:          c.NoteOnEmpty,
			LegendOpacity:              float64(c.LegendOpacity),
			LegendOrientationThreshold: int(c.LegendOrientationThreshold),
		}
	case chartKindMarkdown:
		return influxdb.MarkdownViewProperties{
@@ -657,59 +664,65 @@ func (c *chart) properties() influxdb.ViewProperties {
		}
	case chartKindMosaic:
		return influxdb.MosaicViewProperties{
			Type:              influxdb.ViewPropertyTypeMosaic,
			Queries:           c.Queries.influxDashQueries(),
			ViewColors:        c.Colors.strings(),
			XColumn:           c.XCol,
			YSeriesColumns:    c.YSeriesColumns,
			XDomain:           c.Axes.get("x").Domain,
			YDomain:           c.Axes.get("y").Domain,
			XPrefix:           c.Axes.get("x").Prefix,
			YPrefix:           c.Axes.get("y").Prefix,
			XSuffix:           c.Axes.get("x").Suffix,
			YSuffix:           c.Axes.get("y").Suffix,
			XAxisLabel:        c.Axes.get("x").Label,
			YAxisLabel:        c.Axes.get("y").Label,
			Note:              c.Note,
			ShowNoteWhenEmpty: c.NoteOnEmpty,
			TimeFormat:        c.TimeFormat,
			Type:                       influxdb.ViewPropertyTypeMosaic,
			Queries:                    c.Queries.influxDashQueries(),
			ViewColors:                 c.Colors.strings(),
			XColumn:                    c.XCol,
			YSeriesColumns:             c.YSeriesColumns,
			XDomain:                    c.Axes.get("x").Domain,
			YDomain:                    c.Axes.get("y").Domain,
			XPrefix:                    c.Axes.get("x").Prefix,
			YPrefix:                    c.Axes.get("y").Prefix,
			XSuffix:                    c.Axes.get("x").Suffix,
			YSuffix:                    c.Axes.get("y").Suffix,
			XAxisLabel:                 c.Axes.get("x").Label,
			YAxisLabel:                 c.Axes.get("y").Label,
			Note:                       c.Note,
			ShowNoteWhenEmpty:          c.NoteOnEmpty,
			TimeFormat:                 c.TimeFormat,
			LegendOpacity:              float64(c.LegendOpacity),
			LegendOrientationThreshold: int(c.LegendOrientationThreshold),
		}
	case chartKindBand:
		return influxdb.BandViewProperties{
			Type:              influxdb.ViewPropertyTypeBand,
			Queries:           c.Queries.influxDashQueries(),
			ViewColors:        c.Colors.influxViewColors(),
			Legend:            c.Legend.influxLegend(),
			HoverDimension:    c.HoverDimension,
			XColumn:           c.XCol,
			YColumn:           c.YCol,
			UpperColumn:       c.UpperColumn,
			MainColumn:        c.MainColumn,
			LowerColumn:       c.LowerColumn,
			Axes:              c.Axes.influxAxes(),
			Geom:              c.Geom,
			Note:              c.Note,
			ShowNoteWhenEmpty: c.NoteOnEmpty,
			TimeFormat:        c.TimeFormat,
			Type:                       influxdb.ViewPropertyTypeBand,
			Queries:                    c.Queries.influxDashQueries(),
			ViewColors:                 c.Colors.influxViewColors(),
			Legend:                     c.Legend.influxLegend(),
			HoverDimension:             c.HoverDimension,
			XColumn:                    c.XCol,
			YColumn:                    c.YCol,
			UpperColumn:                c.UpperColumn,
			MainColumn:                 c.MainColumn,
			LowerColumn:                c.LowerColumn,
			Axes:                       c.Axes.influxAxes(),
			Geom:                       c.Geom,
			Note:                       c.Note,
			ShowNoteWhenEmpty:          c.NoteOnEmpty,
			TimeFormat:                 c.TimeFormat,
			LegendOpacity:              float64(c.LegendOpacity),
			LegendOrientationThreshold: int(c.LegendOrientationThreshold),
		}
	case chartKindScatter:
		return influxdb.ScatterViewProperties{
			Type:              influxdb.ViewPropertyTypeScatter,
			Queries:           c.Queries.influxDashQueries(),
			ViewColors:        c.Colors.strings(),
			XColumn:           c.XCol,
			YColumn:           c.YCol,
			XDomain:           c.Axes.get("x").Domain,
			YDomain:           c.Axes.get("y").Domain,
			XPrefix:           c.Axes.get("x").Prefix,
			YPrefix:           c.Axes.get("y").Prefix,
			XSuffix:           c.Axes.get("x").Suffix,
			YSuffix:           c.Axes.get("y").Suffix,
			XAxisLabel:        c.Axes.get("x").Label,
			YAxisLabel:        c.Axes.get("y").Label,
			Note:              c.Note,
			ShowNoteWhenEmpty: c.NoteOnEmpty,
			TimeFormat:        c.TimeFormat,
			Type:                       influxdb.ViewPropertyTypeScatter,
			Queries:                    c.Queries.influxDashQueries(),
			ViewColors:                 c.Colors.strings(),
			XColumn:                    c.XCol,
			YColumn:                    c.YCol,
			XDomain:                    c.Axes.get("x").Domain,
			YDomain:                    c.Axes.get("y").Domain,
			XPrefix:                    c.Axes.get("x").Prefix,
			YPrefix:                    c.Axes.get("y").Prefix,
			XSuffix:                    c.Axes.get("x").Suffix,
			YSuffix:                    c.Axes.get("y").Suffix,
			XAxisLabel:                 c.Axes.get("x").Label,
			YAxisLabel:                 c.Axes.get("y").Label,
			Note:                       c.Note,
			ShowNoteWhenEmpty:          c.NoteOnEmpty,
			TimeFormat:                 c.TimeFormat,
			LegendOpacity:              float64(c.LegendOpacity),
			LegendOrientationThreshold: int(c.LegendOrientationThreshold),
		}
	case chartKindSingleStat:
		return influxdb.SingleStatViewProperties{
@@ -722,10 +735,12 @@ func (c *chart) properties() influxdb.ViewProperties {
				IsEnforced: c.EnforceDecimals,
				Digits:     int32(c.DecimalPlaces),
			},
			Note:              c.Note,
			ShowNoteWhenEmpty: c.NoteOnEmpty,
			Queries:           c.Queries.influxDashQueries(),
			ViewColors:        c.Colors.influxViewColors(),
			Note:                       c.Note,
			ShowNoteWhenEmpty:          c.NoteOnEmpty,
			Queries:                    c.Queries.influxDashQueries(),
			ViewColors:                 c.Colors.influxViewColors(),
			LegendOpacity:              float64(c.LegendOpacity),
			LegendOrientationThreshold: int(c.LegendOrientationThreshold),
		}
	case chartKindSingleStatPlusLine:
		return influxdb.LinePlusSingleStatProperties{
@@ -736,17 +751,19 @@ func (c *chart) properties() influxdb.ViewProperties {
				IsEnforced: c.EnforceDecimals,
				Digits:     int32(c.DecimalPlaces),
			},
			Note:              c.Note,
			ShowNoteWhenEmpty: c.NoteOnEmpty,
			XColumn:           c.XCol,
			YColumn:           c.YCol,
			ShadeBelow:        c.Shade,
			HoverDimension:    c.HoverDimension,
			Legend:            c.Legend.influxLegend(),
			Queries:           c.Queries.influxDashQueries(),
			ViewColors:        c.Colors.influxViewColors(),
			Axes:              c.Axes.influxAxes(),
			Position:          c.Position,
			Note:                       c.Note,
			ShowNoteWhenEmpty:          c.NoteOnEmpty,
			XColumn:                    c.XCol,
			YColumn:                    c.YCol,
			ShadeBelow:                 c.Shade,
			HoverDimension:             c.HoverDimension,
			Legend:                     c.Legend.influxLegend(),
			Queries:                    c.Queries.influxDashQueries(),
			ViewColors:                 c.Colors.influxViewColors(),
			Axes:                       c.Axes.influxAxes(),
			Position:                   c.Position,
			LegendOpacity:              float64(c.LegendOpacity),
			LegendOrientationThreshold: int(c.LegendOrientationThreshold),
		}
	case chartKindTable:
		fieldOptions := make([]influxdb.RenamableField, 0, len(c.FieldOptions))
@@ -781,20 +798,22 @@ func (c *chart) properties() influxdb.ViewProperties {
		}
	case chartKindXY:
		return influxdb.XYViewProperties{
			Type:              influxdb.ViewPropertyTypeXY,
			Note:              c.Note,
			ShowNoteWhenEmpty: c.NoteOnEmpty,
			XColumn:           c.XCol,
			YColumn:           c.YCol,
			ShadeBelow:        c.Shade,
			HoverDimension:    c.HoverDimension,
			Legend:            c.Legend.influxLegend(),
			Queries:           c.Queries.influxDashQueries(),
			ViewColors:        c.Colors.influxViewColors(),
			Axes:              c.Axes.influxAxes(),
			Geom:              c.Geom,
			Position:          c.Position,
			TimeFormat:        c.TimeFormat,
			Type:                       influxdb.ViewPropertyTypeXY,
			Note:                       c.Note,
			ShowNoteWhenEmpty:          c.NoteOnEmpty,
			XColumn:                    c.XCol,
			YColumn:                    c.YCol,
			ShadeBelow:                 c.Shade,
			HoverDimension:             c.HoverDimension,
			Legend:                     c.Legend.influxLegend(),
			Queries:                    c.Queries.influxDashQueries(),
			ViewColors:                 c.Colors.influxViewColors(),
			Axes:                       c.Axes.influxAxes(),
			Geom:                       c.Geom,
			Position:                   c.Position,
			TimeFormat:                 c.TimeFormat,
			LegendOpacity:              float64(c.LegendOpacity),
			LegendOrientationThreshold: int(c.LegendOrientationThreshold),
		}
	default:
		return nil
@@ -2313,7 +2332,7 @@ func convertRefToRefSummary(field string, ref *references) SummaryReference {

func astBoolFromIface(v interface{}) *ast.BooleanLiteral {
	b, _ := v.(bool)
	return ast2.BooleanLiteralFromValue(b)
	return ast.BooleanLiteralFromValue(b)
}

func astDurationFromIface(v interface{}) *ast.DurationLiteral {
@@ -2331,18 +2350,18 @@ func astDurationFromIface(v interface{}) *ast.DurationLiteral {

func astFloatFromIface(v interface{}) *ast.FloatLiteral {
	if i, ok := v.(int); ok {
		return ast2.FloatLiteralFromValue(float64(i))
		return ast.FloatLiteralFromValue(float64(i))
	}
	f, _ := v.(float64)
	return ast2.FloatLiteralFromValue(f)
	return ast.FloatLiteralFromValue(f)
}

func astIntegerFromIface(v interface{}) *ast.IntegerLiteral {
	if f, ok := v.(float64); ok {
		return ast2.IntegerLiteralFromValue(int64(f))
		return ast.IntegerLiteralFromValue(int64(f))
	}
	i, _ := v.(int64)
	return ast2.IntegerLiteralFromValue(i)
	return ast.IntegerLiteralFromValue(i)
}

func astNow() *ast.CallExpression {
@@ -2353,12 +2372,12 @@ func astNow() *ast.CallExpression {

func astStringFromIface(v interface{}) *ast.StringLiteral {
	s, _ := v.(string)
	return ast2.StringLiteralFromValue(s)
	return ast.StringLiteralFromValue(s)
}

func astTimeFromIface(v interface{}) *ast.DateTimeLiteral {
	if t, ok := v.(time.Time); ok {
		return ast2.DateTimeLiteralFromValue(t)
		return ast.DateTimeLiteralFromValue(t)
	}

	s, ok := v.(string)
@@ -1058,6 +1058,8 @@ spec:
				assert.Equal(t, "heatmap", props.GetType())
				assert.Equal(t, "heatmap note", props.Note)
				assert.Equal(t, int32(10), props.BinSize)
				assert.Equal(t, 1.0, props.LegendOpacity)
				assert.Equal(t, 5, props.LegendOrientationThreshold)
				assert.True(t, props.ShowNoteWhenEmpty)

				assert.Equal(t, []float64{0, 10}, props.XDomain)
@@ -1175,6 +1177,8 @@ spec:
				assert.Equal(t, "histogram", props.GetType())
				assert.Equal(t, "histogram note", props.Note)
				assert.Equal(t, 30, props.BinCount)
				assert.Equal(t, 1.0, props.LegendOpacity)
				assert.Equal(t, 5, props.LegendOrientationThreshold)
				assert.True(t, props.ShowNoteWhenEmpty)
				assert.Equal(t, []float64{0, 10}, props.XDomain)
				assert.Equal(t, []string{"a", "b"}, props.FillColumns)
@@ -1289,6 +1293,8 @@ spec:
				assert.Equal(t, "y_prefix", props.YPrefix)
				assert.Equal(t, "x_suffix", props.XSuffix)
				assert.Equal(t, "y_suffix", props.YSuffix)
				assert.Equal(t, 1.0, props.LegendOpacity)
				assert.Equal(t, 5, props.LegendOrientationThreshold)
			})
		})
	})
@@ -1319,6 +1325,8 @@ spec:
				assert.Equal(t, "foo", props.UpperColumn)
				assert.Equal(t, "baz", props.MainColumn)
				assert.Equal(t, "bar", props.LowerColumn)
				assert.Equal(t, 1.0, props.LegendOpacity)
				assert.Equal(t, 5, props.LegendOrientationThreshold)

				require.Len(t, props.ViewColors, 1)
				c := props.ViewColors[0]
@@ -1382,6 +1390,8 @@ spec:
				assert.Equal(t, "y_prefix", props.YPrefix)
				assert.Equal(t, "x_suffix", props.XSuffix)
				assert.Equal(t, "y_suffix", props.YSuffix)
				assert.Equal(t, 1.0, props.LegendOpacity)
				assert.Equal(t, 5, props.LegendOrientationThreshold)
			})
		})

@@ -1820,6 +1830,8 @@ spec:
				assert.Equal(t, "overlaid", props.Position)
				assert.Equal(t, "leg_type", props.Legend.Type)
				assert.Equal(t, "horizontal", props.Legend.Orientation)
				assert.Equal(t, 1.0, props.LegendOpacity)
				assert.Equal(t, 5, props.LegendOrientationThreshold)

				require.Len(t, props.Queries, 1)
				q := props.Queries[0]
@@ -2274,6 +2286,8 @@ spec:
				assert.Equal(t, "xy chart note", props.Note)
				assert.True(t, props.ShowNoteWhenEmpty)
				assert.Equal(t, "stacked", props.Position)
				assert.Equal(t, 1.0, props.LegendOpacity)
				assert.Equal(t, 5, props.LegendOrientationThreshold)

				require.Len(t, props.Queries, 1)
				q := props.Queries[0]
@@ -2355,23 +2355,25 @@ func TestService(t *testing.T) {
					Name: "view name",
				},
				Properties: influxdb.HeatmapViewProperties{
					Type:              influxdb.ViewPropertyTypeHeatMap,
					Note:              "a note",
					Queries:           []influxdb.DashboardQuery{newQuery()},
					ShowNoteWhenEmpty: true,
					ViewColors:        []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"},
					XColumn:           "x",
					YColumn:           "y",
					XDomain:           []float64{0, 10},
					YDomain:           []float64{0, 100},
					XAxisLabel:        "x_label",
					XPrefix:           "x_prefix",
					XSuffix:           "x_suffix",
					YAxisLabel:        "y_label",
					YPrefix:           "y_prefix",
					YSuffix:           "y_suffix",
					BinSize:           10,
					TimeFormat:        "",
					Type:                       influxdb.ViewPropertyTypeHeatMap,
					Note:                       "a note",
					Queries:                    []influxdb.DashboardQuery{newQuery()},
					ShowNoteWhenEmpty:          true,
					ViewColors:                 []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"},
					XColumn:                    "x",
					YColumn:                    "y",
					XDomain:                    []float64{0, 10},
					YDomain:                    []float64{0, 100},
					XAxisLabel:                 "x_label",
					XPrefix:                    "x_prefix",
					XSuffix:                    "x_suffix",
					YAxisLabel:                 "y_label",
					YPrefix:                    "y_prefix",
					YSuffix:                    "y_suffix",
					BinSize:                    10,
					TimeFormat:                 "",
					LegendOpacity:              1.0,
					LegendOrientationThreshold: 5,
				},
			},
		},
@@ -2383,17 +2385,19 @@ func TestService(t *testing.T) {
					Name: "view name",
				},
				Properties: influxdb.HistogramViewProperties{
					Type:              influxdb.ViewPropertyTypeHistogram,
					Note:              "a note",
					Queries:           []influxdb.DashboardQuery{newQuery()},
					ShowNoteWhenEmpty: true,
					ViewColors:        []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}},
					FillColumns:       []string{"a", "b"},
					XColumn:           "_value",
					XDomain:           []float64{0, 10},
					XAxisLabel:        "x_label",
					BinCount:          30,
					Position:          "stacked",
					Type:                       influxdb.ViewPropertyTypeHistogram,
					Note:                       "a note",
					Queries:                    []influxdb.DashboardQuery{newQuery()},
					ShowNoteWhenEmpty:          true,
					ViewColors:                 []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}},
					FillColumns:                []string{"a", "b"},
					XColumn:                    "_value",
					XDomain:                    []float64{0, 10},
					XAxisLabel:                 "x_label",
					BinCount:                   30,
					Position:                   "stacked",
					LegendOpacity:              1.0,
					LegendOrientationThreshold: 5,
				},
			},
		},
@@ -2405,22 +2409,24 @@ func TestService(t *testing.T) {
					Name: "view name",
				},
				Properties: influxdb.ScatterViewProperties{
					Type:              influxdb.ViewPropertyTypeScatter,
					Note:              "a note",
					Queries:           []influxdb.DashboardQuery{newQuery()},
					ShowNoteWhenEmpty: true,
					ViewColors:        []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"},
					XColumn:           "x",
					YColumn:           "y",
					XDomain:           []float64{0, 10},
					YDomain:           []float64{0, 100},
					XAxisLabel:        "x_label",
					XPrefix:           "x_prefix",
					XSuffix:           "x_suffix",
					YAxisLabel:        "y_label",
					YPrefix:           "y_prefix",
					YSuffix:           "y_suffix",
					TimeFormat:        "",
					Type:                       influxdb.ViewPropertyTypeScatter,
					Note:                       "a note",
					Queries:                    []influxdb.DashboardQuery{newQuery()},
					ShowNoteWhenEmpty:          true,
					ViewColors:                 []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"},
					XColumn:                    "x",
					YColumn:                    "y",
					XDomain:                    []float64{0, 10},
					YDomain:                    []float64{0, 100},
					XAxisLabel:                 "x_label",
					XPrefix:                    "x_prefix",
					XSuffix:                    "x_suffix",
					YAxisLabel:                 "y_label",
					YPrefix:                    "y_prefix",
					YSuffix:                    "y_suffix",
					TimeFormat:                 "",
					LegendOpacity:              1.0,
					LegendOrientationThreshold: 5,
				},
			},
		},
@@ -2431,21 +2437,23 @@ func TestService(t *testing.T) {
					Name: "view name",
				},
				Properties: influxdb.MosaicViewProperties{
					Type:              influxdb.ViewPropertyTypeMosaic,
					Note:              "a note",
					Queries:           []influxdb.DashboardQuery{newQuery()},
					ShowNoteWhenEmpty: true,
					ViewColors:        []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"},
					XColumn:           "x",
					YSeriesColumns:    []string{"y"},
					XDomain:           []float64{0, 10},
					YDomain:           []float64{0, 100},
					XAxisLabel:        "x_label",
					XPrefix:           "x_prefix",
					XSuffix:           "x_suffix",
					YAxisLabel:        "y_label",
					YPrefix:           "y_prefix",
					YSuffix:           "y_suffix",
					Type:                       influxdb.ViewPropertyTypeMosaic,
					Note:                       "a note",
					Queries:                    []influxdb.DashboardQuery{newQuery()},
					ShowNoteWhenEmpty:          true,
					ViewColors:                 []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"},
					XColumn:                    "x",
					YSeriesColumns:             []string{"y"},
					XDomain:                    []float64{0, 10},
					YDomain:                    []float64{0, 100},
					XAxisLabel:                 "x_label",
					XPrefix:                    "x_prefix",
					XSuffix:                    "x_suffix",
					YAxisLabel:                 "y_label",
					YPrefix:                    "y_prefix",
					YSuffix:                    "y_suffix",
					LegendOpacity:              1.0,
					LegendOrientationThreshold: 5,
				},
			},
		},
@@ -2456,16 +2464,18 @@ func TestService(t *testing.T) {
					Name: "view name",
				},
				Properties: influxdb.SingleStatViewProperties{
					Type:              influxdb.ViewPropertyTypeSingleStat,
					DecimalPlaces:     influxdb.DecimalPlaces{IsEnforced: true, Digits: 1},
					Note:              "a note",
					Queries:           []influxdb.DashboardQuery{newQuery()},
					Prefix:            "pre",
					TickPrefix:        "false",
					ShowNoteWhenEmpty: true,
					Suffix:            "suf",
					TickSuffix:        "true",
					ViewColors:        []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					Type:                       influxdb.ViewPropertyTypeSingleStat,
					DecimalPlaces:              influxdb.DecimalPlaces{IsEnforced: true, Digits: 1},
					Note:                       "a note",
					Queries:                    []influxdb.DashboardQuery{newQuery()},
					Prefix:                     "pre",
					TickPrefix:                 "false",
					ShowNoteWhenEmpty:          true,
					Suffix:                     "suf",
					TickSuffix:                 "true",
					ViewColors:                 []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					LegendOpacity:              1.0,
					LegendOrientationThreshold: 5,
				},
			},
		},
@@ -2477,16 +2487,18 @@ func TestService(t *testing.T) {
					Name: "view name",
				},
				Properties: influxdb.SingleStatViewProperties{
					Type:              influxdb.ViewPropertyTypeSingleStat,
					DecimalPlaces:     influxdb.DecimalPlaces{IsEnforced: true, Digits: 1},
					Note:              "a note",
					Queries:           []influxdb.DashboardQuery{newQuery()},
					Prefix:            "pre",
					TickPrefix:        "false",
					ShowNoteWhenEmpty: true,
					Suffix:            "suf",
					TickSuffix:        "true",
					ViewColors:        []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					Type:                       influxdb.ViewPropertyTypeSingleStat,
					DecimalPlaces:              influxdb.DecimalPlaces{IsEnforced: true, Digits: 1},
					Note:                       "a note",
					Queries:                    []influxdb.DashboardQuery{newQuery()},
					Prefix:                     "pre",
					TickPrefix:                 "false",
					ShowNoteWhenEmpty:          true,
					Suffix:                     "suf",
					TickSuffix:                 "true",
					ViewColors:                 []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					LegendOpacity:              1.0,
					LegendOrientationThreshold: 5,
				},
			},
		},
@@ -2498,21 +2510,23 @@ func TestService(t *testing.T) {
					Name: "view name",
				},
				Properties: influxdb.LinePlusSingleStatProperties{
					Type:              influxdb.ViewPropertyTypeSingleStatPlusLine,
					Axes:              newAxes(),
					DecimalPlaces:     influxdb.DecimalPlaces{IsEnforced: true, Digits: 1},
					Legend:            influxdb.Legend{Type: "type", Orientation: "horizontal"},
					Note:              "a note",
					Prefix:            "pre",
					Suffix:            "suf",
					Queries:           []influxdb.DashboardQuery{newQuery()},
					ShadeBelow:        true,
					HoverDimension:    "y",
					ShowNoteWhenEmpty: true,
					ViewColors:        []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					XColumn:           "x",
					YColumn:           "y",
					Position:          "stacked",
					Type:                       influxdb.ViewPropertyTypeSingleStatPlusLine,
					Axes:                       newAxes(),
					DecimalPlaces:              influxdb.DecimalPlaces{IsEnforced: true, Digits: 1},
					Legend:                     influxdb.Legend{Type: "type", Orientation: "horizontal"},
					Note:                       "a note",
					Prefix:                     "pre",
					Suffix:                     "suf",
					Queries:                    []influxdb.DashboardQuery{newQuery()},
					ShadeBelow:                 true,
					HoverDimension:             "y",
					ShowNoteWhenEmpty:          true,
					ViewColors:                 []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					XColumn:                    "x",
					YColumn:                    "y",
					Position:                   "stacked",
					LegendOpacity:              1.0,
					LegendOrientationThreshold: 5,
				},
			},
		},
@@ -2524,20 +2538,22 @@ func TestService(t *testing.T) {
					Name: "view name",
				},
				Properties: influxdb.XYViewProperties{
					Type:              influxdb.ViewPropertyTypeXY,
					Axes:              newAxes(),
					Geom:              "step",
					Legend:            influxdb.Legend{Type: "type", Orientation: "horizontal"},
					Note:              "a note",
					Queries:           []influxdb.DashboardQuery{newQuery()},
					ShadeBelow:        true,
					HoverDimension:    "y",
					ShowNoteWhenEmpty: true,
					ViewColors:        []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					XColumn:           "x",
					YColumn:           "y",
					Position:          "overlaid",
					TimeFormat:        "",
					Type:                       influxdb.ViewPropertyTypeXY,
					Axes:                       newAxes(),
					Geom:                       "step",
					Legend:                     influxdb.Legend{Type: "type", Orientation: "horizontal"},
					Note:                       "a note",
					Queries:                    []influxdb.DashboardQuery{newQuery()},
					ShadeBelow:                 true,
					HoverDimension:             "y",
					ShowNoteWhenEmpty:          true,
					ViewColors:                 []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					XColumn:                    "x",
					YColumn:                    "y",
					Position:                   "overlaid",
					TimeFormat:                 "",
					LegendOpacity:              1.0,
					LegendOrientationThreshold: 5,
				},
			},
		},
@@ -2549,21 +2565,23 @@ func TestService(t *testing.T) {
					Name: "view name",
				},
				Properties: influxdb.BandViewProperties{
					Type:              influxdb.ViewPropertyTypeBand,
					Axes:              newAxes(),
					Geom:              "step",
					Legend:            influxdb.Legend{Type: "type", Orientation: "horizontal"},
					Note:              "a note",
					Queries:           []influxdb.DashboardQuery{newQuery()},
					HoverDimension:    "y",
					ShowNoteWhenEmpty: true,
					ViewColors:        []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					XColumn:           "x",
					YColumn:           "y",
					UpperColumn:       "upper",
					MainColumn:        "main",
					LowerColumn:       "lower",
					TimeFormat:        "",
					Type:                       influxdb.ViewPropertyTypeBand,
					Axes:                       newAxes(),
					Geom:                       "step",
					Legend:                     influxdb.Legend{Type: "type", Orientation: "horizontal"},
					Note:                       "a note",
					Queries:                    []influxdb.DashboardQuery{newQuery()},
					HoverDimension:             "y",
					ShowNoteWhenEmpty:          true,
					ViewColors:                 []influxdb.ViewColor{{Type: "text", Hex: "red"}},
					XColumn:                    "x",
					YColumn:                    "y",
					UpperColumn:                "upper",
					MainColumn:                 "main",
					LowerColumn:                "lower",
					TimeFormat:                 "",
					LegendOpacity:              1.0,
					LegendOrientationThreshold: 5,
				},
			},
		},
@@ -22,6 +22,8 @@ spec:
      geom: line
      width: 6
      height: 3
      legendOpacity: 1.0
      legendOrientationThreshold: 5
      queries:
        - query: >
            from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean")
@@ -20,43 +20,75 @@
          "xCol": "_time",
          "yCol": "_value",
          "binSize": 10,
          "legendOpacity": 1.0,
          "legendOrientationThreshold": 5,
          "queries": [
            {
              "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"mem\") |> filter(fn: (r) => r._field == \"used_percent\") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: \"mean\")"
            }
          ],
          "axes":[
          "axes": [
            {
              "name": "x",
              "label": "x_label",
              "prefix": "x_prefix",
              "suffix": "x_suffix",
              "domain": [0, 10]
              "domain": [
                0,
                10
              ]
            },
            {
              "name": "y",
              "label": "y_label",
              "prefix": "y_prefix",
              "suffix": "y_suffix",
              "domain": [0, 100]
              "domain": [
                0,
                100
              ]
            }
          ],
          "colors": [
            { "hex": "#000004" },
            { "hex": "#110a30" },
            { "hex": "#320a5e" },
            { "hex": "#57106e" },
            { "hex": "#781c6d" },
            { "hex": "#9a2865" },
            { "hex": "#bc3754" },
            { "hex": "#d84c3e" },
            { "hex": "#ed6925" },
            { "hex": "#f98e09" },
            { "hex": "#fbb61a" },
            { "hex": "#f4df53" }
            {
              "hex": "#000004"
            },
            {
              "hex": "#110a30"
            },
            {
              "hex": "#320a5e"
            },
            {
              "hex": "#57106e"
            },
            {
              "hex": "#781c6d"
            },
            {
              "hex": "#9a2865"
            },
            {
              "hex": "#bc3754"
            },
            {
              "hex": "#d84c3e"
            },
            {
              "hex": "#ed6925"
            },
            {
              "hex": "#f98e09"
            },
            {
              "hex": "#fbb61a"
            },
            {
              "hex": "#f4df53"
            }
          ]
        }
      ]
    }
  }
]
]
@@ -14,6 +14,8 @@ spec:
      width: 6
      height: 3
      binSize: 10
      legendOpacity: 1.0
      legendOrientationThreshold: 5
      xCol: _time
      yCol: _value
      queries:
@@ -18,17 +18,25 @@
          "xCol": "_value",
          "position": "stacked",
          "binCount": 30,
          "fillColumns": ["a", "b"],
          "legendOpacity": 1.0,
          "legendOrientationThreshold": 5,
          "fillColumns": [
            "a",
            "b"
          ],
          "queries": [
            {
              "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == \"boltdb_reads_total\") |> filter(fn: (r) => r._field == \"counter\")"
            }
          ],
          "axes":[
          "axes": [
            {
              "name": "x",
              "label": "x_label",
              "domain": [0, 10]
              "domain": [
                0,
                10
              ]
            }
          ],
          "colors": [
@@ -55,4 +63,4 @@
    ]
  }
}
]
]
@ -14,6 +14,8 @@ spec:
height: 3
binCount: 30
fillColumns: ["a", "b"]
legendOpacity: 1.0
legendOrientationThreshold: 5
queries:
- query: >
from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_reads_total") |> filter(fn: (r) => r._field == "counter")
@ -18,6 +18,8 @@ spec:
width: 6
height: 3
ySeriesColumns: ["_value", "foo"]
legendOpacity: 1.0
legendOrientationThreshold: 5
queries:
- query: >
from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean")
@ -19,25 +19,33 @@
"height": 3,
"xCol": "_time",
"yCol": "_value",
"legendOpacity": 1.0,
"legendOrientationThreshold": 5,
"queries": [
{
"query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"mem\") |> filter(fn: (r) => r._field == \"used_percent\") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: \"mean\")"
}
],
"axes":[
"axes": [
{
"name": "x",
"label": "x_label",
"prefix": "x_prefix",
"suffix": "x_suffix",
"domain": [0, 10]
"domain": [
0,
10
]
},
{
"name": "y",
"label": "y_label",
"prefix": "y_prefix",
"suffix": "y_suffix",
"domain": [0, 100]
"domain": [
0,
100
]
}
],
"colors": [

@ -55,4 +63,4 @@
]
}
}
]
]
@ -17,6 +17,8 @@ spec:
yCol: _value
width: 6
height: 3
legendOpacity: 1.0
legendOrientationThreshold: 5
queries:
- query: >
from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean")
@ -25,6 +25,8 @@
"xColumn": "_time",
"yColumn": "_value",
"position": "overlaid",
"legendOpacity": 1.0,
"legendOrientationThreshold": 5,
"legend": {
"type": "leg_type",
"orientation": "horizontal"

@ -70,4 +72,4 @@
]
}
}
]
]
@ -19,6 +19,8 @@ spec:
shade: true
hoverDimension: "y"
position: overlaid
legendOpacity: 1.0
legendOrientationThreshold: 5
legend:
type: leg_type
orientation: horizontal
@ -24,7 +24,8 @@
"hoverDimension": "y",
"xColumn": "_time",
"yColumn": "_value",
"legend": {},
"legendOpacity": 1.0,
"legendOrientationThreshold": 5,
"queries": [
{
"query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == \"boltdb_writes_total\") |> filter(fn: (r) => r._field == \"counter\")"

@ -61,4 +62,4 @@
]
}
}
]
]
@ -17,7 +17,8 @@ spec:
hoverDimension: "y"
geom: line
position: stacked
legend:
legendOpacity: 1.0
legendOrientationThreshold: 5
queries:
- query: >
from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter")
@ -149,6 +149,15 @@ func (b ProxyQueryServiceAsyncBridge) Query(ctx context.Context, w io.Writer, re
if err != nil {
return stats, tracing.LogError(span, err)
}

if results, err := q.ProfilerResults(); err != nil {
return stats, tracing.LogError(span, err)
} else if results != nil {
_, err = encoder.Encode(w, results)
if err != nil {
return stats, tracing.LogError(span, err)
}
}
return stats, nil
}
@ -10,6 +10,7 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/csv"
"github.com/influxdata/flux/execute/executetest"
"github.com/influxdata/flux/metadata"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/mock"
)

@ -26,7 +27,7 @@ func (w failWriter) Write(p []byte) (int, error) {

func TestProxyQueryServiceAsyncBridge_StatsOnClientDisconnect(t *testing.T) {
q := mock.NewQuery()
q.Metadata = flux.Metadata{
q.Metadata = metadata.Metadata{
"foo": []interface{}{"bar"},
}
r := executetest.NewResult([]*executetest.Table{
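The same move applies anywhere query metadata is built by hand; a minimal sketch of the new package (Add is the helper the logging test later in this diff relies on, and "foo"/"bar" here are just illustrative values):

    md := make(metadata.Metadata) // was flux.Metadata
    md.Add("foo", "bar")          // Add appends "bar" to the values stored under "foo"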
@ -4,12 +4,12 @@
package builtin

import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/runtime"

_ "github.com/influxdata/flux/stdlib" // Import the stdlib
_ "github.com/influxdata/influxdb/v2/query/stdlib" // Import the stdlib
)

func init() {
flux.FinalizeBuiltIns()
runtime.FinalizeBuiltIns()
}
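With the lazy variant deleted below, finalization now happens in this package's init; callers only need a blank import to trigger it, as the controller tests later in this diff do. A minimal sketch:

    package main

    import (
        // Blank-importing the builtin package runs its init(), which now calls
        // runtime.FinalizeBuiltIns() before any Flux query is compiled.
        _ "github.com/influxdata/influxdb/v2/query/builtin"
    )

    func main() {
        // Flux builtins are finalized at this point; compile and run queries as usual.
    }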
@ -1,20 +0,0 @@
package builtinlazy

import (
"sync"

"github.com/influxdata/flux"
_ "github.com/influxdata/flux/stdlib" // Import the stdlib
_ "github.com/influxdata/influxdb/v2/query/stdlib" // Import the stdlib
)

var once sync.Once

// Initialize ensures all Flux builtins are configured and should be called
// prior to using the Flux runtime. Initialize is safe to call concurrently
// and is idempotent.
func Initialize() {
once.Do(func() {
flux.FinalizeBuiltIns()
})
}
@ -26,10 +26,13 @@ import (

"github.com/influxdata/flux"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/execute/table"
"github.com/influxdata/flux/lang"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/kit/errors"
"github.com/influxdata/influxdb/v2/kit/feature"
"github.com/influxdata/influxdb/v2/kit/prom"
"github.com/influxdata/influxdb/v2/kit/tracing"
influxlogger "github.com/influxdata/influxdb/v2/logger"

@ -206,6 +209,10 @@ func (c *Controller) Query(ctx context.Context, req *query.Request) (flux.Query,
for _, dep := range c.dependencies {
ctx = dep.Inject(ctx)
}
// Add per-transformation spans if the feature flag is set.
if feature.QueryTracing().Enabled(ctx) {
ctx = flux.WithExperimentalTracingEnabled(ctx)
}
q, err := c.query(ctx, req.Compiler)
if err != nil {
return q, err

@ -338,7 +345,7 @@ func (c *Controller) compileQuery(q *Query, compiler flux.Compiler) (err error)
}
}

prog, err := compiler.Compile(ctx)
prog, err := compiler.Compile(ctx, runtime.Default)
if err != nil {
return &flux.Error{
Msg: "compilation failed",

@ -547,6 +554,23 @@ type Query struct {
alloc *memory.Allocator
}

func (q *Query) ProfilerResults() (flux.ResultIterator, error) {
p := q.program.(*lang.AstProgram)
if len(p.Profilers) == 0 {
return nil, nil
}
tables := make([]flux.Table, 0)
for _, profiler := range p.Profilers {
if result, err := profiler.GetResult(q, q.alloc); err != nil {
return nil, err
} else {
tables = append(tables, result)
}
}
res := table.NewProfilerResult(tables...)
return flux.NewSliceResultIterator([]flux.Result{&res}), nil
}

// ID reports an ephemeral unique ID for the query.
func (q *Query) ID() QueryID {
return q.id
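For orientation, a minimal sketch of how a caller could pick up the profiler tables added above; since ctrl.Query returns the flux.Query interface rather than the concrete *control.Query, the interface assertion here is an assumption, not an API shown in this diff:

    q, err := ctrl.Query(ctx, req) // ctrl is a *control.Controller, req a *query.Request
    if err != nil {
        return err
    }
    for res := range q.Results() {
        _ = res // drain the main results first
    }
    q.Done()
    // ProfilerResults is defined on the concrete *control.Query; assert for it.
    if pq, ok := q.(interface {
        ProfilerResults() (flux.ResultIterator, error)
    }); ok {
        if results, err := pq.ProfilerResults(); err == nil && results != nil {
            // encode or inspect the profiler tables, as the async bridge above does
        }
    }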
@ -20,10 +20,14 @@ import (
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/plan/plantest"
"github.com/influxdata/flux/stdlib/universe"
"github.com/influxdata/influxdb/v2/kit/feature"
pmock "github.com/influxdata/influxdb/v2/mock"
"github.com/influxdata/influxdb/v2/query"
_ "github.com/influxdata/influxdb/v2/query/builtin"
"github.com/influxdata/influxdb/v2/query/control"
"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/mocktracer"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"go.uber.org/zap/zaptest"
@ -1289,6 +1293,102 @@ func TestController_ReserveMemoryWithoutExceedingMax(t *testing.T) {
validateUnusedMemory(t, reg, config)
}

func TestController_QueryTracing(t *testing.T) {
// temporarily install a mock tracer to see which spans are created.
oldTracer := opentracing.GlobalTracer()
defer opentracing.SetGlobalTracer(oldTracer)
mockTracer := mocktracer.New()
opentracing.SetGlobalTracer(mockTracer)

const memoryBytesQuotaPerQuery = 64
config := config
config.MemoryBytesQuotaPerQuery = memoryBytesQuotaPerQuery
ctrl, err := control.New(config)
if err != nil {
t.Fatal(err)
}
defer shutdown(t, ctrl)

flagger := pmock.NewFlagger(map[feature.Flag]interface{}{
feature.QueryTracing(): true,
})
plainCtx := context.Background()
withFlagger, err := feature.Annotate(plainCtx, flagger)
if err != nil {
t.Fatal(err)
}
tcs := []struct {
name string
ctx context.Context
doNotWantSpan string
wantSpan string
}{
{
name: "feature flag off",
ctx: plainCtx,
doNotWantSpan: "*executetest.AllocatingFromProcedureSpec",
},
{
name: "feature flag on",
ctx: withFlagger,
wantSpan: "*executetest.AllocatingFromProcedureSpec",
},
}
for _, tc := range tcs {
tc := tc
t.Run(tc.name, func(t *testing.T) {
mockTracer.Reset()

compiler := &mock.Compiler{
CompileFn: func(ctx context.Context) (flux.Program, error) {
// Return a program that will allocate one more byte than is allowed.
pts := plantest.PlanSpec{
Nodes: []plan.Node{
plan.CreatePhysicalNode("allocating-from-test", &executetest.AllocatingFromProcedureSpec{
ByteCount: 16,
}),
plan.CreatePhysicalNode("yield", &universe.YieldProcedureSpec{Name: "_result"}),
},
Edges: [][2]int{
{0, 1},
},
Resources: flux.ResourceManagement{
ConcurrencyQuota: 1,
},
}

ps := plantest.CreatePlanSpec(&pts)
prog := &lang.Program{
Logger: zaptest.NewLogger(t),
PlanSpec: ps,
}

return prog, nil
},
}

// Depending on how the feature flag is set in the context,
// we may or may not do query tracing here.
q, err := ctrl.Query(tc.ctx, makeRequest(compiler))
if err != nil {
t.Fatalf("unexpected error: %s", err)
}

consumeResults(t, q)
gotSpans := make(map[string]struct{})
for _, span := range mockTracer.FinishedSpans() {
gotSpans[span.OperationName] = struct{}{}
}
if _, found := gotSpans[tc.doNotWantSpan]; tc.doNotWantSpan != "" && found {
t.Fatalf("did not want to find span %q but it was there", tc.doNotWantSpan)
}
if _, found := gotSpans[tc.wantSpan]; tc.wantSpan != "" && !found {
t.Fatalf("wanted to find span %q but it was not there", tc.wantSpan)
}
})
}
}

func consumeResults(tb testing.TB, q flux.Query) {
tb.Helper()
for res := range q.Results() {
@ -55,7 +55,6 @@ func (e *NoContentEncoder) Encode(w io.Writer, results flux.ResultIterator) (int
for results.More() {
if err := results.Next().Tables().Do(func(tbl flux.Table) error {
return tbl.Do(func(cr flux.ColReader) error {
cr.Release()
return nil
})
}); err != nil {

@ -114,7 +113,6 @@ func (e *NoContentWithErrorEncoder) Encode(w io.Writer, results flux.ResultItera
for results.More() {
if err := results.Next().Tables().Do(func(tbl flux.Table) error {
return tbl.Do(func(cr flux.ColReader) error {
cr.Release()
return nil
})
}); err != nil {
@ -4,11 +4,11 @@ package fluxlang
import (
"context"

"github.com/influxdata/flux"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/complete"
"github.com/influxdata/flux/interpreter"
"github.com/influxdata/flux/parser"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/flux/values"
"github.com/influxdata/influxdb/v2"
)

@ -27,9 +27,9 @@ func (d defaultService) Parse(source string) (pkg *ast.Package, err error) {
}

func (d defaultService) EvalAST(ctx context.Context, astPkg *ast.Package) ([]interpreter.SideEffect, values.Scope, error) {
return flux.EvalAST(ctx, astPkg)
return runtime.EvalAST(ctx, astPkg)
}

func (d defaultService) Completer() complete.Completer {
return complete.NewCompleter(flux.Prelude())
return complete.NewCompleter(runtime.Prelude())
}
@ -2,6 +2,7 @@ package influxql

import (
"context"
"encoding/json"
"time"

"github.com/influxdata/flux"

@ -42,7 +43,7 @@ func NewCompiler(dbrpMappingSvc platform.DBRPMappingServiceV2) *Compiler {
}

// Compile transpiles the query into a Program.
func (c *Compiler) Compile(ctx context.Context) (flux.Program, error) {
func (c *Compiler) Compile(ctx context.Context, runtime flux.Runtime) (flux.Program, error) {
var now time.Time
if c.Now != nil {
now = *c.Now

@ -64,7 +65,15 @@ func (c *Compiler) Compile(ctx context.Context) (flux.Program, error) {
return nil, err
}
compileOptions := lang.WithLogPlanOpts(c.logicalPlannerOptions...)
return lang.CompileAST(astPkg, now, compileOptions), nil
bs, err := json.Marshal(astPkg)
if err != nil {
return nil, err
}
hdl, err := runtime.JSONToHandle(bs)
if err != nil {
return nil, err
}
return lang.CompileAST(hdl, runtime, now, compileOptions), nil
}

func (c *Compiler) CompilerType() flux.CompilerType {
@ -131,6 +131,8 @@ var skipTests = map[string]string{
"SelectorMath_29": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_30": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"SelectorMath_31": "Transpiler: unimplemented functions: top and bottom https://github.com/influxdata/influxdb/issues/10738",
"ands": "algo-w: https://github.com/influxdata/influxdb/issues/16811",
"ors": "algo-w: https://github.com/influxdata/influxdb/issues/16811",
}

var querier = fluxquerytest.NewQuerier()
@ -21,15 +21,45 @@ type LoggingProxyQueryService struct {
queryLogger Logger
nowFunction func() time.Time
log *zap.Logger
cond func(ctx context.Context) bool

// If this is set then logging happens only if this key is present in the
// metadata.
requireMetadataKey string
}

func NewLoggingProxyQueryService(log *zap.Logger, queryLogger Logger, proxyQueryService ProxyQueryService) *LoggingProxyQueryService {
return &LoggingProxyQueryService{
// LoggingProxyQueryServiceOption provides a way to modify the
// behavior of LoggingProxyQueryService.
type LoggingProxyQueryServiceOption func(lpqs *LoggingProxyQueryService)

// ConditionalLogging returns a LoggingProxyQueryServiceOption
// that only logs if the passed in function returns true.
// Thus logging can be controlled by a request-scoped attribute, e.g., a feature flag.
func ConditionalLogging(cond func(context.Context) bool) LoggingProxyQueryServiceOption {
return func(lpqs *LoggingProxyQueryService) {
lpqs.cond = cond
}
}

func RequireMetadataKey(metadataKey string) LoggingProxyQueryServiceOption {
return func(lpqs *LoggingProxyQueryService) {
lpqs.requireMetadataKey = metadataKey
}
}

func NewLoggingProxyQueryService(log *zap.Logger, queryLogger Logger, proxyQueryService ProxyQueryService, opts ...LoggingProxyQueryServiceOption) *LoggingProxyQueryService {
lpqs := &LoggingProxyQueryService{
proxyQueryService: proxyQueryService,
queryLogger: queryLogger,
nowFunction: time.Now,
log: log,
}

for _, o := range opts {
o(lpqs)
}

return lpqs
}

func (s *LoggingProxyQueryService) SetNowFunctionForTesting(nowFunction func() time.Time) {

@ -38,6 +68,12 @@ func (s *LoggingProxyQueryService) SetNowFunctionForTesting(nowFunction func() t

// Query executes and logs the query.
func (s *LoggingProxyQueryService) Query(ctx context.Context, w io.Writer, req *ProxyRequest) (stats flux.Statistics, err error) {
if s.cond != nil && !s.cond(ctx) {
// Logging is conditional, and we are not logging this request.
// Just invoke the wrapped service directly.
return s.proxyQueryService.Query(ctx, w, req)
}

span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()

@ -50,6 +86,14 @@ func (s *LoggingProxyQueryService) Query(ctx context.Context, w io.Writer, req *
entry.Write(zap.Error(err))
}
}

// Enforce requireMetadataKey, if set.
if s.requireMetadataKey != "" {
if _, ok := stats.Metadata[s.requireMetadataKey]; !ok {
return
}
}

traceID, sampled, _ := tracing.InfoFromContext(ctx)
log := Log{
OrganizationID: req.Request.OrganizationID,
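A minimal usage sketch combining the two functional options declared above; the context key and the predicate are illustrative assumptions, while "some-mock-metadata" matches the key the test below exercises:

    lpqs := query.NewLoggingProxyQueryService(
        log, queryLogger, proxyQueryService,
        // Log only when the request context carries a caller-defined marker.
        query.ConditionalLogging(func(ctx context.Context) bool {
            return ctx.Value(loggingKey) != nil // loggingKey is a hypothetical context key
        }),
        // Additionally require this key in the query statistics metadata.
        query.RequireMetadataKey("some-mock-metadata"),
    )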
@ -4,12 +4,14 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"testing"
"time"

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/flux"
"github.com/influxdata/flux/metadata"
platform "github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/query/mock"

@ -34,6 +36,10 @@ var opts = []cmp.Option{
cmpopts.IgnoreUnexported(query.Request{}),
}

type contextKey string

const loggingCtxKey contextKey = "do-logging"

func TestLoggingProxyQueryService(t *testing.T) {
// Set a Jaeger in-memory tracer to get span information in the query log.
oldTracer := opentracing.GlobalTracer()

@ -53,7 +59,9 @@ func TestLoggingProxyQueryService(t *testing.T) {
ExecuteDuration: time.Second,
Concurrency: 2,
MaxAllocated: 2048,
Metadata: make(metadata.Metadata),
}
wantStats.Metadata.Add("some-mock-metadata", 42)
wantBytes := 10
pqs := &mock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {

@ -69,13 +77,6 @@ func TestLoggingProxyQueryService(t *testing.T) {
},
}

wantTime := time.Now()
lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs)
lpqs.SetNowFunctionForTesting(func() time.Time {
return wantTime
})

var buf bytes.Buffer
req := &query.ProxyRequest{
Request: query.Request{
Authorization: nil,

@ -84,25 +85,98 @@ func TestLoggingProxyQueryService(t *testing.T) {
},
Dialect: nil,
}
stats, err := lpqs.Query(context.Background(), &buf, req)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(wantStats, stats, opts...) {
t.Errorf("unexpected query stats: -want/+got\n%s", cmp.Diff(wantStats, stats, opts...))
}
traceID := reporter.GetSpans()[0].Context().(jaeger.SpanContext).TraceID().String()
wantLogs := []query.Log{{
Time: wantTime,
OrganizationID: orgID,
TraceID: traceID,
Sampled: true,
Error: nil,
ProxyRequest: req,
ResponseSize: int64(wantBytes),
Statistics: wantStats,
}}
if !cmp.Equal(wantLogs, logs, opts...) {
t.Errorf("unexpected query logs: -want/+got\n%s", cmp.Diff(wantLogs, logs, opts...))
}

t.Run("log", func(t *testing.T) {
defer func() {
logs = nil
}()
wantTime := time.Now()
lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs)
lpqs.SetNowFunctionForTesting(func() time.Time {
return wantTime
})

var buf bytes.Buffer
stats, err := lpqs.Query(context.Background(), &buf, req)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(wantStats, stats, opts...) {
t.Errorf("unexpected query stats: -want/+got\n%s", cmp.Diff(wantStats, stats, opts...))
}
traceID := reporter.GetSpans()[0].Context().(jaeger.SpanContext).TraceID().String()
wantLogs := []query.Log{{
Time: wantTime,
OrganizationID: orgID,
TraceID: traceID,
Sampled: true,
Error: nil,
ProxyRequest: req,
ResponseSize: int64(wantBytes),
Statistics: wantStats,
}}
if !cmp.Equal(wantLogs, logs, opts...) {
t.Errorf("unexpected query logs: -want/+got\n%s", cmp.Diff(wantLogs, logs, opts...))
}
})

t.Run("conditional logging", func(t *testing.T) {
defer func() {
logs = nil
}()

condLog := query.ConditionalLogging(func(ctx context.Context) bool {
return ctx.Value(loggingCtxKey) != nil
})

lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, condLog)
_, err := lpqs.Query(context.Background(), ioutil.Discard, req)
if err != nil {
t.Fatal(err)
}

if len(logs) != 0 {
t.Fatal("expected query service not to log")
}

ctx := context.WithValue(context.Background(), loggingCtxKey, true)
_, err = lpqs.Query(ctx, ioutil.Discard, req)
if err != nil {
t.Fatal(err)
}

if len(logs) != 1 {
t.Fatal("expected query service to log")
}
})

t.Run("require metadata key", func(t *testing.T) {
defer func() {
logs = nil
}()

reqMeta1 := query.RequireMetadataKey("this-metadata-wont-be-found")
lpqs1 := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, reqMeta1)

_, err := lpqs1.Query(context.Background(), ioutil.Discard, req)
if err != nil {
t.Fatal(err)
}

if len(logs) != 0 {
t.Fatal("expected query service not to log")
}

reqMeta2 := query.RequireMetadataKey("some-mock-metadata")
lpqs2 := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, reqMeta2)

_, err = lpqs2.Query(context.Background(), ioutil.Discard, req)
if err != nil {
t.Fatal(err)
}

if len(logs) != 1 {
t.Fatal("expected query service to log")
}
})
}
@ -6,6 +6,7 @@ import (
"sync"

"github.com/influxdata/flux"
"github.com/influxdata/flux/metadata"
"github.com/influxdata/influxdb/v2/kit/check"
"github.com/influxdata/influxdb/v2/query"
)

@ -52,7 +53,7 @@ func (s *AsyncQueryService) Query(ctx context.Context, req *query.Request) (flux
// It contains controls to ensure that the flux.Query object is used correctly.
// Note: Query will only return one result, specified by calling the SetResults method.
type Query struct {
Metadata flux.Metadata
Metadata metadata.Metadata

results chan flux.Result
once sync.Once

@ -66,7 +67,7 @@ var _ flux.Query = (*Query)(nil)
// NewQuery constructs a new asynchronous query.
func NewQuery() *Query {
return &Query{
Metadata: make(flux.Metadata),
Metadata: make(metadata.Metadata),
results: make(chan flux.Result, 1),
}
}
@ -19,7 +19,7 @@ import (
"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/query"
itsdb "github.com/influxdata/influxdb/v2/v1/tsdb"
itsdb "github.com/influxdata/influxdb/v2/tsdb"
ipromql "github.com/influxdata/promql/v2"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
@ -1,43 +1,28 @@
module github.com/influxdata/promqltests

go 1.13
go 1.12

require (
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
github.com/aws/aws-sdk-go v1.29.18 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/fatih/color v1.9.0 // indirect
github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a // indirect
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect
github.com/go-kit/kit v0.10.0 // indirect
github.com/gogo/protobuf v1.3.1 // indirect
github.com/google/go-cmp v0.4.0
github.com/google/uuid v1.1.1 // indirect
github.com/google/go-cmp v0.5.0
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/influxdata/flux v0.66.1
github.com/influxdata/flux v0.83.1
github.com/influxdata/influxdb/v2 v2.0.0-00010101000000-000000000000
github.com/influxdata/influxql v1.0.1 // indirect
github.com/influxdata/promql/v2 v2.12.0
github.com/kr/pretty v0.2.0 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/onsi/ginkgo v1.10.1 // indirect
github.com/onsi/gomega v1.7.0 // indirect
github.com/prometheus/client_golang v1.5.1 // indirect
github.com/prometheus/common v0.9.1
github.com/prometheus/prometheus v2.5.0+incompatible
github.com/prometheus/tsdb v0.10.0
github.com/spf13/afero v1.2.2 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/willf/bitset v1.1.10 // indirect
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 // indirect
golang.org/x/tools v0.0.0-20200305205014-bc073721adb6 // indirect
google.golang.org/api v0.20.0 // indirect
google.golang.org/genproto v0.0.0-20200305110556-506484158171 // indirect
google.golang.org/grpc v1.27.1 // indirect
gopkg.in/yaml.v2 v2.2.8 // indirect
)

replace github.com/influxdata/influxdb/v2 => ../../../../
@ -1,8 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w=
cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=

@ -113,6 +111,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5 h1:kS0dw4K730x7cxT+bVyTyYJZHuSoH7ofSr/Ijit56Qw=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5/go.mod h1:CDReaxg1cmLrtcasZy43l4EYPAknXLiQSrb7tLw5zXM=
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE=

@ -218,6 +218,7 @@ github.com/go-chi/chi v4.1.0+incompatible h1:ETj3cggsVIY2Xao5ExCu6YhEh5MD6JTfcBz
github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=

@ -235,6 +236,8 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=

@ -389,8 +392,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6 h1:OtjKkeWDjUbyMi82C7XXy7Tvm2LXMwiBBXyFIGNPaGA=
github.com/influxdata/cron v0.0.0-20191203200038-ded12750aac6/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og=
github.com/influxdata/flux v0.82.2 h1:VtoF8pbyoS+3QLQQmihSmV0Ly6g/A73x+3VBUp9t15g=
github.com/influxdata/flux v0.82.2/go.mod h1:sAAIEgQTlTpsXCUQ49ymoRsKqraPzIb7F3paT72/lE0=
github.com/influxdata/flux v0.83.1 h1:KdJ19S2bj0jZvhICdS8d54BHYCJNuq9h3A/HkIKOD6o=
github.com/influxdata/flux v0.83.1/go.mod h1:+6FzHdZdwYjEIa2iuQEJ92x+C2A8X1jI0qdpVT0DJfM=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=

@ -729,6 +732,8 @@ github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS
github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.0.0 h1:J0TkWtiuYgtdlrkkrDLISYBQ92M+X5m4LrIIMKrbDTs=
github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=

@ -736,6 +741,7 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3Ifn
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=

@ -792,8 +798,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=

@ -806,6 +812,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

@ -826,8 +834,11 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@ -854,8 +865,8 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=

@ -872,6 +883,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@ -899,6 +912,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@ -909,8 +923,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@ -962,8 +976,8 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200305205014-bc073721adb6 h1:V/kH9fbTtfqZLJU7djyPh+n4yWxBZVU6H5npu6UeY54=
golang.org/x/tools v0.0.0-20200305205014-bc073721adb6/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a h1:kVMPw4f6EVqYdfGQTedjrpw1dbE2PEMfw4jwXsNdn9s=
golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@ -1000,8 +1014,6 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU=
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@ -364,8 +364,10 @@ func TestBuild(t *testing.T) {
want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{Bucket: "prometheus"},
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{
Bucket: influxdb.NameOrID{Name: "prometheus"},
},
},
{
ID: "where",

@ -373,51 +375,55 @@ func TestBuild(t *testing.T) {
Fn: interpreter.ResolvedFunction{
Scope: nil,
Fn: &semantic.FunctionExpression{
Block: &semantic.FunctionBlock{
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Body: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Block: &semantic.Block{
Body: []semantic.Statement{
&semantic.ReturnStatement{
Argument: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "mode",
},
Right: &semantic.StringLiteral{
Value: "user",
},
},
Property: "mode",
},
Right: &semantic.StringLiteral{
Value: "user",
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "cpu",
},
Right: &semantic.StringLiteral{
Value: "cpu2",
},
},
},
},
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "cpu",
},
Right: &semantic.StringLiteral{
Value: "cpu2",
},
},
},
},
},

@ -446,8 +452,10 @@ func TestBuild(t *testing.T) {
want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{Bucket: "prometheus"},
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{
Bucket: influxdb.NameOrID{Name: "prometheus"},
},
},
{
ID: flux.OperationID("range"),

@ -461,34 +469,38 @@ func TestBuild(t *testing.T) {
Fn: interpreter.ResolvedFunction{
Scope: nil,
Fn: &semantic.FunctionExpression{
Block: &semantic.FunctionBlock{
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Body: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Block: &semantic.Block{
Body: []semantic.Statement{
&semantic.ReturnStatement{
Argument: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "mode",
},
Right: &semantic.StringLiteral{
Value: "user",
},
},
Property: "mode",
},
Right: &semantic.StringLiteral{
Value: "user",
},
},
},

@ -517,8 +529,10 @@ func TestBuild(t *testing.T) {
want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{Bucket: "prometheus"},
ID: flux.OperationID("from"),
Spec: &influxdb.FromOpSpec{
Bucket: influxdb.NameOrID{Name: "prometheus"},
},
},
{
ID: flux.OperationID("range"),

@ -532,34 +546,38 @@ func TestBuild(t *testing.T) {
Fn: interpreter.ResolvedFunction{
Scope: nil,
Fn: &semantic.FunctionExpression{
Block: &semantic.FunctionBlock{
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Body: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Block: &semantic.Block{
Body: []semantic.Statement{
&semantic.ReturnStatement{
Argument: &semantic.LogicalExpression{
Operator: ast.AndOperator,
Left: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Property: "_metric",
},
Right: &semantic.StringLiteral{
Value: "node_cpu",
},
},
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
Right: &semantic.BinaryExpression{
Operator: ast.EqualOperator,
Left: &semantic.MemberExpression{
Object: &semantic.IdentifierExpression{
Name: "r",
},
Property: "_measurement",
},
Right: &semantic.StringLiteral{
Value: "m0",
},
},
Property: "_measurement",
},
Right: &semantic.StringLiteral{
Value: "m0",
},
},
},

@ -148,7 +148,7 @@ func (s *Selector) QuerySpec() (*flux.Spec, error) {
{
ID: "from", // TODO: Change this to a UUID
Spec: &influxdb.FromOpSpec{
Bucket: "prometheus",
Bucket: influxdb.NameOrID{Name: "prometheus"},
},
},
}

@ -260,11 +260,15 @@ func NewWhereOperation(metricName string, labels []*LabelMatcher) (*flux.Operati
Fn: interpreter.ResolvedFunction{
Scope: nil,
Fn: &semantic.FunctionExpression{
Block: &semantic.FunctionBlock{
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
Parameters: &semantic.FunctionParameters{
List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
},
Block: &semantic.Block{
Body: []semantic.Statement{
&semantic.ReturnStatement{
Argument: node,
},
},
Body: node,
},
},
},
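The literals above all follow the same migration, so as a compact reference, a single-expression lambda (r) => expr now builds as shown below; expr is a placeholder standing in for any semantic expression, such as the node value used in NewWhereOperation:

    var expr semantic.Expression // stands in for any expression, e.g. the node above
    fn := &semantic.FunctionExpression{
        Parameters: &semantic.FunctionParameters{
            List: []*semantic.FunctionParameter{{Key: &semantic.Identifier{Name: "r"}}},
        },
        // The old FunctionBlock/Body pair becomes a Block whose body is a
        // single ReturnStatement wrapping the expression.
        Block: &semantic.Block{
            Body: []semantic.Statement{
                &semantic.ReturnStatement{Argument: expr},
            },
        },
    }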
@ -1,62 +0,0 @@
package querytest

import (
"fmt"
"testing"

"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux"
platform "github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/query"
)

type BucketsAccessedTestCase struct {
Name string
Raw string
WantErr bool
WantReadBuckets *[]platform.BucketFilter
WantWriteBuckets *[]platform.BucketFilter
}

func BucketsAccessedTestHelper(t *testing.T, tc BucketsAccessedTestCase) {
t.Helper()

ast, err := flux.Parse(tc.Raw)
if err != nil {
t.Fatalf("could not parse flux: %v", err)
}

var gotReadBuckets, gotWriteBuckets []platform.BucketFilter
if tc.WantReadBuckets != nil || tc.WantWriteBuckets != nil {
gotReadBuckets, gotWriteBuckets, err = query.BucketsAccessed(ast, nil)
if err != nil {
t.Fatal(err)
}
}

if tc.WantReadBuckets != nil {
if diagnostic := verifyBuckets(*tc.WantReadBuckets, gotReadBuckets); diagnostic != "" {
t.Errorf("Could not verify read buckets: %v", diagnostic)
}
}

if tc.WantWriteBuckets != nil {
if diagnostic := verifyBuckets(*tc.WantWriteBuckets, gotWriteBuckets); diagnostic != "" {
t.Errorf("Could not verify write buckets: %v", diagnostic)
}
}
}

func verifyBuckets(wantBuckets, gotBuckets []platform.BucketFilter) string {
if len(wantBuckets) != len(gotBuckets) {
return fmt.Sprintf("Expected %v buckets but got %v", len(wantBuckets), len(gotBuckets))
}

for i, wantBucket := range wantBuckets {
if diagnostic := cmp.Diff(wantBucket, gotBuckets[i]); diagnostic != "" {
return fmt.Sprintf("Bucket mismatch: -want/+got:\n%v", diagnostic)
}
}

return ""
}
@ -1,10 +1,12 @@
package querytest

import (
"context"

"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/stdlib/influxdata/influxdb"
v1 "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1"
"github.com/influxdata/influxdb/v2/query/influxql"
"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
)

// MakeFromInfluxJSONCompiler returns a compiler that replaces all From operations with FromJSON.

@ -24,7 +26,7 @@ func (ReplaceFromRule) Pattern() plan.Pattern {
return plan.Pat(influxdb.FromKind)
}

func (r ReplaceFromRule) Rewrite(n plan.Node) (plan.Node, bool, error) {
func (r ReplaceFromRule) Rewrite(ctx context.Context, n plan.Node) (plan.Node, bool, error) {
if err := n.ReplaceSpec(&v1.FromInfluxJSONProcedureSpec{
File: r.Filename,
}); err != nil {
@ -21,7 +21,7 @@ type compilerA struct {
A string `json:"a"`
}

func (c compilerA) Compile(ctx context.Context) (flux.Program, error) {
func (c compilerA) Compile(ctx context.Context, runtime flux.Runtime) (flux.Program, error) {
panic("not implemented")
}
@ -1,49 +0,0 @@
package query

import (
"context"

"github.com/influxdata/flux"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/lang"
platform "github.com/influxdata/influxdb/v2"
)

// BucketAwareOperationSpec specifies an operation that reads or writes buckets
type BucketAwareOperationSpec interface {
BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter)
}

type constantSecretService struct{}

func (s constantSecretService) LoadSecret(ctx context.Context, k string) (string, error) {
return "", nil
}

func newDeps() flux.Dependencies {
deps := flux.NewDefaultDependencies()
deps.Deps.HTTPClient = nil
deps.Deps.URLValidator = nil
deps.Deps.SecretService = constantSecretService{}
return deps
}

// BucketsAccessed returns the set of buckets read and written by a query spec
func BucketsAccessed(ast *ast.Package, orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter, err error) {
ctx := newDeps().Inject(context.Background())
err = lang.WalkIR(ctx, ast, func(o *flux.Operation) error {
bucketAwareOpSpec, ok := o.Spec.(BucketAwareOperationSpec)
if ok {
opBucketsRead, opBucketsWritten := bucketAwareOpSpec.BucketsAccessed(orgID)
readBuckets = append(readBuckets, opBucketsRead...)
writeBuckets = append(writeBuckets, opBucketsWritten...)
}
return nil
})

if err != nil {
return nil, nil, err
}

return readBuckets, writeBuckets, nil
}
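// A sketch (hypothetical, not part of this diff) of how an operation spec
// satisfied the BucketAwareOperationSpec interface deleted above: each spec
// reports the buckets it touches so BucketsAccessed can collect them.
package query

import platform "github.com/influxdata/influxdb/v2"

type exampleFromSpec struct {
    Bucket string
}

// BucketsAccessed reports one read bucket, filtered by name and, when
// known, by organization ID. Write-only specs would fill writeBuckets
// instead.
func (s exampleFromSpec) BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter) {
    bf := platform.BucketFilter{Name: &s.Bucket}
    if orgID != nil {
        bf.OrganizationID = orgID
    }
    return []platform.BucketFilter{bf}, nil
}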
@ -9,6 +9,7 @@ import (
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/stdlib/experimental"
platform "github.com/influxdata/influxdb/v2"

@ -32,19 +33,8 @@ type ToOpSpec struct {
}

func init() {
toSignature := flux.FunctionSignature(
map[string]semantic.PolyType{
"bucket": semantic.String,
"bucketID": semantic.String,
"org": semantic.String,
"orgID": semantic.String,
"host": semantic.String,
"token": semantic.String,
},
[]string{},
)

flux.ReplacePackageValue("experimental", "to", flux.FunctionValueWithSideEffect("to", createToOpSpec, toSignature))
toSignature := runtime.MustLookupBuiltinType("experimental", "to")
runtime.ReplacePackageValue("experimental", "to", flux.MustValue(flux.FunctionValueWithSideEffect("to", createToOpSpec, toSignature)))
flux.RegisterOpSpec(ExperimentalToKind, func() flux.OperationSpec { return &ToOpSpec{} })
plan.RegisterProcedureSpecWithSideEffect(ExperimentalToKind, newToProcedure, ExperimentalToKind)
execute.RegisterTransformation(ExperimentalToKind, createToTransformation)

@ -185,9 +175,7 @@ func createToTransformation(id execute.DatasetID, mode execute.AccumulationMode,
// ToTransformation is the transformation for the `to` flux function.
type ToTransformation struct {
ctx context.Context
bucket string
bucketID platform.ID
org string
orgID platform.ID
d execute.Dataset
cache execute.TableBuilderCache

@ -206,7 +194,6 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T
var err error

var orgID platform.ID
var org string
// Get organization name and ID
if spec.Spec.Org != "" {
oID, ok := deps.OrganizationLookup.Lookup(ctx, spec.Spec.Org)

@ -214,7 +201,6 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T
return nil, fmt.Errorf("failed to look up organization %q", spec.Spec.Org)
}
orgID = oID
org = spec.Spec.Org
} else if spec.Spec.OrgID != "" {
if oid, err := platform.IDFromString(spec.Spec.OrgID); err != nil {
return nil, err

@ -229,15 +215,8 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T
}
orgID = req.OrganizationID
}
if org == "" {
org = deps.OrganizationLookup.LookupName(ctx, orgID)
if org == "" {
return nil, fmt.Errorf("failed to look up organization name for ID %q", orgID.String())
}
}

var bucketID *platform.ID
var bucket string
// Get bucket name and ID
// User will have specified exactly one in the ToOpSpec.
if spec.Spec.Bucket != "" {

@ -246,21 +225,14 @@ func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.T
return nil, fmt.Errorf("failed to look up bucket %q in org %q", spec.Spec.Bucket, spec.Spec.Org)
}
bucketID = &bID
bucket = spec.Spec.Bucket
} else {
if bucketID, err = platform.IDFromString(spec.Spec.BucketID); err != nil {
return nil, err
}
bucket = deps.BucketLookup.LookupName(ctx, orgID, *bucketID)
if bucket == "" {
return nil, fmt.Errorf("failed to look up bucket with ID %q in org %q", bucketID, org)
}
}
return &ToTransformation{
ctx: ctx,
bucket: bucket,
bucketID: *bucketID,
org: org,
orgID: orgID,
d: d,
cache: cache,

@ -313,6 +285,8 @@ type TablePointsMetadata struct {
MeasurementName string
// The tags in the table (final element is left as nil, to be replaced by field name)
Tags [][]byte
// The offset in tags where to store the field name
FieldKeyTagValueOffset int
// The column offset in the input table where the _time column is stored
TimestampOffset int
// The labels and offsets of all the fields in the table

@ -428,15 +402,15 @@ func (t *ToTransformation) writeTable(ctx context.Context, tbl flux.Table) error
}

switch fieldVal.Type() {
case semantic.Float:
case semantic.BasicFloat:
fields[lao.Label] = fieldVal.Float()
case semantic.Int:
case semantic.BasicInt:
fields[lao.Label] = fieldVal.Int()
case semantic.UInt:
case semantic.BasicUint:
fields[lao.Label] = fieldVal.UInt()
case semantic.String:
case semantic.BasicString:
fields[lao.Label] = fieldVal.Str()
case semantic.Bool:
case semantic.BasicBool:
fields[lao.Label] = fieldVal.Bool()
default:
return fmt.Errorf("unsupported field type %v", fieldVal.Type())
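// A compact sketch of the new type dispatch above (assuming values.Value and
// the semantic.Basic* monotypes behave as in the switch shown; illustrative,
// not part of this diff):
package experimental

import (
    "fmt"

    "github.com/influxdata/flux/semantic"
    "github.com/influxdata/flux/values"
)

// fieldValue converts a flux value to the Go value written as a point field.
func fieldValue(v values.Value) (interface{}, error) {
    switch v.Type() {
    case semantic.BasicFloat:
        return v.Float(), nil
    case semantic.BasicInt:
        return v.Int(), nil
    case semantic.BasicUint:
        return v.UInt(), nil
    case semantic.BasicString:
        return v.Str(), nil
    case semantic.BasicBool:
        return v.Bool(), nil
    default:
        return nil, fmt.Errorf("unsupported field type %v", v.Type())
    }
}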
@ -3,7 +3,6 @@ package experimental_test
import (
"context"
"errors"
"fmt"
"testing"
"time"

@ -17,7 +16,6 @@ import (
"github.com/influxdata/influxdb/v2/mock"
"github.com/influxdata/influxdb/v2/models"
_ "github.com/influxdata/influxdb/v2/query/builtin"
pquerytest "github.com/influxdata/influxdb/v2/query/querytest"
"github.com/influxdata/influxdb/v2/query/stdlib/experimental"
"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
)

@ -35,9 +33,9 @@ from(bucket:"mydb")
Want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: "influxDBFrom0",
ID: "from0",
Spec: &influxdb.FromOpSpec{
Bucket: "mydb",
Bucket: influxdb.NameOrID{Name: "mydb"},
},
},
{

@ -68,7 +66,7 @@ from(bucket:"mydb")
},
},
Edges: []flux.Edge{
{Parent: "influxDBFrom0", Child: "range1"},
{Parent: "from0", Child: "range1"},
{Parent: "range1", Child: "pivot2"},
{Parent: "pivot2", Child: "experimental-to3"},
},

@ -84,53 +82,6 @@ from(bucket:"mydb")
}
}

func TestToOpSpec_BucketsAccessed(t *testing.T) {
bucketName := "my_bucket"
bucketIDString := "ddddccccbbbbaaaa"
bucketID, err := platform.IDFromString(bucketIDString)
if err != nil {
t.Fatal(err)
}
orgName := "my_org"
orgIDString := "aaaabbbbccccdddd"
orgID, err := platform.IDFromString(orgIDString)
if err != nil {
t.Fatal(err)
}
tests := []pquerytest.BucketsAccessedTestCase{
{
Name: "from() with bucket and to with org and bucket",
Raw: fmt.Sprintf(`import "experimental"
from(bucket:"%s")
|> experimental.to(bucket:"%s", org:"%s")`, bucketName, bucketName, orgName),
WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}},
WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, Org: &orgName}},
},
{
Name: "from() with bucket and to with orgID and bucket",
Raw: fmt.Sprintf(`import "experimental"
from(bucket:"%s") |> experimental.to(bucket:"%s", orgID:"%s")`, bucketName, bucketName, orgIDString),
WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}},
WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, OrganizationID: orgID}},
},
{
Name: "from() with bucket and to with orgID and bucketID",
Raw: fmt.Sprintf(`import "experimental"
from(bucket:"%s") |> experimental.to(bucketID:"%s", orgID:"%s")`, bucketName, bucketIDString, orgIDString),
WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}},
WantWriteBuckets: &[]platform.BucketFilter{{ID: bucketID, OrganizationID: orgID}},
},
}

for _, tc := range tests {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
pquerytest.BucketsAccessedTestHelper(t, tc)
})
}
}

func TestTo_Process(t *testing.T) {
oid, _ := mock.OrganizationLookup{}.Lookup(context.Background(), "my-org")
bid, _ := mock.BucketLookup{}.Lookup(context.Background(), oid, "my-bucket")
@ -15,8 +15,23 @@ import (
"github.com/influxdata/influxdb/v2/query"
)

const BucketsKind = "influxdata/influxdb.localBuckets"

func init() {
execute.RegisterSource(influxdb.BucketsKind, createBucketsSource)
execute.RegisterSource(BucketsKind, createBucketsSource)
plan.RegisterPhysicalRules(LocalBucketsRule{})
}

type LocalBucketsProcedureSpec struct {
plan.DefaultCost
}

func (s *LocalBucketsProcedureSpec) Kind() plan.ProcedureKind {
return BucketsKind
}

func (s *LocalBucketsProcedureSpec) Copy() plan.ProcedureSpec {
return new(LocalBucketsProcedureSpec)
}

type BucketsDecoder struct {

@ -99,7 +114,7 @@ func (bd *BucketsDecoder) Close() error {
}

func createBucketsSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
_, ok := prSpec.(*influxdb.BucketsProcedureSpec)
_, ok := prSpec.(*LocalBucketsProcedureSpec)
if !ok {
return nil, &flux.Error{
Code: codes.Internal,

@ -128,3 +143,27 @@ type AllBucketLookup interface {
FindAllBuckets(ctx context.Context, orgID platform.ID) ([]*platform.Bucket, int)
}
type BucketDependencies AllBucketLookup

type LocalBucketsRule struct{}

func (rule LocalBucketsRule) Name() string {
return "influxdata/influxdb.LocalBucketsRule"
}

func (rule LocalBucketsRule) Pattern() plan.Pattern {
return plan.Pat(influxdb.BucketsKind)
}

func (rule LocalBucketsRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
fromSpec := node.ProcedureSpec().(*influxdb.BucketsProcedureSpec)
if fromSpec.Host != nil {
return node, false, nil
} else if fromSpec.Org != nil {
return node, false, &flux.Error{
Code: codes.Unimplemented,
Msg: "buckets cannot list from a separate organization; please specify a host or remove the organization",
}
}

return plan.CreateLogicalNode("localBuckets", &LocalBucketsProcedureSpec{}), true, nil
}
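// A condensed restatement of the dispatch in LocalBucketsRule.Rewrite above,
// pulled into a standalone helper to make the three outcomes explicit
// (sketch only, not part of this diff):
package influxdb

import (
    "github.com/influxdata/flux"
    "github.com/influxdata/flux/codes"
    "github.com/influxdata/flux/plan"
    "github.com/influxdata/flux/stdlib/influxdata/influxdb"
)

func rewriteBuckets(node plan.Node, spec *influxdb.BucketsProcedureSpec) (plan.Node, bool, error) {
    switch {
    case spec.Host != nil:
        // A remote host was requested: leave the generic flux source alone.
        return node, false, nil
    case spec.Org != nil:
        // Listing another organization's buckets is not supported locally.
        return node, false, &flux.Error{Code: codes.Unimplemented, Msg: "buckets cannot list from a separate organization"}
    default:
        // Plain buckets(): serve it from local metadata.
        return plan.CreateLogicalNode("localBuckets", &LocalBucketsProcedureSpec{}), true, nil
    }
}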
@ -26,6 +26,9 @@ func (d StorageDependencies) Inject(ctx context.Context) context.Context {
}

func GetStorageDependencies(ctx context.Context) StorageDependencies {
if ctx.Value(dependenciesKey) == nil {
return StorageDependencies{}
}
return ctx.Value(dependenciesKey).(StorageDependencies)
}

@ -65,7 +68,7 @@ func (d Dependencies) PrometheusCollectors() []prometheus.Collector {
}

func NewDependencies(
reader Reader,
reader query.StorageReader,
writer storage.PointsWriter,
bucketSvc influxdb.BucketService,
orgSvc influxdb.OrganizationService,
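// GetStorageDependencies above follows the usual context-keyed dependency
// injection idiom. A self-contained sketch of the pattern with hypothetical
// names (not part of this diff):
package main

import "context"

type depsKeyType int

const depsKey depsKeyType = 0

type Deps struct{ Endpoint string }

// Inject stores the dependencies on the context.
func Inject(ctx context.Context, d Deps) context.Context {
    return context.WithValue(ctx, depsKey, d)
}

// Get returns the zero value when nothing was injected, mirroring the
// nil check added to GetStorageDependencies.
func Get(ctx context.Context) Deps {
    if v := ctx.Value(depsKey); v != nil {
        return v.(Deps)
    }
    return Deps{}
}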
@ -6,131 +6,32 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/stdlib/influxdata/influxdb"
platform "github.com/influxdata/influxdb/v2"
)

const FromKind = "influxDBFrom"

type FromOpSpec struct {
Bucket string `json:"bucket,omitempty"`
BucketID string `json:"bucketID,omitempty"`
type (
NameOrID = influxdb.NameOrID
FromOpSpec = influxdb.FromOpSpec
)

type FromStorageProcedureSpec struct {
Bucket influxdb.NameOrID
}

func init() {
fromSignature := semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{
"bucket": semantic.String,
"bucketID": semantic.String,
},
Required: nil,
Return: flux.TableObjectType,
}

flux.ReplacePackageValue("influxdata/influxdb", influxdb.FromKind, flux.FunctionValue(FromKind, createFromOpSpec, fromSignature))
flux.RegisterOpSpec(FromKind, newFromOp)
plan.RegisterProcedureSpec(FromKind, newFromProcedure, FromKind)
}

func createFromOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) {
spec := new(FromOpSpec)

if bucket, ok, err := args.GetString("bucket"); err != nil {
return nil, err
} else if ok {
spec.Bucket = bucket
}

if bucketID, ok, err := args.GetString("bucketID"); err != nil {
return nil, err
} else if ok {
spec.BucketID = bucketID
}

if spec.Bucket == "" && spec.BucketID == "" {
return nil, &flux.Error{
Code: codes.Invalid,
Msg: "must specify one of bucket or bucketID",
}
}
if spec.Bucket != "" && spec.BucketID != "" {
return nil, &flux.Error{
Code: codes.Invalid,
Msg: "must specify only one of bucket or bucketID",
}
}
return spec, nil
}

func newFromOp() flux.OperationSpec {
return new(FromOpSpec)
}

func (s *FromOpSpec) Kind() flux.OperationKind {
func (s *FromStorageProcedureSpec) Kind() plan.ProcedureKind {
return FromKind
}

// BucketsAccessed makes FromOpSpec a query.BucketAwareOperationSpec
func (s *FromOpSpec) BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter) {
bf := platform.BucketFilter{}
if s.Bucket != "" {
bf.Name = &s.Bucket
}
if orgID != nil {
bf.OrganizationID = orgID
}

if len(s.BucketID) > 0 {
if id, err := platform.IDFromString(s.BucketID); err != nil {
invalidID := platform.InvalidID()
bf.ID = &invalidID
} else {
bf.ID = id
}
}

if bf.ID != nil || bf.Name != nil {
readBuckets = append(readBuckets, bf)
}
return readBuckets, writeBuckets
}

type FromProcedureSpec struct {
Bucket string
BucketID string
}

func newFromProcedure(qs flux.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) {
spec, ok := qs.(*FromOpSpec)
if !ok {
return nil, &flux.Error{
Code: codes.Internal,
Msg: fmt.Sprintf("invalid spec type %T", qs),
}
}

return &FromProcedureSpec{
Bucket: spec.Bucket,
BucketID: spec.BucketID,
}, nil
}

func (s *FromProcedureSpec) Kind() plan.ProcedureKind {
return FromKind
}

func (s *FromProcedureSpec) Copy() plan.ProcedureSpec {
ns := new(FromProcedureSpec)

func (s *FromStorageProcedureSpec) Copy() plan.ProcedureSpec {
ns := new(FromStorageProcedureSpec)
ns.Bucket = s.Bucket
ns.BucketID = s.BucketID

return ns
}

func (s *FromProcedureSpec) PostPhysicalValidate(id plan.NodeID) error {
// FromProcedureSpec is a logical operation representing any read
func (s *FromStorageProcedureSpec) PostPhysicalValidate(id plan.NodeID) error {
// FromStorageProcedureSpec is a logical operation representing any read
// from storage. However as a logical operation, it doesn't specify
// how data is to be read from storage. It is the query planner's
// job to determine the optimal read strategy and to convert this

@ -142,10 +43,10 @@ func (s *FromProcedureSpec) PostPhysicalValidate(id plan.NodeID) error {
// not support unbounded reads, and so this query must not be
// validated.
var bucket string
if len(s.Bucket) > 0 {
bucket = s.Bucket
if s.Bucket.Name != "" {
bucket = s.Bucket.Name
} else {
bucket = s.BucketID
bucket = s.Bucket.ID
}
return &flux.Error{
Code: codes.Invalid,
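// The rewrite above swaps local definitions for type aliases into the
// upstream flux package. A minimal illustration of why aliases (declared
// with '=') keep existing call sites compiling during such a migration
// (hypothetical names, not part of this diff):
package main

import "fmt"

type upstreamNameOrID struct{ Name, ID string }

// NameOrID is an alias, not a distinct type: values flow in both directions
// with no conversion, so callers written against the old name keep working.
type NameOrID = upstreamNameOrID

func main() {
    b := NameOrID{Name: "mybucket"}
    var u upstreamNameOrID = b // legal without conversion because of the alias
    fmt.Println(u.Name)
}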
@ -1,168 +1,23 @@
package influxdb_test

import (
"fmt"
"context"
"testing"
"time"

"github.com/influxdata/flux"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/plan/plantest"
"github.com/influxdata/flux/querytest"
"github.com/influxdata/flux/stdlib/influxdata/influxdb"
"github.com/influxdata/flux/stdlib/universe"
platform "github.com/influxdata/influxdb/v2"
pquerytest "github.com/influxdata/influxdb/v2/query/querytest"
"github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
qinfluxdb "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
)

func TestFrom_NewQuery(t *testing.T) {
t.Skip()
tests := []querytest.NewQueryTestCase{
{
Name: "from no args",
Raw: `from()`,
WantErr: true,
},
{
Name: "from conflicting args",
Raw: `from(bucket:"d", bucket:"b")`,
WantErr: true,
},
{
Name: "from repeat arg",
Raw: `from(bucket:"telegraf", bucket:"oops")`,
WantErr: true,
},
{
Name: "from",
Raw: `from(bucket:"telegraf", chicken:"what is this?")`,
WantErr: true,
},
{
Name: "from bucket invalid ID",
Raw: `from(bucketID:"invalid")`,
WantErr: true,
},
{
Name: "from bucket ID",
Raw: `from(bucketID:"aaaabbbbccccdddd")`,
Want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: "from0",
Spec: &influxdb.FromOpSpec{
BucketID: "aaaabbbbccccdddd",
},
},
},
},
},
{
Name: "from with database",
Raw: `from(bucket:"mybucket") |> range(start:-4h, stop:-2h) |> sum()`,
Want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: "from0",
Spec: &influxdb.FromOpSpec{
Bucket: "mybucket",
},
},
{
ID: "range1",
Spec: &universe.RangeOpSpec{
Start: flux.Time{
Relative: -4 * time.Hour,
IsRelative: true,
},
Stop: flux.Time{
Relative: -2 * time.Hour,
IsRelative: true,
},
TimeColumn: "_time",
StartColumn: "_start",
StopColumn: "_stop",
},
},
{
ID: "sum2",
Spec: &universe.SumOpSpec{
AggregateConfig: execute.DefaultAggregateConfig,
},
},
},
Edges: []flux.Edge{
{Parent: "from0", Child: "range1"},
{Parent: "range1", Child: "sum2"},
},
},
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
querytest.NewQueryTestHelper(t, tc)
})
}
}

func TestFromOperation_Marshaling(t *testing.T) {
t.Skip()
data := []byte(`{"id":"from","kind":"from","spec":{"bucket":"mybucket"}}`)
op := &flux.Operation{
ID: "from",
Spec: &influxdb.FromOpSpec{
Bucket: "mybucket",
},
}
querytest.OperationMarshalingTestHelper(t, data, op)
}

func TestFromOpSpec_BucketsAccessed(t *testing.T) {
bucketName := "my_bucket"
bucketIDString := "aaaabbbbccccdddd"
bucketID, err := platform.IDFromString(bucketIDString)
if err != nil {
t.Fatal(err)
}
invalidID := platform.InvalidID()
tests := []pquerytest.BucketsAccessedTestCase{
{
Name: "From with bucket",
Raw: fmt.Sprintf(`from(bucket:"%s")`, bucketName),
WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}},
WantWriteBuckets: &[]platform.BucketFilter{},
},
{
Name: "From with bucketID",
Raw: fmt.Sprintf(`from(bucketID:"%s")`, bucketID),
WantReadBuckets: &[]platform.BucketFilter{{ID: bucketID}},
WantWriteBuckets: &[]platform.BucketFilter{},
},
{
Name: "From invalid bucketID",
Raw: `from(bucketID:"invalid")`,
WantReadBuckets: &[]platform.BucketFilter{{ID: &invalidID}},
WantWriteBuckets: &[]platform.BucketFilter{},
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
pquerytest.BucketsAccessedTestHelper(t, tc)
})
}
}

func TestFromValidation(t *testing.T) {
spec := plantest.PlanSpec{
// from |> group (cannot query an infinite time range)
Nodes: []plan.Node{
plan.CreateLogicalNode("from", &influxdb.FromProcedureSpec{
Bucket: "my-bucket",
Bucket: influxdb.NameOrID{Name: "my-bucket"},
}),
plan.CreatePhysicalNode("group", &universe.GroupProcedureSpec{
GroupMode: flux.GroupModeBy,

@ -176,11 +31,12 @@ func TestFromValidation(t *testing.T) {

ps := plantest.CreatePlanSpec(&spec)
pp := plan.NewPhysicalPlanner(plan.OnlyPhysicalRules(
influxdb.PushDownRangeRule{},
influxdb.PushDownFilterRule{},
influxdb.PushDownGroupRule{},
qinfluxdb.FromStorageRule{},
qinfluxdb.PushDownRangeRule{},
qinfluxdb.PushDownFilterRule{},
qinfluxdb.PushDownGroupRule{},
))
_, err := pp.Plan(ps)
_, err := pp.Plan(context.Background(), ps)
if err == nil {
t.Error("Expected query with no call to range to fail physical planning")
}
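// The tests above copy the loop variable (tc := tc) before t.Parallel().
// A stripped-down sketch of why: parallel subtests run after the loop has
// advanced, so without the copy every subtest could observe the final
// element (on Go versions before 1.22). Illustrative, not part of this diff:
package main

import "testing"

func TestLoopCapture(t *testing.T) {
    cases := []string{"a", "b", "c"}
    for _, tc := range cases {
        tc := tc // capture a fresh copy for the parallel closure
        t.Run(tc, func(t *testing.T) {
            t.Parallel()
            if tc == "" {
                t.Fatal("unexpected empty case")
            }
        })
    }
}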
@ -7,9 +7,9 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/storage/reads/datatypes"
)

const (

@ -55,12 +55,10 @@ type ReadRangePhysSpec struct {
Bucket string
BucketID string

// FilterSet is set to true if there is a filter.
FilterSet bool
// Filter is the filter to use when calling into
// storage. It must be possible to push down this
// filter.
Filter *semantic.FunctionExpression
Filter *datatypes.Predicate

Bounds flux.Bounds
}

@ -69,19 +67,8 @@ func (s *ReadRangePhysSpec) Kind() plan.ProcedureKind {
return ReadRangePhysKind
}
func (s *ReadRangePhysSpec) Copy() plan.ProcedureSpec {
ns := new(ReadRangePhysSpec)

ns.Bucket = s.Bucket
ns.BucketID = s.BucketID

ns.FilterSet = s.FilterSet
if ns.FilterSet {
ns.Filter = s.Filter.Copy().(*semantic.FunctionExpression)
}

ns.Bounds = s.Bounds

return ns
ns := *s
return &ns
}

func (s *ReadRangePhysSpec) LookupBucketID(ctx context.Context, orgID influxdb.ID, buckets BucketLookup) (influxdb.ID, error) {

@ -127,22 +114,29 @@ type ReadWindowAggregatePhysSpec struct {
ReadRangePhysSpec

WindowEvery int64
Offset int64
Aggregates []plan.ProcedureKind
CreateEmpty bool
TimeColumn string
}

func (s *ReadWindowAggregatePhysSpec) PlanDetails() string {
return fmt.Sprintf("every = %d, aggregates = %v", s.WindowEvery, s.Aggregates)
return fmt.Sprintf("every = %d, aggregates = %v, createEmpty = %v, timeColumn = \"%s\"", s.WindowEvery, s.Aggregates, s.CreateEmpty, s.TimeColumn)
}

func (s *ReadWindowAggregatePhysSpec) Kind() plan.ProcedureKind {
return ReadWindowAggregatePhysKind
}

func (s *ReadWindowAggregatePhysSpec) Copy() plan.ProcedureSpec {
ns := new(ReadWindowAggregatePhysSpec)

ns.ReadRangePhysSpec = *s.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec)
ns.WindowEvery = s.WindowEvery
ns.Offset = s.Offset
ns.Aggregates = s.Aggregates
ns.CreateEmpty = s.CreateEmpty
ns.TimeColumn = s.TimeColumn

return ns
}
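// The Copy above is reduced to a value copy (ns := *s). A sketch of the
// idiom and its main caveat, with a hypothetical spec type (not part of
// this diff):
package main

type exampleSpec struct {
    Bucket string
    Tags   []string // reference type: still shared by value copies
}

// Copy duplicates every scalar field in one assignment. Slice, map, and
// pointer fields keep aliasing the original, which is only safe when
// copies never mutate them - the situation for ReadRangePhysSpec above,
// whose Filter pointer is treated as immutable once set.
func (s *exampleSpec) Copy() *exampleSpec {
    ns := *s
    return &ns
}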
@ -1,23 +1,69 @@
package influxdb

import (
"context"
"math"

"github.com/influxdata/flux"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/stdlib/influxdata/influxdb"
"github.com/influxdata/flux/stdlib/universe"
"github.com/influxdata/flux/values"
"github.com/influxdata/influxdb/v2/kit/feature"
"github.com/influxdata/influxdb/v2/query"
)

func init() {
plan.RegisterPhysicalRules(
FromStorageRule{},
PushDownRangeRule{},
PushDownFilterRule{},
PushDownGroupRule{},
PushDownReadTagKeysRule{},
PushDownReadTagValuesRule{},
// These rules can be re-enabled when https://github.com/influxdata/influxdb/issues/19561 is fixed
// PushDownReadTagKeysRule{},
// PushDownReadTagValuesRule{},
SortedPivotRule{},
PushDownWindowAggregateRule{},
PushDownWindowAggregateByTimeRule{},
PushDownBareAggregateRule{},
GroupWindowAggregateTransposeRule{},
PushDownGroupAggregateRule{},
SwitchFillImplRule{},
SwitchSchemaMutationImplRule{},
)
plan.RegisterLogicalRules(
MergeFiltersRule{},
)
}

type FromStorageRule struct{}

func (rule FromStorageRule) Name() string {
return "influxdata/influxdb.FromStorageRule"
}

func (rule FromStorageRule) Pattern() plan.Pattern {
return plan.Pat(influxdb.FromKind)
}

func (rule FromStorageRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
fromSpec := node.ProcedureSpec().(*influxdb.FromProcedureSpec)
if fromSpec.Host != nil {
return node, false, nil
} else if fromSpec.Org != nil {
return node, false, &flux.Error{
Code: codes.Unimplemented,
Msg: "reads from the storage engine cannot read from a separate organization; please specify a host or remove the organization",
}
}

return plan.CreateLogicalNode("fromStorage", &FromStorageProcedureSpec{
Bucket: fromSpec.Bucket,
}), true, nil
}

// PushDownGroupRule pushes down a group operation to storage
@ -31,7 +77,7 @@ func (rule PushDownGroupRule) Pattern() plan.Pattern {
return plan.Pat(universe.GroupKind, plan.Pat(ReadRangePhysKind))
}

func (rule PushDownGroupRule) Rewrite(node plan.Node) (plan.Node, bool, error) {
func (rule PushDownGroupRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
src := node.Predecessors()[0].ProcedureSpec().(*ReadRangePhysSpec)
grp := node.ProcedureSpec().(*universe.GroupProcedureSpec)

@ -71,14 +117,13 @@ func (rule PushDownRangeRule) Pattern() plan.Pattern {
}

// Rewrite converts 'from |> range' into 'ReadRange'
func (rule PushDownRangeRule) Rewrite(node plan.Node) (plan.Node, bool, error) {
func (rule PushDownRangeRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
fromNode := node.Predecessors()[0]
fromSpec := fromNode.ProcedureSpec().(*FromProcedureSpec)

fromSpec := fromNode.ProcedureSpec().(*FromStorageProcedureSpec)
rangeSpec := node.ProcedureSpec().(*universe.RangeProcedureSpec)
return plan.CreatePhysicalNode("ReadRange", &ReadRangePhysSpec{
Bucket: fromSpec.Bucket,
BucketID: fromSpec.BucketID,
Bucket: fromSpec.Bucket.Name,
BucketID: fromSpec.Bucket.ID,
Bounds: rangeSpec.Bounds,
}), true, nil
}
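// A worked example of PushDownRangeRule on a concrete query (the Flux text
// and bounds are illustrative, not part of this diff):
//
//   from(bucket: "telegraf") |> range(start: -1h)
//
// collapses the FromStorage and range nodes into a single ReadRange node:
package influxdb

import "github.com/influxdata/flux"

func exampleReadRange(bounds flux.Bounds) *ReadRangePhysSpec {
    return &ReadRangePhysSpec{
        Bucket: "telegraf", // from fromSpec.Bucket.Name
        Bounds: bounds,     // from rangeSpec.Bounds
    }
}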
@ -96,7 +141,7 @@ func (PushDownFilterRule) Pattern() plan.Pattern {
return plan.Pat(universe.FilterKind, plan.Pat(ReadRangePhysKind))
}

func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
func (PushDownFilterRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
filterSpec := pn.ProcedureSpec().(*universe.FilterProcedureSpec)
fromNode := pn.Predecessors()[0]
fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)

@ -106,17 +151,17 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
return pn, false, nil
}

bodyExpr, ok := filterSpec.Fn.Fn.Block.Body.(semantic.Expression)
bodyExpr, ok := filterSpec.Fn.Fn.GetFunctionBodyExpression()
if !ok {
return pn, false, nil
}

if len(filterSpec.Fn.Fn.Block.Parameters.List) != 1 {
if len(filterSpec.Fn.Fn.Parameters.List) != 1 {
// I would expect that type checking would catch this, but just to be safe...
return pn, false, nil
}

paramName := filterSpec.Fn.Fn.Block.Parameters.List[0].Key.Name
paramName := filterSpec.Fn.Fn.Parameters.List[0].Key.Name

pushable, notPushable, err := semantic.PartitionPredicates(bodyExpr, func(e semantic.Expression) (bool, error) {
return isPushableExpr(paramName, e)

@ -131,17 +176,26 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
}
pushable, _ = rewritePushableExpr(pushable)

newFromSpec := fromSpec.Copy().(*ReadRangePhysSpec)
if newFromSpec.FilterSet {
newBody := semantic.ExprsToConjunction(newFromSpec.Filter.Block.Body.(semantic.Expression), pushable)
newFromSpec.Filter.Block.Body = newBody
} else {
newFromSpec.FilterSet = true
// NOTE: We loose the scope here, but that is ok because we can't push down the scope to storage.
newFromSpec.Filter = filterSpec.Fn.Fn.Copy().(*semantic.FunctionExpression)
newFromSpec.Filter.Block.Body = pushable
// Convert the pushable expression to a storage predicate.
predicate, err := ToStoragePredicate(pushable, paramName)
if err != nil {
return nil, false, err
}

// If the filter has already been set, then combine the existing predicate
// with the new one.
if fromSpec.Filter != nil {
mergedPredicate, err := mergePredicates(ast.AndOperator, fromSpec.Filter, predicate)
if err != nil {
return nil, false, err
}
predicate = mergedPredicate
}

// Copy the specification and set the predicate.
newFromSpec := fromSpec.Copy().(*ReadRangePhysSpec)
newFromSpec.Filter = predicate

if notPushable == nil {
// All predicates could be pushed down, so eliminate the filter
mergedNode, err := plan.MergeToPhysicalNode(pn, fromNode, newFromSpec)

@ -157,7 +211,11 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
}

newFilterSpec := filterSpec.Copy().(*universe.FilterProcedureSpec)
newFilterSpec.Fn.Fn.Block.Body = notPushable
newFilterSpec.Fn.Fn.Block = &semantic.Block{
Body: []semantic.Statement{
&semantic.ReturnStatement{Argument: notPushable},
},
}
if err := pn.ReplaceSpec(newFilterSpec); err != nil {
return nil, false, err
}
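// The heart of PushDownFilterRule is the pushable/residual split. A sketch
// of that step in isolation (isPushableExpr is the helper this package
// already defines; the PartitionPredicates signature follows its use above):
package influxdb

import "github.com/influxdata/flux/semantic"

// splitPushable partitions a filter body: predicates storage can evaluate
// become the ReadRange predicate, the rest stay in the flux filter node.
func splitPushable(body semantic.Expression, paramName string) (pushable, residual semantic.Expression, err error) {
    return semantic.PartitionPredicates(body, func(e semantic.Expression) (bool, error) {
        return isPushableExpr(paramName, e)
    })
}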
@ -183,11 +241,11 @@ func (rule PushDownReadTagKeysRule) Pattern() plan.Pattern {
plan.Pat(ReadRangePhysKind))))
}

func (rule PushDownReadTagKeysRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
func (rule PushDownReadTagKeysRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
// Retrieve the nodes and specs for all of the predecessors.
distinctSpec := pn.ProcedureSpec().(*universe.DistinctProcedureSpec)
keepNode := pn.Predecessors()[0]
keepSpec := keepNode.ProcedureSpec().(*universe.SchemaMutationProcedureSpec)
keepSpec := asSchemaMutationProcedureSpec(keepNode.ProcedureSpec())
keysNode := keepNode.Predecessors()[0]
keysSpec := keysNode.ProcedureSpec().(*universe.KeysProcedureSpec)
fromNode := keysNode.Predecessors()[0]

@ -245,14 +303,14 @@ func (rule PushDownReadTagValuesRule) Pattern() plan.Pattern {
plan.Pat(ReadRangePhysKind))))
}

func (rule PushDownReadTagValuesRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
func (rule PushDownReadTagValuesRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
// Retrieve the nodes and specs for all of the predecessors.
distinctNode := pn
distinctSpec := distinctNode.ProcedureSpec().(*universe.DistinctProcedureSpec)
groupNode := distinctNode.Predecessors()[0]
groupSpec := groupNode.ProcedureSpec().(*universe.GroupProcedureSpec)
keepNode := groupNode.Predecessors()[0]
keepSpec := keepNode.ProcedureSpec().(*universe.SchemaMutationProcedureSpec)
keepSpec := asSchemaMutationProcedureSpec(keepNode.ProcedureSpec())
fromNode := keepNode.Predecessors()[0]
fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)

@ -556,7 +614,7 @@ func (SortedPivotRule) Pattern() plan.Pattern {
return plan.Pat(universe.PivotKind, plan.Pat(ReadRangePhysKind))
}

func (SortedPivotRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
pivotSpec := pn.ProcedureSpec().Copy().(*universe.PivotProcedureSpec)
pivotSpec.IsSortedByFunc = func(cols []string, desc bool) bool {
if desc {

@ -595,3 +653,551 @@ func (SortedPivotRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
}
return pn, false, nil
}
//
// Push Down of window aggregates.
// ReadRangePhys |> window |> { min, max, mean, count, sum }
//
type PushDownWindowAggregateRule struct{}

func (PushDownWindowAggregateRule) Name() string {
return "PushDownWindowAggregateRule"
}

var windowPushableAggs = []plan.ProcedureKind{
universe.CountKind,
universe.SumKind,
universe.MinKind,
universe.MaxKind,
universe.MeanKind,
universe.FirstKind,
universe.LastKind,
}

func (rule PushDownWindowAggregateRule) Pattern() plan.Pattern {
return plan.OneOf(windowPushableAggs,
plan.Pat(universe.WindowKind, plan.Pat(ReadRangePhysKind)))
}

func canPushWindowedAggregate(ctx context.Context, fnNode plan.Node) bool {
caps, ok := capabilities(ctx)
if !ok {
return false
}
// Check the aggregate function spec. Require the operation on _value
// and check the feature flag associated with the aggregate function.
switch fnNode.Kind() {
case universe.MinKind:
if !caps.HaveMin() {
return false
}
minSpec := fnNode.ProcedureSpec().(*universe.MinProcedureSpec)
if minSpec.Column != execute.DefaultValueColLabel {
return false
}
case universe.MaxKind:
if !caps.HaveMax() {
return false
}
maxSpec := fnNode.ProcedureSpec().(*universe.MaxProcedureSpec)
if maxSpec.Column != execute.DefaultValueColLabel {
return false
}
case universe.MeanKind:
if !feature.PushDownWindowAggregateMean().Enabled(ctx) || !caps.HaveMean() {
return false
}
meanSpec := fnNode.ProcedureSpec().(*universe.MeanProcedureSpec)
if len(meanSpec.Columns) != 1 || meanSpec.Columns[0] != execute.DefaultValueColLabel {
return false
}
case universe.CountKind:
if !caps.HaveCount() {
return false
}
countSpec := fnNode.ProcedureSpec().(*universe.CountProcedureSpec)
if len(countSpec.Columns) != 1 || countSpec.Columns[0] != execute.DefaultValueColLabel {
return false
}
case universe.SumKind:
if !caps.HaveSum() {
return false
}
sumSpec := fnNode.ProcedureSpec().(*universe.SumProcedureSpec)
if len(sumSpec.Columns) != 1 || sumSpec.Columns[0] != execute.DefaultValueColLabel {
return false
}
case universe.FirstKind:
if !caps.HaveFirst() {
return false
}
firstSpec := fnNode.ProcedureSpec().(*universe.FirstProcedureSpec)
if firstSpec.Column != execute.DefaultValueColLabel {
return false
}
case universe.LastKind:
if !caps.HaveLast() {
return false
}
lastSpec := fnNode.ProcedureSpec().(*universe.LastProcedureSpec)
if lastSpec.Column != execute.DefaultValueColLabel {
return false
}
}
return true
}
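// Each case above repeats the same "operates on _value" check. A possible
// table-driven condensation (sketch only - it assumes exactly the spec
// shapes used above and changes no behavior):
package influxdb

import (
    "github.com/influxdata/flux/execute"
    "github.com/influxdata/flux/plan"
    "github.com/influxdata/flux/stdlib/universe"
)

// aggColumn extracts the aggregated column for the single-column specs
// handled by canPushWindowedAggregate.
func aggColumn(spec plan.ProcedureSpec) (string, bool) {
    switch s := spec.(type) {
    case *universe.MinProcedureSpec:
        return s.Column, true
    case *universe.MaxProcedureSpec:
        return s.Column, true
    case *universe.FirstProcedureSpec:
        return s.Column, true
    case *universe.LastProcedureSpec:
        return s.Column, true
    case *universe.CountProcedureSpec:
        if len(s.Columns) == 1 {
            return s.Columns[0], true
        }
    case *universe.SumProcedureSpec:
        if len(s.Columns) == 1 {
            return s.Columns[0], true
        }
    case *universe.MeanProcedureSpec:
        if len(s.Columns) == 1 {
            return s.Columns[0], true
        }
    }
    return "", false
}

// operatesOnValue reports whether the aggregate reads the default column.
func operatesOnValue(spec plan.ProcedureSpec) bool {
    col, ok := aggColumn(spec)
    return ok && col == execute.DefaultValueColLabel
}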
func isPushableWindow(windowSpec *universe.WindowProcedureSpec) bool {
// every and period must be equal
// every.months must be zero
// every.isNegative must be false
// offset.months must be zero
// offset.isNegative must be false
// timeColumn: must be "_time"
// startColumn: must be "_start"
// stopColumn: must be "_stop"
// createEmpty: must be false
window := windowSpec.Window
return window.Every.Equal(window.Period) &&
window.Every.Months() == 0 &&
!window.Every.IsNegative() &&
!window.Every.IsZero() &&
window.Offset.Months() == 0 &&
!window.Offset.IsNegative() &&
windowSpec.TimeColumn == "_time" &&
windowSpec.StartColumn == "_start" &&
windowSpec.StopColumn == "_stop"
}

func capabilities(ctx context.Context) (query.WindowAggregateCapability, bool) {
reader := GetStorageDependencies(ctx).FromDeps.Reader
windowAggregateReader, ok := reader.(query.WindowAggregateReader)
if !ok {
return nil, false
}
caps := windowAggregateReader.GetWindowAggregateCapability(ctx)
return caps, caps != nil
}
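// In Flux terms, isPushableWindow accepts fixed, positive, calendar-free
// windows over the default columns, e.g. (illustrative query):
//
//   from(bucket: "telegraf")
//       |> range(start: -1h)
//       |> window(every: 1m)   // every == period, no month units, no negative offset
//       |> min()
//
// whereas window(every: 1mo) or a negative offset keeps the aggregate in flux.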
func (PushDownWindowAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
fnNode := pn
if !canPushWindowedAggregate(ctx, fnNode) {
return pn, false, nil
}

windowNode := fnNode.Predecessors()[0]
windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec)
fromNode := windowNode.Predecessors()[0]
fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)

if !isPushableWindow(windowSpec) {
return pn, false, nil
}

if caps, ok := capabilities(ctx); !ok || windowSpec.Window.Offset.IsPositive() && !caps.HaveOffset() {
return pn, false, nil
}

// Rule passes.
return plan.CreatePhysicalNode("ReadWindowAggregate", &ReadWindowAggregatePhysSpec{
ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec),
Aggregates: []plan.ProcedureKind{fnNode.Kind()},
WindowEvery: windowSpec.Window.Every.Nanoseconds(),
Offset: windowSpec.Window.Offset.Nanoseconds(),
CreateEmpty: windowSpec.CreateEmpty,
}), true, nil
}

// PushDownWindowAggregateByTimeRule will match the given pattern.
// ReadWindowAggregatePhys |> duplicate |> window(every: inf)
//
// If this pattern matches and the arguments to duplicate are
// matching time column names, it will set the time column on
// the spec.
type PushDownWindowAggregateByTimeRule struct{}

func (PushDownWindowAggregateByTimeRule) Name() string {
return "PushDownWindowAggregateByTimeRule"
}

func (rule PushDownWindowAggregateByTimeRule) Pattern() plan.Pattern {
return plan.Pat(universe.WindowKind,
plan.Pat(universe.SchemaMutationKind,
plan.Pat(ReadWindowAggregatePhysKind)))
}

func (PushDownWindowAggregateByTimeRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
windowNode := pn
windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec)

duplicateNode := windowNode.Predecessors()[0]
duplicateSpec, duplicateSpecOk := func() (*universe.DuplicateOpSpec, bool) {
s := asSchemaMutationProcedureSpec(duplicateNode.ProcedureSpec())
if len(s.Mutations) != 1 {
return nil, false
}
mutator, ok := s.Mutations[0].(*universe.DuplicateOpSpec)
return mutator, ok
}()
if !duplicateSpecOk {
return pn, false, nil
}

// The As field must be the default time value
// and the column must be start or stop.
if duplicateSpec.As != execute.DefaultTimeColLabel ||
(duplicateSpec.Column != execute.DefaultStartColLabel && duplicateSpec.Column != execute.DefaultStopColLabel) {
return pn, false, nil
}

// window(every: inf)
if windowSpec.Window.Every != values.ConvertDuration(math.MaxInt64) ||
windowSpec.Window.Every != windowSpec.Window.Period ||
windowSpec.TimeColumn != execute.DefaultTimeColLabel ||
windowSpec.StartColumn != execute.DefaultStartColLabel ||
windowSpec.StopColumn != execute.DefaultStopColLabel ||
windowSpec.CreateEmpty {
return pn, false, nil
}

// Cannot rewrite if it was already rewritten.
windowAggregateNode := duplicateNode.Predecessors()[0]
windowAggregateSpec := windowAggregateNode.ProcedureSpec().(*ReadWindowAggregatePhysSpec)
if windowAggregateSpec.TimeColumn != "" {
return pn, false, nil
}

// Rule passes.
windowAggregateSpec.TimeColumn = duplicateSpec.Column
return plan.CreatePhysicalNode("ReadWindowAggregateByTime", windowAggregateSpec), true, nil
}
// PushDownBareAggregateRule is a rule that allows pushing down of aggregates
// that are directly over a ReadRange source.
type PushDownBareAggregateRule struct{}

func (p PushDownBareAggregateRule) Name() string {
return "PushDownBareAggregateRule"
}

func (p PushDownBareAggregateRule) Pattern() plan.Pattern {
return plan.OneOf(windowPushableAggs,
plan.Pat(ReadRangePhysKind))
}

func (p PushDownBareAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
fnNode := pn
if !canPushWindowedAggregate(ctx, fnNode) {
return pn, false, nil
}

fromNode := fnNode.Predecessors()[0]
fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)

return plan.CreatePhysicalNode("ReadWindowAggregate", &ReadWindowAggregatePhysSpec{
ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec),
Aggregates: []plan.ProcedureKind{fnNode.Kind()},
WindowEvery: math.MaxInt64,
}), true, nil
}
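// WindowEvery = math.MaxInt64 is the sentinel this file uses for "one window
// covering the whole range", so a bare aggregate such as
//
//   from(bucket: "telegraf") |> range(start: -1h) |> count()
//
// reads as an infinite window on the storage side. A helper expressing the
// sentinel check used by PushDownWindowAggregateByTimeRule (sketch, not part
// of this diff):
package influxdb

import (
    "math"

    "github.com/influxdata/flux/values"
)

func isInfiniteWindow(every values.Duration) bool {
    return every == values.ConvertDuration(math.MaxInt64)
}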
// GroupWindowAggregateTransposeRule will match the given pattern.
// ReadGroupPhys |> window |> { min, max, count, sum }
//
// This pattern will use the PushDownWindowAggregateRule to determine
// if the ReadWindowAggregatePhys operation is available before it will
// rewrite the above. This rewrites the above to:
//
// ReadWindowAggregatePhys |> group(columns: ["_start", "_stop", ...]) |> { min, max, sum }
//
// The count aggregate uses sum to merge the results.
type GroupWindowAggregateTransposeRule struct{}

func (p GroupWindowAggregateTransposeRule) Name() string {
return "GroupWindowAggregateTransposeRule"
}

var windowMergeablePushAggs = []plan.ProcedureKind{
universe.MinKind,
universe.MaxKind,
universe.CountKind,
universe.SumKind,
}

func (p GroupWindowAggregateTransposeRule) Pattern() plan.Pattern {
return plan.OneOf(windowMergeablePushAggs,
plan.Pat(universe.WindowKind, plan.Pat(ReadGroupPhysKind)))
}

func (p GroupWindowAggregateTransposeRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
if !feature.GroupWindowAggregateTranspose().Enabled(ctx) {
return pn, false, nil
}

fnNode := pn
if !canPushWindowedAggregate(ctx, fnNode) {
return pn, false, nil
}

windowNode := fnNode.Predecessors()[0]
windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec)

if !isPushableWindow(windowSpec) {
return pn, false, nil
}

if caps, ok := capabilities(ctx); !ok || windowSpec.Window.Offset.IsPositive() && !caps.HaveOffset() {
return pn, false, nil
}

fromNode := windowNode.Predecessors()[0]
fromSpec := fromNode.ProcedureSpec().(*ReadGroupPhysSpec)

// This only works with GroupModeBy. It is the case
// that ReadGroup, which we depend on as a predecessor,
// only works with GroupModeBy so it should be impossible
// to fail this condition, but we add this here for extra
// protection.
if fromSpec.GroupMode != flux.GroupModeBy {
return pn, false, nil
}

// Perform the rewrite by replacing each of the nodes.
newFromNode := plan.CreatePhysicalNode("ReadWindowAggregate", &ReadWindowAggregatePhysSpec{
ReadRangePhysSpec: *fromSpec.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec),
Aggregates: []plan.ProcedureKind{fnNode.Kind()},
WindowEvery: windowSpec.Window.Every.Nanoseconds(),
Offset: windowSpec.Window.Offset.Nanoseconds(),
CreateEmpty: windowSpec.CreateEmpty,
})

// Replace the window node with a group node first.
groupKeys := make([]string, len(fromSpec.GroupKeys), len(fromSpec.GroupKeys)+2)
copy(groupKeys, fromSpec.GroupKeys)
if !execute.ContainsStr(groupKeys, execute.DefaultStartColLabel) {
groupKeys = append(groupKeys, execute.DefaultStartColLabel)
}
if !execute.ContainsStr(groupKeys, execute.DefaultStopColLabel) {
groupKeys = append(groupKeys, execute.DefaultStopColLabel)
}
newGroupNode := plan.CreatePhysicalNode("group", &universe.GroupProcedureSpec{
GroupMode: flux.GroupModeBy,
GroupKeys: groupKeys,
})
newFromNode.AddSuccessors(newGroupNode)
newGroupNode.AddPredecessors(newFromNode)

// Attach the existing function node to the new group node.
fnNode.ClearPredecessors()
newGroupNode.AddSuccessors(fnNode)
fnNode.AddPredecessors(newGroupNode)

// Replace the spec for the function if needed.
switch spec := fnNode.ProcedureSpec().(type) {
case *universe.CountProcedureSpec:
newFnNode := plan.CreatePhysicalNode("sum", &universe.SumProcedureSpec{
AggregateConfig: spec.AggregateConfig,
})
plan.ReplaceNode(fnNode, newFnNode)
fnNode = newFnNode
default:
// No replacement required. The procedure is idempotent so
// we can use it over and over again and get the same result.
}
return fnNode, true, nil
}
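// A worked example of the group-key adjustment above (illustrative values):
// with GroupKeys = ["host"], the rewritten plan groups by
// ["host", "_start", "_stop"], so rows from different windows are never
// merged before the final aggregate runs. Runnable sketch:
package main

import "fmt"

func main() {
    groupKeys := []string{"host"}
    for _, label := range []string{"_start", "_stop"} {
        present := false
        for _, k := range groupKeys {
            if k == label {
                present = true
                break
            }
        }
        if !present {
            groupKeys = append(groupKeys, label)
        }
    }
    fmt.Println(groupKeys) // [host _start _stop]
}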
//
// Push Down of group aggregates.
// ReadGroupPhys |> { count }
//
type PushDownGroupAggregateRule struct{}

func (PushDownGroupAggregateRule) Name() string {
return "PushDownGroupAggregateRule"
}

func (rule PushDownGroupAggregateRule) Pattern() plan.Pattern {
return plan.OneOf(
[]plan.ProcedureKind{
universe.CountKind,
universe.SumKind,
universe.FirstKind,
universe.LastKind,
universe.MinKind,
universe.MaxKind,
},
plan.Pat(ReadGroupPhysKind))
}

func (PushDownGroupAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
group := pn.Predecessors()[0].ProcedureSpec().(*ReadGroupPhysSpec)
// Cannot push down multiple aggregates
if len(group.AggregateMethod) > 0 {
return pn, false, nil
}

if !canPushGroupedAggregate(ctx, pn) {
return pn, false, nil
}

switch pn.Kind() {
case universe.CountKind:
// ReadGroup() -> count => ReadGroup(count)
node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
ReadRangePhysSpec: group.ReadRangePhysSpec,
GroupMode: group.GroupMode,
GroupKeys: group.GroupKeys,
AggregateMethod: universe.CountKind,
})
return node, true, nil
case universe.SumKind:
// ReadGroup() -> sum => ReadGroup(sum)
node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
ReadRangePhysSpec: group.ReadRangePhysSpec,
GroupMode: group.GroupMode,
GroupKeys: group.GroupKeys,
AggregateMethod: universe.SumKind,
})
return node, true, nil
case universe.FirstKind:
// ReadGroup() -> first => ReadGroup(first)
node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
ReadRangePhysSpec: group.ReadRangePhysSpec,
GroupMode: group.GroupMode,
GroupKeys: group.GroupKeys,
AggregateMethod: universe.FirstKind,
})
return node, true, nil
case universe.LastKind:
// ReadGroup() -> last => ReadGroup(last)
node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
ReadRangePhysSpec: group.ReadRangePhysSpec,
GroupMode: group.GroupMode,
GroupKeys: group.GroupKeys,
AggregateMethod: universe.LastKind,
})
return node, true, nil
case universe.MinKind:
// ReadGroup() -> min => ReadGroup(min)
if feature.PushDownGroupAggregateMinMax().Enabled(ctx) {
node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
ReadRangePhysSpec: group.ReadRangePhysSpec,
GroupMode: group.GroupMode,
GroupKeys: group.GroupKeys,
AggregateMethod: universe.MinKind,
})
return node, true, nil
}
case universe.MaxKind:
// ReadGroup() -> max => ReadGroup(max)
if feature.PushDownGroupAggregateMinMax().Enabled(ctx) {
node := plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
ReadRangePhysSpec: group.ReadRangePhysSpec,
GroupMode: group.GroupMode,
GroupKeys: group.GroupKeys,
AggregateMethod: universe.MaxKind,
})
return node, true, nil
}
}
return pn, false, nil
}
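// Every case above constructs the same ReadGroupPhysSpec and differs only in
// AggregateMethod. A possible condensation (sketch only; it assumes
// AggregateMethod accepts a plan.ProcedureKind exactly as assigned above and
// changes no behavior):
package influxdb

import "github.com/influxdata/flux/plan"

func readGroupAggregate(group *ReadGroupPhysSpec, method plan.ProcedureKind) plan.Node {
    return plan.CreatePhysicalNode("ReadGroupAggregate", &ReadGroupPhysSpec{
        ReadRangePhysSpec: group.ReadRangePhysSpec,
        GroupMode:         group.GroupMode,
        GroupKeys:         group.GroupKeys,
        AggregateMethod:   method,
    })
}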
func canPushGroupedAggregate(ctx context.Context, pn plan.Node) bool {
reader := GetStorageDependencies(ctx).FromDeps.Reader
aggregator, ok := reader.(query.GroupAggregator)
if !ok {
return false
}
caps := aggregator.GetGroupCapability(ctx)
if caps == nil {
return false
}
switch pn.Kind() {
case universe.CountKind:
agg := pn.ProcedureSpec().(*universe.CountProcedureSpec)
return caps.HaveCount() && len(agg.Columns) == 1 && agg.Columns[0] == execute.DefaultValueColLabel
case universe.SumKind:
agg := pn.ProcedureSpec().(*universe.SumProcedureSpec)
return caps.HaveSum() && len(agg.Columns) == 1 && agg.Columns[0] == execute.DefaultValueColLabel
case universe.FirstKind:
agg := pn.ProcedureSpec().(*universe.FirstProcedureSpec)
return caps.HaveFirst() && agg.Column == execute.DefaultValueColLabel
case universe.LastKind:
agg := pn.ProcedureSpec().(*universe.LastProcedureSpec)
return caps.HaveLast() && agg.Column == execute.DefaultValueColLabel
case universe.MaxKind:
agg := pn.ProcedureSpec().(*universe.MaxProcedureSpec)
return caps.HaveMax() && agg.Column == execute.DefaultValueColLabel
case universe.MinKind:
agg := pn.ProcedureSpec().(*universe.MinProcedureSpec)
return caps.HaveMin() && agg.Column == execute.DefaultValueColLabel
}
return false
}
type SwitchFillImplRule struct{}

func (SwitchFillImplRule) Name() string {
return "SwitchFillImplRule"
}

func (SwitchFillImplRule) Pattern() plan.Pattern {
return plan.Pat(universe.FillKind, plan.Any())
}

func (r SwitchFillImplRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
if !feature.MemoryOptimizedFill().Enabled(ctx) {
spec := pn.ProcedureSpec().Copy()
universe.UseDeprecatedImpl(spec)
if err := pn.ReplaceSpec(spec); err != nil {
return nil, false, err
}
}
return pn, false, nil
}

type SwitchSchemaMutationImplRule struct{}

func (SwitchSchemaMutationImplRule) Name() string {
return "SwitchSchemaMutationImplRule"
}

func (SwitchSchemaMutationImplRule) Pattern() plan.Pattern {
return plan.Pat(universe.SchemaMutationKind, plan.Any())
}

func (r SwitchSchemaMutationImplRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
spec, ok := pn.ProcedureSpec().(*universe.DualImplProcedureSpec)
if !ok || spec.UseDeprecated {
return pn, false, nil
}

spec.UseDeprecated = !feature.MemoryOptimizedSchemaMutation().Enabled(ctx)
return pn, spec.UseDeprecated, nil
}

func asSchemaMutationProcedureSpec(spec plan.ProcedureSpec) *universe.SchemaMutationProcedureSpec {
if s, ok := spec.(*universe.DualImplProcedureSpec); ok {
spec = s.ProcedureSpec
}
return spec.(*universe.SchemaMutationProcedureSpec)
}

type MergeFiltersRule struct{}

func (MergeFiltersRule) Name() string {
return universe.MergeFiltersRule{}.Name()
}

func (MergeFiltersRule) Pattern() plan.Pattern {
return universe.MergeFiltersRule{}.Pattern()
}

func (r MergeFiltersRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
if feature.MergedFiltersRule().Enabled(ctx) {
return universe.MergeFiltersRule{}.Rewrite(ctx, pn)
}
return pn, false, nil
}
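// MergeFiltersRule above shows the feature-gating pattern used throughout
// this file: consult a flag, then delegate to the real rule. A generic
// sketch of the same shape with a hypothetical inner rule (not part of
// this diff):
package influxdb

import (
    "context"

    "github.com/influxdata/flux/plan"
)

// gatedRule wraps another rule behind a runtime check.
type gatedRule struct {
    inner   plan.Rule
    enabled func(context.Context) bool
}

func (g gatedRule) Name() string          { return g.inner.Name() }
func (g gatedRule) Pattern() plan.Pattern { return g.inner.Pattern() }

func (g gatedRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) {
    if !g.enabled(ctx) {
        return pn, false, nil
    }
    return g.inner.Rewrite(ctx, pn)
}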
Some files were not shown because too many files have changed in this diff.