diff --git a/.circleci/config.yml b/.circleci/config.yml index 2a0b81f129..11e308a58e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,7 +6,7 @@ parameters: # when updating the go version, should also update the go version in go.mod description: docker tag for cross build container from quay.io . Created by https://github.com/influxdata/edge/tree/master/dockerfiles/cross-builder . type: string - default: go1.18.9-cb1343dd74ecba8ec07fe810195530a0b9055aa9 + default: go1.19.6-7254b98bc4080dce9c237b698e868e4e0567cee7 workflow: type: string @@ -244,7 +244,7 @@ jobs: unit_test_race: docker: - image: quay.io/influxdb/cross-builder:<< pipeline.parameters.cross-container-tag >> - resource_class: large + resource_class: xlarge steps: - checkout - restore_cache: @@ -257,7 +257,17 @@ jobs: set -x mkdir -p junit-race/ export GORACE="halt_on_error=1" - gotestsum --junitfile junit-race/influxdb.junit.xml -- -race ./... + # "resource_class: xlarge" creates a Docker container with eight + # virtual CPU cores. However, applications like "nproc" return + # the host machine's core count (which in this case is 36). + # When fewer cores are available than advertised, the + # race tests fail. + # + # We'll manually reduce the number of available cores to what + # is specified by the CircleCI documentation: + # https://circleci.com/product/features/resource-classes/ + taskset -c 0-7 \ + gotestsum --junitfile junit-race/influxdb.junit.xml -- -race ./... no_output_timeout: 1500s - store_test_results: path: junit-race/ diff --git a/cmd/influx/cli/cli.go b/cmd/influx/cli/cli.go index ba9757a279..bd71d6fc1b 100644 --- a/cmd/influx/cli/cli.go +++ b/cmd/influx/cli/cli.go @@ -1461,7 +1461,6 @@ func (f *formatter) valueBuf(i, j int, typ flux.ColType, cr flux.ColReader) []by // * common tags sorted by label // * other tags sorted by label // * value -// type orderedCols struct { indexMap []int cols []flux.ColMeta diff --git a/cmd/influx_tools/internal/errlist/errlist.go b/cmd/influx_tools/internal/errlist/errlist.go index 23e8530333..eecc446c10 100644 --- a/cmd/influx_tools/internal/errlist/errlist.go +++ b/cmd/influx_tools/internal/errlist/errlist.go @@ -22,7 +22,7 @@ func (el *ErrorList) Add(err error) { el.errs = append(el.errs, err) } -//Err returns whether or not an error list is an error. +// Err returns whether or not an error list is an error. func (el *ErrorList) Err() error { if len(el.errs) == 0 { return nil diff --git a/cmd/influx_tools/internal/format/binary/tools_binary.pb.go b/cmd/influx_tools/internal/format/binary/tools_binary.pb.go index e5c62d2e6c..8831245bce 100644 --- a/cmd/influx_tools/internal/format/binary/tools_binary.pb.go +++ b/cmd/influx_tools/internal/format/binary/tools_binary.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc v3.17.3 // source: tools_binary.proto diff --git a/cmd/influxd/backup_util/internal/backup_util.pb.go b/cmd/influxd/backup_util/internal/backup_util.pb.go index 06aada8b96..b5eea3b6ce 100644 --- a/cmd/influxd/backup_util/internal/backup_util.pb.go +++ b/cmd/influxd/backup_util/internal/backup_util.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc v3.17.3 // source: internal/backup_util.proto diff --git a/cmd/influxd/run/command.go b/cmd/influxd/run/command.go index 8a91ebc08a..cfb7e67b15 100644 --- a/cmd/influxd/run/command.go +++ b/cmd/influxd/run/command.go @@ -280,11 +280,11 @@ type Options struct { // GetConfigPath returns the config path from the options. // It will return a path by searching in this order: -// 1. The CLI option in ConfigPath -// 2. The environment variable INFLUXDB_CONFIG_PATH -// 3. The first influxdb.conf file on the path: -// - ~/.influxdb -// - /etc/influxdb +// 1. The CLI option in ConfigPath +// 2. The environment variable INFLUXDB_CONFIG_PATH +// 3. The first influxdb.conf file on the path: +// - ~/.influxdb +// - /etc/influxdb func (opt *Options) GetConfigPath() string { if opt.ConfigPath != "" { if opt.ConfigPath == os.DevNull { diff --git a/cmd/influxd/run/server.go b/cmd/influxd/run/server.go index 366d7de483..6ac186c97a 100644 --- a/cmd/influxd/run/server.go +++ b/cmd/influxd/run/server.go @@ -15,6 +15,7 @@ import ( "github.com/influxdata/flux" "github.com/influxdata/flux/dependencies/testing" + "github.com/influxdata/flux/execute/executetest" "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/coordinator" influxdb2 "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" @@ -312,10 +313,16 @@ func (s *Server) appendHTTPDService(c httpd.Config) error { if err != nil { return err } + + deps := []flux.Dependency{storageDep, testing.FrameworkConfig{}} + if s.config.HTTPD.FluxTesting { + deps = append(deps, executetest.NewDefaultTestFlagger()) + } + srv.Handler.Controller, err = control.New( s.config.FluxController, s.Logger.With(zap.String("service", "flux-controller")), - []flux.Dependency{storageDep, testing.FrameworkConfig{}}, + deps, ) if err != nil { return err diff --git a/coordinator/points_writer.go b/coordinator/points_writer.go index afb01cd791..30e60bfb37 100644 --- a/coordinator/points_writer.go +++ b/coordinator/points_writer.go @@ -255,8 +255,8 @@ func (l sgList) Covers(t time.Time) bool { // to start time. Therefore, if there are multiple shard groups that match // this point's time they will be preferred in this order: // -// - a shard group with the earliest end time; -// - (assuming identical end times) the shard group with the earliest start time. +// - a shard group with the earliest end time; +// - (assuming identical end times) the shard group with the earliest start time. 
func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo { if l.items.Len() == 0 { return nil diff --git a/flux/stdlib/influxdata/influxdb/buckets.go b/flux/stdlib/influxdata/influxdb/buckets.go index 7acafd608d..376bdd7f65 100644 --- a/flux/stdlib/influxdata/influxdb/buckets.go +++ b/flux/stdlib/influxdata/influxdb/buckets.go @@ -149,7 +149,7 @@ func (rule LocalBucketsRule) Name() string { } func (rule LocalBucketsRule) Pattern() plan.Pattern { - return plan.Pat(influxdb.BucketsKind) + return plan.MultiSuccessor(influxdb.BucketsKind) } func (rule LocalBucketsRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { diff --git a/flux/stdlib/influxdata/influxdb/filter_test.flux b/flux/stdlib/influxdata/influxdb/filter_test.flux index 2f39f44232..4c97936493 100644 --- a/flux/stdlib/influxdata/influxdb/filter_test.flux +++ b/flux/stdlib/influxdata/influxdb/filter_test.flux @@ -33,9 +33,44 @@ testcase filter { ,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.63 ") - got = testing.loadStorage(csv: input) + got = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> filter(fn: (r) => r._measurement == "system" and r._field == "load1") |> drop(columns: ["_start", "_stop"]) testing.diff(want, got) } + + +input_issue_4804 = "#datatype,string,long,dateTime:RFC3339,string,string,string,boolean +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,host,_field,_value +,,0,2018-05-22T19:53:26Z,system,host.local,load1,true +,,0,2018-05-22T19:53:36Z,system,host.local,load1,false +,,1,2018-05-22T19:53:26Z,system,host.local,load3,false +,,2,2018-05-22T19:53:26Z,system,host.local,load4,true +" + +testcase flux_issue_4804 { + expect.planner(rules: [ + "influxdata/influxdb.FromStorageRule": 1, + "PushDownRangeRule": 1, + "PushDownFilterRule": 1, + ]) + + want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,boolean +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,host,_field,_value +,,0,2018-05-22T19:53:26Z,system,host.local,load1,true +,,1,2018-05-22T19:53:26Z,system,host.local,load3,false +") + + got = csv.from(csv: input_issue_4804) + |> testing.load() + |> range(start: -100y) + |> filter(fn: (r) => ((r["_field"] == "load1" and r["_value"] == true) or (r["_field"] == "load3" and r["_value"] == false))) + |> drop(columns: ["_start", "_stop"]) + testing.diff(want, got) +} diff --git a/flux/stdlib/influxdata/influxdb/multi_measure_test.flux b/flux/stdlib/influxdata/influxdb/multi_measure_test.flux new file mode 100644 index 0000000000..4a0cc2a657 --- /dev/null +++ b/flux/stdlib/influxdata/influxdb/multi_measure_test.flux @@ -0,0 +1,267 @@ +package influxdb_test + +import "csv" +import "testing" + +option now = () => 2030-01-01T00:00:00Z + +input = " +#datatype,string,long,dateTime:RFC3339,string,string,string,double +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,host,_field,_value +,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 +,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 +,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 +,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 +,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 +,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 + +,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 +,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 +,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 
+,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 +,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 +,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 + +,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95 +,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92 +,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92 +,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89 +,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94 +,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93 + +,,3,2018-05-22T19:53:26Z,var,host.local,load3,91.98 +,,3,2018-05-22T19:53:36Z,var,host.local,load3,91.97 +,,3,2018-05-22T19:53:46Z,var,host.local,load3,91.97 +,,3,2018-05-22T19:53:56Z,var,host.local,load3,91.96 +,,3,2018-05-22T19:54:06Z,var,host.local,load3,91.98 +,,3,2018-05-22T19:54:16Z,var,host.local,load3,91.97 + +,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 +,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 + +#datatype,string,long,dateTime:RFC3339,string,string,string,double +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,loc,_field,_value +,,0,2018-05-22T19:53:26Z,locale,en,lat,37.09 +,,0,2018-05-22T19:53:36Z,locale,en,lat,37.10 +,,0,2018-05-22T19:53:46Z,locale,en,lat,37.08 +" + +testcase multi_measure { + got = csv.from(csv: input) + |> testing.load() + |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) + |> filter(fn: (r) => r["_measurement"] == "system" or r["_measurement"] == "sys") + |> filter(fn: (r) => r["_field"] == "load1" or r["_field"] == "load3") + |> drop(columns: ["_start", "_stop"]) + + want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,host,_field,_value +,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 +,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 +,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 +,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 +,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 +,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 +,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 +,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 +,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 +,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 +,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 +,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 +") + + testing.diff(got, want) +} + +testcase multi_measure_match_all { + got = csv.from(csv: input) + |> testing.load() + |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) + |> filter(fn: (r) => r["_measurement"] == "system" or r["_measurement"] == "sys" or r["_measurement"] == "var" or r["_measurement"] == "swap") + |> filter(fn: (r) => r["_field"] == "load1" or r["_field"] == "load3" or r["_field"] == "load5" or r["_field"] == "used_percent") + |> drop(columns: ["_start", "_stop"]) + + want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,host,_field,_value +,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 +,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 
+,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 +,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 +,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 +,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 +,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 +,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 +,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 +,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 +,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 +,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 +,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95 +,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92 +,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92 +,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89 +,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94 +,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93 +,,3,2018-05-22T19:53:26Z,var,host.local,load3,91.98 +,,3,2018-05-22T19:53:36Z,var,host.local,load3,91.97 +,,3,2018-05-22T19:53:46Z,var,host.local,load3,91.97 +,,3,2018-05-22T19:53:56Z,var,host.local,load3,91.96 +,,3,2018-05-22T19:54:06Z,var,host.local,load3,91.98 +,,3,2018-05-22T19:54:16Z,var,host.local,load3,91.97 +,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 +,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 +") + + testing.diff(got, want) +} + +testcase multi_measure_tag_filter { + got = csv.from(csv: input) + |> testing.load() + |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) + |> filter(fn: (r) => r["_measurement"] == "system" or r["_measurement"] == "swap") + |> filter(fn: (r) => r["_field"] == "load1" or r["_field"] == "load3" or r["_field"] == "used_percent") + |> filter(fn: (r) => r["host"] == "host.local" or r["host"] == "host.global") + |> drop(columns: ["_start", "_stop"]) + + want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,host,_field,_value +,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 +,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 +,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 +,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 +,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 +,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 +,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 +,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 +") + + testing.diff(got, want) +} + +testcase multi_measure_complex_or { + got = csv.from(csv: input) + |> testing.load() + |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) + |> filter(fn: (r) => (r["_measurement"] == "system" or r["_measurement"] == "swap") or (r["_measurement"] != "var" and r["host"] == "host.local")) + |> drop(columns: ["_start", "_stop"]) + + want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double +#group,false,false,false,true,true,true,false +#default,_result,,,,,, 
+,result,table,_time,_measurement,host,_field,_value +,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 +,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 +,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 +,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 +,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 +,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 +,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95 +,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92 +,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92 +,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89 +,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94 +,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93 +,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 +,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 +,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 +,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 +,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 +,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 +,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 +,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 +") + + testing.diff(got, want) +} + +testcase multi_measure_complex_and { + got = csv.from(csv: input) + |> testing.load() + |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) + |> filter(fn: (r) => r["_measurement"] != "system" or r["_measurement"] == "swap") + |> filter(fn: (r) => r["_measurement"] == "swap" or r["_measurement"] == "var") + |> drop(columns: ["_start", "_stop"]) + + want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,host,_field,_value +,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 +,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 +,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 +,,3,2018-05-22T19:53:26Z,var,host.local,load3,91.98 +,,3,2018-05-22T19:53:36Z,var,host.local,load3,91.97 +,,3,2018-05-22T19:53:46Z,var,host.local,load3,91.97 +,,3,2018-05-22T19:53:56Z,var,host.local,load3,91.96 +,,3,2018-05-22T19:54:06Z,var,host.local,load3,91.98 +,,3,2018-05-22T19:54:16Z,var,host.local,load3,91.97 +") + + testing.diff(got, want) +} + +testcase multi_measure_negation { + got = csv.from(csv: input) + |> testing.load() + |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) + |> filter(fn: (r) => r["_measurement"] != "system") + |> filter(fn: (r) => r["host"] == "host.local" or not exists r["host"]) + |> drop(columns: ["_start", "_stop"]) + + want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,host,_field,_value +,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 +,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 +,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 +,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 +,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 
+,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 +,,3,2018-05-22T19:53:26Z,var,host.local,load3,91.98 +,,3,2018-05-22T19:53:36Z,var,host.local,load3,91.97 +,,3,2018-05-22T19:53:46Z,var,host.local,load3,91.97 +,,3,2018-05-22T19:53:56Z,var,host.local,load3,91.96 +,,3,2018-05-22T19:54:06Z,var,host.local,load3,91.98 +,,3,2018-05-22T19:54:16Z,var,host.local,load3,91.97 + +#datatype,string,long,dateTime:RFC3339,string,string,string,double +#group,false,false,false,true,true,true,false +#default,_result,,,,,, +,result,table,_time,_measurement,loc,_field,_value +,,0,2018-05-22T19:53:26Z,locale,en,lat,37.09 +,,0,2018-05-22T19:53:36Z,locale,en,lat,37.10 +,,0,2018-05-22T19:53:46Z,locale,en,lat,37.08 +") + + testing.diff(got, want) +} diff --git a/flux/stdlib/influxdata/influxdb/rules.go b/flux/stdlib/influxdata/influxdb/rules.go index f81e72dbb2..379c363cc8 100644 --- a/flux/stdlib/influxdata/influxdb/rules.go +++ b/flux/stdlib/influxdata/influxdb/rules.go @@ -46,7 +46,7 @@ func (rule FromStorageRule) Name() string { } func (rule FromStorageRule) Pattern() plan.Pattern { - return plan.Pat(influxdb.FromKind) + return plan.MultiSuccessor(influxdb.FromKind) } func (rule FromStorageRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { @@ -73,7 +73,7 @@ func (rule PushDownGroupRule) Name() string { } func (rule PushDownGroupRule) Pattern() plan.Pattern { - return plan.Pat(universe.GroupKind, plan.Pat(ReadRangePhysKind)) + return plan.MultiSuccessor(universe.GroupKind, plan.SingleSuccessor(ReadRangePhysKind)) } func (rule PushDownGroupRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { @@ -112,7 +112,7 @@ func (rule PushDownRangeRule) Name() string { // Pattern matches 'from |> range' func (rule PushDownRangeRule) Pattern() plan.Pattern { - return plan.Pat(universe.RangeKind, plan.Pat(FromKind)) + return plan.MultiSuccessor(universe.RangeKind, plan.SingleSuccessor(FromKind)) } // Rewrite converts 'from |> range' into 'ReadRange' @@ -138,7 +138,7 @@ func (PushDownFilterRule) Name() string { } func (PushDownFilterRule) Pattern() plan.Pattern { - return plan.Pat(universe.FilterKind, plan.Pat(ReadRangePhysKind)) + return plan.MultiSuccessor(universe.FilterKind, plan.SingleSuccessor(ReadRangePhysKind)) } func (PushDownFilterRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { @@ -227,10 +227,10 @@ func (rule PushDownReadTagKeysRule) Name() string { } func (rule PushDownReadTagKeysRule) Pattern() plan.Pattern { - return plan.Pat(universe.DistinctKind, - plan.Pat(universe.SchemaMutationKind, - plan.Pat(universe.KeysKind, - plan.Pat(ReadRangePhysKind)))) + return plan.MultiSuccessor(universe.DistinctKind, + plan.SingleSuccessor(universe.SchemaMutationKind, + plan.SingleSuccessor(universe.KeysKind, + plan.SingleSuccessor(ReadRangePhysKind)))) } func hasFieldRef(node *datatypes.Node) bool { @@ -311,10 +311,10 @@ func (rule PushDownReadTagValuesRule) Name() string { } func (rule PushDownReadTagValuesRule) Pattern() plan.Pattern { - return plan.Pat(universe.DistinctKind, - plan.Pat(universe.GroupKind, - plan.Pat(universe.SchemaMutationKind, - plan.Pat(ReadRangePhysKind)))) + return plan.MultiSuccessor(universe.DistinctKind, + plan.SingleSuccessor(universe.GroupKind, + plan.SingleSuccessor(universe.SchemaMutationKind, + plan.SingleSuccessor(ReadRangePhysKind)))) } func (rule PushDownReadTagValuesRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { @@ -628,7 +628,7 @@ func (SortedPivotRule) Name() string { } func 
(SortedPivotRule) Pattern() plan.Pattern { - return plan.Pat(universe.PivotKind, plan.Pat(ReadRangePhysKind)) + return plan.MultiSuccessor(universe.PivotKind, plan.SingleSuccessor(ReadRangePhysKind)) } func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { @@ -671,10 +671,8 @@ func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bo return pn, false, nil } -// // Push Down of window aggregates. // ReadRangePhys |> window |> { min, max, mean, count, sum } -// type PushDownWindowAggregateRule struct{} func (PushDownWindowAggregateRule) Name() string { @@ -692,8 +690,8 @@ var windowPushableAggs = []plan.ProcedureKind{ } func (rule PushDownWindowAggregateRule) Pattern() plan.Pattern { - return plan.OneOf(windowPushableAggs, - plan.Pat(universe.WindowKind, plan.Pat(ReadRangePhysKind))) + return plan.MultiSuccessorOneOf(windowPushableAggs, + plan.SingleSuccessor(universe.WindowKind, plan.SingleSuccessor(ReadRangePhysKind))) } func canPushWindowedAggregate(ctx context.Context, fnNode plan.Node) bool { @@ -783,9 +781,9 @@ func (PushDownWindowAggregateByTimeRule) Name() string { } func (rule PushDownWindowAggregateByTimeRule) Pattern() plan.Pattern { - return plan.Pat(universe.WindowKind, - plan.Pat(universe.SchemaMutationKind, - plan.Pat(ReadWindowAggregatePhysKind))) + return plan.MultiSuccessor(universe.WindowKind, + plan.SingleSuccessor(universe.SchemaMutationKind, + plan.SingleSuccessor(ReadWindowAggregatePhysKind))) } func (PushDownWindowAggregateByTimeRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { @@ -843,8 +841,8 @@ func (p PushDownBareAggregateRule) Name() string { } func (p PushDownBareAggregateRule) Pattern() plan.Pattern { - return plan.OneOf(windowPushableAggs, - plan.Pat(ReadRangePhysKind)) + return plan.MultiSuccessorOneOf(windowPushableAggs, + plan.SingleSuccessor(ReadRangePhysKind)) } func (p PushDownBareAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { @@ -863,10 +861,8 @@ func (p PushDownBareAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (p }), true, nil } -// // Push Down of group aggregates. 
// ReadGroupPhys |> { count } -// type PushDownGroupAggregateRule struct{} func (PushDownGroupAggregateRule) Name() string { @@ -874,7 +870,7 @@ func (PushDownGroupAggregateRule) Name() string { } func (rule PushDownGroupAggregateRule) Pattern() plan.Pattern { - return plan.OneOf( + return plan.MultiSuccessorOneOf( []plan.ProcedureKind{ universe.CountKind, universe.SumKind, @@ -883,7 +879,7 @@ func (rule PushDownGroupAggregateRule) Pattern() plan.Pattern { universe.MinKind, universe.MaxKind, }, - plan.Pat(ReadGroupPhysKind)) + plan.SingleSuccessor(ReadGroupPhysKind)) } func (PushDownGroupAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { diff --git a/flux/stdlib/influxdata/influxdb/rules_test.go b/flux/stdlib/influxdata/influxdb/rules_test.go index a815eed1d9..165715e764 100644 --- a/flux/stdlib/influxdata/influxdb/rules_test.go +++ b/flux/stdlib/influxdata/influxdb/rules_test.go @@ -1170,9 +1170,7 @@ func meanProcedureSpec() *universe.MeanProcedureSpec { } } -// // Window Aggregate Testing -// func TestPushDownWindowAggregateRule(t *testing.T) { createRangeSpec := func() *influxdb.ReadRangePhysSpec { return &influxdb.ReadRangePhysSpec{ diff --git a/flux/stdlib/influxdata/influxdb/tag_values_measurement_or_predicate_test.flux b/flux/stdlib/influxdata/influxdb/tag_values_measurement_or_predicate_test.flux new file mode 100644 index 0000000000..fbc71a53ab --- /dev/null +++ b/flux/stdlib/influxdata/influxdb/tag_values_measurement_or_predicate_test.flux @@ -0,0 +1,135 @@ +// TODO(whb): These tests should get ported to the flux repo and removed here +// when they are included with a flux release that InfluxDB uses to remove the +// redundancy. + +package influxdb_test + +import "csv" +import "testing" +import "testing/expect" + +option now = () => 2030-01-01T00:00:00Z + +input = " +#group,false,false,false,false,true,true,true,true,true,true,true +#datatype,string,long,dateTime:RFC3339,long,string,string,string,string,string,string,string +#default,_result,,,,,,,,,, +,result,table,_time,_value,_field,_measurement,device,fstype,host,mode,path +,,0,2020-10-21T20:48:30Z,4881964326,inodes_free,disk,disk1s5,apfs,euterpe.local,ro,/ +,,0,2020-10-21T20:48:40Z,4881964326,inodes_free,disk,disk1s5,apfs,euterpe.local,ro,/ +,,0,2020-10-21T20:48:50Z,4881964326,inodes_free,disk,disk1s5,apfs,euterpe.local,ro,/ +,,1,2020-10-21T20:48:30Z,4294963701,inodes_free,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE +,,1,2020-10-21T20:48:40Z,4294963701,inodes_free,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE +,,1,2020-10-21T20:48:50Z,4294963701,inodes_free,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE +,,2,2020-10-21T20:48:30Z,488514,inodes_used,disk,disk1s5,apfs,euterpe.local,ro,/ +,,2,2020-10-21T20:48:40Z,488514,inodes_used,disk,disk1s5,apfs,euterpe.local,ro,/ +,,2,2020-10-21T20:48:50Z,488514,inodes_used,disk,disk1s5,apfs,euterpe.local,ro,/ +,,3,2020-10-21T20:48:30Z,3578,inodes_used,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE +,,3,2020-10-21T20:48:40Z,3578,inodes_used,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE +,,3,2020-10-21T20:48:50Z,3578,inodes_used,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE + +#group,false,false,false,false,true,true,true,true,true +#datatype,string,long,dateTime:RFC3339,double,string,string,string,string,string +#default,_result,,,,,,,, +,result,table,_time,_value,_field,_measurement,cpu,host,region 
+,,4,2020-10-21T20:48:30Z,69.30000000167638,usage_idle,cpu,cpu0,euterpe.local,south +,,4,2020-10-21T20:48:40Z,67.36736736724372,usage_idle,cpu,cpu0,euterpe.local,south +,,4,2020-10-21T20:48:50Z,69.23076923005354,usage_idle,cpu,cpu0,euterpe.local,south +,,5,2020-10-21T20:48:30Z,96.10000000102445,usage_idle,cpu,cpu1,euterpe.local,south +,,5,2020-10-21T20:48:40Z,95.70000000055181,usage_idle,cpu,cpu1,euterpe.local,south +,,5,2020-10-21T20:48:50Z,95.89999999860534,usage_idle,cpu,cpu1,euterpe.local,south + +#group,false,false,false,false,true,true,true,true,true +#datatype,string,long,dateTime:RFC3339,double,string,string,string,string,string +#default,_result,,,,,,,, +,result,table,_time,_value,_field,_measurement,cpu,host,region +,,6,2020-10-21T20:48:30Z,69.30000000167638,usage_idle,cpu,cpu0,mnemosyne.local,east +,,6,2020-10-21T20:48:40Z,67.36736736724372,usage_idle,cpu,cpu0,mnemosyne.local,east +,,6,2020-10-21T20:48:50Z,69.23076923005354,usage_idle,cpu,cpu0,mnemosyne.local,east +,,7,2020-10-21T20:48:30Z,96.10000000102445,usage_idle,cpu,cpu1,mnemosyne.local,east +,,7,2020-10-21T20:48:40Z,95.70000000055181,usage_idle,cpu,cpu1,mnemosyne.local,east +,,7,2020-10-21T20:48:50Z,95.89999999860534,usage_idle,cpu,cpu1,mnemosyne.local,east + +#group,false,false,true,true,false,false,true,true,true +#datatype,string,long,string,string,dateTime:RFC3339,double,string,string,string +#default,_result,,,,,,,, +,result,table,_field,_measurement,_time,_value,cpu,host,region +,,8,usage_user,cpu,2020-10-21T20:48:30Z,19.30000000007567,cpu0,euterpe.local,north +,,8,usage_user,cpu,2020-10-21T20:48:40Z,20.020020020038682,cpu0,euterpe.local,north +,,8,usage_user,cpu,2020-10-21T20:48:50Z,18.581418581407107,cpu0,euterpe.local,north +,,9,usage_user,cpu,2020-10-21T20:48:30Z,2.3000000000138243,cpu1,euterpe.local,north +,,9,usage_user,cpu,2020-10-21T20:48:40Z,2.4000000000536965,cpu1,euterpe.local,north +,,9,usage_user,cpu,2020-10-21T20:48:50Z,2.0999999999423746,cpu1,euterpe.local,north +" + +testcase tag_values_measurement_or_predicate { + got = csv.from(csv: input) + |> testing.load() + |> range(start: -100y) + |> filter(fn: (r) => r["_measurement"] == "cpu") + |> filter(fn: (r) => r["_measurement"] == "someOtherThing" or r["host"] == "euterpe.local") + |> keep(columns: ["region"]) + |> group() + |> distinct(column: "region") + |> limit(n: 200) + |> sort() + + want = csv.from(csv: "#datatype,string,long,string +#group,false,false,false +#default,0,, +,result,table,_value +,,0,north +,,0,south +") + + expect.planner(rules: ["PushDownReadTagValuesRule": 1]) + testing.diff(got, want) +} + +testcase tag_values_measurement_or_negation { + got = csv.from(csv: input) + |> testing.load() + |> range(start: -100y) + |> filter(fn: (r) => r["_measurement"] != "cpu") + |> filter(fn: (r) => r["_measurement"] == "someOtherThing" or r["fstype"] != "apfs") + |> keep(columns: ["fstype"]) + |> group() + |> distinct(column: "fstype") + |> limit(n: 200) + |> sort() + + want = csv.from(csv: "#datatype,string,long,string +#group,false,false,false +#default,0,, +,result,table,_value +,,0,hfs +") + + expect.planner(rules: ["PushDownReadTagValuesRule": 1]) + testing.diff(got, want) +} + +testcase tag_values_measurement_or_regex { + got = csv.from(csv: input) + |> testing.load() + |> range(start: -100y) + |> filter(fn: (r) => r["_measurement"] =~ /cp.*/) + |> filter(fn: (r) => r["_measurement"] == "someOtherThing" or r["host"] !~ /mnemo.*/) + |> keep(columns: ["region"]) + |> group() + |> distinct(column: "region") + |> limit(n: 200) + |> sort() + + 
want = csv.from(csv: "#datatype,string,long,string +#group,false,false,false +#default,0,, +,result,table,_value +,,0,north +,,0,south +") + + expect.planner(rules: ["PushDownReadTagValuesRule": 1]) + testing.diff(got, want) +} + diff --git a/flux/stdlib/influxdata/influxdb/to.go b/flux/stdlib/influxdata/influxdb/to.go index 0ea7c68504..8153fd87d0 100644 --- a/flux/stdlib/influxdata/influxdb/to.go +++ b/flux/stdlib/influxdata/influxdb/to.go @@ -40,7 +40,6 @@ type ToOpSpec struct { func init() { toSignature := runtime.MustLookupBuiltinType("influxdata/influxdb", influxdb.ToKind) runtime.ReplacePackageValue("influxdata/influxdb", "to", flux.MustValue(flux.FunctionValueWithSideEffect(ToKind, createToOpSpec, toSignature))) - flux.RegisterOpSpec(ToKind, func() flux.OperationSpec { return &ToOpSpec{} }) plan.RegisterProcedureSpecWithSideEffect(ToKind, newToProcedure, ToKind) execute.RegisterTransformation(ToKind, createToTransformation) } @@ -279,7 +278,7 @@ func (t *ToTransformation) Process(id execute.DatasetID, tbl flux.Table) error { var fn *execute.RowMapPreparedFn if t.fn != nil { var err error - if fn, err = t.fn.Prepare(columns); err != nil { + if fn, err = t.fn.Prepare(t.Ctx, columns); err != nil { return err } } diff --git a/flux/stdlib/influxdata/influxdb/to_test.go b/flux/stdlib/influxdata/influxdb/to_test.go index bb87ba22d2..e458dedb1e 100644 --- a/flux/stdlib/influxdata/influxdb/to_test.go +++ b/flux/stdlib/influxdata/influxdb/to_test.go @@ -26,27 +26,6 @@ func TestTo_Query(t *testing.T) { { Name: "from with database with range", Raw: `from(bucket:"mydb") |> to(bucket:"myotherdb/autogen")`, - Want: &flux.Spec{ - Operations: []*flux.Operation{ - { - ID: "from0", - Spec: &influxdb.FromOpSpec{ - Bucket: influxdb.NameOrID{Name: "mydb"}, - }, - }, - { - ID: "influx1x/toKind1", - Spec: &influxdb.ToOpSpec{ - Bucket: "myotherdb/autogen", - TimeColumn: "_time", - MeasurementColumn: "_measurement", - }, - }, - }, - Edges: []flux.Edge{ - {Parent: "from0", Child: "influx1x/toKind1"}, - }, - }, }, } for _, tc := range tests { diff --git a/flux/stdlib/influxdata/influxdb/v1/databases.go b/flux/stdlib/influxdata/influxdb/v1/databases.go index d82827a487..b4bbaea1d5 100644 --- a/flux/stdlib/influxdata/influxdb/v1/databases.go +++ b/flux/stdlib/influxdata/influxdb/v1/databases.go @@ -24,7 +24,6 @@ type DatabasesOpSpec struct { func init() { databasesSignature := runtime.MustLookupBuiltinType("influxdata/influxdb/v1", "databases") runtime.ReplacePackageValue("influxdata/influxdb/v1", "databases", flux.MustValue(flux.FunctionValue(DatabasesKind, createDatabasesOpSpec, databasesSignature))) - flux.RegisterOpSpec(DatabasesKind, newDatabasesOp) plan.RegisterProcedureSpec(DatabasesKind, newDatabasesProcedure, DatabasesKind) } diff --git a/flux/stdlib/universe/bare_aggregate_test.flux b/flux/stdlib/universe/bare_aggregate_test.flux index d7b9daf7c4..e2c1bcf3bf 100644 --- a/flux/stdlib/universe/bare_aggregate_test.flux +++ b/flux/stdlib/universe/bare_aggregate_test.flux @@ -49,7 +49,8 @@ testcase bare_count { ,,0,23 ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> count() |> keep(columns: ["_value"]) @@ -69,7 +70,8 @@ testcase bare_sum { ,,0,23938.0 ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> sum() |> keep(columns: ["_value"]) @@ -89,7 +91,8 @@ testcase bare_mean { ,,0,1040.782608696 ", ) - result = testing.loadStorage(csv: input) + result = 
csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> mean() |> keep(columns: ["_value"]) @@ -109,7 +112,8 @@ testcase bare_min { ,,0,2021-01-26T08:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> min() |> keep(columns: ["_time", "_value", "_field", "_measurement"]) @@ -129,7 +133,8 @@ testcase bare_max { ,,0,2019-11-21T08:00:00Z,2187,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> max() |> keep(columns: ["_time", "_value", "_field", "_measurement"]) @@ -149,7 +154,8 @@ testcase bare_first { ,,0,2019-04-11T07:00:00Z,0,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> first() |> keep(columns: ["_time", "_value", "_field", "_measurement"]) @@ -169,7 +175,8 @@ testcase bare_last { ,,0,2021-01-26T08:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> last() |> keep(columns: ["_time", "_value", "_field", "_measurement"]) diff --git a/flux/stdlib/universe/merge_filter_test.flux b/flux/stdlib/universe/merge_filter_test.flux index 7126da86e1..ecf28c8af6 100644 --- a/flux/stdlib/universe/merge_filter_test.flux +++ b/flux/stdlib/universe/merge_filter_test.flux @@ -3,6 +3,7 @@ package universe_test import "testing" import "testing/expect" import "planner" +import "csv" option now = () => (2030-01-01T00:00:00Z) @@ -29,7 +30,8 @@ output = " " merge_filter_fn = () => - testing.loadStorage(csv: input) + csv.from(csv: input) + |> testing.load() |> range(start: 2018-05-22T19:53:26Z) |> filter(fn: (r) => r["_value"] == 1.77) |> filter(fn: (r) => r["_field"] == "load4") @@ -39,5 +41,5 @@ testcase merge_filter { // expect.planner(rules: ["MergeFiltersRule": 1]) result = merge_filter_fn() - testing.diff(got: result, want: testing.loadMem(csv: output)) + testing.diff(got: result, want: csv.from(csv: output)) } diff --git a/flux/stdlib/universe/window_aggregate_by_time_test.flux b/flux/stdlib/universe/window_aggregate_by_time_test.flux index 4e414be746..2ce38f42db 100644 --- a/flux/stdlib/universe/window_aggregate_by_time_test.flux +++ b/flux/stdlib/universe/window_aggregate_by_time_test.flux @@ -51,7 +51,8 @@ testcase windowed_by_time_count { ,,0,2021-01-01T00:00:00Z,1 ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> aggregateWindow(every: 1y, fn: count, timeSrc: "_start", createEmpty: false) |> keep(columns: ["_time", "_value"]) @@ -73,7 +74,8 @@ testcase windowed_by_time_sum { ,,0,2021-01-01T00:00:00Z,-1099.00 ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> aggregateWindow(every: 1y, fn: sum, timeSrc: "_start", createEmpty: false) |> keep(columns: ["_time", "_value"]) @@ -95,7 +97,8 @@ testcase windowed_by_time_mean { ,,0,2021-01-01T00:00:00Z,-1099.00 ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> aggregateWindow(every: 1y, fn: mean, timeSrc: "_start", createEmpty: false) |> keep(columns: ["_time", "_value"]) @@ -117,7 +120,8 @@ testcase windowed_by_time_min { ,,0,2021-01-01T00:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> 
testing.load() |> range(start: -100y) |> aggregateWindow(every: 1y, fn: min, timeSrc: "_start", createEmpty: false) |> keep(columns: ["_time", "_value", "_field", "_measurement"]) @@ -140,7 +144,8 @@ testcase windowed_by_time_max { ,,0,2021-01-01T00:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> aggregateWindow(every: 1y, fn: max, timeSrc: "_start", createEmpty: false) |> keep(columns: ["_time", "_value", "_field", "_measurement"]) @@ -163,7 +168,8 @@ testcase windowed_by_time_first { ,,0,2021-01-01T00:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> aggregateWindow(every: 1y, fn: first, timeSrc: "_start", createEmpty: false) |> keep(columns: ["_time", "_value", "_field", "_measurement"]) @@ -186,7 +192,8 @@ testcase windowed_by_time_last { ,,0,2021-01-01T00:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> aggregateWindow(every: 1y, fn: last, timeSrc: "_start", createEmpty: false) |> keep(columns: ["_time", "_value", "_field", "_measurement"]) diff --git a/flux/stdlib/universe/window_aggregate_test.flux b/flux/stdlib/universe/window_aggregate_test.flux index 11037547e1..8f2ccdea90 100644 --- a/flux/stdlib/universe/window_aggregate_test.flux +++ b/flux/stdlib/universe/window_aggregate_test.flux @@ -51,7 +51,8 @@ testcase windowed_count { ,,2,2021-01-01T00:00:00Z,1 ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> window(every: 1y) |> count() @@ -74,7 +75,8 @@ testcase windowed_sum { ,,2,2021-01-01T00:00:00Z,-1099.00 ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> window(every: 1y) |> sum() @@ -97,7 +99,8 @@ testcase windowed_mean { ,,2,2021-01-01T00:00:00Z,-1099.00 ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> window(every: 1y) |> mean() @@ -120,7 +123,8 @@ testcase windowed_min { ,,0,2021-01-26T08:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> window(every: 1y) |> min() @@ -144,7 +148,8 @@ testcase windowed_max { ,,0,2021-01-26T08:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> window(every: 1y) |> max() @@ -168,7 +173,8 @@ testcase windowed_first { ,,0,2021-01-26T08:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> window(every: 1y) |> first() @@ -192,7 +198,8 @@ testcase windowed_last { ,,0,2021-01-26T08:00:00Z,-1099,bank,pge_bill ", ) - result = testing.loadStorage(csv: input) + result = csv.from(csv: input) + |> testing.load() |> range(start: -100y) |> window(every: 1y) |> last() diff --git a/go.mod b/go.mod index 53f67d5e6e..fe47f8d37f 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/influxdata/influxdb -go 1.18 +go 1.19 require ( collectd.org v0.3.0 @@ -16,7 +16,7 @@ require ( github.com/golang/mock v1.5.0 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.5.7 - github.com/influxdata/flux v0.170.1 + 
github.com/influxdata/flux v0.188.0 github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 github.com/influxdata/influxql v1.1.1-0.20211004132434-7e7d61973256 github.com/influxdata/pkg-config v0.2.11 @@ -25,7 +25,7 @@ require ( github.com/jsternberg/zap-logfmt v1.2.0 github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada - github.com/mattn/go-isatty v0.0.12 + github.com/mattn/go-isatty v0.0.14 github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5 github.com/opentracing/opentracing-go v1.2.0 github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f @@ -37,7 +37,7 @@ require ( github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 github.com/spf13/cast v1.3.0 github.com/spf13/cobra v0.0.3 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.0 github.com/tinylib/msgp v1.1.0 github.com/uber/jaeger-client-go v2.28.0+incompatible github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 @@ -45,12 +45,12 @@ require ( go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 - golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e + golang.org/x/sys v0.0.0-20220412211240-33da011f77ad golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba - golang.org/x/tools v0.1.9 + golang.org/x/tools v0.1.10 google.golang.org/grpc v1.44.0 - google.golang.org/protobuf v1.27.1 + google.golang.org/protobuf v1.28.1 ) require ( @@ -73,7 +73,7 @@ require ( github.com/SAP/go-hdb v0.14.1 // indirect github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 // indirect github.com/aokoli/goutils v1.0.1 // indirect - github.com/apache/arrow/go/v7 v7.0.0 // indirect + github.com/apache/arrow/go/v7 v7.0.1 // indirect github.com/aws/aws-sdk-go v1.30.12 // indirect github.com/aws/aws-sdk-go-v2 v1.11.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect @@ -95,18 +95,19 @@ require ( github.com/dimchansky/utfbom v1.1.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/eclipse/paho.mqtt.golang v1.2.0 // indirect + github.com/fatih/color v1.13.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/gabriel-vasile/mimetype v1.4.0 // indirect github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd // indirect github.com/go-sql-driver/mysql v1.5.0 // indirect - github.com/goccy/go-json v0.7.10 // indirect + github.com/goccy/go-json v0.9.6 // indirect github.com/gofrs/uuid v3.3.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/flatbuffers v2.0.5+incompatible // indirect + github.com/google/flatbuffers v22.9.30-0.20221019131441-5792623df42e+incompatible // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect @@ -115,25 +116,28 @@ require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/influxdata/gosnowflake v1.6.9 // indirect github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040 // indirect + github.com/influxdata/influxdb-iox-client-go v1.0.0-beta.1 // 
indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect + github.com/influxdata/line-protocol/v2 v2.2.1 // indirect github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jstemmer/go-junit-report v0.9.1 // indirect - github.com/klauspost/compress v1.13.6 // indirect + github.com/klauspost/compress v1.14.2 // indirect github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 // indirect github.com/lib/pq v1.0.0 // indirect + github.com/mattn/go-colorable v0.1.9 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-runewidth v0.0.3 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae // indirect github.com/philhofer/fwd v1.0.0 // indirect - github.com/pierrec/lz4/v4 v4.1.11 // indirect + github.com/pierrec/lz4/v4 v4.1.12 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/procfs v0.0.11 // indirect github.com/segmentio/kafka-go v0.2.0 // indirect - github.com/sergi/go-diff v1.0.0 // indirect + github.com/sergi/go-diff v1.1.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/uber-go/tally v3.3.15+incompatible // indirect @@ -143,16 +147,17 @@ require ( github.com/willf/bitset v1.1.9 // indirect go.opencensus.io v0.23.0 // indirect go.uber.org/atomic v1.7.0 // indirect + golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect + golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - gonum.org/v1/gonum v0.9.3 // indirect + golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect + gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/api v0.47.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79 // indirect + google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350 // indirect gopkg.in/yaml.v2 v2.3.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index c37c7d9a7f..602a66099a 100644 --- a/go.sum +++ b/go.sum @@ -133,8 +133,8 @@ github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ= github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= -github.com/apache/arrow/go/v7 v7.0.0 h1:3d+Qgwo/r75bNhC6N0MMzZXQhsOyB0TSn6wljfuBNWo= -github.com/apache/arrow/go/v7 v7.0.0/go.mod h1:vG2y+fH8mEUcX29tM6hOULGE06/XqEI8sG5fANM6T5w= +github.com/apache/arrow/go/v7 v7.0.1 h1:WpCfq+AQxvXaI6/KplHE27MPMFx5av0o5NbPCTAGfy4= +github.com/apache/arrow/go/v7 v7.0.1/go.mod h1:JxDpochJbCVxqbX4G8i1jRqMrnTCQdf8pTccAfLD8Es= 
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= @@ -234,6 +234,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -277,6 +278,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -285,6 +288,10 @@ github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 h1:nLPjjvpUAODOR6vY/7o0hBIk8iTr19Fvmf8aFx/kC7A= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gabriel-vasile/mimetype v1.4.0 h1:Cn9dkdYsMIu56tGho+fqzh7XmvY2YyGU0FnbhiOsEro= github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= @@ -404,8 +411,9 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/goccy/go-json v0.7.10 h1:ulhbuNe1JqE68nMRXXTJRrUu0uhouf0VevLINxQq4Ec= github.com/goccy/go-json v0.7.10/go.mod 
h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.9.6 h1:5/4CtRQdtsX0sal8fdVhTaiMN01Ri8BExZZ8iRmHQ6E= +github.com/goccy/go-json v0.9.6/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -470,8 +478,8 @@ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v2.0.5+incompatible h1:ANsW0idDAXIY+mNHzIHxWRfabV2x5LUEEIIWcwsYgB8= -github.com/google/flatbuffers v2.0.5+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v22.9.30-0.20221019131441-5792623df42e+incompatible h1:Bqgl5d9t2UlT8pv9Oc/lkkI8yYk0jCwHkZKkHzbxEsc= +github.com/google/flatbuffers v22.9.30-0.20221019131441-5792623df42e+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -579,8 +587,8 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= -github.com/influxdata/flux v0.170.1 h1:aP0boTO8WZ1yHVU9v01lVVdtLdFbwigeALIQPXTwbc0= -github.com/influxdata/flux v0.170.1/go.mod h1:fNtcZ8tqtVDjwWYcPRvCdlY5t3n+NYCc5xunKCmigQA= +github.com/influxdata/flux v0.188.0 h1:y9F3SAswnPKkbHWJF/x79IgbwBezlmuqAdXIkzthwIc= +github.com/influxdata/flux v0.188.0/go.mod h1:HdQg0JxHSQhJhEProUY/7QRi9eqnM0HP5L1fH3EtS/c= github.com/influxdata/gosnowflake v1.6.9 h1:BhE39Mmh8bC+Rvd4QQsP2gHypfeYIH1wqW1AjGWxxrE= github.com/influxdata/gosnowflake v1.6.9/go.mod h1:9W/BvCXOKx2gJtQ+jdi1Vudev9t9/UDOEHnlJZ/y1nU= github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU= @@ -588,6 +596,8 @@ github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1: github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040 h1:MBLCfcSsUyFPDJp6T7EoHp/Ph3Jkrm4EuUKLD2rUWHg= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= +github.com/influxdata/influxdb-iox-client-go v1.0.0-beta.1 h1:zDmAiE2o3Y/YZinI6CENzgQueJDuibUB9TWOZC5zCq0= +github.com/influxdata/influxdb-iox-client-go v1.0.0-beta.1/go.mod h1:Chl4pz0SRqoPmEavex4vZaQlunqXqrtEPWAN54THFfo= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= 
github.com/influxdata/influxql v1.1.1-0.20211004132434-7e7d61973256 h1:8io3jjCJ0j9NFvq3/m/rMrDiEILpsfOqWDPItUt/078= @@ -595,6 +605,13 @@ github.com/influxdata/influxql v1.1.1-0.20211004132434-7e7d61973256/go.mod h1:gH github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol-corpus v0.0.0-20210519164801-ca6fa5da0184/go.mod h1:03nmhxzZ7Xk2pdG+lmMd7mHDfeVOYFyhOgwO61qWU98= +github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937 h1:MHJNQ+p99hFATQm6ORoLmpUCF7ovjwEFshs/NHzAbig= +github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937/go.mod h1:BKR9c0uHSmRgM/se9JhFHtTT7JTO67X23MtKMHtZcpo= +github.com/influxdata/line-protocol/v2 v2.0.0-20210312151457-c52fdecb625a/go.mod h1:6+9Xt5Sq1rWx+glMgxhcg2c0DUaehK+5TDcPZ76GypY= +github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxksNj7PX9aUSeYOYE/ceHY= +github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE= +github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM= github.com/influxdata/pkg-config v0.2.11 h1:RDlWAvkTARzPRGChq34x179TYlRndq8OU5Ro80E9g3Q= github.com/influxdata/pkg-config v0.2.11/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= @@ -643,9 +660,11 @@ github.com/klauspost/asmfmt v1.3.1/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= +github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10= @@ -659,8 +678,9 @@ github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= @@ -684,8 +704,9 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -694,8 +715,9 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -742,6 +764,7 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -785,9 +808,9 @@ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0 github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.9/go.mod 
h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.11 h1:LVs17FAZJFOjgmJXl9Tf13WfLUvZq7/RjfEJrnwZ9OE= github.com/pierrec/lz4/v4 v4.1.11/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.12 h1:44l88ehTZAUGW4VlO1QC4zkilL99M6Y9MXNwEs0uzP8= +github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -853,8 +876,9 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= @@ -887,16 +911,20 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= @@ -934,7 +962,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/zeebo/xxh3 v0.13.0/go.mod h1:AQY73TOrhF3jNsdiM9zZOb8MThrYbZONHj7ryDBaLpg= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zeebo/xxh3 v1.0.1/go.mod h1:8VHV24/3AZLn3b6Mlp/KuC33LWH687Wq6EnziEB+rsA= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -1001,6 +1030,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1018,8 +1048,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20211028214138-64b4c8e87d1a/go.mod h1:a3o/VtDNHN+dCVLEpzjjUHOzR+Ln3DHX056ZPzoZGGA= golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 h1:/5Bs7sWi0i3rOVO5KnM55OwugpsD4bRW1zywKoZjbkI= +golang.org/x/exp v0.0.0-20211216164055-b2b84827b756/go.mod h1:b9TAUYHmRtqA6klRHApnXMnj+OyLce4yF5cZCUbk2ps= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1055,9 +1085,10 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1-0.20210830214625-1b1db11ec8f4/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod 
v0.6.0-dev.0.20211013180041-c96bc1413d57 h1:LQmS1nU0twXLA96Kt7U9qtHJEbBk3z6Q0V4UXjZkpr4= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1113,6 +1144,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= @@ -1200,7 +1232,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200727154430-2d971f7391a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1225,10 +1256,11 @@ golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad 
h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1328,19 +1360,22 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3 h1:DnoIG+QAMaF5NvxnGe/oKsgKcAc6PcUyl8q0VetfQ8s= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= @@ -1428,8 +1463,9 @@ google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQ google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210601144548-a796c710e9b6/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79 
h1:s1jFTXJryg4a1mew7xv03VZD8N9XjxFhk1o4Js4WvPQ= google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350 h1:YxHp5zqIcAShDEvRr5/0rVESVS+njYF68PSdazrNLJo= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1460,6 +1496,7 @@ google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= @@ -1475,12 +1512,14 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -1503,6 +1542,7 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools 
v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/cmd/fluxtest-harness-influxdb/test.go b/internal/cmd/fluxtest-harness-influxdb/test.go index edca36a0ce..58401fc84b 100644 --- a/internal/cmd/fluxtest-harness-influxdb/test.go +++ b/internal/cmd/fluxtest-harness-influxdb/test.go @@ -15,7 +15,6 @@ import ( "github.com/influxdata/flux/ast" "github.com/influxdata/flux/cmd/flux/cmd" "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/execute/table" "github.com/influxdata/flux/parser" fluxClient "github.com/influxdata/influxdb/flux/client" "github.com/influxdata/influxdb/tests" @@ -47,11 +46,11 @@ func (t *testExecutor) Close() error { // Run executes an e2e test case for every supported index type. // On failure, logs collected from the server will be printed to stderr. -func (t *testExecutor) Run(pkg *ast.Package) error { +func (t *testExecutor) Run(pkg *ast.Package, fn cmd.TestResultFunc) error { var failed bool for _, idx := range []string{"inmem", "tsi1"} { logOut := &bytes.Buffer{} - if err := t.run(pkg, idx, logOut); err != nil { + if err := t.run(pkg, idx, logOut, fn); err != nil { failed = true _, _ = fmt.Fprintf(os.Stderr, "Failed for index %s:\n%v\n", idx, err) _, _ = io.Copy(os.Stderr, logOut) @@ -66,12 +65,13 @@ func (t *testExecutor) Run(pkg *ast.Package) error { // run executes an e2e test case against a specific index type. // Server logs will be written to the specified logOut writer, for reporting. -func (t *testExecutor) run(pkg *ast.Package, index string, logOut io.Writer) error { +func (t *testExecutor) run(pkg *ast.Package, index string, logOut io.Writer, fn cmd.TestResultFunc) error { _, _ = fmt.Fprintf(os.Stderr, "Testing %s...\n", index) config := tests.NewConfig() config.HTTPD.FluxEnabled = true config.HTTPD.FluxLogEnabled = true + config.HTTPD.FluxTesting = true config.Data.Index = index s := tests.NewServer(config) @@ -99,10 +99,22 @@ func (t *testExecutor) run(pkg *ast.Package, index string, logOut io.Writer) err // During the first execution, we are performing the writes // that are in the testcase. We do not care about errors. - _ = t.executeWithOptions(bucketOpt, t.writeOptAST, pkg, s.URL(), logOut, false) + _ = t.executeWithOptions(bucketOpt, t.writeOptAST, pkg, s.URL(), logOut, + func(ctx context.Context, results flux.ResultIterator) error { + for results.More() { + res := results.Next() + if err := res.Tables().Do(func(table flux.Table) error { + table.Done() + return nil + }); err != nil { + return err + } + } + return nil + }) // Execute the read pass. - return t.executeWithOptions(bucketOpt, t.readOptAST, pkg, s.URL(), logOut, true) + return t.executeWithOptions(bucketOpt, t.readOptAST, pkg, s.URL(), logOut, fn) } // executeWithOptions runs a Flux query against a running server via the HTTP API. @@ -114,7 +126,7 @@ func (t *testExecutor) executeWithOptions( pkg *ast.Package, serverUrl string, logOut io.Writer, - checkOutput bool, + fn cmd.TestResultFunc, ) error { options := optionsAST.Copy().(*ast.File) options.Body = append([]ast.Statement{bucketOpt}, options.Body...) @@ -164,31 +176,7 @@ func (t *testExecutor) executeWithOptions( } defer r.Release() - wasDiff := false - if checkOutput { - for r.More() { - wasDiff = true - v := r.Next() - if err := v.Tables().Do(func(tbl flux.Table) error { - // The data returned here is the result of `testing.diff`, so any result means that - // a comparison of two tables showed inequality. Capture that inequality as part of the error. 
- // XXX: rockstar (08 Dec 2020) - This could use some ergonomic work, as the diff testOutput - // is not exactly "human readable." - _, _ = fmt.Fprintln(logOut, table.Stringify(tbl)) - return nil - }); err != nil { - return err - } - } - } - r.Release() - if err := r.Err(); err != nil { - return err - } - if wasDiff { - return errors.New("test failed - diff table in output") - } - return nil + return fn(t.ctx, r) } // This options definition puts to() in the path of the CSV input. The tests @@ -198,8 +186,8 @@ const writeOptSource = ` import "testing" import c "csv" -option testing.loadStorage = (csv) => { - return c.from(csv: csv) |> to(bucket: bucket) +option testing.load = (tables=<-) => { + return tables |> to(bucket: bucket) } ` @@ -210,7 +198,7 @@ const readOptSource = ` import "testing" import c "csv" -option testing.loadStorage = (csv) => { +option testing.load = (tables=<-) => { return from(bucket: bucket) } ` diff --git a/kit/platform/errors/errors.go b/kit/platform/errors/errors.go index 8a3a1a3d98..3d3f7e7366 100644 --- a/kit/platform/errors/errors.go +++ b/kit/platform/errors/errors.go @@ -39,24 +39,31 @@ const ( // further help operators. // // To create a simple error, -// &Error{ -// Code:ENotFound, -// } +// +// &Error{ +// Code:ENotFound, +// } +// // To show where the error happens, add Op. -// &Error{ -// Code: ENotFound, -// Op: "bolt.FindUserByID" -// } +// +// &Error{ +// Code: ENotFound, +// Op: "bolt.FindUserByID" +// } +// // To show an error with a unpredictable value, add the value in Msg. -// &Error{ -// Code: EConflict, -// Message: fmt.Sprintf("organization with name %s already exist", aName), -// } +// +// &Error{ +// Code: EConflict, +// Message: fmt.Sprintf("organization with name %s already exist", aName), +// } +// // To show an error wrapped with another error. -// &Error{ -// Code:EInternal, -// Err: err, -// }. +// +// &Error{ +// Code:EInternal, +// Err: err, +// }. type Error struct { Code string Msg string diff --git a/kit/tracing/tracing.go b/kit/tracing/tracing.go index fa82af3d61..9b05efae39 100644 --- a/kit/tracing/tracing.go +++ b/kit/tracing/tracing.go @@ -19,7 +19,8 @@ import ( // LogError adds a span log for an error. // Returns unchanged error, so useful to wrap as in: -// return 0, tracing.LogError(err) +// +// return 0, tracing.LogError(err) func LogError(span opentracing.Span, err error) error { if err == nil { return nil @@ -115,24 +116,25 @@ func (s *Span) Finish() { // Context without parent span reference triggers root span construction. // This function never returns nil values. // -// Performance +// # Performance // // This function incurs a small performance penalty, roughly 1000 ns/op, 376 B/op, 6 allocs/op. // Jaeger timestamp and duration precision is only µs, so this is pretty negligible. 
// -// Alternatives +// # Alternatives // // If this performance penalty is too much, try these, which are also demonstrated in benchmark tests: -// // Create a root span -// span := opentracing.StartSpan("operation name") -// ctx := opentracing.ContextWithSpan(context.Background(), span) // -// // Create a child span -// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc)) -// ctx := opentracing.ContextWithSpan(context.Background(), span) +// // Create a root span +// span := opentracing.StartSpan("operation name") +// ctx := opentracing.ContextWithSpan(context.Background(), span) // -// // Sugar to create a child span -// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name") +// // Create a child span +// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc)) +// ctx := opentracing.ContextWithSpan(context.Background(), span) +// +// // Sugar to create a child span +// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name") func StartSpanFromContext(ctx context.Context, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { if ctx == nil { panic("StartSpanFromContext called with nil context") diff --git a/monitor/diagnostics/diagnostics.go b/monitor/diagnostics/diagnostics.go index f99076d606..32c5aa4731 100644 --- a/monitor/diagnostics/diagnostics.go +++ b/monitor/diagnostics/diagnostics.go @@ -23,9 +23,9 @@ func (f ClientFunc) Diagnostics() (*Diagnostics, error) { // the values for each column, by row. This information is never written to an InfluxDB // system and is display-only. An example showing, say, connections follows: // -// source_ip source_port dest_ip dest_port -// 182.1.0.2 2890 127.0.0.1 38901 -// 174.33.1.2 2924 127.0.0.1 38902 +// source_ip source_port dest_ip dest_port +// 182.1.0.2 2890 127.0.0.1 38901 +// 174.33.1.2 2924 127.0.0.1 38902 type Diagnostics struct { Columns []string Rows [][]interface{} diff --git a/pkg/encoding/simple8b/encoding.go b/pkg/encoding/simple8b/encoding.go index f029df217c..97214bb367 100644 --- a/pkg/encoding/simple8b/encoding.go +++ b/pkg/encoding/simple8b/encoding.go @@ -468,8 +468,9 @@ func Decode(dst *[240]uint64, v uint64) (n int, err error) { // Decode writes the uncompressed values from src to dst. It returns the number // of values written or an error. -//go:nocheckptr // nocheckptr while the underlying struct layout doesn't change +// +//go:nocheckptr func DecodeAll(dst, src []uint64) (value int, err error) { j := 0 for _, v := range src { @@ -482,8 +483,9 @@ func DecodeAll(dst, src []uint64) (value int, err error) { // DecodeBytesBigEndian writes the compressed, big-endian values from src to dst. It returns the number // of values written or an error. -//go:nocheckptr // nocheckptr while the underlying struct layout doesn't change +// +//go:nocheckptr func DecodeBytesBigEndian(dst []uint64, src []byte) (value int, err error) { if len(src)&7 != 0 { return 0, errors.New("src length is not multiple of 8") diff --git a/pkg/errors/error_capture.go b/pkg/errors/error_capture.go index d2e9b7646a..7ecb105457 100644 --- a/pkg/errors/error_capture.go +++ b/pkg/errors/error_capture.go @@ -2,11 +2,12 @@ package errors // Capture is a wrapper function which can be used to capture errors from closing via a defer. // An example: -// func Example() (err error) { -// f, _ := os.Open(...) -// defer errors.Capture(&err, f.Close)() -// ... -// return +// +// func Example() (err error) { +// f, _ := os.Open(...) +// defer errors.Capture(&err, f.Close)() +// ... 
+// return // // Doing this will result in the error from the f.Close() call being // put in the error via a ptr, if the error is not nil diff --git a/pkg/estimator/hll/hll.go b/pkg/estimator/hll/hll.go index 047bdc5c78..96414d59fe 100644 --- a/pkg/estimator/hll/hll.go +++ b/pkg/estimator/hll/hll.go @@ -4,10 +4,10 @@ // // The differences are that the implementation in this package: // -// * uses an AMD64 optimised xxhash algorithm instead of murmur; -// * uses some AMD64 optimisations for things like clz; -// * works with []byte rather than a Hash64 interface, to reduce allocations; -// * implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler +// - uses an AMD64 optimised xxhash algorithm instead of murmur; +// - uses some AMD64 optimisations for things like clz; +// - works with []byte rather than a Hash64 interface, to reduce allocations; +// - implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler // // Based on some rough benchmarking, this implementation of HyperLogLog++ is // around twice as fast as the github.com/clarkduvall/hyperloglog implementation. diff --git a/pkg/tar/stream.go b/pkg/tar/stream.go index 289bb61e8c..6c843bd599 100644 --- a/pkg/tar/stream.go +++ b/pkg/tar/stream.go @@ -62,7 +62,7 @@ func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Write return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw) } -/// Stream a single file to tw, using tarHeaderFileName instead of the actual filename +// / Stream a single file to tw, using tarHeaderFileName instead of the actual filename // e.g., when we want to write a *.tmp file using the original file's non-tmp name. func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error { h, err := tar.FileInfoHeader(f, f.Name()) diff --git a/pkg/tracing/doc.go b/pkg/tracing/doc.go index 4e7b582d63..36bf889a14 100644 --- a/pkg/tracing/doc.go +++ b/pkg/tracing/doc.go @@ -3,7 +3,7 @@ Package tracing provides a way for capturing hierarchical traces. To start a new trace with a root span named select - trace, span := tracing.NewTrace("select") + trace, span := tracing.NewTrace("select") It is recommended that a span be forwarded to callees using the context package. Firstly, create a new context with the span associated @@ -21,6 +21,5 @@ Once the trace is complete, it may be converted to a graph with the Tree method. The tree is intended to be used with the Walk function in order to generate different presentations. The default Tree#String method returns a tree. - */ package tracing diff --git a/pkg/tracing/fields/field.go b/pkg/tracing/fields/field.go index 38e49071ed..bc96be1fbf 100644 --- a/pkg/tracing/fields/field.go +++ b/pkg/tracing/fields/field.go @@ -50,7 +50,7 @@ func Bool(key string, val bool) Field { } } -/// Int64 adds an int64-valued key:value pair to a Span.LogFields() record +// / Int64 adds an int64-valued key:value pair to a Span.LogFields() record func Int64(key string, val int64) Field { return Field{ key: key, diff --git a/pkg/tracing/wire/binary.go b/pkg/tracing/wire/binary.go index 1178a85a0e..d64abcb48d 100644 --- a/pkg/tracing/wire/binary.go +++ b/pkg/tracing/wire/binary.go @@ -1,6 +1,5 @@ /* Package wire is used to serialize a trace. - */ package wire diff --git a/pkg/tracing/wire/binary.pb.go b/pkg/tracing/wire/binary.pb.go index b440a32c4b..800386e577 100644 --- a/pkg/tracing/wire/binary.pb.go +++ b/pkg/tracing/wire/binary.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc v3.17.3 // source: binary.proto @@ -276,6 +276,7 @@ type Field struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` FieldType FieldType `protobuf:"varint,2,opt,name=FieldType,proto3,enum=wire.FieldType" json:"FieldType,omitempty"` // Types that are assignable to Value: + // // *Field_NumericVal // *Field_StringVal Value isField_Value `protobuf_oneof:"value"` diff --git a/query/encode.go b/query/encode.go index 7b8af77812..86e36e26c9 100644 --- a/query/encode.go +++ b/query/encode.go @@ -60,9 +60,11 @@ func (e *NoContentEncoder) Encode(w io.Writer, results flux.ResultIterator) (int // Otherwise one can decode the response body to get the error. For example: // ``` // _, err = csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(bytes.NewReader(res)) -// if err != nil { -// // we got some runtime error -// } +// +// if err != nil { +// // we got some runtime error +// } +// // ``` type NoContentWithErrorDialect struct { csv.ResultEncoderConfig diff --git a/query/functions.go b/query/functions.go index a76db8080f..bae4a89b07 100644 --- a/query/functions.go +++ b/query/functions.go @@ -1212,8 +1212,8 @@ func (r *UnsignedCumulativeSumReducer) Emit() []UnsignedPoint { // FloatHoltWintersReducer forecasts a series into the future. // This is done using the Holt-Winters damped method. -// 1. Using the series the initial values are calculated using a SSE. -// 2. The series is forecasted into the future using the iterative relations. +// 1. Using the series the initial values are calculated using a SSE. +// 2. The series is forecasted into the future using the iterative relations. type FloatHoltWintersReducer struct { // Season period m int diff --git a/query/internal/internal.pb.go b/query/internal/internal.pb.go index 80fcd2f87c..775eaa9157 100644 --- a/query/internal/internal.pb.go +++ b/query/internal/internal.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc v3.17.3 // source: internal/internal.proto diff --git a/query/iterator.gen.go b/query/iterator.gen.go index c2cda3f396..cc97c92b0f 100644 --- a/query/iterator.gen.go +++ b/query/iterator.gen.go @@ -403,11 +403,10 @@ func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) { // floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems. // Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -// +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. type floatSortedMergeHeap struct { opt IteratorOptions items []*floatSortedMergeHeapItem @@ -3067,11 +3066,10 @@ func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) { // integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems. // Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -// +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. type integerSortedMergeHeap struct { opt IteratorOptions items []*integerSortedMergeHeapItem @@ -5731,11 +5729,10 @@ func (itr *unsignedSortedMergeIterator) pop() (*UnsignedPoint, error) { // unsignedSortedMergeHeap represents a heap of unsignedSortedMergeHeapItems. 
// Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -// +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. type unsignedSortedMergeHeap struct { opt IteratorOptions items []*unsignedSortedMergeHeapItem @@ -8395,11 +8392,10 @@ func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) { // stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems. // Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -// +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. type stringSortedMergeHeap struct { opt IteratorOptions items []*stringSortedMergeHeapItem @@ -11045,11 +11041,10 @@ func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) { // booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems. // Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -// +// - By their measurement name; +// - By their tag keys/values; +// - By time; or +// - By their Aux field values. type booleanSortedMergeHeap struct { opt IteratorOptions items []*booleanSortedMergeHeapItem diff --git a/services/graphite/parser.go b/services/graphite/parser.go index c76b099ab7..2054c99aaf 100644 --- a/services/graphite/parser.go +++ b/services/graphite/parser.go @@ -396,16 +396,18 @@ type nodes []*node // less than a non-wildcard value. // // For example, the filters: -// "*.*" -// "servers.*" -// "servers.localhost" -// "*.localhost" +// +// "*.*" +// "servers.*" +// "servers.localhost" +// "*.localhost" // // Would be sorted as: -// "servers.localhost" -// "servers.*" -// "*.localhost" -// "*.*" +// +// "servers.localhost" +// "servers.*" +// "*.localhost" +// "*.*" func (n *nodes) Less(j, k int) bool { if (*n)[j].value == "*" && (*n)[k].value != "*" { return false diff --git a/services/httpd/config.go b/services/httpd/config.go index e461dafb2b..560b493607 100644 --- a/services/httpd/config.go +++ b/services/httpd/config.go @@ -40,6 +40,7 @@ type Config struct { WriteTracing bool `toml:"write-tracing"` FluxEnabled bool `toml:"flux-enabled"` FluxLogEnabled bool `toml:"flux-log-enabled"` + FluxTesting bool `toml:"-"` PprofEnabled bool `toml:"pprof-enabled"` PprofAuthEnabled bool `toml:"pprof-auth-enabled"` DebugPprofEnabled bool `toml:"debug-pprof-enabled"` @@ -72,6 +73,7 @@ func NewConfig() Config { Enabled: true, FluxEnabled: false, FluxLogEnabled: false, + FluxTesting: false, BindAddress: DefaultBindAddress, LogEnabled: true, PprofEnabled: true, diff --git a/services/httpd/handler.go b/services/httpd/handler.go index 3a2ffe08c7..587c39dd9f 100644 --- a/services/httpd/handler.go +++ b/services/httpd/handler.go @@ -872,7 +872,6 @@ func (h *Handler) async(q *influxql.Query, results <-chan *query.Result) { // in the database URL query value. It is encoded using a forward slash like // "database/retentionpolicy" and we should be able to simply split that string // on the forward slash. -// func bucket2dbrp(bucket string) (string, string, error) { // test for a slash in our bucket name. 
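// As the doc comment above notes, the v2 write API encodes its target as
// "database/retentionpolicy" and bucket2dbrp simply splits that string on the
// first forward slash. A minimal, standalone sketch of that split (illustrative
// only, not the handler's actual logic, which switches on the slash index just
// below; strings.Cut requires Go 1.18+):
//
//	db, rp, found := strings.Cut("mydb/autogen", "/")
//	// db == "mydb", rp == "autogen", found == true
//	// a bucket without a slash yields found == false and rp == ""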
switch idx := strings.IndexByte(bucket, '/'); idx { diff --git a/services/httpd/pprof.go b/services/httpd/pprof.go index 01ed2844af..846d1132c9 100644 --- a/services/httpd/pprof.go +++ b/services/httpd/pprof.go @@ -35,17 +35,17 @@ func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) { } // archiveProfilesAndQueries collects the following profiles: -// - goroutine profile -// - heap profile -// - blocking profile -// - mutex profile -// - (optionally) CPU profile +// - goroutine profile +// - heap profile +// - blocking profile +// - mutex profile +// - (optionally) CPU profile // // It also collects the following query results: // -// - SHOW SHARDS -// - SHOW STATS -// - SHOW DIAGNOSTICS +// - SHOW SHARDS +// - SHOW STATS +// - SHOW DIAGNOSTICS // // All information is added to a tar archive and then compressed, before being // returned to the requester as an archive file. Where profiles support debug @@ -60,7 +60,6 @@ func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) { // // The value after the `cpu` query parameter is not actually important, as long // as there is something there. -// func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Request) { // prof describes a profile name and a debug value, or in the case of a CPU // profile, the number of seconds to collect the profile for. diff --git a/services/httpd/response_logger.go b/services/httpd/response_logger.go index 024ba10b44..780b19053d 100644 --- a/services/httpd/response_logger.go +++ b/services/httpd/response_logger.go @@ -78,7 +78,8 @@ func redactPassword(r *http.Request) { // in addition to the common fields, we also append referrer, user agent, // request ID and response time (microseconds) // ie, in apache mod_log_config terms: -// %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" %L %D +// +// %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" %L %D func buildLogLine(l *responseLogger, r *http.Request, start time.Time) string { redactPassword(r) diff --git a/services/httpd/v2_write_test.go b/services/httpd/v2_write_test.go index 66cc819e61..246a589d59 100644 --- a/services/httpd/v2_write_test.go +++ b/services/httpd/v2_write_test.go @@ -4,7 +4,6 @@ import "testing" // test of how we extract the database and retention policy from the bucket in // our v2 api enpoint. -// func TestV2DatabaseRetentionPolicyMapper(t *testing.T) { tests := map[string]struct { input string diff --git a/services/meta/client.go b/services/meta/client.go index 26ec5c4717..4497b54e5c 100644 --- a/services/meta/client.go +++ b/services/meta/client.go @@ -209,7 +209,6 @@ func (c *Client) CreateDatabase(name string) (*DatabaseInfo, error) { // This call is only idempotent when the caller provides the exact same // retention policy, and that retention policy is already the default for the // database. -// func (c *Client) CreateDatabaseWithRetentionPolicy(name string, spec *RetentionPolicySpec) (*DatabaseInfo, error) { if spec == nil { return nil, errors.New("CreateDatabaseWithRetentionPolicy called with nil spec") diff --git a/services/meta/internal/meta.pb.go b/services/meta/internal/meta.pb.go index 0dc03ad46d..cbdef9d842 100644 --- a/services/meta/internal/meta.pb.go +++ b/services/meta/internal/meta.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc v3.17.3 // source: internal/meta.proto diff --git a/services/opentsdb/service.go b/services/opentsdb/service.go index af08a2d3fb..026f3dda8f 100644 --- a/services/opentsdb/service.go +++ b/services/opentsdb/service.go @@ -346,7 +346,8 @@ func (s *Service) handleConn(conn net.Conn) { // handleTelnetConn accepts OpenTSDB's telnet protocol. // Each telnet command consists of a line of the form: -// put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0 +// +// put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0 func (s *Service) handleTelnetConn(conn net.Conn) { defer conn.Close() defer atomic.AddInt64(&s.stats.ActiveTelnetConnections, -1) diff --git a/services/snapshotter/service.go b/services/snapshotter/service.go index 910e684886..9987253059 100644 --- a/services/snapshotter/service.go +++ b/services/snapshotter/service.go @@ -412,7 +412,6 @@ func (s *Service) writeRetentionPolicyInfo(conn net.Conn, database, retentionPol // the json buffer and the conn. // // we return that buffer sans the newline at the beginning. -// func (s *Service) readRequest(r io.Reader) (*Request, []byte, error) { var req Request d := json.NewDecoder(r) diff --git a/services/storage/predicate_influxql.go b/services/storage/predicate_influxql.go index 2bdea38cd9..d732d94091 100644 --- a/services/storage/predicate_influxql.go +++ b/services/storage/predicate_influxql.go @@ -33,10 +33,9 @@ func RewriteExprRemoveFieldKeyAndValue(expr influxql.Expr) influxql.Expr { // // This condition is determined when the following is true: // -// * there is only one occurrence of the tag key `_measurement`. -// * there are no OR operators in the expression tree. -// * the operator for the `_measurement` binary expression is ==. -// +// - there is only one occurrence of the tag key `_measurement`. +// - there are no OR operators in the expression tree. +// - the operator for the `_measurement` binary expression is ==. func HasSingleMeasurementNoOR(expr influxql.Expr) (string, bool) { var lastMeasurement string foundOnce := true diff --git a/services/storage/source.pb.go b/services/storage/source.pb.go index 5d37e2fbe2..9a92a4ad2c 100644 --- a/services/storage/source.pb.go +++ b/services/storage/source.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc v3.17.3 // source: source.proto diff --git a/storage/reads/array_cursor.gen.go b/storage/reads/array_cursor.gen.go index 85e17cacb5..a7e477c176 100644 --- a/storage/reads/array_cursor.gen.go +++ b/storage/reads/array_cursor.gen.go @@ -296,6 +296,8 @@ func (c *floatMultiShardArrayCursor) reset(cur cursors.FloatArrayCursor, itrs cu if cond != nil { if c.filter == nil { c.filter = newFloatFilterArrayCursor(cond) + } else { + c.filter.cond = cond } c.filter.reset(cur) cur = c.filter @@ -1299,6 +1301,8 @@ func (c *integerMultiShardArrayCursor) reset(cur cursors.IntegerArrayCursor, itr if cond != nil { if c.filter == nil { c.filter = newIntegerFilterArrayCursor(cond) + } else { + c.filter.cond = cond } c.filter.reset(cur) cur = c.filter @@ -2302,6 +2306,8 @@ func (c *unsignedMultiShardArrayCursor) reset(cur cursors.UnsignedArrayCursor, i if cond != nil { if c.filter == nil { c.filter = newUnsignedFilterArrayCursor(cond) + } else { + c.filter.cond = cond } c.filter.reset(cur) cur = c.filter @@ -3305,6 +3311,8 @@ func (c *stringMultiShardArrayCursor) reset(cur cursors.StringArrayCursor, itrs if cond != nil { if c.filter == nil { c.filter = newStringFilterArrayCursor(cond) + } else { + c.filter.cond = cond } c.filter.reset(cur) cur = c.filter @@ -3730,6 +3738,8 @@ func (c *booleanMultiShardArrayCursor) reset(cur cursors.BooleanArrayCursor, itr if cond != nil { if c.filter == nil { c.filter = newBooleanFilterArrayCursor(cond) + } else { + c.filter.cond = cond } c.filter.reset(cur) cur = c.filter diff --git a/storage/reads/array_cursor.gen.go.tmpl b/storage/reads/array_cursor.gen.go.tmpl index 58686f6244..adc3801e9b 100644 --- a/storage/reads/array_cursor.gen.go.tmpl +++ b/storage/reads/array_cursor.gen.go.tmpl @@ -242,6 +242,8 @@ func (c *{{.name}}MultiShardArrayCursor) reset(cur cursors.{{.Name}}ArrayCursor, if cond != nil { if c.filter == nil { c.filter = new{{.Name}}FilterArrayCursor(cond) + } else { + c.filter.cond = cond } c.filter.reset(cur) cur = c.filter diff --git a/storage/reads/datatypes/predicate.pb.go b/storage/reads/datatypes/predicate.pb.go index 36191ed8a6..77ed8aa8a3 100644 --- a/storage/reads/datatypes/predicate.pb.go +++ b/storage/reads/datatypes/predicate.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc v3.17.3 // source: predicate.proto @@ -200,6 +200,7 @@ type Node struct { NodeType Node_Type `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=influxdata.platform.storage.Node_Type" json:"node_type,omitempty"` Children []*Node `protobuf:"bytes,2,rep,name=children,proto3" json:"children,omitempty"` // Types that are assignable to Value: + // // *Node_StringValue // *Node_BooleanValue // *Node_IntegerValue diff --git a/storage/reads/datatypes/storage_common.pb.go b/storage/reads/datatypes/storage_common.pb.go index cfe3745a0a..3c568790e9 100644 --- a/storage/reads/datatypes/storage_common.pb.go +++ b/storage/reads/datatypes/storage_common.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc v3.17.3 // source: storage_common.proto @@ -1114,6 +1114,7 @@ type ReadResponse_Frame struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Data: + // // *ReadResponse_Frame_Group // *ReadResponse_Frame_Series // *ReadResponse_Frame_FloatPoints @@ -1625,6 +1626,7 @@ type ReadResponse_AnyPoints struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Data: + // // *ReadResponse_AnyPoints_Floats // *ReadResponse_AnyPoints_Integers // *ReadResponse_AnyPoints_Unsigneds diff --git a/storage/reads/group_resultset_test.go b/storage/reads/group_resultset_test.go index 29269c4723..e0c4aef7fc 100644 --- a/storage/reads/group_resultset_test.go +++ b/storage/reads/group_resultset_test.go @@ -151,13 +151,13 @@ group: group: tag key : _m,tag0,tag1 partition key: val11 - series: _m=cpu,tag0=val01,tag1=val11 series: _m=cpu,tag0=val00,tag1=val11 + series: _m=cpu,tag0=val01,tag1=val11 group: tag key : _m,tag0,tag1 partition key: val12 - series: _m=cpu,tag0=val01,tag1=val12 series: _m=cpu,tag0=val00,tag1=val12 + series: _m=cpu,tag0=val01,tag1=val12 group: tag key : _m,tag0 partition key: @@ -270,7 +270,6 @@ group: sb := new(strings.Builder) GroupResultSetToString(sb, rs, SkipNilCursor()) - if got := sb.String(); !cmp.Equal(got, tt.exp) { t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(strings.Split(got, "\n"), strings.Split(tt.exp, "\n"))) } @@ -347,8 +346,8 @@ group: exp: `group: tag key : _m,tag1,tag2 partition key: ,val20 - series: _m=mem,tag1=val11,tag2=val20 series: _m=mem,tag1=val10,tag2=val20 + series: _m=mem,tag1=val11,tag2=val20 group: tag key : _m,tag1,tag2 partition key: ,val21 @@ -356,10 +355,10 @@ group: group: tag key : _m,tag0,tag1 partition key: val00, + series: _m=aaa,tag0=val00 series: _m=cpu,tag0=val00,tag1=val10 series: _m=cpu,tag0=val00,tag1=val11 series: _m=cpu,tag0=val00,tag1=val12 - series: _m=aaa,tag0=val00 group: tag key : _m,tag0 partition key: val01, diff --git a/test-flux.sh b/test-flux.sh index df13e45480..7de3d48cbd 100755 --- a/test-flux.sh +++ b/test-flux.sh @@ -31,15 +31,72 @@ build_test_harness() { "$GO" build -o fluxtest ./internal/cmd/fluxtest-harness-influxdb } -# Many tests targeting 3rd party databases are not yet supported in CI and should be filtered out. 
-DB_INTEGRATION_WRITE_TESTS=integration_mqtt_pub,integration_sqlite_write_to,integration_vertica_write_to,integration_mssql_write_to,integration_mysql_write_to,integration_mariadb_write_to,integration_pg_write_to,integration_hdb_write_to -DB_INTEGRATION_READ_TESTS=integration_sqlite_read_from_seed,integration_sqlite_read_from_nonseed,integration_vertica_read_from_seed,integration_vertica_read_from_nonseed,integration_mssql_read_from_seed,integration_mssql_read_from_nonseed,integration_mariadb_read_from_seed,integration_mariadb_read_from_nonseed,integration_mysql_read_from_seed,integration_mysql_read_from_nonseed,integration_pg_read_from_seed,integration_pg_read_from_nonseed,integration_hdb_read_from_seed,integration_hdb_read_from_nonseed -DB_INTEGRATION_INJECTION_TESTS="integration_sqlite_injection,integration_hdb_injection,integration_pg_injection,integration_mysql_injection,integration_mariadb_injection,integration_mssql_injection" -DB_TESTS="${DB_INTEGRATION_WRITE_TESTS},${DB_INTEGRATION_READ_TESTS},${DB_INTEGRATION_INJECTION_TESTS}" +skipped_tests() { + doc=$(cat < first (and last) +push_down_group_one_tag_first +push_down_group_all_filter_field_first +push_down_group_one_tag_filter_field_first +push_down_group_one_tag_last +push_down_group_all_filter_field_last +push_down_group_one_tag_filter_field_last + +windowed_by_time_count # TODO(bnpfeife) broken by flux@05a1065f, OptimizeAggregateWindow +windowed_by_time_sum # TODO(bnpfeife) broken by flux@05a1065f, OptimizeAggregateWindow +windowed_by_time_mean # TODO(bnpfeife) broken by flux@05a1065f, OptimizeAggregateWindow +ENDSKIPS +) + echo "$doc" | sed '/^[[:space:]]*$/d' | sed 's/[[:space:]]*#.*$//' | tr '\n' ',' | sed 's/,$//' +} run_integration_tests() { log "Running integration tests..." - ./fluxtest -v -p flux.zip -p flux/stdlib --skip "$DB_TESTS" + ./fluxtest \ + -v \ + -p flux.zip \ + -p flux/ \ + --skip "$(skipped_tests)" } cleanup() { diff --git a/tests/server_test.go b/tests/server_test.go index 665488fc96..4599191e3b 100644 --- a/tests/server_test.go +++ b/tests/server_test.go @@ -10153,375 +10153,6 @@ func TestGroupByEndToEnd(t *testing.T) { assert.Equal(t, `{"results":[{"statement_id":0,"series":[{"name":"m0","columns":["time","scount"],"values":[["2021-05-10T00:00:00Z",10],["2021-05-11T00:00:00Z",5],["2021-05-12T00:00:00Z",3],["2021-05-13T00:00:00Z",7],["2021-05-14T00:00:00Z",4],["2021-05-15T00:00:00Z",null]]}]}]}`, results) } -func TestFluxBasicEndToEnd(t *testing.T) { - config := NewConfig() - config.HTTPD.FluxEnabled = true - s := OpenServer(config) - defer s.Close() - - s.CreateDatabase(t.Name()) - defer s.DropDatabase(t.Name()) - u, err := url.Parse(s.URL()) - assert.NoError(t, err) - u.Path = "/api/v2/query" - httpClient := &http.Client{} - - { - // Query with json body - query := fluxClient.QueryRequest{}.WithDefaults() - query.Query = `import "influxdata/influxdb/v1" v1.databases()` - j, err := json.Marshal(query) - assert.NoError(t, err) - req, err := http.NewRequest("POST", u.String(), bytes.NewBuffer(j)) - req.Header.Set("Content-Type", "application/json") - assert.NoError(t, err) - resp, err := httpClient.Do(req) - assert.NoError(t, err) - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, - strings.ReplaceAll(`,result,table,organizationID,databaseName,retentionPolicy,retentionPeriod,default,bucketId -,_result,0,,TestFluxBasicEndToEnd,autogen,0,true, - -`, "\n", "\r\n"), - string(b)) - } - { - // Query with json body, with annotations - query := 
fluxClient.QueryRequest{}.WithDefaults() - query.Query = `import "influxdata/influxdb/v1" v1.databases()` - query.Dialect.Annotations = csv.DefaultDialect().Annotations - j, err := json.Marshal(query) - assert.NoError(t, err) - req, err := http.NewRequest("POST", u.String(), bytes.NewBuffer(j)) - req.Header.Set("Content-Type", "application/json") - assert.NoError(t, err) - resp, err := httpClient.Do(req) - assert.NoError(t, err) - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, - strings.ReplaceAll(`#datatype,string,long,string,string,string,long,boolean,string -#group,false,false,true,false,false,false,false,false -#default,_result,,,,,,, -,result,table,organizationID,databaseName,retentionPolicy,retentionPeriod,default,bucketId -,,0,,TestFluxBasicEndToEnd,autogen,0,true, - -`, "\n", "\r\n"), - string(b)) - assert.NoError(t, err) - } - { - // Query with raw flux - assert.NoError(t, err) - req, err := http.NewRequest("POST", u.String(), bytes.NewBuffer([]byte(`import "influxdata/influxdb/v1" v1.databases()`))) - req.Header.Set("Content-Type", "application/vnd.flux") - assert.NoError(t, err) - resp, err := httpClient.Do(req) - assert.NoError(t, err) - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, - strings.ReplaceAll(`,result,table,organizationID,databaseName,retentionPolicy,retentionPeriod,default,bucketId -,_result,0,,TestFluxBasicEndToEnd,autogen,0,true, - -`, "\n", "\r\n"), - string(b)) - } - { - // Make sure runFluxBuiltinTest complains when it finds a diff - testFluxTmpl := `package universe_test -import "testing" -option now = () => (2030-01-01T00:00:00Z) - -inData = "#datatype,string,long,string,string,dateTime:RFC3339,unsignedLong -#group,false,false,true,true,false,false -#default,_result,,,,, -,result,table,_measurement,_field,_time,_value -,,0,Sgf,DlXwgrw,2018-12-18T22:11:05Z,70 -,,0,Sgf,DlXwgrw,2018-12-18T22:11:15Z,50" - -outData = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,unsignedLong -#group,false,false,true,true,true,true,false -#default,_result,,,,,, -,result,table,_start,_stop,_measurement,_field,_value -,,0,2018-12-01T00:00:00Z,2030-01-01T00:00:00Z,Sgf,DlXwgrw,%d" -t_sum = (table=<-) => (table |> range(start: 2018-12-01T00:00:00Z) |> sum()) -test _sum = () => ({input: testing.loadStorage(csv: inData), want: testing.loadMem(csv: outData), fn: t_sum}) -` - // This test passes: 70+50=120 - databasePass := t.Name() + "_pass" - s.CreateDatabase(databasePass) - defer s.DropDatabase(databasePass) - file := mustParse(fmt.Sprintf(testFluxTmpl, 120)) - bucket := databasePass + "/autogen" - runFluxBuiltinTest(t, file, u, bucket, false) - err := runFluxBuiltinTest(t, file, u, bucket, true) - assert.NoError(t, err) - - // We want to make sure the end to end tests are doing something. 
We assert that the test runner returns - // an error on diffs - databaseFail := t.Name() + "_fail" - s.CreateDatabase(databaseFail) - defer s.DropDatabase(databaseFail) - file = mustParse(fmt.Sprintf(testFluxTmpl, 121)) - bucket = databaseFail + "/autogen" - runFluxBuiltinTest(t, file, u, bucket, false) - err = runFluxBuiltinTest(t, file, u, bucket, true) - assert.EqualError(t, err, "test failed - diff table in output") - } -} - -func TestFluxRegressionEndToEnd(t *testing.T) { - config := NewConfig() - config.HTTPD.FluxEnabled = true - s := OpenServer(config) - defer s.Close() - - s.CreateDatabase(t.Name()) - defer s.DropDatabase(t.Name()) - u, err := url.Parse(s.URL()) - assert.NoError(t, err) - u.Path = "/api/v2/query" - httpClient := &http.Client{} - - { - // buckets query - assert.NoError(t, err) - req, err := http.NewRequest("POST", u.String(), bytes.NewBuffer([]byte(`buckets()`))) - req.Header.Set("Content-Type", "application/vnd.flux") - assert.NoError(t, err) - resp, err := httpClient.Do(req) - assert.NoError(t, err) - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, - strings.ReplaceAll(`,result,table,name,id,organizationID,retentionPolicy,retentionPeriod -,_result,0,TestFluxRegressionEndToEnd/autogen,,,autogen,0 - -`, "\n", "\r\n"), - string(b)) - } -} - -var FluxEndToEndSkipList = map[string]map[string]string{ - "universe": { - // TODO(adam) determine the reason for these test failures. - "cov": "Reason TBD", - "covariance": "Reason TBD", - "cumulative_sum": "Reason TBD", - "cumulative_sum_default": "Reason TBD", - "cumulative_sum_noop": "Reason TBD", - "drop_non_existent": "Reason TBD", - "first": "Reason TBD", - "highestAverage": "Reason TBD", - "highestMax": "Reason TBD", - "histogram": "Reason TBD", - "histogram_normalize": "Reason TBD", - "histogram_quantile": "Reason TBD", - "join": "Reason TBD", - "join_across_measurements": "Reason TBD", - "join_agg": "Reason TBD", - "keep_non_existent": "Reason TBD", - "key_values": "Reason TBD", - "key_values_host_name": "Reason TBD", - "last": "Reason TBD", - "lowestAverage": "Reason TBD", - "max": "Reason TBD", - "min": "Reason TBD", - "sample": "Reason TBD", - "selector_preserve_time": "Reason TBD", - "shift": "Reason TBD", - "shift_negative_duration": "Reason TBD", - "task_per_line": "Reason TBD", - "top": "Reason TBD", - "union": "Reason TBD", - "union_heterogeneous": "Reason TBD", - "unique": "Reason TBD", - "distinct": "Reason TBD", - - // it appears these occur when writing the input data. `to` may not be null safe. - "fill_bool": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64", - "fill_float": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64", - "fill_int": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64", - "fill_string": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64", - "fill_time": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64", - "fill_uint": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64", - "window_null": "failed to read meta data: panic: interface conversion: interface {} is nil, not float64", - - // these may just be missing calls to range() in the tests. easy to fix in a new PR. 
- "group_nulls": "unbounded test", - "integral": "unbounded test", - "integral_columns": "unbounded test", - "map": "unbounded test", - "join_missing_on_col": "unbounded test", - "join_use_previous": "unbounded test (https://github.com/influxdata/flux/issues/2996)", - "join_panic": "unbounded test (https://github.com/influxdata/flux/issues/3465)", - "rowfn_with_import": "unbounded test", - - // the following tests have a difference between the CSV-decoded input table, and the storage-retrieved version of that table - "columns": "group key mismatch", - "set": "column order mismatch", - "simple_max": "_stop missing from expected output", - "derivative": "time bounds mismatch (engine uses now() instead of bounds on input table)", - "difference_columns": "data write/read path loses columns x and y", - "keys": "group key mismatch", - - // failed to read meta data errors: the CSV encoding is incomplete probably due to data schema errors. needs more detailed investigation to find root cause of error - // "filter_by_regex": "failed to read metadata", - // "filter_by_tags": "failed to read metadata", - "group": "failed to read metadata", - "group_except": "failed to read metadata", - "group_ungroup": "failed to read metadata", - "pivot_mean": "failed to read metadata", - "histogram_quantile_minvalue": "failed to read meta data: no column with label _measurement exists", - "increase": "failed to read meta data: table has no _value column", - - "string_max": "error: invalid use of function: *functions.MaxSelector has no implementation for type string (https://github.com/influxdata/platform/issues/224)", - "null_as_value": "null not supported as value in influxql (https://github.com/influxdata/platform/issues/353)", - "string_interp": "string interpolation not working as expected in flux (https://github.com/influxdata/platform/issues/404)", - "to": "to functions are not supported in the testing framework (https://github.com/influxdata/flux/issues/77)", - "covariance_missing_column_1": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", - "covariance_missing_column_2": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", - "drop_before_rename": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", - "drop_referenced": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)", - "yield": "yield requires special test case (https://github.com/influxdata/flux/issues/535)", - - "window_group_mean_ungroup": "window trigger optimization modifies sort order of its output tables (https://github.com/influxdata/flux/issues/1067)", - - "median_column": "failing in different ways (https://github.com/influxdata/influxdb/issues/13909)", - "dynamic_query": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975", - - "to_int": "dateTime conversion issue: https://github.com/influxdata/influxdb/issues/14575", - "to_uint": "dateTime conversion issue: https://github.com/influxdata/influxdb/issues/14575", - - "holt_winters_panic": "Expected output is an empty table which breaks the testing framework (https://github.com/influxdata/influxdb/issues/14749)", - "map_nulls": "to cannot write null values", - }, - "array": { - "from": "test not meant to be consumed by influxdb", - "from_group": "test not meant to be consumed by influxdb", - }, - "experimental": { - "set": "Reason TBD", - "join": "unbounded 
test", - "alignTime": "unbounded test", - }, - "experimental/geo": { - "filterRowsNotStrict": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975", - "filterRowsStrict": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975", - "gridFilterLevel": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975", - "gridFilter": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975", - "groupByArea": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975", - "filterRowsPivoted": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975", - "shapeDataWithFilter": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975", - "shapeData": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975", - }, - "regexp": { - "replaceAllString": "Reason TBD", - }, - "http": { - "http_endpoint": "need ability to test side effects in e2e tests: (https://github.com/influxdata/flux/issues/1723)", - }, - "influxdata/influxdb/schema": { - "show_tag_keys": "failing due to bug in test, unskip this after upgrading from Flux v0.91.0", - }, - "influxdata/influxdb/monitor": { - "state_changes_big_any_to_any": "unbounded test", - "state_changes_big_info_to_ok": "unbounded test", - "state_changes_big_ok_to_info": "unbounded test", - "state_changes_any_to_any": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975", - "state_changes_info_to_any": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975", - "state_changes_invalid_any_to_any": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975", - "state_changes": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975", - }, - "influxdata/influxdb/secrets": { - "secrets": "Cannot inject custom deps into the test framework so the secrets don't lookup correctly", - }, - "internal/promql": { - "join": "unbounded test", - }, - "testing/chronograf": { - "buckets": "unbounded test", - "aggregate_window_count": "flakey test: https://github.com/influxdata/influxdb/issues/18463", - }, - "testing/kapacitor": { - "fill_default": "unknown field type for f1", - }, - "testing/pandas": { - "extract_regexp_findStringIndex": "pandas. map does not correctly handled returned arrays (https://github.com/influxdata/flux/issues/1387)", - "partition_strings_splitN": "pandas. 
map does not correctly handled returned arrays (https://github.com/influxdata/flux/issues/1387)", - }, - "testing/promql": { - "emptyTable": "tests a source", - "year": "flakey test: https://github.com/influxdata/influxdb/issues/15667", - "extrapolatedRate_counter_rate": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", - "extrapolatedRate_nocounter": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", - "extrapolatedRate_norate": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", - "linearRegression_nopredict": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", - "linearRegression_predict": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155", - }, - "testing/influxql": { - "cumulative_sum": "invalid test data requires loadStorage to be overridden. See https://github.com/influxdata/flux/issues/3145", - "elapsed": "failing since split with Flux upgrade: https://github.com/influxdata/influxdb/issues/19568", - }, - "contrib/RohanSreerama5/naiveBayesClassifier": { - "bayes": "error calling tableFind: ", - }, -} - -func TestFluxEndToEnd(t *testing.T) { - runEndToEnd(t, stdlib.FluxTestPackages) -} - -func runEndToEnd(t *testing.T, pkgs []*ast.Package) { - config := NewConfig() - config.HTTPD.FluxEnabled = true - s := OpenServer(config) - defer s.Close() - - for _, pkg := range pkgs { - test := func(t *testing.T, f func(t *testing.T)) { - t.Run(pkg.Path, f) - } - if pkg.Path == "universe" { - test = func(t *testing.T, f func(t *testing.T)) { - f(t) - } - } - - test(t, func(t *testing.T) { - for _, file := range pkg.Files { - name := strings.TrimSuffix(file.Name, "_test.flux") - t.Run(name, func(t *testing.T) { - if reason, ok := FluxEndToEndSkipList[pkg.Path][name]; ok { - t.Skip(reason) - } - // Set up the database & URL - // We don't properly support slashes in database names for flux queries - databaseName := strings.ReplaceAll(t.Name(), "/", "_") - s.CreateDatabase(databaseName) - defer s.DropDatabase(databaseName) - u, err := url.Parse(s.URL()) - assert.NoError(t, err) - u.Path = "/api/v2/query" - bucket := databaseName + "/autogen" - - // Run the end to end test. The first time we ignore the results, but as a side - // effect the data is loaded into the TSDB store. The second test runs with `from` - // gathering data from TSDB. 
- runFluxBuiltinTest(t, file, u, bucket, false) - err = runFluxBuiltinTest(t, file, u, bucket, true) - }) - } - }) - } -} - func makeTestPackage(file *ast.File) *ast.Package { file = file.Copy().(*ast.File) file.Package.Name.Name = "main" diff --git a/tsdb/engine/tsm1/compact_test.go b/tsdb/engine/tsm1/compact_test.go index 38729ce542..da753c3e09 100644 --- a/tsdb/engine/tsm1/compact_test.go +++ b/tsdb/engine/tsm1/compact_test.go @@ -15,7 +15,7 @@ import ( "go.uber.org/zap" ) -// Tests compacting a Cache snapshot into a single TSM file +// Tests compacting a Cache snapshot into a single TSM file func TestCompactor_Snapshot(t *testing.T) { dir := MustTempDir() defer os.RemoveAll(dir) diff --git a/tsdb/engine/tsm1/engine_test.go b/tsdb/engine/tsm1/engine_test.go index b1f30a152c..5180795003 100644 --- a/tsdb/engine/tsm1/engine_test.go +++ b/tsdb/engine/tsm1/engine_test.go @@ -1210,7 +1210,7 @@ func TestIndex_SeriesIDSet(t *testing.T) { } // Ensures that deleting series from TSM files with multiple fields removes all the -/// series +// / series func TestEngine_DeleteSeries(t *testing.T) { for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { diff --git a/tsdb/engine/tsm1/ring.go b/tsdb/engine/tsm1/ring.go index b230e228ea..51740614d5 100644 --- a/tsdb/engine/tsm1/ring.go +++ b/tsdb/engine/tsm1/ring.go @@ -29,7 +29,6 @@ const partitions = 16 // // To determine the partition that a series key should be added to, the series // key is hashed and the first 8 bits are used as an index to the ring. -// type ring struct { // The unique set of partitions in the ring. // len(partitions) <= len(continuum) @@ -40,8 +39,7 @@ type ring struct { // power of 2, and for performance reasons should be larger than the number of // cores on the host. The supported set of values for n is: // -// {1, 2, 4, 8, 16}. -// +// {1, 2, 4, 8, 16}. func newring(n int) (*ring, error) { if n <= 0 || n > partitions { return nil, fmt.Errorf("invalid number of partitions: %d", n) diff --git a/tsdb/field_validator.go b/tsdb/field_validator.go index cd2821bd5d..c428e6976a 100644 --- a/tsdb/field_validator.go +++ b/tsdb/field_validator.go @@ -11,8 +11,8 @@ import ( const MaxFieldValueLength = 1048576 // ValidateFields will return a PartialWriteError if: -// - the point has inconsistent fields, or -// - the point has fields that are too long +// - the point has inconsistent fields, or +// - the point has fields that are too long func ValidateFields(mf *MeasurementFields, point models.Point, skipSizeValidation bool) error { pointSize := point.StringSize() iter := point.FieldIterator() diff --git a/tsdb/index/tsi1/doc.go b/tsdb/index/tsi1/doc.go index 1f85e1821a..39bc6076bd 100644 --- a/tsdb/index/tsi1/doc.go +++ b/tsdb/index/tsi1/doc.go @@ -1,9 +1,8 @@ /* - Package tsi1 provides a memory-mapped index implementation that supports high cardinality series. -Overview +# Overview The top-level object in tsi1 is the Index. It is the primary access point from the rest of the system. The Index is composed of LogFile and IndexFile objects. @@ -17,8 +16,7 @@ Index files also contain series information, however, they are highly indexed so that reads can be performed quickly. Index files are built through a process called compaction where a log file or multiple index files are merged together. - -Operations +# Operations The index can perform many tasks related to series, measurement, & tag data. All data is inserted by adding a series to the index. 
When adding a series, @@ -34,8 +32,7 @@ as by measurement name, by tag value, or by using regular expressions. The index provides an API to iterate over subsets of series and perform set operations such as unions and intersections. - -Log File Layout +# Log File Layout The write-ahead file that series initially are inserted into simply appends all new operations sequentially. It is simply composed of a series of log @@ -61,15 +58,13 @@ name, the tag set, and a checksum. When the log file is replayed, if the checksum is incorrect or the entry is incomplete (because of a partially failed write) then the log is truncated. - -Index File Layout +# Index File Layout The index file is composed of 3 main block types: one series block, one or more tag blocks, and one measurement block. At the end of the index file is a trailer that records metadata such as the offsets to these blocks. - -Series Block Layout +# Series Block Layout The series block stores raw series keys in sorted order. It also provides hash indexes so that series can be looked up quickly. Hash indexes are inserted @@ -111,8 +106,7 @@ a trailer which contains metadata about the block. ┃ └──────────────────────┘ ┃ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛ - -Tag Block Layout +# Tag Block Layout After the series block is one or more tag blocks. One of these blocks exists for every measurement in the index file. The block is structured as a sorted @@ -159,8 +153,7 @@ that value. Series iterators can be built around a single tag key value or multiple iterators can be merged with set operators such as union or intersection. - -Measurement block +# Measurement block The measurement block stores a sorted list of measurements, their associated series offsets, and the offset to their tag block. This allows all series for @@ -188,8 +181,7 @@ measurements. ┃ └──────────────────────┘ ┃ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛ - -Manifest file +# Manifest file The index is simply an ordered set of log and index files. These files can be merged together or rewritten but their order must always be the same. This is @@ -200,8 +192,7 @@ Whenever the set of active files is changed, a manifest file is written to track the set. The manifest specifies the ordering of files and, on startup, all files not in the manifest are removed from the index directory. - -Compacting index files +# Compacting index files Compaction is the process of taking files and merging them together into a single file. There are two stages of compaction within TSI. @@ -216,8 +207,7 @@ they are all merged together into a single index file and the old files are discarded. Because all blocks are written in sorted order, the new index file can be streamed and minimize memory use. - -Concurrency +# Concurrency Index files are immutable so they do not require fine grained locks, however, compactions require that we track which files are in use so they are not @@ -232,7 +222,5 @@ returns to zero. Besides the reference counting, there are no other locking mechanisms when reading or writing index files. Log files, however, do require a lock whenever they are accessed. This is another reason to minimize log file size. - - */ package tsi1 diff --git a/tsdb/index/tsi1/index.go b/tsdb/index/tsi1/index.go index f59bb63010..cff2f4f29f 100644 --- a/tsdb/index/tsi1/index.go +++ b/tsdb/index/tsi1/index.go @@ -55,7 +55,6 @@ func init() { // // NOTE: Currently, this must not be change once a database is created. Further, // it must also be a power of 2. 
-// var DefaultPartitionN uint64 = 8 // An IndexOption is a functional option for changing the configuration of diff --git a/tsdb/index_test.go b/tsdb/index_test.go index 69813e4b82..b1e3a36ec3 100644 --- a/tsdb/index_test.go +++ b/tsdb/index_test.go @@ -643,7 +643,7 @@ func BenchmarkIndexSet_TagSets(b *testing.B) { // This benchmark concurrently writes series to the index and fetches cached bitsets. // The idea is to emphasize the performance difference when bitset caching is on and off. // -// Typical results for an i7 laptop +// # Typical results for an i7 laptop // // BenchmarkIndex_ConcurrentWriteQuery/inmem/queries_100000/cache-8 1 5963346204 ns/op 2499655768 B/op 23964183 allocs/op // BenchmarkIndex_ConcurrentWriteQuery/inmem/queries_100000/no_cache-8 1 5314841090 ns/op 2499495280 B/op 23963322 allocs/op diff --git a/tsdb/internal/fieldsindex.pb.go b/tsdb/internal/fieldsindex.pb.go index fda9712b22..8280fd7a08 100644 --- a/tsdb/internal/fieldsindex.pb.go +++ b/tsdb/internal/fieldsindex.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.1 // protoc v3.17.3 // source: internal/fieldsindex.proto diff --git a/tsdb/series_set_test.go b/tsdb/series_set_test.go index f64123403a..6f99c0385c 100644 --- a/tsdb/series_set_test.go +++ b/tsdb/series_set_test.go @@ -162,8 +162,6 @@ var set *SeriesIDSet // BenchmarkSeriesIDSet_Add/10-4 5000000 348 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/100-4 5000000 373 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/1000-4 5000000 342 ns/op 0 B/op 0 allocs/op -// -// func BenchmarkSeriesIDSet_AddMore(b *testing.B) { cardinalities := []uint64{1, 2, 10, 100, 1000, 10000, 100000, 1000000, 10000000} @@ -202,7 +200,6 @@ func BenchmarkSeriesIDSet_AddMore(b *testing.B) { // BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_global_lock-8 2000000 914 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/same_multi_lock-8 30000000 39.7 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_multi_lock-8 1000000 1002 ns/op 0 B/op 0 allocs/op -// func BenchmarkSeriesIDSet_Add(b *testing.B) { // Setup... set = NewSeriesIDSet() @@ -523,7 +520,6 @@ func BenchmarkSeriesIDSet_AddMany(b *testing.B) { // BenchmarkSeriesIDSet_Remove/cardinality_1000000_remove_same-4 20000000 99.1 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_global_lock-4 20000000 57.7 ns/op 0 B/op 0 allocs/op // BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_multi_lock-4 20000000 80.1 ns/op 0 B/op 0 allocs/op -// func BenchmarkSeriesIDSet_Remove(b *testing.B) { // Setup... set = NewSeriesIDSet() diff --git a/tsdb/shard.go b/tsdb/shard.go index 6bfd4d3c5c..8f1555cd5b 100644 --- a/tsdb/shard.go +++ b/tsdb/shard.go @@ -206,7 +206,7 @@ func (s *Shard) SetEnabled(enabled bool) { s.mu.Unlock() } -//! setEnabledNoLock performs actual work of SetEnabled. Must hold s.mu before calling. +// ! setEnabledNoLock performs actual work of SetEnabled. Must hold s.mu before calling. func (s *Shard) setEnabledNoLock(enabled bool) { // Prevent writes and queries s.enabled = enabled diff --git a/tsdb/store.go b/tsdb/store.go index 7197eca6cb..956c4468d3 100644 --- a/tsdb/store.go +++ b/tsdb/store.go @@ -1240,7 +1240,6 @@ func (s *Store) sketchesForDatabase(dbName string, getSketches func(*Shard) (est // // Cardinality is calculated exactly by unioning all shards' bitsets of series // IDs. 
The result of this method cannot be combined with any other results. -// func (s *Store) SeriesCardinality(ctx context.Context, database string) (int64, error) { s.mu.RLock() shards := s.filterShards(byDatabase(database))
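Note on the comment-only hunks above (an editorial sketch, not part of the patch): the "# Overview"-style headings in tsdb/index/tsi1/doc.go, the indented bullet list in tsdb/field_validator.go, the indented "{1, 2, 4, 8, 16}" block in tsdb/engine/tsm1/ring.go, and the dropped trailing "//" lines in tsdb/series_set_test.go and tsdb/index/tsi1/index.go all appear to follow the doc-comment layout that gofmt produces from Go 1.19 onward: headings marked with "#", list items indented and introduced with a dash, preformatted text indented, and no dangling blank "//" terminator. The Go file below is hypothetical (the package name docfmt and its contents are invented for illustration, not taken from this diff) and only sketches that layout.

// Package docfmt is a hypothetical example of the doc-comment layout that
// gofmt emits under Go 1.19, mirroring the comment-only hunks in this diff.
//
// # Layout
//
// A section heading is a line starting with '#', as in the tsi1 doc.go hunks.
// List items are indented and introduced with a dash, as in ValidateFields:
//   - the point has an inconsistent field, or
//   - the point has a field that is too long
//
// Preformatted text is indented with a tab, as in the newring comment:
//
//	{1, 2, 4, 8, 16}
//
// A lone trailing "//" line at the end of a doc comment is removed.
package docfmt

If that reading is correct, these hunks can be regenerated mechanically by running gofmt from a Go 1.19 toolchain rather than being maintained by hand.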