build(flux): update flux to v0.171.0 (#23453)

* build(flux): update flux to v0.171.0

* chore: remove testing.loadStorage from tests

Also update skip lists in Flux test harness

* chore: remove now-redundant Go end-to-end unit tests

This testing is all now provided by the `fluxtest` harness.
Christopher M. Wolff 2022-06-14 15:09:59 -07:00 committed by GitHub
parent 53580ead1d
commit a492993012
11 changed files with 111 additions and 534 deletions
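
The recurring edit in the Flux test files below swaps the removed `testing.loadStorage` option for an explicit CSV load piped into `testing.load`. A minimal sketch of the before/after pattern, condensed from the diffs that follow (the `range` bound varies per test):

    import "csv"
    import "testing"

    // before: data loaded implicitly through the loadStorage option
    //   got = testing.loadStorage(csv: input)

    // after: decode the CSV explicitly, then hand the tables to the harness
    got = csv.from(csv: input)
        |> testing.load()
        |> range(start: -100y)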


@@ -31,11 +31,91 @@ build_test_harness() {
"$GO" build -o fluxtest ./internal/cmd/fluxtest-harness-influxdb
}
# Many tests targeting 3rd party databases are not yet supported in CI and should be filtered out.
DB_INTEGRATION_WRITE_TESTS=integration_mqtt_pub,integration_sqlite_write_to,integration_vertica_write_to,integration_mssql_write_to,integration_mysql_write_to,integration_mariadb_write_to,integration_pg_write_to,integration_hdb_write_to
DB_INTEGRATION_READ_TESTS=integration_sqlite_read_from_seed,integration_sqlite_read_from_nonseed,integration_vertica_read_from_seed,integration_vertica_read_from_nonseed,integration_mssql_read_from_seed,integration_mssql_read_from_nonseed,integration_mariadb_read_from_seed,integration_mariadb_read_from_nonseed,integration_mysql_read_from_seed,integration_mysql_read_from_nonseed,integration_pg_read_from_seed,integration_pg_read_from_nonseed,integration_hdb_read_from_seed,integration_hdb_read_from_nonseed
DB_INTEGRATION_INJECTION_TESTS=integration_sqlite_injection,integration_hdb_injection,integration_pg_injection,integration_mysql_injection,integration_mariadb_injection,integration_mssql_injection
DB_TESTS="${DB_INTEGRATION_WRITE_TESTS},${DB_INTEGRATION_READ_TESTS},${DB_INTEGRATION_INJECTION_TESTS}"
skipped_tests() {
doc=$(cat <<ENDSKIPS
# Integration write tests
integration_mqtt_pub
integration_sqlite_write_to
integration_vertica_write_to
integration_mssql_write_to
integration_mysql_write_to
integration_mariadb_write_to
integration_pg_write_to
integration_hdb_write_to
# Integration read tests
integration_sqlite_read_from_seed
integration_sqlite_read_from_nonseed
integration_vertica_read_from_seed
integration_vertica_read_from_nonseed
integration_mssql_read_from_seed
integration_mssql_read_from_nonseed
integration_mariadb_read_from_seed
integration_mariadb_read_from_nonseed
integration_mysql_read_from_seed
integration_mysql_read_from_nonseed
integration_pg_read_from_seed
integration_pg_read_from_nonseed
integration_hdb_read_from_seed
integration_hdb_read_from_nonseed
# Integration injection tests
integration_sqlite_injection
integration_hdb_injection
integration_pg_injection
integration_mysql_injection
integration_mariadb_injection
integration_mssql_injection
# Other skipped tests
buckets # unbounded
columns # failing with differences
cov # unbounded
covariance # failing with differences
cumulative_sum # failing with differences
cumulative_sum_default # failing with differences
cumulative_sum_noop # failing with differences
difference_columns # failing with differences
distinct # failing with differences
fill # failing with differences
first # unbounded
group # unbounded
highestAverage # unbounded
highestMax # unbounded
histogram # unbounded
histogram_quantile # failing with differences
histogram_quantile_minvalue # failing with error
join # unbounded
join_missing_on_col # unbounded
join_panic # unbounded
key_values # unbounded
key_values_host_name # unbounded
keys # failing with differences
last # unbounded
lowestAverage # failing with differences
map # unbounded
max # unbounded
min # unbounded
pivot_mean # failing with differences
sample # unbounded
secrets # failing with error
selector_preserve_time # failing with differences
set # failing with differences
shapeData # failing with differences
shapeDataWithFilter # failing with differences
shift # unbounded
shift_negative_duration # unbounded
state_changes_big_any_to_any # unbounded
state_changes_big_info_to_ok # unbounded
state_changes_big_ok_to_info # unbounded
union # unbounded
union_heterogeneous # unbounded
unique # unbounded
window_null # failing with differences
ENDSKIPS
)
echo "$doc" | sed '/^[[:space:]]*$/d' | sed 's/[[:space:]]*#.*$//' | tr '\n' ',' | sed 's/,$//'
}
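
The `skipped_tests` helper flattens the annotated heredoc into the comma-separated list that `--skip` expects: the first `sed` drops blank lines, the second strips each trailing `#` comment along with the whitespace before it, `tr` joins the remaining names with commas, and the final `sed` removes the trailing comma left by the last newline. A standalone sketch of the same pipeline on sample input:

    printf 'alpha # unbounded\n\nbeta\ngamma # failing\n' \
      | sed '/^[[:space:]]*$/d' \
      | sed 's/[[:space:]]*#.*$//' \
      | tr '\n' ',' \
      | sed 's/,$//'
    # prints: alpha,beta,gamma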
run_integration_tests() {
log "Running integration tests..."
@@ -43,7 +43,7 @@ run_integration_tests() {
-v \
-p flux.zip \
-p query/ \
--skip "${DB_TESTS}"
--skip "$(skipped_tests)"
}
cleanup() {

go.mod

@@ -29,7 +29,7 @@ require (
github.com/google/go-jsonnet v0.17.0
github.com/hashicorp/vault/api v1.0.2
github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe
github.com/influxdata/flux v0.169.0
github.com/influxdata/flux v0.171.0
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69
github.com/influxdata/influx-cli/v2 v2.2.1-0.20220318222112-88ba3464cd07
github.com/influxdata/influxql v1.1.1-0.20211004132434-7e7d61973256

go.sum

@@ -497,8 +497,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe h1:7j4SdN/BvQwN6WoUq7mv0kg5U9NhnFBxPGMafYRKym0=
github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og=
github.com/influxdata/flux v0.169.0 h1:jyrWWkmsqsbETryyal1RDX2ig+PWEC63rFbAqaHMNXM=
github.com/influxdata/flux v0.169.0/go.mod h1:eNApXyjdyUdCNs6LxUQRBHxjUVqK1XrJrlMPhIQSQpA=
github.com/influxdata/flux v0.171.0 h1:9s0MA0bGXPRmzeAvZPYl1412qYSdeTNQb1cgW83nu2M=
github.com/influxdata/flux v0.171.0/go.mod h1:fNtcZ8tqtVDjwWYcPRvCdlY5t3n+NYCc5xunKCmigQA=
github.com/influxdata/gosnowflake v1.6.9 h1:BhE39Mmh8bC+Rvd4QQsP2gHypfeYIH1wqW1AjGWxxrE=
github.com/influxdata/gosnowflake v1.6.9/go.mod h1:9W/BvCXOKx2gJtQ+jdi1Vudev9t9/UDOEHnlJZ/y1nU=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU=


@@ -201,9 +201,6 @@ const writeOptSource = `
import "testing"
import c "csv"
option testing.loadStorage = (csv) => {
return c.from(csv: csv) |> to(bucket: bucket, org: org)
}
option testing.load = (tables=<-) => {
return tables |> to(bucket: bucket, org: org)
}
@@ -216,9 +213,6 @@ const readOptSource = `
import "testing"
import c "csv"
option testing.loadStorage = (csv) => {
return from(bucket: bucket)
}
option testing.load = (tables=<-) => {
return from(bucket: bucket)
}
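
This write/read pair of option sources implements the harness's two-pass scheme: in the write pass, `testing.load` forwards the piped-in tables to `to()`, persisting them in the test bucket; in the read pass, it ignores its input and reads the bucket back with `from()`. The removed `testing.loadStorage` definitions did the same for the CSV-argument form. Condensed, the surviving pair looks like this (`bucket` and `org` are bound by the harness):

    // write pass: persist the piped-in tables
    option testing.load = (tables=<-) => {
        return tables |> to(bucket: bucket, org: org)
    }

    // read pass: discard the input and read back what was written
    option testing.load = (tables=<-) => {
        return from(bucket: bucket)
    }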


@@ -33,7 +33,8 @@ testcase filter {
,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.63
")
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: -100y)
|> filter(fn: (r) => r._measurement == "system" and r._field == "load1")
|> drop(columns: ["_start", "_stop"])


@@ -55,7 +55,8 @@ input = "
"
testcase multi_measure {
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)
|> filter(fn: (r) => r["_measurement"] == "system" or r["_measurement"] == "sys")
|> filter(fn: (r) => r["_field"] == "load1" or r["_field"] == "load3")
@@ -83,7 +84,8 @@ testcase multi_measure {
}
testcase multi_measure_match_all {
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)
|> filter(fn: (r) => r["_measurement"] == "system" or r["_measurement"] == "sys" or r["_measurement"] == "var" or r["_measurement"] == "swap")
|> filter(fn: (r) => r["_field"] == "load1" or r["_field"] == "load3" or r["_field"] == "load5" or r["_field"] == "used_percent")
@@ -129,7 +131,8 @@ testcase multi_measure_match_all {
}
testcase multi_measure_tag_filter {
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)
|> filter(fn: (r) => r["_measurement"] == "system" or r["_measurement"] == "swap")
|> filter(fn: (r) => r["_field"] == "load1" or r["_field"] == "load3" or r["_field"] == "used_percent")
@@ -158,7 +161,8 @@ testcase multi_measure_tag_filter {
}
testcase multi_measure_complex_or {
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)
|> filter(fn: (r) => (r["_measurement"] == "system" or r["_measurement"] == "swap") or (r["_measurement"] != "var" and r["host"] == "host.local"))
|> drop(columns: ["_start", "_stop"])
@@ -197,7 +201,8 @@ testcase multi_measure_complex_or {
}
testcase multi_measure_complex_and {
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)
|> filter(fn: (r) => r["_measurement"] != "system" or r["_measurement"] == "swap")
|> filter(fn: (r) => r["_measurement"] == "swap" or r["_measurement"] == "var")
@@ -225,7 +230,8 @@ testcase multi_measure_complex_and {
}
testcase multi_measure_negation {
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z)
|> filter(fn: (r) => r["_measurement"] != "system")
|> filter(fn: (r) => r["host"] == "host.local" or not exists r["host"])


@@ -63,7 +63,8 @@ input = "
"
testcase tag_values_measurement_or_predicate {
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: -100y)
|> filter(fn: (r) => r["_measurement"] == "cpu")
|> filter(fn: (r) => r["_measurement"] == "someOtherThing" or r["host"] == "euterpe.local")
@@ -86,7 +87,8 @@ testcase tag_values_measurement_or_predicate {
}
testcase tag_values_measurement_or_negation {
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: -100y)
|> filter(fn: (r) => r["_measurement"] != "cpu")
|> filter(fn: (r) => r["_measurement"] == "someOtherThing" or r["fstype"] != "apfs")
@@ -108,7 +110,8 @@ testcase tag_values_measurement_or_negation {
}
testcase tag_values_measurement_or_regex {
got = testing.loadStorage(csv: input)
got = csv.from(csv: input)
|> testing.load()
|> range(start: -100y)
|> filter(fn: (r) => r["_measurement"] =~ /cp.*/)
|> filter(fn: (r) => r["_measurement"] == "someOtherThing" or r["host"] !~ /mnemo.*/)


@@ -4,5 +4,4 @@ package stdlib
import (
_ "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
_ "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb/v1"
_ "github.com/influxdata/influxdb/v2/query/stdlib/testing"
)


@@ -1,319 +0,0 @@
package testing_test
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"strings"
"testing"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/lang"
"github.com/influxdata/flux/parser"
"github.com/influxdata/flux/runtime"
"github.com/influxdata/flux/stdlib"
platform "github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/cmd/influxd/launcher"
influxdbcontext "github.com/influxdata/influxdb/v2/context"
"github.com/influxdata/influxdb/v2/http"
"github.com/influxdata/influxdb/v2/kit/feature"
"github.com/influxdata/influxdb/v2/kit/feature/override"
"github.com/influxdata/influxdb/v2/mock"
"github.com/influxdata/influxdb/v2/query"
_ "github.com/influxdata/influxdb/v2/query/stdlib"
// Import the stdlib
itesting "github.com/influxdata/influxdb/v2/query/stdlib/testing"
)
// Flagger for end-to-end test cases. This flagger contains a pointer to a
// single struct instance that all the test cases will consult. It will return flags
// based on the contents of FluxEndToEndFeatureFlags and the currently active
// test case. This works only because tests are serialized. We can set the
// current test case in the common flagger state, then run the test. If we were
// to run tests in parallel we would need to create multiple users and assign
// them different flag combinations, then run the tests under different users.
type Flagger struct {
flaggerState *FlaggerState
}
type FlaggerState struct {
Path string
Name string
FeatureFlags itesting.PerTestFeatureFlagMap
DefaultFlagger feature.Flagger
}
func newFlagger(featureFlagMap itesting.PerTestFeatureFlagMap) Flagger {
flaggerState := &FlaggerState{}
flaggerState.FeatureFlags = featureFlagMap
flaggerState.DefaultFlagger = feature.DefaultFlagger()
return Flagger{flaggerState}
}
func (f Flagger) SetActiveTestCase(path string, name string) {
f.flaggerState.Path = path
f.flaggerState.Name = name
}
func (f Flagger) Flags(ctx context.Context, _f ...feature.Flag) (map[string]interface{}, error) {
// If an override is set for the test case, construct an override flagger
// and use its computed flags.
overrides := f.flaggerState.FeatureFlags[f.flaggerState.Path][f.flaggerState.Name]
if overrides != nil {
f, err := override.Make(overrides, nil)
if err != nil {
panic("failed to construct override flagger, probably an invalid flag in FluxEndToEndFeatureFlags")
}
return f.Flags(ctx)
}
// Otherwise use flags from a default flagger.
return f.flaggerState.DefaultFlagger.Flags(ctx)
}
// Default context.
var ctx = influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(true, nil))
func init() {
runtime.FinalizeBuiltIns()
}
func TestFluxEndToEnd(t *testing.T) {
runEndToEnd(t, stdlib.FluxTestPackages)
}
func BenchmarkFluxEndToEnd(b *testing.B) {
benchEndToEnd(b, stdlib.FluxTestPackages)
}
func runEndToEnd(t *testing.T, pkgs []*ast.Package) {
l := launcher.NewTestLauncher()
flagger := newFlagger(itesting.FluxEndToEndFeatureFlags)
l.SetFlagger(flagger)
l.RunOrFail(t, ctx)
defer l.ShutdownOrFail(t, ctx)
l.SetupOrFail(t)
for _, pkg := range pkgs {
test := func(t *testing.T, f func(t *testing.T)) {
t.Run(pkg.Path, f)
}
if pkg.Path == "universe" {
test = func(t *testing.T, f func(t *testing.T)) {
f(t)
}
}
test(t, func(t *testing.T) {
for _, file := range pkg.Files {
name := strings.TrimSuffix(file.Name, "_test.flux")
t.Run(name, func(t *testing.T) {
if reason, ok := itesting.FluxEndToEndSkipList[pkg.Path][name]; ok {
t.Skip(reason)
}
flagger.SetActiveTestCase(pkg.Path, name)
testFlux(t, l, file)
})
}
})
}
}
func benchEndToEnd(b *testing.B, pkgs []*ast.Package) {
// TODO(jsternberg): These benchmarks don't run properly
// and need to be fixed. Commenting out the code for now.
b.Skip("https://github.com/influxdata/influxdb/issues/15391")
// l := launcher.RunTestLauncherOrFail(b, ctx)
// l.SetupOrFail(b)
// defer l.ShutdownOrFail(b, ctx)
// for _, pkg := range pkgs {
// pkg := pkg.Copy().(*ast.Package)
// name := pkg.Files[0].Name
// b.Run(name, func(b *testing.B) {
// if reason, ok := itesting.FluxEndToEndSkipList[strings.TrimSuffix(name, ".flux")]; ok {
// b.Skip(reason)
// }
// b.ResetTimer()
// b.ReportAllocs()
// for i := 0; i < b.N; i++ {
// testFlux(b, l, pkg)
// }
// })
// }
}
func makeTestPackage(file *ast.File) *ast.Package {
file = file.Copy().(*ast.File)
file.Package.Name.Name = "main"
pkg := &ast.Package{
Package: "main",
Files: []*ast.File{file},
}
return pkg
}
// This options definition puts to() in the path of the CSV input. The tests
// are run in this pass and would normally pass if we checked the results,
// but we don't look at them.
var writeOptSource = `
import "testing"
import c "csv"
option testing.loadStorage = (csv) => {
return c.from(csv: csv) |> to(bucket: bucket, org: org)
}
`
// This options definition is for the second run, the test run. It loads the
// data from the previously written bucket. We check the results after running this
// second pass and report on them.
var readOptSource = `
import "testing"
import c "csv"
option testing.loadStorage = (csv) => {
return from(bucket: bucket)
}
`
var writeOptAST *ast.File
var readOptAST *ast.File
func prepareOptions(optionsSource string) *ast.File {
pkg := parser.ParseSource(optionsSource)
if ast.Check(pkg) > 0 {
panic(ast.GetError(pkg))
}
return pkg.Files[0]
}
func init() {
writeOptAST = prepareOptions(writeOptSource)
readOptAST = prepareOptions(readOptSource)
}
func testFlux(t testing.TB, l *launcher.TestLauncher, file *ast.File) {
b := &platform.Bucket{
OrgID: l.Org.ID,
Name: t.Name(),
RetentionPeriod: 0,
}
s := l.BucketService(t)
if err := s.CreateBucket(context.Background(), b); err != nil {
t.Fatal(err)
}
defer func() {
if err := s.DeleteBucket(context.Background(), b.ID); err != nil {
t.Logf("Failed to delete bucket: %s", err)
}
}()
// Define bucket and org options
bucketOpt := &ast.OptionStatement{
Assignment: &ast.VariableAssignment{
ID: &ast.Identifier{Name: "bucket"},
Init: &ast.StringLiteral{Value: b.Name},
},
}
orgOpt := &ast.OptionStatement{
Assignment: &ast.VariableAssignment{
ID: &ast.Identifier{Name: "org"},
Init: &ast.StringLiteral{Value: l.Org.Name},
},
}
executeWithOptions(t, l, bucketOpt, orgOpt, writeOptAST, file)
results := executeWithOptions(t, l, bucketOpt, orgOpt, readOptAST, file)
if results != nil {
logFormatted := func(name string, results map[string]*bytes.Buffer) {
if _, ok := results[name]; ok {
scanner := bufio.NewScanner(results[name])
for scanner.Scan() {
t.Log(scanner.Text())
}
} else {
t.Log("table ", name, " not present in results")
}
}
if _, ok := results["diff"]; ok {
t.Error("diff table was not empty")
logFormatted("diff", results)
logFormatted("want", results)
logFormatted("got", results)
t.Logf("all data in %s:", t.Name())
logFormatted(t.Name(), allDataFromBucket(t, l, t.Name()))
}
}
}
func allDataFromBucket(t testing.TB, l *launcher.TestLauncher, bucket string) map[string]*bytes.Buffer {
q := fmt.Sprintf(`from(bucket: "%s") |> range(start: 0)`, bucket)
bs, err := http.SimpleQuery(l.URL(), q, l.Org.Name, l.Auth.Token)
if err != nil {
t.Fatal(err)
}
return map[string]*bytes.Buffer{bucket: bytes.NewBuffer(bs)}
}
func executeWithOptions(t testing.TB, l *launcher.TestLauncher, bucketOpt *ast.OptionStatement,
orgOpt *ast.OptionStatement, optionsAST *ast.File, file *ast.File) map[string]*bytes.Buffer {
var results map[string]*bytes.Buffer
options := optionsAST.Copy().(*ast.File)
options.Body = append([]ast.Statement{bucketOpt, orgOpt}, options.Body...)
// Add options to pkg
pkg := makeTestPackage(file)
pkg.Files = append(pkg.Files, options)
// Use testing.inspect call to get all of diff, want, and got
inspectCalls := stdlib.TestingInspectCalls(pkg)
if len(inspectCalls.Body) == 0 {
t.Skip("no tests found")
return nil
}
pkg.Files = append(pkg.Files, inspectCalls)
bs, err := json.Marshal(pkg)
if err != nil {
t.Fatal(err)
}
req := &query.Request{
OrganizationID: l.Org.ID,
Compiler: lang.ASTCompiler{AST: bs},
}
if r, err := l.FluxQueryService().Query(ctx, req); err != nil {
t.Fatal(err)
} else {
results = make(map[string]*bytes.Buffer)
for r.More() {
v := r.Next()
if _, ok := results[v.Name()]; !ok {
results[v.Name()] = &bytes.Buffer{}
}
err := execute.FormatResult(results[v.Name()], v)
if err != nil {
t.Error(err)
}
}
if err := r.Err(); err != nil {
t.Error(err)
}
}
return results
}


@@ -1,188 +0,0 @@
package testing
import "runtime"
var FluxEndToEndSkipList = map[string]map[string]string{
"universe": {
// TODO(adam) determine the reason for these test failures.
"cov": "Reason TBD",
"covariance": "Reason TBD",
"cumulative_sum": "Reason TBD",
"cumulative_sum_default": "Reason TBD",
"cumulative_sum_noop": "Reason TBD",
"drop_non_existent": "Reason TBD",
"first": "Reason TBD",
"highestAverage": "Reason TBD",
"highestMax": "Reason TBD",
"histogram": "Reason TBD",
"histogram_normalize": "Reason TBD",
"histogram_quantile": "Reason TBD",
"join": "Reason TBD",
"join_across_measurements": "Reason TBD",
"join_agg": "Reason TBD",
"keep_non_existent": "Reason TBD",
"key_values": "Reason TBD",
"key_values_host_name": "Reason TBD",
"last": "Reason TBD",
"lowestAverage": "Reason TBD",
"max": "Reason TBD",
"min": "Reason TBD",
"sample": "Reason TBD",
"selector_preserve_time": "Reason TBD",
"shift": "Reason TBD",
"shift_negative_duration": "Reason TBD",
"task_per_line": "Reason TBD",
"top": "Reason TBD",
"union": "Reason TBD",
"union_heterogeneous": "Reason TBD",
"unique": "Reason TBD",
"distinct": "Reason TBD",
// it appears these occur when writing the input data. `to` may not be null safe.
"fill_bool": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64",
"fill_float": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64",
"fill_int": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64",
"fill_string": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64",
"fill_time": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64",
"fill_uint": "failed to read meta data: panic: interface conversion: interface {} is nil, not uint64",
"window_null": "failed to read meta data: panic: interface conversion: interface {} is nil, not float64",
// these may just be missing calls to range() in the tests. easy to fix in a new PR.
"group_nulls": "unbounded test",
"integral": "unbounded test",
"integral_columns": "unbounded test",
"map": "unbounded test",
"join_missing_on_col": "unbounded test",
"join_use_previous": "unbounded test (https://github.com/influxdata/flux/issues/2996)",
"join_panic": "unbounded test (https://github.com/influxdata/flux/issues/3465)",
"rowfn_with_import": "unbounded test",
// the following tests have a difference between the CSV-decoded input table, and the storage-retrieved version of that table
"columns": "group key mismatch",
"set": "column order mismatch",
"simple_max": "_stop missing from expected output",
"derivative": "time bounds mismatch (engine uses now() instead of bounds on input table)",
"difference_columns": "data write/read path loses columns x and y",
"keys": "group key mismatch",
// failed to read meta data errors: the CSV encoding is incomplete probably due to data schema errors. needs more detailed investigation to find root cause of error
// "filter_by_regex": "failed to read metadata",
// "filter_by_tags": "failed to read metadata",
"group": "failed to read metadata",
"group_except": "failed to read metadata",
"group_ungroup": "failed to read metadata",
"pivot_mean": "failed to read metadata",
"histogram_quantile_minvalue": "failed to read meta data: no column with label _measurement exists",
"increase": "failed to read meta data: table has no _value column",
"string_max": "error: invalid use of function: *functions.MaxSelector has no implementation for type string (https://github.com/influxdata/platform/issues/224)",
"null_as_value": "null not supported as value in influxql (https://github.com/influxdata/platform/issues/353)",
"string_interp": "string interpolation not working as expected in flux (https://github.com/influxdata/platform/issues/404)",
"to": "to functions are not supported in the testing framework (https://github.com/influxdata/flux/issues/77)",
"covariance_missing_column_1": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"covariance_missing_column_2": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"drop_before_rename": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"drop_referenced": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"yield": "yield requires special test case (https://github.com/influxdata/flux/issues/535)",
"window_group_mean_ungroup": "window trigger optimization modifies sort order of its output tables (https://github.com/influxdata/flux/issues/1067)",
"median_column": "failing in different ways (https://github.com/influxdata/influxdb/issues/13909)",
"dynamic_query": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975",
"to_int": "dateTime conversion issue: https://github.com/influxdata/influxdb/issues/14575",
"to_uint": "dateTime conversion issue: https://github.com/influxdata/influxdb/issues/14575",
"holt_winters_panic": "Expected output is an empty table which breaks the testing framework (https://github.com/influxdata/influxdb/issues/14749)",
"map_nulls": "to cannot write null values",
},
"array": {
"from": "test not meant to be consumed by influxdb",
"from_group": "test not meant to be consumed by influxdb",
},
"experimental": {
"set": "Reason TBD",
"join": "unbounded test",
"alignTime": "unbounded test",
"histogram_quantile": "mis-named columns for storage",
"distinct": "failing test",
"fill": "failing test",
"histogram": "failing test",
"unique": "failing test",
},
"experimental/oee": {
"apq": "failing test",
"computeapq": "failing test",
},
"experimental/geo": {
"filterRowsNotStrict": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975",
"filterRowsStrict": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975",
"gridFilterLevel": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975",
"gridFilter": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975",
"groupByArea": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975",
"filterRowsPivoted": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975",
"shapeDataWithFilter": "tableFind does not work in e2e tests: https://github.com/influxdata/influxdb/issues/13975",
"shapeData": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975",
},
"regexp": {
"replaceAllString": "Reason TBD",
},
"http": {
"http_endpoint": "need ability to test side effects in e2e tests: (https://github.com/influxdata/flux/issues/1723)",
},
"influxdata/influxdb/schema": {
"show_tag_keys": "failing due to bug in test, unskip this after upgrading from Flux v0.91.0",
},
"influxdata/influxdb/monitor": {
"state_changes_big_any_to_any": "unbounded test",
"state_changes_big_info_to_ok": "unbounded test",
"state_changes_big_ok_to_info": "unbounded test",
"state_changes_any_to_any": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975",
"state_changes_info_to_any": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975",
"state_changes_invalid_any_to_any": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975",
"state_changes": "test run before to() is finished: https://github.com/influxdata/influxdb/issues/13975",
},
"influxdata/influxdb/secrets": {
"secrets": "Cannot inject custom deps into the test framework so the secrets don't lookup correctly",
},
"internal/promql": {
"join": "unbounded test",
},
"testing/chronograf": {
"buckets": "unbounded test",
"aggregate_window_count": "flakey test: https://github.com/influxdata/influxdb/issues/18463",
},
"testing/kapacitor": {
"fill_default": "unknown field type for f1",
},
"testing/pandas": {
"extract_regexp_findStringIndex": "pandas. map does not correctly handled returned arrays (https://github.com/influxdata/flux/issues/1387)",
"partition_strings_splitN": "pandas. map does not correctly handled returned arrays (https://github.com/influxdata/flux/issues/1387)",
},
"testing/promql": {
"emptyTable": "tests a source",
"year": "flakey test: https://github.com/influxdata/influxdb/issues/15667",
"extrapolatedRate_counter_rate": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
"extrapolatedRate_nocounter": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
"extrapolatedRate_norate": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
"linearRegression_nopredict": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
"linearRegression_predict": "option \"testing.loadStorage\" reassigned: https://github.com/influxdata/flux/issues/3155",
},
"testing/influxql": {
"cumulative_sum": "invalid test data requires loadStorage to be overridden. See https://github.com/influxdata/flux/issues/3145",
"elapsed": "failing since split with Flux upgrade: https://github.com/influxdata/influxdb/issues/19568",
},
"contrib/RohanSreerama5/naiveBayesClassifier": {
"bayes": "error calling tableFind: ",
},
}
func init() {
if runtime.GOOS != "amd64" {
FluxEndToEndSkipList["universe"]["holt_winters"] = "expected HoltWinters outputs only valid on amd64"
}
}
type PerTestFeatureFlagMap = map[string]map[string]map[string]string
var FluxEndToEndFeatureFlags = PerTestFeatureFlagMap{}


@@ -46,7 +46,8 @@ testcase last_multi_shard {
,,0,2021-01-26T08:00:00Z,-1099,bank,pge_bill
",
)
result = testing.loadStorage(csv: input)
result = csv.from(csv: input)
|> testing.load()
|> range(start: -3y)
|> filter(fn: (r) => r._measurement == "pge_bill" and r._field == "bank")
|> last()