build(flux): update flux to master and change renamed structs (#22281)
parent 5aa91f0524
commit f94783e016
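
Summary: the Flux dependency moves from v0.126.0 to a master pseudo-version, and call sites are updated for a rename in Flux's execute package: AggregateConfig became SimpleAggregateConfig (and DefaultAggregateConfig became DefaultSimpleAggregateConfig). Below is a minimal, self-contained sketch of the call-site change, using only identifiers that appear in this diff; the main wrapper and the print are illustrative only:

package main

import (
	"fmt"

	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/stdlib/universe"
)

func main() {
	// Before this commit the embedded field and its type were named
	// AggregateConfig / execute.AggregateConfig; upstream Flux renamed both.
	spec := &universe.SumProcedureSpec{
		SimpleAggregateConfig: execute.SimpleAggregateConfig{
			Columns: []string{execute.DefaultValueColLabel},
		},
	}
	fmt.Println(spec.SimpleAggregateConfig.Columns)
}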

go.mod
@@ -40,7 +40,7 @@ require (
 	github.com/hashicorp/vault/api v1.0.2
 	github.com/imdario/mergo v0.3.9 // indirect
 	github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe
-	github.com/influxdata/flux v0.126.0
+	github.com/influxdata/flux v0.126.1-0.20210823165046-5dfeb5051ff8
 	github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69
 	github.com/influxdata/influx-cli/v2 v2.1.1-0.20210813175002-13799e7662c0
 	github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6

go.sum
@@ -330,8 +330,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe h1:7j4SdN/BvQwN6WoUq7mv0kg5U9NhnFBxPGMafYRKym0=
 github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og=
-github.com/influxdata/flux v0.126.0 h1:MHmS0iYRytU/929otYmVkovBj92D4h6aytTEoVlunag=
-github.com/influxdata/flux v0.126.0/go.mod h1:atoyXj60GRia0R6TlRTIuKpcXvK50GGQh4VJ/kcRWws=
+github.com/influxdata/flux v0.126.1-0.20210823165046-5dfeb5051ff8 h1:qygSggU7Zzw0gx76s0/oqmbIz1qAm61/kYEaYpypnsQ=
+github.com/influxdata/flux v0.126.1-0.20210823165046-5dfeb5051ff8/go.mod h1:atoyXj60GRia0R6TlRTIuKpcXvK50GGQh4VJ/kcRWws=
 github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU=
 github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
 github.com/influxdata/influx-cli/v2 v2.1.1-0.20210813175002-13799e7662c0 h1:llPYnejbp/s9JkkS2xjSlAsdPKqIAsabhAgiOLV1NHw=
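
Note: a pseudo-version pin like the one above is typically produced by pointing the module at the target upstream commit and letting the Go tool resolve it; with the short hash embedded in the version string, something like `go get github.com/influxdata/flux@5dfeb5051ff8` followed by `go mod tidy` (run at the repository root) regenerates both go.mod and go.sum.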

@@ -35,7 +35,7 @@ func TestFixed_Canceled(t *testing.T) {
 	f := limiter.NewFixed(1)
 	require.NoError(t, f.Take(context.Background()))
 
-	ctx, cancel := context.WithTimeout(context.Background(), 30 * time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	cancel()
 	err := f.Take(ctx)
 	require.Error(t, err)

@@ -55,7 +55,7 @@ var (
 		ConcurrencyQuota: 1,
 		QueueSize:        1,
 	}
-	bothConfigs = map[string]control.Config{"unlimited": config, "limited": limitedConfig}
+	bothConfigs = map[string]control.Config{"unlimited": config, "limited": limitedConfig}
 )
 
 func setupPromRegistry(c *control.Controller) *prometheus.Registry {

@@ -975,7 +975,7 @@ func (p GroupWindowAggregateTransposeRule) Rewrite(ctx context.Context, pn plan.
 	switch spec := fnNode.ProcedureSpec().(type) {
 	case *universe.CountProcedureSpec:
 		newFnNode := plan.CreateUniquePhysicalNode(ctx, "sum", &universe.SumProcedureSpec{
-			AggregateConfig: spec.AggregateConfig,
+			SimpleAggregateConfig: spec.SimpleAggregateConfig,
 		})
 		plan.ReplaceNode(fnNode, newFnNode)
 		fnNode = newFnNode
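
The hunk above sits inside the rule that transposes a grouped window aggregate: a pushed-down windowed count() is re-combined by replacing the Count node with a Sum node that inherits the same aggregate configuration, because re-counting the partial windows would count windows rather than points. A toy, self-contained illustration of that identity (the variable names are illustrative, not from the codebase):

package main

import "fmt"

func main() {
	// Per-window point counts produced by a pushed-down count().
	counts := []int{2, 3}
	// Counting these partial results again would give 2 (the number of
	// windows); the correct combiner for counts is a sum.
	total := 0
	for _, c := range counts {
		total += c
	}
	fmt.Println(total) // 5, the count over all points
}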

@@ -1133,12 +1133,12 @@ func maxProcedureSpec() *universe.MaxProcedureSpec {
 }
 func countProcedureSpec() *universe.CountProcedureSpec {
 	return &universe.CountProcedureSpec{
-		AggregateConfig: execute.AggregateConfig{Columns: []string{execute.DefaultValueColLabel}},
+		SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{execute.DefaultValueColLabel}},
 	}
 }
 func sumProcedureSpec() *universe.SumProcedureSpec {
 	return &universe.SumProcedureSpec{
-		AggregateConfig: execute.AggregateConfig{Columns: []string{execute.DefaultValueColLabel}},
+		SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{execute.DefaultValueColLabel}},
 	}
 }
 func firstProcedureSpec() *universe.FirstProcedureSpec {

@@ -1153,7 +1153,7 @@ func lastProcedureSpec() *universe.LastProcedureSpec {
 }
 func meanProcedureSpec() *universe.MeanProcedureSpec {
 	return &universe.MeanProcedureSpec{
-		AggregateConfig: execute.AggregateConfig{Columns: []string{execute.DefaultValueColLabel}},
+		SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{execute.DefaultValueColLabel}},
 	}
 }
 

@@ -1527,7 +1527,7 @@ func TestPushDownWindowAggregateRule(t *testing.T) {
 		Context: context.Background(),
 		Rules:   []plan.Rule{influxdb.PushDownWindowAggregateRule{}},
 		Before: simplePlanWithWindowAgg(window1m, "mean", &universe.MeanProcedureSpec{
-			AggregateConfig: execute.AggregateConfig{Columns: []string{"_valmoo"}},
+			SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{"_valmoo"}},
 		}),
 		NoChange: true,
 	})

@@ -1536,7 +1536,7 @@ func TestPushDownWindowAggregateRule(t *testing.T) {
 		Context: context.Background(),
 		Rules:   []plan.Rule{influxdb.PushDownWindowAggregateRule{}},
 		Before: simplePlanWithWindowAgg(window1m, "mean", &universe.MeanProcedureSpec{
-			AggregateConfig: execute.AggregateConfig{Columns: []string{"_value", "_valmoo"}},
+			SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{"_value", "_valmoo"}},
 		}),
 		NoChange: true,
 	})

@@ -2760,12 +2760,12 @@ func TestPushDownGroupAggregateRule(t *testing.T) {
 	}
 	countProcedureSpec := func() *universe.CountProcedureSpec {
 		return &universe.CountProcedureSpec{
-			AggregateConfig: execute.DefaultAggregateConfig,
+			SimpleAggregateConfig: execute.DefaultSimpleAggregateConfig,
 		}
 	}
 	sumProcedureSpec := func() *universe.SumProcedureSpec {
 		return &universe.SumProcedureSpec{
-			AggregateConfig: execute.DefaultAggregateConfig,
+			SimpleAggregateConfig: execute.DefaultSimpleAggregateConfig,
 		}
 	}
 	firstProcedureSpec := func() *universe.FirstProcedureSpec {

@@ -2925,7 +2925,7 @@ func TestPushDownGroupAggregateRule(t *testing.T) {
 		Context: context.Background(),
 		Rules:   []plan.Rule{influxdb.PushDownGroupAggregateRule{}},
 		Before: simplePlanWithAgg("count", &universe.CountProcedureSpec{
-			AggregateConfig: execute.AggregateConfig{Columns: []string{"_valmoo"}},
+			SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{"_valmoo"}},
 		}),
 		NoChange: true,
 	})

@@ -3059,7 +3059,7 @@ func TestStorageReader_ReadGroup(t *testing.T) {
 		},
 		{
 			aggregate: storageflux.FirstKind,
-			want: static.TableGroup {
+			want: static.TableGroup{
 				static.StringKey("_measurement", "m0"),
 				static.StringKey("_field", "f0"),
 				static.TimeKey("_start", "2019-11-25T00:00:00Z"),

@@ -3077,7 +3077,7 @@ func TestStorageReader_ReadGroup(t *testing.T) {
 		},
 		{
 			aggregate: storageflux.LastKind,
-			want: static.TableGroup {
+			want: static.TableGroup{
 				static.StringKey("_measurement", "m0"),
 				static.StringKey("_field", "f0"),
 				static.TimeKey("_start", "2019-11-25T00:00:00Z"),

@@ -80,7 +80,9 @@ func newMockServices() mockedSvc {
 	return mockedSvc{
 		taskSvc: &mock.TaskService{
 			FindTaskByIDFn: func(_ context.Context, id platform.ID) (*taskmodel.Task, error) { return &taskmodel.Task{ID: id}, nil },
-			CreateTaskFn: func(context.Context, taskmodel.TaskCreate) (*taskmodel.Task, error) { return &taskmodel.Task{ID: 1}, nil },
+			CreateTaskFn: func(context.Context, taskmodel.TaskCreate) (*taskmodel.Task, error) {
+				return &taskmodel.Task{ID: 1}, nil
+			},
 			UpdateTaskFn: func(_ context.Context, id platform.ID, _ taskmodel.TaskUpdate) (*taskmodel.Task, error) {
 				return &taskmodel.Task{ID: id}, nil
 			},

@@ -2732,12 +2732,12 @@ func MustParsePointString(buf string) models.Point { return MustParsePointsStrin
 type mockPlanner struct{}
 
 func (m *mockPlanner) Plan(lastWrite time.Time) ([]tsm1.CompactionGroup, int64) { return nil, 0 }
-func (m *mockPlanner) PlanLevel(level int) ([]tsm1.CompactionGroup, int64) { return nil, 0 }
-func (m *mockPlanner) PlanOptimize() ([]tsm1.CompactionGroup, int64) { return nil, 0 }
-func (m *mockPlanner) Release(groups []tsm1.CompactionGroup) {}
-func (m *mockPlanner) FullyCompacted() (bool, string) { return false, "not compacted" }
-func (m *mockPlanner) ForceFull() {}
-func (m *mockPlanner) SetFileStore(fs *tsm1.FileStore) {}
+func (m *mockPlanner) PlanLevel(level int) ([]tsm1.CompactionGroup, int64)      { return nil, 0 }
+func (m *mockPlanner) PlanOptimize() ([]tsm1.CompactionGroup, int64)            { return nil, 0 }
+func (m *mockPlanner) Release(groups []tsm1.CompactionGroup)                    {}
+func (m *mockPlanner) FullyCompacted() (bool, string)                           { return false, "not compacted" }
+func (m *mockPlanner) ForceFull()                                               {}
+func (m *mockPlanner) SetFileStore(fs *tsm1.FileStore)                          {}
 
 // ParseTags returns an instance of Tags for a comma-delimited list of key/values.
 func ParseTags(s string) query.Tags {
|
@ -548,7 +548,7 @@ func BenchmarkLogFile_WriteTo(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
func benchmarkLogFile_MeasurementHasSeries(b *testing.B, seriesKeyN, seriesValueN int) {
|
||||
func benchmarkLogFile_MeasurementHasSeries(b *testing.B, seriesKeyN, seriesValueN int) {
|
||||
b.StopTimer()
|
||||
|
||||
sfile := MustOpenSeriesFile()
|
||||
|

@@ -607,10 +607,18 @@ func benchmarkLogFile_MeasurementHasSeries(b *testing.B, seriesKeyN, seriesValu
 	}
 }
 
-func BenchmarkLogFile_MeasurementHasSeries_2_10(b *testing.B) { benchmarkLogFile_MeasurementHasSeries(b, 2, 10) } // 100 series
-func BenchmarkLogFile_MeasurementHasSeries_3_10(b *testing.B) { benchmarkLogFile_MeasurementHasSeries(b, 3, 10) } // 1k series
-func BenchmarkLogFile_MeasurementHasSeries_4_10(b *testing.B) { benchmarkLogFile_MeasurementHasSeries(b, 4, 10) } // 10k series
-func BenchmarkLogFile_MeasurementHasSeries_5_10(b *testing.B) { benchmarkLogFile_MeasurementHasSeries(b, 5, 10) } // 100k series
+func BenchmarkLogFile_MeasurementHasSeries_2_10(b *testing.B) {
+	benchmarkLogFile_MeasurementHasSeries(b, 2, 10)
+} // 100 series
+func BenchmarkLogFile_MeasurementHasSeries_3_10(b *testing.B) {
+	benchmarkLogFile_MeasurementHasSeries(b, 3, 10)
+} // 1k series
+func BenchmarkLogFile_MeasurementHasSeries_4_10(b *testing.B) {
+	benchmarkLogFile_MeasurementHasSeries(b, 4, 10)
+} // 10k series
+func BenchmarkLogFile_MeasurementHasSeries_5_10(b *testing.B) {
+	benchmarkLogFile_MeasurementHasSeries(b, 5, 10)
+} // 100k series
 
 // MustStartCPUProfile starts a cpu profile in a temporary path based on name.
 func MustStartCPUProfile(name string) {