diff --git a/CHANGELOG.md b/CHANGELOG.md index 313b56c2d4..c35c23f869 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The stress tool `influx_stress` will be removed in a subsequent release. We reco - [#7709](https://github.com/influxdata/influxdb/pull/7709): Add clear command to cli. - [#7688](https://github.com/influxdata/influxdb/pull/7688): Adding ability to use parameters in queries in the v2 client using the `Parameters` map in the `Query` struct. - [#7323](https://github.com/influxdata/influxdb/pull/7323): Allow add items to array config via ENV +- [#4619](https://github.com/influxdata/influxdb/issues/4619): Support subquery execution in the query language. ### Bugfixes diff --git a/cmd/influxd/run/server.go b/cmd/influxd/run/server.go index f0ed86beed..7449fd9b82 100644 --- a/cmd/influxd/run/server.go +++ b/cmd/influxd/run/server.go @@ -179,9 +179,13 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) { // Initialize query executor. s.QueryExecutor = influxql.NewQueryExecutor() s.QueryExecutor.StatementExecutor = &coordinator.StatementExecutor{ - MetaClient: s.MetaClient, - TaskManager: s.QueryExecutor.TaskManager, - TSDBStore: coordinator.LocalTSDBStore{Store: s.TSDBStore}, + MetaClient: s.MetaClient, + TaskManager: s.QueryExecutor.TaskManager, + TSDBStore: coordinator.LocalTSDBStore{Store: s.TSDBStore}, + ShardMapper: &coordinator.LocalShardMapper{ + MetaClient: s.MetaClient, + TSDBStore: coordinator.LocalTSDBStore{Store: s.TSDBStore}, + }, Monitor: s.Monitor, PointsWriter: s.PointsWriter, MaxSelectPointN: c.Coordinator.MaxSelectPointN, diff --git a/cmd/influxd/run/server_test.go b/cmd/influxd/run/server_test.go index a3fa82c044..0f7ae2dae7 100644 --- a/cmd/influxd/run/server_test.go +++ b/cmd/influxd/run/server_test.go @@ -4549,6 +4549,201 @@ func TestServer_Query_GroupByTimeCutoffs(t *testing.T) { } } +func TestServer_Query_Subqueries(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + if 
err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server01 usage_user=70i,usage_system=30i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01 usage_user=45i,usage_system=55i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01 usage_user=23i,usage_system=77i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02 usage_user=11i,usage_system=89i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02 usage_user=28i,usage_system=72i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02 usage_user=12i,usage_system=53i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean FROM (SELECT mean(usage_user) FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",31.5]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT value FROM (SELECT mean(usage_user) AS value FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",31.5]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(usage) FROM (SELECT 100 - usage_user AS usage FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < 
'2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",68.5]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT host FROM (SELECT min(usage_user), host FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server02"]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server02"],["2000-01-01T00:00:20Z","server01"]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(min) FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",17]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT max(min), host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","max","host"],"values":[["2000-01-01T00:00:20Z",23,"server01"]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT host FROM (SELECT mean(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0}]}`, + skip: true, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT max(usage_system) FROM (SELECT 
min(usage_user), usage_system FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",89]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT min(top), host FROM (SELECT top(usage_user, host, 2) FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min","host"],"values":[["2000-01-01T00:00:10Z",28,"server02"]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT min(top), host FROM (SELECT top(usage_user, 2), host FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min","host"],"values":[["2000-01-01T00:00:10Z",45,"server01"]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT count(host) FROM (SELECT top(usage_user, host, 2) FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(derivative) FROM (SELECT derivative(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",-4.6]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT min(max) FROM (SELECT 100 - max(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT min(usage_system) FROM (SELECT max(usage_user), 100 - usage_system FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min"],"values":[["2000-01-01T00:00:10Z",28]]}]}]}`, + }, + &Query{ + params: url.Values{"db": []string{"db0"}}, + command: `SELECT min(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min"],"values":[["2000-01-01T00:00:10Z",-44]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_SubqueryWithGroupBy(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig()) + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server01,region=uswest value=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest value=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest value=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest value=4i %d`, 
mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=uswest value=5i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=uswest value=6i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=uswest value=7i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=uswest value=8i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=9i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=10i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=11i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=12i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast value=13i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast value=14i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast value=15i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast value=16i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "group by time(2s) - time(2s), host", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(2s), host) WHERE time >= 
'2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:04Z' GROUP BY time(2s)`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",7.5],["2000-01-01T00:00:02Z",9.5]]}]}]}`, + }, + &Query{ + name: "group by time(4s), host - time(2s), host", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(2s), host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:04Z' GROUP BY time(4s), host`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",6.5]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",10.5]]}]}]}`, + }, + &Query{ + name: "group by time(2s), host - time(2s), host, region", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(2s), host, region) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:04Z' GROUP BY time(2s), host`, + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",5.5],["2000-01-01T00:00:02Z",7.5]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",9.5],["2000-01-01T00:00:02Z",11.5]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + func TestServer_Write_Precision(t *testing.T) { t.Parallel() s := OpenServer(NewConfig()) diff --git a/coordinator/meta_client.go b/coordinator/meta_client.go index 040f4a107e..b459bb6b48 100644 --- a/coordinator/meta_client.go +++ b/coordinator/meta_client.go @@ -26,7 +26,7 @@ type MetaClient interface { RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) SetAdminPrivilege(username string, admin bool) error SetPrivilege(username, database string, p influxql.Privilege) error - ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) + ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error UpdateUser(name, password string) error UserPrivilege(username, database string) (*influxql.Privilege, error) diff --git a/coordinator/meta_client_test.go b/coordinator/meta_client_test.go index 03a009f9bb..43c8d11338 100644 --- a/coordinator/meta_client_test.go +++ b/coordinator/meta_client_test.go @@ -31,7 +31,7 @@ type MetaClient struct { RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) SetAdminPrivilegeFn func(username string, admin bool) error SetPrivilegeFn func(username, database string, p influxql.Privilege) error - ShardsByTimeRangeFn func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) + ShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) UpdateRetentionPolicyFn 
func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error UpdateUserFn func(name, password string) error UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) @@ -127,8 +127,8 @@ func (c *MetaClient) SetPrivilege(username, database string, p influxql.Privileg return c.SetPrivilegeFn(username, database, p) } -func (c *MetaClient) ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { - return c.ShardsByTimeRangeFn(sources, tmin, tmax) +func (c *MetaClient) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return c.ShardGroupsByTimeRangeFn(database, policy, min, max) } func (c *MetaClient) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error { diff --git a/coordinator/shard_mapper.go b/coordinator/shard_mapper.go new file mode 100644 index 0000000000..4041142cea --- /dev/null +++ b/coordinator/shard_mapper.go @@ -0,0 +1,181 @@ +package coordinator + +import ( + "io" + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +// IteratorCreator is an interface that combines mapping fields and creating iterators. +type IteratorCreator interface { + influxql.IteratorCreator + influxql.FieldMapper + io.Closer +} + +// ShardMapper retrieves and maps shards into an IteratorCreator that can later be +// used for executing queries. +type ShardMapper interface { + MapShards(sources influxql.Sources, opt *influxql.SelectOptions) (IteratorCreator, error) +} + +// LocalShardMapper implements a ShardMapper for local shards. 
+type LocalShardMapper struct { + MetaClient interface { + ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + } + + TSDBStore interface { + ShardGroup(ids []uint64) tsdb.ShardGroup + } +} + +// MapShards maps the sources to the appropriate shards into an IteratorCreator. +func (e *LocalShardMapper) MapShards(sources influxql.Sources, opt *influxql.SelectOptions) (IteratorCreator, error) { + a := &LocalShardMapping{ + ShardMap: make(map[Source]tsdb.ShardGroup), + } + + if err := e.mapShards(a, sources, opt); err != nil { + return nil, err + } + return a, nil +} + +func (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sources, opt *influxql.SelectOptions) error { + for _, s := range sources { + switch s := s.(type) { + case *influxql.Measurement: + source := Source{ + Database: s.Database, + RetentionPolicy: s.RetentionPolicy, + } + + // Retrieve the list of shards for this database. This list of + // shards is always the same regardless of which measurement we are + // using. + if _, ok := a.ShardMap[source]; !ok { + groups, err := e.MetaClient.ShardGroupsByTimeRange(s.Database, s.RetentionPolicy, opt.MinTime, opt.MaxTime) + if err != nil { + return err + } + + if len(groups) == 0 { + a.ShardMap[source] = nil + continue + } + + shardIDs := make([]uint64, 0, len(groups[0].Shards)*len(groups)) + for _, g := range groups { + for _, si := range g.Shards { + shardIDs = append(shardIDs, si.ID) + } + } + a.ShardMap[source] = e.TSDBStore.ShardGroup(shardIDs) + } + case *influxql.SubQuery: + if err := e.mapShards(a, s.Statement.Sources, opt); err != nil { + return err + } + } + } + return nil +} + +// LocalShardMapping maps data sources to a list of shard information. 
+type LocalShardMapping struct { + ShardMap map[Source]tsdb.ShardGroup +} + +func (a *LocalShardMapping) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return + } + + fields = make(map[string]influxql.DataType) + dimensions = make(map[string]struct{}) + + var measurements []string + if m.Regex != nil { + measurements = sg.MeasurementsByRegex(m.Regex.Val) + } else { + measurements = []string{m.Name} + } + + f, d, err := sg.FieldDimensions(measurements) + if err != nil { + return nil, nil, err + } + for k, typ := range f { + fields[k] = typ + } + for k := range d { + dimensions[k] = struct{}{} + } + return +} + +func (a *LocalShardMapping) MapType(m *influxql.Measurement, field string) influxql.DataType { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return influxql.Unknown + } + return sg.MapType(m.Name, field) +} + +func (a *LocalShardMapping) CreateIterator(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + source := Source{ + Database: m.Database, + RetentionPolicy: m.RetentionPolicy, + } + + sg := a.ShardMap[source] + if sg == nil { + return nil, nil + } + + if m.Regex != nil { + measurements := sg.MeasurementsByRegex(m.Regex.Val) + inputs := make([]influxql.Iterator, 0, len(measurements)) + if err := func() error { + for _, measurement := range measurements { + input, err := sg.CreateIterator(measurement, opt) + if err != nil { + return err + } + inputs = append(inputs, input) + } + return nil + }(); err != nil { + influxql.Iterators(inputs).Close() + return nil, err + } + return influxql.Iterators(inputs).Merge(opt) + } + return sg.CreateIterator(m.Name, opt) +} + +// Close does nothing for a LocalShardMapping. 
+func (a *LocalShardMapping) Close() error { + return nil +} + +// Source contains the database and retention policy source for data. +type Source struct { + Database string + RetentionPolicy string +} diff --git a/coordinator/shard_mapper_test.go b/coordinator/shard_mapper_test.go new file mode 100644 index 0000000000..81dd356e56 --- /dev/null +++ b/coordinator/shard_mapper_test.go @@ -0,0 +1,102 @@ +package coordinator_test + +import ( + "reflect" + "testing" + "time" + + "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +func TestLocalShardMapper(t *testing.T) { + var metaClient MetaClient + metaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) ([]meta.ShardGroupInfo, error) { + if database != "db0" { + t.Errorf("unexpected database: %s", database) + } + if policy != "rp0" { + t.Errorf("unexpected retention policy: %s", policy) + } + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 1, Owners: []meta.ShardOwner{{NodeID: 0}}}, + {ID: 2, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + {ID: 2, Shards: []meta.ShardInfo{ + {ID: 3, Owners: []meta.ShardOwner{{NodeID: 0}}}, + {ID: 4, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + }, nil + } + + var tsdbStore TSDBStore + tsdbStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{1, 2, 3, 4}) { + t.Errorf("unexpected shard ids: %#v", ids) + } + + var sh MockShard + sh.CreateIteratorFn = func(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if measurement != "cpu" { + t.Errorf("unexpected measurement: %s", measurement) + } + return &FloatIterator{}, nil + } + return &sh + } + + // Initialize the shard mapper. + shardMapper := &coordinator.LocalShardMapper{ + MetaClient: &metaClient, + TSDBStore: &tsdbStore, + } + + // Normal measurement. 
+ measurement := &influxql.Measurement{ + Database: "db0", + RetentionPolicy: "rp0", + Name: "cpu", + } + ic, err := shardMapper.MapShards([]influxql.Source{measurement}, &influxql.SelectOptions{}) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // This should be a LocalShardMapping. + m, ok := ic.(*coordinator.LocalShardMapping) + if !ok { + t.Fatalf("unexpected mapping type: %T", ic) + } else if len(m.ShardMap) != 1 { + t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) + } + + if _, err := ic.CreateIterator(measurement, influxql.IteratorOptions{}); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Subquery. + subquery := &influxql.SubQuery{ + Statement: &influxql.SelectStatement{ + Sources: []influxql.Source{measurement}, + }, + } + ic, err = shardMapper.MapShards([]influxql.Source{subquery}, &influxql.SelectOptions{}) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // This should be a LocalShardMapping. + m, ok = ic.(*coordinator.LocalShardMapping) + if !ok { + t.Fatalf("unexpected mapping type: %T", ic) + } else if len(m.ShardMap) != 1 { + t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) + } + + if _, err := ic.CreateIterator(measurement, influxql.IteratorOptions{}); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} diff --git a/coordinator/statement_executor.go b/coordinator/statement_executor.go index 5b9027d583..20676e80b8 100644 --- a/coordinator/statement_executor.go +++ b/coordinator/statement_executor.go @@ -35,6 +35,9 @@ type StatementExecutor struct { // TSDB storage for local node. TSDBStore TSDBStore + // ShardMapper for mapping shards when executing a SELECT statement. + ShardMapper ShardMapper + // Holds monitoring data for SHOW STATS and SHOW DIAGNOSTICS. 
Monitor *monitor.Monitor @@ -495,11 +498,7 @@ func (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx // Replace instances of "now()" with the current time, and check the resultant times. nowValuer := influxql.NowValuer{Now: now} - stmt.Condition = influxql.Reduce(stmt.Condition, &nowValuer) - // Replace instances of "now()" with the current time in the dimensions. - for _, d := range stmt.Dimensions { - d.Expr = influxql.Reduce(d.Expr, &nowValuer) - } + stmt = stmt.Reduce(&nowValuer) var err error opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition) @@ -508,21 +507,7 @@ func (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx } if opt.MaxTime.IsZero() { - // In the case that we're executing a meta query where the user cannot - // specify a time condition, then we expand the default max time - // to the maximum possible value, to ensure that data where all points - // are in the future are returned. - if influxql.Sources(stmt.Sources).HasSystemSource() { - opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC() - } else { - if interval, err := stmt.GroupByInterval(); err != nil { - return nil, stmt, err - } else if interval > 0 { - opt.MaxTime = now - } else { - opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC() - } - } + opt.MaxTime = time.Unix(0, influxql.MaxTime) } if opt.MinTime.IsZero() { opt.MinTime = time.Unix(0, influxql.MinTime).UTC() @@ -534,23 +519,20 @@ func (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx // Remove "time" from fields list. stmt.RewriteTimeFields() + // Rewrite time condition. + if err := stmt.RewriteTimeCondition(now); err != nil { + return nil, stmt, err + } + // Rewrite any regex conditions that could make use of the index. stmt.RewriteRegexConditions() // Create an iterator creator based on the shards in the cluster. 
- ic, err := e.iteratorCreator(stmt, &opt) + ic, err := e.ShardMapper.MapShards(stmt.Sources, &opt) if err != nil { return nil, stmt, err } - - // Expand regex sources to their actual source names. - if stmt.Sources.HasRegex() { - sources, err := ic.ExpandSources(stmt.Sources) - if err != nil { - return nil, stmt, err - } - stmt.Sources = sources - } + defer ic.Close() // Rewrite wildcards, if any exist. tmp, err := stmt.RewriteFields(ic) @@ -591,16 +573,6 @@ func (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx return itrs, stmt, nil } -// iteratorCreator returns a new instance of IteratorCreator based on stmt. -func (e *StatementExecutor) iteratorCreator(stmt *influxql.SelectStatement, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) { - // Retrieve a list of shard IDs. - shards, err := e.MetaClient.ShardsByTimeRange(stmt.Sources, opt.MinTime, opt.MaxTime) - if err != nil { - return nil, err - } - return e.TSDBStore.IteratorCreator(shards, opt) -} - func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql.ShowContinuousQueriesStatement) (models.Rows, error) { dis := e.MetaClient.Databases() @@ -1148,7 +1120,6 @@ type TSDBStore interface { DeleteRetentionPolicy(database, name string) error DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error DeleteShard(id uint64) error - IteratorCreator(shards []meta.ShardInfo, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) Measurements(database string, cond influxql.Expr) ([]string, error) TagValues(database string, cond influxql.Expr) ([]tsdb.TagValues, error) @@ -1162,15 +1133,6 @@ type LocalTSDBStore struct { *tsdb.Store } -// IteratorCreator returns an influxql.IteratorCreator for the given shards, with the given select options. 
-func (s LocalTSDBStore) IteratorCreator(shards []meta.ShardInfo, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) { - shardIDs := make([]uint64, len(shards)) - for i, sh := range shards { - shardIDs[i] = sh.ID - } - return s.Store.IteratorCreator(shardIDs, opt) -} - // ShardIteratorCreator is an interface for creating an IteratorCreator to access a specific shard. type ShardIteratorCreator interface { ShardIteratorCreator(id uint64) influxql.IteratorCreator diff --git a/coordinator/statement_executor_test.go b/coordinator/statement_executor_test.go index a58fa4438d..401777a111 100644 --- a/coordinator/statement_executor_test.go +++ b/coordinator/statement_executor_test.go @@ -6,6 +6,7 @@ import ( "io" "os" "reflect" + "regexp" "testing" "time" @@ -32,28 +33,35 @@ func TestQueryExecutor_ExecuteQuery_SelectStatement(t *testing.T) { e := DefaultQueryExecutor() // The meta client should return a single shard owned by the local node. - e.MetaClient.ShardsByTimeRangeFn = func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { - return []meta.ShardInfo{{ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}}, nil + e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, + }, nil } // The TSDB store should return an IteratorCreator for shard. // This IteratorCreator returns a single iterator with "value" in the aux fields. 
- e.TSDBStore.ShardIteratorCreatorFn = func(id uint64) influxql.IteratorCreator { - if id != 100 { - t.Fatalf("unexpected shard id: %d", id) + e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{100}) { + t.Fatalf("unexpected shard ids: %v", ids) } - var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + var sh MockShard + sh.CreateIteratorFn = func(m string, opt influxql.IteratorOptions) (influxql.Iterator, error) { return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}, {Name: "cpu", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}}, }}, nil } - ic.FieldDimensionsFn = func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + if !reflect.DeepEqual(measurements, []string{"cpu"}) { + t.Fatalf("unexpected source: %#v", measurements) + } return map[string]influxql.DataType{"value": influxql.Float}, nil, nil } - return &ic + return &sh } // Verify all results from the query. @@ -80,22 +88,33 @@ func TestQueryExecutor_ExecuteQuery_MaxSelectBucketsN(t *testing.T) { e.StatementExecutor.MaxSelectBucketsN = 3 // The meta client should return a single shards on the local node. 
- e.MetaClient.ShardsByTimeRangeFn = func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { - return []meta.ShardInfo{ - {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return []meta.ShardGroupInfo{ + {ID: 1, Shards: []meta.ShardInfo{ + {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + }}, }, nil } - var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { - return &FloatIterator{ - Points: []influxql.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}}, - }, nil + e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { + if !reflect.DeepEqual(ids, []uint64{100}) { + t.Fatalf("unexpected shard ids: %v", ids) + } + + var sh MockShard + sh.CreateIteratorFn = func(m string, opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{ + Points: []influxql.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}}, + }, nil + } + sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + if !reflect.DeepEqual(measurements, []string{"cpu"}) { + t.Fatalf("unexpected source: %#v", measurements) + } + return map[string]influxql.DataType{"value": influxql.Float}, nil, nil + } + return &sh } - ic.FieldDimensionsFn = func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - return map[string]influxql.DataType{"value": influxql.Float}, nil, nil - } - e.TSDBStore.ShardIteratorCreatorFn = func(id uint64) influxql.IteratorCreator { return &ic } // Verify all results from the query. 
if a := ReadAllResults(e.ExecuteQuery(`SELECT count(value) FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`, "db0", 0)); !reflect.DeepEqual(a, []*influxql.Result{ @@ -193,6 +212,10 @@ func NewQueryExecutor() *QueryExecutor { e.StatementExecutor = &coordinator.StatementExecutor{ MetaClient: &e.MetaClient, TSDBStore: &e.TSDBStore, + ShardMapper: &coordinator.LocalShardMapper{ + MetaClient: &e.MetaClient, + TSDBStore: &e.TSDBStore, + }, } e.QueryExecutor.StatementExecutor = e.StatementExecutor @@ -237,7 +260,7 @@ type TSDBStore struct { DeleteShardFn func(id uint64) error DeleteSeriesFn func(database string, sources []influxql.Source, condition influxql.Expr) error DatabaseIndexFn func(name string) *tsdb.DatabaseIndex - ShardIteratorCreatorFn func(id uint64) influxql.IteratorCreator + ShardGroupFn func(ids []uint64) tsdb.ShardGroup } func (s *TSDBStore) CreateShard(database, policy string, shardID uint64, enabled bool) error { @@ -279,29 +302,8 @@ func (s *TSDBStore) DeleteSeries(database string, sources []influxql.Source, con return s.DeleteSeriesFn(database, sources, condition) } -func (s *TSDBStore) IteratorCreator(shards []meta.ShardInfo, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) { - // Generate iterators for each node. 
- ics := make([]influxql.IteratorCreator, 0) - if err := func() error { - for _, shard := range shards { - ic := s.ShardIteratorCreator(shard.ID) - if ic == nil { - continue - } - ics = append(ics, ic) - } - - return nil - }(); err != nil { - influxql.IteratorCreators(ics).Close() - return nil, err - } - - return influxql.IteratorCreators(ics), nil -} - -func (s *TSDBStore) ShardIteratorCreator(id uint64) influxql.IteratorCreator { - return s.ShardIteratorCreatorFn(id) +func (s *TSDBStore) ShardGroup(ids []uint64) tsdb.ShardGroup { + return s.ShardGroupFn(ids) } func (s *TSDBStore) DatabaseIndex(name string) *tsdb.DatabaseIndex { @@ -316,6 +318,49 @@ func (s *TSDBStore) TagValues(database string, cond influxql.Expr) ([]tsdb.TagVa return nil, nil } +type MockShard struct { + Measurements []string + FieldDimensionsFn func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) + CreateIteratorFn func(m string, opt influxql.IteratorOptions) (influxql.Iterator, error) + ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) +} + +func (sh *MockShard) MeasurementsByRegex(re *regexp.Regexp) []string { + names := make([]string, 0, len(sh.Measurements)) + for _, name := range sh.Measurements { + if re.MatchString(name) { + names = append(names, name) + } + } + return names +} + +func (sh *MockShard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return sh.FieldDimensionsFn(measurements) +} + +func (sh *MockShard) MapType(measurement, field string) influxql.DataType { + f, d, err := sh.FieldDimensions([]string{measurement}) + if err != nil { + return influxql.Unknown + } + + if typ, ok := f[field]; ok { + return typ + } else if _, ok := d[field]; ok { + return influxql.Tag + } + return influxql.Unknown +} + +func (sh *MockShard) CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) { + return 
sh.CreateIteratorFn(measurement, opt) +} + +func (sh *MockShard) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { + return sh.ExpandSourcesFn(sources) +} + // MustParseQuery parses s into a query. Panic on error. func MustParseQuery(s string) *influxql.Query { q, err := influxql.ParseQuery(s) @@ -334,25 +379,6 @@ func ReadAllResults(c <-chan *influxql.Result) []*influxql.Result { return a } -// IteratorCreator is a mockable implementation of IteratorCreator. -type IteratorCreator struct { - CreateIteratorFn func(opt influxql.IteratorOptions) (influxql.Iterator, error) - FieldDimensionsFn func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) - ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) -} - -func (ic *IteratorCreator) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { - return ic.CreateIteratorFn(opt) -} - -func (ic *IteratorCreator) FieldDimensions(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - return ic.FieldDimensionsFn(sources) -} - -func (ic *IteratorCreator) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { - return ic.ExpandSourcesFn(sources) -} - // FloatIterator is a represents an iterator that reads from a slice. 
type FloatIterator struct { Points []influxql.FloatPoint diff --git a/influxql/README.md b/influxql/README.md index dfed3c9347..e577b74af2 100644 --- a/influxql/README.md +++ b/influxql/README.md @@ -931,7 +931,7 @@ points: ``` type FloatIterator interface { - Next() *FloatPoint + Next() (*FloatPoint, error) } ``` @@ -939,7 +939,7 @@ These iterators are created through the `IteratorCreator` interface: ``` type IteratorCreator interface { - CreateIterator(opt *IteratorOptions) (Iterator, error) + CreateIterator(m *Measurement, opt IteratorOptions) (Iterator, error) } ``` @@ -1046,3 +1046,89 @@ Some iterators are more complex or need to be implemented at a higher level. For example, the `DERIVATIVE()` needs to retrieve all points for a window first before performing the calculation. This iterator is created by the engine itself and is never requested to be created by the lower levels. + +### Subqueries + +Subqueries are built on top of iterators. Most of the work involved in +supporting subqueries is in organizing how data is streamed to the +iterators that will process the data. + +The final ordering of the stream has to output all points from one +series before moving to the next series and it also needs to ensure +those points are printed in order. So there are two separate concepts we +need to consider when creating an iterator: ordering and grouping. + +When an inner query has a different grouping than the outermost query, +we still need to group together related points into buckets, but we do +not have to ensure that all points from one bucket are output before +the points in another bucket. In fact, if we do that, we will be unable +to perform the grouping for the outer query correctly. Instead, we group +all points by the outermost query for an interval and then, within that +interval, we group the points for the inner query. 
For example, here are +series keys and times in seconds (fields are omitted since they don't +matter in this example): + + cpu,host=server01 0 + cpu,host=server01 10 + cpu,host=server01 20 + cpu,host=server01 30 + cpu,host=server02 0 + cpu,host=server02 10 + cpu,host=server02 20 + cpu,host=server02 30 + +With the following query: + + SELECT mean(max) FROM (SELECT max(value) FROM cpu GROUP BY host, time(20s)) GROUP BY time(20s) + +The final grouping keeps all of the points together which means we need +to group `server01` with `server02`. That means we output the points +from the underlying engine like this: + + cpu,host=server01 0 + cpu,host=server01 10 + cpu,host=server02 0 + cpu,host=server02 10 + cpu,host=server01 20 + cpu,host=server01 30 + cpu,host=server02 20 + cpu,host=server02 30 + +Within each one of those time buckets, we calculate the `max()` value +for each unique host so the output stream gets transformed to look like +this: + + cpu,host=server01 0 + cpu,host=server02 0 + cpu,host=server01 20 + cpu,host=server02 20 + +Then we can process the `mean()` on this stream of data instead and it +will be output in the correct order. This is true of any order of +grouping since grouping can only go from more specific to less specific. + +When it comes to ordering, unordered data is faster to process, but we +always need to produce ordered data. When processing a raw query with no +aggregates, we need to ensure data coming from the engine is ordered so +the output is ordered. When we have an aggregate, we know one point is +being emitted for each interval and will always produce ordered output. +So for aggregates, we can take unordered data as the input and get +ordered output. Any ordered data as input will always result in ordered +data so we just need to look at how an iterator processes unordered +data. 
+ +| | raw query | selector (without group by time) | selector (with group by time) | aggregator | +|-----------------|------------------|----------------------------------|-------------------------------|----------------| +| ordered input | ordered output | ordered output | ordered output | ordered output | +| unordered input | unordered output | unordered output | ordered output | ordered output | + +Since we always need ordered output, we just need to work backwards and +determine which pattern of input gives us ordered output. If both +ordered and unordered input produce ordered output, we prefer unordered +input since it is faster. + +There are also certain aggregates that require ordered input like +`median()` and `percentile()`. These functions will explicitly request +ordered input. It is also important to realize that selectors that are +grouped by time are the equivalent of an aggregator. It is only +selectors without a group by time that are different. diff --git a/influxql/ast.go b/influxql/ast.go index beb16e1752..cf75bd6630 100644 --- a/influxql/ast.go +++ b/influxql/ast.go @@ -169,6 +169,7 @@ func (*SortField) node() {} func (SortFields) node() {} func (Sources) node() {} func (*StringLiteral) node() {} +func (*SubQuery) node() {} func (*Target) node() {} func (*TimeLiteral) node() {} func (*VarRef) node() {} @@ -319,6 +320,7 @@ type Source interface { } func (*Measurement) source() {} +func (*SubQuery) source() {} // Sources represents a list of sources. type Sources []Source @@ -344,6 +346,9 @@ func (a Sources) Filter(database, retentionPolicy string) []Source { if s.Database == database && s.RetentionPolicy == retentionPolicy { sources = append(sources, s) } + case *SubQuery: + filteredSources := s.Statement.Sources.Filter(database, retentionPolicy) + sources = append(sources, filteredSources...) 
} } return sources @@ -390,6 +395,20 @@ func (a Sources) String() string { return buf.String() } +// Measurements returns all measurements including ones embedded in subqueries. +func (a Sources) Measurements() []*Measurement { + mms := make([]*Measurement, 0, len(a)) + for _, src := range a { + switch src := src.(type) { + case *Measurement: + mms = append(mms, src) + case *SubQuery: + mms = append(mms, src.Statement.Sources.Measurements()...) + } + } + return mms +} + // MarshalBinary encodes a list of sources to a binary format. func (a Sources) MarshalBinary() ([]byte, error) { var pb internal.Measurements @@ -989,6 +1008,21 @@ func (s *SelectStatement) IsSimpleDerivative() bool { return false } +// HasSelector returns true if there is exactly one selector. +func (s *SelectStatement) HasSelector() bool { + var selector *Call + for _, f := range s.Fields { + if call, ok := f.Expr.(*Call); ok { + if selector != nil || !IsSelector(call) { + // This is an aggregate call or there is already a selector. + return false + } + selector = call + } + } + return selector != nil +} + // TimeAscending returns true if the time field is sorted in chronological order. func (s *SelectStatement) TimeAscending() bool { return len(s.SortFields) == 0 || s.SortFields[0].Ascending @@ -1053,6 +1087,8 @@ func cloneSource(s Source) Source { m.Regex = &RegexLiteral{Val: regexp.MustCompile(s.Regex.Val.String())} } return m + case *SubQuery: + return &SubQuery{Statement: s.Statement.Clone()} default: panic("unreachable") } @@ -1062,11 +1098,20 @@ func cloneSource(s Source) Source { // fields are replaced with the supplied fields, and any wildcard GROUP BY fields are replaced // with the supplied dimensions. Any fields with no type specifier are rewritten with the // appropriate type. -func (s *SelectStatement) RewriteFields(ic IteratorCreator) (*SelectStatement, error) { - // Retrieve a list of unique field and dimensions. 
- fieldSet, dimensionSet, err := ic.FieldDimensions(s.Sources) - if err != nil { - return s, err +func (s *SelectStatement) RewriteFields(m FieldMapper) (*SelectStatement, error) { + // Clone the statement so we aren't rewriting the original. + other := s.Clone() + + // Iterate through the sources and rewrite any subqueries first. + for _, src := range other.Sources { + switch src := src.(type) { + case *SubQuery: + stmt, err := src.Statement.RewriteFields(m) + if err != nil { + return nil, err + } + src.Statement = stmt + } } // Rewrite all variable references in the fields with their types if one @@ -1077,28 +1122,31 @@ func (s *SelectStatement) RewriteFields(ic IteratorCreator) (*SelectStatement, e return } - if typ, ok := fieldSet[ref.Val]; ok { - ref.Type = typ - } else if ref.Type != AnyField { - if _, ok := dimensionSet[ref.Val]; ok { - ref.Type = Tag - } + typ := EvalType(ref, other.Sources, m) + if typ == Tag && ref.Type == AnyField { + return } + ref.Type = typ } - WalkFunc(s.Fields, rewrite) - WalkFunc(s.Condition, rewrite) + WalkFunc(other.Fields, rewrite) + WalkFunc(other.Condition, rewrite) // Ignore if there are no wildcards. - hasFieldWildcard := s.HasFieldWildcard() - hasDimensionWildcard := s.HasDimensionWildcard() + hasFieldWildcard := other.HasFieldWildcard() + hasDimensionWildcard := other.HasDimensionWildcard() if !hasFieldWildcard && !hasDimensionWildcard { - return s, nil + return other, nil + } + + fieldSet, dimensionSet, err := FieldDimensions(other.Sources, m) + if err != nil { + return nil, err } // If there are no dimension wildcards then merge dimensions to fields. if !hasDimensionWildcard { // Remove the dimensions present in the group by so they don't get added as fields. 
- for _, d := range s.Dimensions { + for _, d := range other.Dimensions { switch expr := d.Expr.(type) { case *VarRef: if _, ok := dimensionSet[expr.Val]; ok { @@ -1125,13 +1173,11 @@ func (s *SelectStatement) RewriteFields(ic IteratorCreator) (*SelectStatement, e } dimensions := stringSetSlice(dimensionSet) - other := s.Clone() - // Rewrite all wildcard query fields if hasFieldWildcard { // Allocate a slice assuming there is exactly one wildcard for efficiency. - rwFields := make(Fields, 0, len(s.Fields)+len(fields)-1) - for _, f := range s.Fields { + rwFields := make(Fields, 0, len(other.Fields)+len(fields)-1) + for _, f := range other.Fields { switch expr := f.Expr.(type) { case *Wildcard: for _, ref := range fields { @@ -1174,7 +1220,7 @@ func (s *SelectStatement) RewriteFields(ic IteratorCreator) (*SelectStatement, e switch expr := call.Args[0].(type) { case *Wildcard: if expr.Type == TAG { - return s, fmt.Errorf("unable to use tag wildcard in %s()", call.Name) + return nil, fmt.Errorf("unable to use tag wildcard in %s()", call.Name) } case *RegexLiteral: re = expr.Val @@ -1226,8 +1272,8 @@ func (s *SelectStatement) RewriteFields(ic IteratorCreator) (*SelectStatement, e // Rewrite all wildcard GROUP BY fields if hasDimensionWildcard { // Allocate a slice assuming there is exactly one wildcard for efficiency. - rwDimensions := make(Dimensions, 0, len(s.Dimensions)+len(dimensions)-1) - for _, d := range s.Dimensions { + rwDimensions := make(Dimensions, 0, len(other.Dimensions)+len(dimensions)-1) + for _, d := range other.Dimensions { switch expr := d.Expr.(type) { case *Wildcard: for _, name := range dimensions { @@ -1369,6 +1415,41 @@ func (s *SelectStatement) RewriteTimeFields() { } } +// RewriteTimeCondition adds time constraints to aggregate queries. 
+func (s *SelectStatement) RewriteTimeCondition(now time.Time) error { + interval, err := s.GroupByInterval() + if err != nil { + return err + } else if interval > 0 && s.Condition != nil { + _, tmax, err := TimeRange(s.Condition) + if err != nil { + return err + } + + if tmax.IsZero() { + s.Condition = &BinaryExpr{ + Op: AND, + LHS: s.Condition, + RHS: &BinaryExpr{ + Op: LTE, + LHS: &VarRef{Val: "time"}, + RHS: &TimeLiteral{Val: now}, + }, + } + } + } + + for _, source := range s.Sources { + switch source := source.(type) { + case *SubQuery: + if err := source.Statement.RewriteTimeCondition(now); err != nil { + return err + } + } + } + return nil +} + // ColumnNames will walk all fields and functions and return the appropriate field names for the select statement // while maintaining order of the field names. func (s *SelectStatement) ColumnNames() []string { @@ -1439,6 +1520,45 @@ func (s *SelectStatement) ColumnNames() []string { return columnNames } +// FieldExprByName returns the expression that matches the field name and the +// index where this was found. If the name matches one of the arguments to +// "top" or "bottom", the variable reference inside of the function is returned +// and the index is of the function call rather than the variable reference. +// If no expression is found, -1 is returned for the index and the expression +// will be nil. +func (s *SelectStatement) FieldExprByName(name string) (int, Expr) { + for i, f := range s.Fields { + if f.Name() == name { + return i, f.Expr + } else if call, ok := f.Expr.(*Call); ok && (call.Name == "top" || call.Name == "bottom") && len(call.Args) > 2 { + for _, arg := range call.Args[1 : len(call.Args)-1] { + if arg, ok := arg.(*VarRef); ok && arg.Val == name { + return i, arg + } + } + } + } + return -1, nil +} + +// Reduce calls the Reduce function on the different components of the +// SelectStatement to reduce the statement. 
+func (s *SelectStatement) Reduce(valuer Valuer) *SelectStatement { + stmt := s.Clone() + stmt.Condition = Reduce(stmt.Condition, valuer) + for _, d := range stmt.Dimensions { + d.Expr = Reduce(d.Expr, valuer) + } + + for _, source := range stmt.Sources { + switch source := source.(type) { + case *SubQuery: + source.Statement = source.Statement.Reduce(valuer) + } + } + return stmt +} + // HasTimeFieldSpecified will walk all fields and determine if the user explicitly asked for time. // This is needed to determine re-write behaviors for functions like TOP and BOTTOM. func (s *SelectStatement) HasTimeFieldSpecified() bool { @@ -1923,8 +2043,41 @@ func (s *SelectStatement) validateAggregates(tr targetRequirement) error { // If we have an aggregate function with a group by time without a where clause, it's an invalid statement if tr == targetNotRequired { // ignore create continuous query statements - if !s.IsRawQuery && groupByDuration > 0 && !HasTimeExpr(s.Condition) { - return fmt.Errorf("aggregate functions with GROUP BY time require a WHERE time clause") + if err := s.validateGroupByInterval(); err != nil { + return err + } + } + return nil +} + +// validateGroupByInterval ensures that any select statements that have a group +// by interval either have a time expression limiting the time range or have a +// parent query that does that. +func (s *SelectStatement) validateGroupByInterval() error { + // If we have a time expression, we and all subqueries are fine. + if HasTimeExpr(s.Condition) { + return nil + } + + // Check if this is not a raw query and if the group by duration exists. + // If these are true, then we have an error. + interval, err := s.GroupByInterval() + if err != nil { + return err + } else if !s.IsRawQuery && interval > 0 { + return fmt.Errorf("aggregate functions with GROUP BY time require a WHERE time clause") + } + + // Validate the subqueries. 
If we have a time expression in this select + // statement, we don't need to do this because parent time ranges propagate + // to children. So we only execute this when there is no time condition in + // the parent. + for _, source := range s.Sources { + switch source := source.(type) { + case *SubQuery: + if err := source.Statement.validateGroupByInterval(); err != nil { + return err + } } } return nil @@ -3277,6 +3430,16 @@ func decodeMeasurement(pb *internal.Measurement) (*Measurement, error) { return mm, nil } +// SubQuery is a source with a SelectStatement as the backing store. +type SubQuery struct { + Statement *SelectStatement +} + +// String returns a string representation of the subquery. +func (s *SubQuery) String() string { + return fmt.Sprintf("(%s)", s.Statement.String()) +} + // VarRef represents a reference to a variable. type VarRef struct { Val string @@ -3914,6 +4077,9 @@ func Walk(v Visitor, node Node) { Walk(v, s) } + case *SubQuery: + Walk(v, n.Statement) + case Statements: for _, s := range n { Walk(v, s) @@ -3959,6 +4125,9 @@ func Rewrite(r Rewriter, node Node) Node { n.Sources = Rewrite(r, n.Sources).(Sources) n.Condition = Rewrite(r, n.Condition).(Expr) + case *SubQuery: + n.Statement = Rewrite(r, n.Statement).(*SelectStatement) + case Fields: for i, f := range n { n[i] = Rewrite(r, f).(*Field) @@ -4219,6 +4388,132 @@ func EvalBool(expr Expr, m map[string]interface{}) bool { return v } +// TypeMapper maps a data type to the measurement and field. +type TypeMapper interface { + MapType(measurement *Measurement, field string) DataType +} + +type nilTypeMapper struct{} + +func (nilTypeMapper) MapType(*Measurement, string) DataType { return Unknown } + +// EvalType evaluates the expression's type. +func EvalType(expr Expr, sources Sources, typmap TypeMapper) DataType { + if typmap == nil { + typmap = nilTypeMapper{} + } + + switch expr := expr.(type) { + case *VarRef: + // If this variable already has an assigned type, just use that. 
+ if expr.Type != Unknown && expr.Type != AnyField { + return expr.Type + } + + var typ DataType + for _, src := range sources { + switch src := src.(type) { + case *Measurement: + t := typmap.MapType(src, expr.Val) + if typ == Unknown || t < typ { + typ = t + } + case *SubQuery: + _, e := src.Statement.FieldExprByName(expr.Val) + if e != nil { + t := EvalType(e, src.Statement.Sources, typmap) + if typ == Unknown || t < typ { + typ = t + } + } + + if typ == Unknown { + for _, d := range src.Statement.Dimensions { + if d, ok := d.Expr.(*VarRef); ok && expr.Val == d.Val { + typ = Tag + } + } + } + } + } + return typ + case *Call: + switch expr.Name { + case "mean", "median": + return Float + case "count": + return Integer + default: + return EvalType(expr.Args[0], sources, typmap) + } + case *ParenExpr: + return EvalType(expr.Expr, sources, typmap) + case *NumberLiteral: + return Float + case *IntegerLiteral: + return Integer + case *StringLiteral: + return String + case *BooleanLiteral: + return Boolean + case *BinaryExpr: + lhs := EvalType(expr.LHS, sources, typmap) + rhs := EvalType(expr.RHS, sources, typmap) + if lhs != Unknown && rhs != Unknown { + if lhs < rhs { + return lhs + } else { + return rhs + } + } else if lhs != Unknown { + return lhs + } else { + return rhs + } + } + return Unknown +} + +func FieldDimensions(sources Sources, m FieldMapper) (fields map[string]DataType, dimensions map[string]struct{}, err error) { + fields = make(map[string]DataType) + dimensions = make(map[string]struct{}) + + for _, src := range sources { + switch src := src.(type) { + case *Measurement: + f, d, err := m.FieldDimensions(src) + if err != nil { + return nil, nil, err + } + + for k, typ := range f { + if _, ok := fields[k]; typ != Unknown && (!ok || typ < fields[k]) { + fields[k] = typ + } + } + for k := range d { + dimensions[k] = struct{}{} + } + case *SubQuery: + for _, f := range src.Statement.Fields { + k := f.Name() + typ := EvalType(f.Expr, src.Statement.Sources, m) + 
+ if _, ok := fields[k]; typ != Unknown && (!ok || typ < fields[k]) { + fields[k] = typ + } + } + for _, d := range src.Statement.Dimensions { + switch d := d.Expr.(type) { + case *VarRef: + dimensions[d.Val] = struct{}{} + } + } + } + } + return +} + // Reduce evaluates expr using the available values in valuer. // References that don't exist in valuer are ignored. func Reduce(expr Expr, valuer Valuer) Expr { @@ -4245,6 +4540,8 @@ func reduce(expr Expr, valuer Valuer) Expr { return reduceParenExpr(expr, valuer) case *VarRef: return reduceVarRef(expr, valuer) + case *nilLiteral: + return expr default: return CloneExpr(expr) } @@ -4771,3 +5068,13 @@ func (v *containsVarRefVisitor) Visit(n Node) Visitor { } return v } + +func IsSelector(expr Expr) bool { + if call, ok := expr.(*Call); ok { + switch call.Name { + case "first", "last", "min", "max", "percentile", "sample", "top", "bottom": + return true + } + } + return false +} diff --git a/influxql/ast_test.go b/influxql/ast_test.go index f1d3eab582..65f7126542 100644 --- a/influxql/ast_test.go +++ b/influxql/ast_test.go @@ -407,6 +407,12 @@ func TestSelectStatement_RewriteFields(t *testing.T) { stmt: `SELECT mean(/1/) FROM cpu`, rewrite: `SELECT mean(value1::float) AS mean_value1 FROM cpu`, }, + + // Rewrite subquery + { + stmt: `SELECT * FROM (SELECT mean(value1) FROM cpu GROUP BY host) GROUP BY *`, + rewrite: `SELECT mean::float FROM (SELECT mean(value1::float) FROM cpu GROUP BY host) GROUP BY host`, + }, } for i, tt := range tests { @@ -417,9 +423,8 @@ func TestSelectStatement_RewriteFields(t *testing.T) { } var ic IteratorCreator - ic.FieldDimensionsFn = func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - source := sources[0].(*influxql.Measurement) - switch source.Name { + ic.FieldDimensionsFn = func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + switch m.Name { case "cpu": fields = 
map[string]influxql.DataType{ "value1": influxql.Float, @@ -1071,6 +1076,80 @@ func TestEval(t *testing.T) { } } +type EvalFixture map[string]map[string]influxql.DataType + +func (e EvalFixture) MapType(measurement *influxql.Measurement, field string) influxql.DataType { + m := e[measurement.Name] + if m == nil { + return influxql.Unknown + } + return m[field] +} + +func TestEvalType(t *testing.T) { + for i, tt := range []struct { + name string + in string + typ influxql.DataType + data EvalFixture + }{ + { + name: `a single data type`, + in: `min(value)`, + typ: influxql.Integer, + data: EvalFixture{ + "cpu": map[string]influxql.DataType{ + "value": influxql.Integer, + }, + }, + }, + { + name: `multiple data types`, + in: `min(value)`, + typ: influxql.Integer, + data: EvalFixture{ + "cpu": map[string]influxql.DataType{ + "value": influxql.Integer, + }, + "mem": map[string]influxql.DataType{ + "value": influxql.String, + }, + }, + }, + { + name: `count() with a float`, + in: `count(value)`, + typ: influxql.Integer, + data: EvalFixture{ + "cpu": map[string]influxql.DataType{ + "value": influxql.Float, + }, + }, + }, + { + name: `mean() with an integer`, + in: `mean(value)`, + typ: influxql.Float, + data: EvalFixture{ + "cpu": map[string]influxql.DataType{ + "value": influxql.Integer, + }, + }, + }, + } { + sources := make([]influxql.Source, 0, len(tt.data)) + for src := range tt.data { + sources = append(sources, &influxql.Measurement{Name: src}) + } + + expr := influxql.MustParseExpr(tt.in) + typ := influxql.EvalType(expr, sources, tt.data) + if typ != tt.typ { + t.Errorf("%d. %s: unexpected type:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.name, tt.typ, typ) + } + } +} + // Ensure an expression can be reduced. 
func TestReduce(t *testing.T) { now := mustParseTime("2000-01-01T00:00:00Z") diff --git a/influxql/call_iterator.go b/influxql/call_iterator.go index 779e01381e..908cee06a9 100644 --- a/influxql/call_iterator.go +++ b/influxql/call_iterator.go @@ -67,25 +67,25 @@ func newCountIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatFuncIntegerReducer(FloatCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) return fn, fn } - return &floatReduceIntegerIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceIntegerIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerFuncReducer(IntegerCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil case StringIterator: createFn := func() (StringPointAggregator, IntegerPointEmitter) { fn := NewStringFuncIntegerReducer(StringCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) return fn, fn } - return &stringReduceIntegerIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + return newStringReduceIntegerIterator(input, opt, createFn), nil case BooleanIterator: createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { fn := NewBooleanFuncIntegerReducer(BooleanCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) return fn, fn } - return &booleanReduceIntegerIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + return newBooleanReduceIntegerIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported count iterator type: %T", input) } @@ -131,19 +131,19 @@ func newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatFuncReducer(FloatMinReduce, nil) return fn, fn } - return 
&floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerFuncReducer(IntegerMinReduce, nil) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil case BooleanIterator: createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { fn := NewBooleanFuncReducer(BooleanMinReduce, nil) return fn, fn } - return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + return newBooleanReduceBooleanIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported min iterator type: %T", input) } @@ -152,7 +152,7 @@ func newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) { // FloatMinReduce returns the minimum value between prev & curr. func FloatMinReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -160,7 +160,7 @@ func FloatMinReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { // IntegerMinReduce returns the minimum value between prev & curr. 
func IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -168,7 +168,7 @@ func IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { // BooleanMinReduce returns the minimum value between prev & curr. func BooleanMinReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { if prev == nil || (curr.Value != prev.Value && !curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -181,19 +181,19 @@ func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatFuncReducer(FloatMaxReduce, nil) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerFuncReducer(IntegerMaxReduce, nil) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil case BooleanIterator: createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { fn := NewBooleanFuncReducer(BooleanMaxReduce, nil) return fn, fn } - return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + return newBooleanReduceBooleanIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported max iterator type: %T", input) } @@ -202,7 +202,7 @@ func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) { // FloatMaxReduce 
returns the maximum value between prev & curr. func FloatMaxReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -210,7 +210,7 @@ func FloatMaxReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { // IntegerMaxReduce returns the maximum value between prev & curr. func IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -218,7 +218,7 @@ func IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { // BooleanMaxReduce returns the minimum value between prev & curr. 
func BooleanMaxReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { if prev == nil || (curr.Value != prev.Value && curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -231,13 +231,13 @@ func newSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatFuncReducer(FloatSumReduce, &FloatPoint{Value: 0, Time: ZeroTime}) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerFuncReducer(IntegerSumReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported sum iterator type: %T", input) } @@ -267,25 +267,25 @@ func newFirstIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatFuncReducer(FloatFirstReduce, nil) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerFuncReducer(IntegerFirstReduce, nil) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil case StringIterator: createFn := func() (StringPointAggregator, StringPointEmitter) { fn := NewStringFuncReducer(StringFirstReduce, nil) return fn, fn } - return 
&stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + return newStringReduceStringIterator(input, opt, createFn), nil case BooleanIterator: createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { fn := NewBooleanFuncReducer(BooleanFirstReduce, nil) return fn, fn } - return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + return newBooleanReduceBooleanIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported first iterator type: %T", input) } @@ -294,7 +294,7 @@ func newFirstIterator(input Iterator, opt IteratorOptions) (Iterator, error) { // FloatFirstReduce returns the first point sorted by time. func FloatFirstReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -302,7 +302,7 @@ func FloatFirstReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { // IntegerFirstReduce returns the first point sorted by time. func IntegerFirstReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -310,7 +310,7 @@ func IntegerFirstReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) // StringFirstReduce returns the first point sorted by time. 
func StringFirstReduce(prev, curr *StringPoint) (int64, string, []interface{}) { if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -318,7 +318,7 @@ func StringFirstReduce(prev, curr *StringPoint) (int64, string, []interface{}) { // BooleanFirstReduce returns the first point sorted by time. func BooleanFirstReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && !curr.Value && prev.Value) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -331,25 +331,25 @@ func newLastIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatFuncReducer(FloatLastReduce, nil) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerFuncReducer(IntegerLastReduce, nil) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil case StringIterator: createFn := func() (StringPointAggregator, StringPointEmitter) { fn := NewStringFuncReducer(StringLastReduce, nil) return fn, fn } - return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + return newStringReduceStringIterator(input, opt, createFn), nil case BooleanIterator: createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { fn := NewBooleanFuncReducer(BooleanLastReduce, nil) return fn, fn } - return &booleanReduceBooleanIterator{input: 
newBufBooleanIterator(input), opt: opt, create: createFn}, nil + return newBooleanReduceBooleanIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported last iterator type: %T", input) } @@ -358,7 +358,7 @@ func newLastIterator(input Iterator, opt IteratorOptions) (Iterator, error) { // FloatLastReduce returns the last point sorted by time. func FloatLastReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -366,7 +366,7 @@ func FloatLastReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { // IntegerLastReduce returns the last point sorted by time. func IntegerLastReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -374,7 +374,7 @@ func IntegerLastReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { // StringLastReduce returns the first point sorted by time. func StringLastReduce(prev, curr *StringPoint) (int64, string, []interface{}) { if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -382,7 +382,7 @@ func StringLastReduce(prev, curr *StringPoint) (int64, string, []interface{}) { // BooleanLastReduce returns the first point sorted by time. 
func BooleanLastReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value && !prev.Value) { - return curr.Time, curr.Value, curr.Aux + return curr.Time, curr.Value, cloneAux(curr.Aux) } return prev.Time, prev.Value, prev.Aux } @@ -395,25 +395,25 @@ func NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) fn := NewFloatDistinctReducer() return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerDistinctReducer() return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil case StringIterator: createFn := func() (StringPointAggregator, StringPointEmitter) { fn := NewStringDistinctReducer() return fn, fn } - return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + return newStringReduceStringIterator(input, opt, createFn), nil case BooleanIterator: createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { fn := NewBooleanDistinctReducer() return fn, fn } - return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + return newBooleanReduceBooleanIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported distinct iterator type: %T", input) } @@ -427,13 +427,13 @@ func newMeanIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatMeanReducer() return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := 
func() (IntegerPointAggregator, FloatPointEmitter) { fn := NewIntegerMeanReducer() return fn, fn } - return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceFloatIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported mean iterator type: %T", input) } @@ -452,13 +452,13 @@ func newMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatSliceFuncReducer(FloatMedianReduceSlice) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, FloatPointEmitter) { fn := NewIntegerSliceFuncFloatReducer(IntegerMedianReduceSlice) return fn, fn } - return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceFloatIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported median iterator type: %T", input) } @@ -508,26 +508,25 @@ func NewModeIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatSliceFuncReducer(FloatModeReduceSlice) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerSliceFuncReducer(IntegerModeReduceSlice) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil case StringIterator: createFn := func() (StringPointAggregator, StringPointEmitter) { fn := NewStringSliceFuncReducer(StringModeReduceSlice) return fn, fn } - return &stringReduceStringIterator{input: 
newBufStringIterator(input), opt: opt, create: createFn}, nil + return newStringReduceStringIterator(input, opt, createFn), nil case BooleanIterator: createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { fn := NewBooleanSliceFuncReducer(BooleanModeReduceSlice) return fn, fn } - return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil - + return newBooleanReduceBooleanIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported median iterator type: %T", input) } @@ -668,13 +667,13 @@ func newStddevIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatSliceFuncReducer(FloatStddevReduceSlice) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, FloatPointEmitter) { fn := NewIntegerSliceFuncFloatReducer(IntegerStddevReduceSlice) return fn, fn } - return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceFloatIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported stddev iterator type: %T", input) } @@ -746,13 +745,13 @@ func newSpreadIterator(input Iterator, opt IteratorOptions) (Iterator, error) { fn := NewFloatSliceFuncReducer(FloatSpreadReduceSlice) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerSliceFuncReducer(IntegerSpreadReduceSlice) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil 
default: return nil, fmt.Errorf("unsupported spread iterator type: %T", input) } @@ -792,14 +791,14 @@ func newTopIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags fn := NewFloatSliceFuncReducer(aggregateFn) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: aggregateFn := NewIntegerTopReduceSliceFunc(int(n.Val), tags, opt.Interval) createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerSliceFuncReducer(aggregateFn) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported top iterator type: %T", input) } @@ -905,14 +904,14 @@ func newBottomIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, t fn := NewFloatSliceFuncReducer(aggregateFn) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: aggregateFn := NewIntegerBottomReduceSliceFunc(int(n.Val), tags, opt.Interval) createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerSliceFuncReducer(aggregateFn) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported bottom iterator type: %T", input) } @@ -1079,14 +1078,14 @@ func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float fn := NewFloatSliceFuncReducer(floatPercentileReduceSlice) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + 
return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: integerPercentileReduceSlice := NewIntegerPercentileReduceSliceFunc(percentile) createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerSliceFuncReducer(integerPercentileReduceSlice) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported percentile iterator type: %T", input) } @@ -1103,7 +1102,7 @@ func NewFloatPercentileReduceSliceFunc(percentile float64) FloatReduceSliceFunc } sort.Sort(floatPointsByValue(a)) - return []FloatPoint{{Time: a[i].Time, Value: a[i].Value, Aux: a[i].Aux}} + return []FloatPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} } } @@ -1118,7 +1117,7 @@ func NewIntegerPercentileReduceSliceFunc(percentile float64) IntegerReduceSliceF } sort.Sort(integerPointsByValue(a)) - return []IntegerPoint{{Time: a[i].Time, Value: a[i].Value, Aux: a[i].Aux}} + return []IntegerPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} } } @@ -1242,13 +1241,13 @@ func newHoltWintersIterator(input Iterator, opt IteratorOptions, h, m int, inclu fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, FloatPointEmitter) { fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval) return fn, fn } - return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceFloatIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) } @@ -1267,25 +1266,25 @@ func 
newSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, fn := NewFloatSampleReducer(size) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatReduceFloatIterator(input, opt, createFn), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { fn := NewIntegerSampleReducer(size) return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerReduceIntegerIterator(input, opt, createFn), nil case StringIterator: createFn := func() (StringPointAggregator, StringPointEmitter) { fn := NewStringSampleReducer(size) return fn, fn } - return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + return newStringReduceStringIterator(input, opt, createFn), nil case BooleanIterator: createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { fn := NewBooleanSampleReducer(size) return fn, fn } - return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + return newBooleanReduceBooleanIterator(input, opt, createFn), nil default: return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) } diff --git a/influxql/emitter.go b/influxql/emitter.go index 9c914b0046..07b665a207 100644 --- a/influxql/emitter.go +++ b/influxql/emitter.go @@ -148,7 +148,6 @@ func (e *Emitter) createRow(name string, tags Tags, values []interface{}) { // readAt returns the next slice of values from the iterators at time/name/tags. // Returns nil values once the iterators are exhausted. func (e *Emitter) readAt(t int64, name string, tags Tags) []interface{} { - // If time is included then move colums over by one. 
offset := 1 if e.OmitTime { offset = 0 @@ -158,29 +157,31 @@ func (e *Emitter) readAt(t int64, name string, tags Tags) []interface{} { if !e.OmitTime { values[0] = time.Unix(0, t).UTC() } + e.readInto(t, name, tags, values[offset:]) + return values +} +func (e *Emitter) readInto(t int64, name string, tags Tags, values []interface{}) { for i, p := range e.buf { // Skip if buffer is empty. if p == nil { - values[i+offset] = nil + values[i] = nil continue } // Skip point if it doesn't match time/name/tags. pTags := p.tags() if p.time() != t || p.name() != name || !pTags.Equals(&tags) { - values[i+offset] = nil + values[i] = nil continue } // Read point value. - values[i+offset] = p.value() + values[i] = p.value() // Clear buffer. e.buf[i] = nil } - - return values } // readIterator reads the next point from itr. diff --git a/influxql/functions.gen.go b/influxql/functions.gen.go index 85e92f392a..1bc391db45 100644 --- a/influxql/functions.gen.go +++ b/influxql/functions.gen.go @@ -381,7 +381,7 @@ func (r *FloatElapsedReducer) Emit() []IntegerPoint { return nil } -// FloatSampleReduces implements a reservoir sampling to calculate a random subset of points +// FloatSampleReducer implements a reservoir sampling to calculate a random subset of points type FloatSampleReducer struct { count int // how many points we've iterated over rng *rand.Rand // random number generator for each reducer @@ -402,7 +402,7 @@ func (r *FloatSampleReducer) AggregateFloat(p *FloatPoint) { r.count++ // Fill the reservoir with the first n points if r.count-1 < len(r.points) { - r.points[r.count-1] = *p + p.CopyTo(&r.points[r.count-1]) return } @@ -411,7 +411,7 @@ func (r *FloatSampleReducer) AggregateFloat(p *FloatPoint) { // replace the point at that index rnd with p. 
rnd := r.rng.Intn(r.count) if rnd < len(r.points) { - r.points[rnd] = *p + p.CopyTo(&r.points[rnd]) } } @@ -795,7 +795,7 @@ func (r *IntegerElapsedReducer) Emit() []IntegerPoint { return nil } -// IntegerSampleReduces implements a reservoir sampling to calculate a random subset of points +// IntegerSampleReducer implements a reservoir sampling to calculate a random subset of points type IntegerSampleReducer struct { count int // how many points we've iterated over rng *rand.Rand // random number generator for each reducer @@ -816,7 +816,7 @@ func (r *IntegerSampleReducer) AggregateInteger(p *IntegerPoint) { r.count++ // Fill the reservoir with the first n points if r.count-1 < len(r.points) { - r.points[r.count-1] = *p + p.CopyTo(&r.points[r.count-1]) return } @@ -825,7 +825,7 @@ func (r *IntegerSampleReducer) AggregateInteger(p *IntegerPoint) { // replace the point at that index rnd with p. rnd := r.rng.Intn(r.count) if rnd < len(r.points) { - r.points[rnd] = *p + p.CopyTo(&r.points[rnd]) } } @@ -1209,7 +1209,7 @@ func (r *StringElapsedReducer) Emit() []IntegerPoint { return nil } -// StringSampleReduces implements a reservoir sampling to calculate a random subset of points +// StringSampleReducer implements a reservoir sampling to calculate a random subset of points type StringSampleReducer struct { count int // how many points we've iterated over rng *rand.Rand // random number generator for each reducer @@ -1230,7 +1230,7 @@ func (r *StringSampleReducer) AggregateString(p *StringPoint) { r.count++ // Fill the reservoir with the first n points if r.count-1 < len(r.points) { - r.points[r.count-1] = *p + p.CopyTo(&r.points[r.count-1]) return } @@ -1239,7 +1239,7 @@ func (r *StringSampleReducer) AggregateString(p *StringPoint) { // replace the point at that index rnd with p. 
rnd := r.rng.Intn(r.count) if rnd < len(r.points) { - r.points[rnd] = *p + p.CopyTo(&r.points[rnd]) } } @@ -1623,7 +1623,7 @@ func (r *BooleanElapsedReducer) Emit() []IntegerPoint { return nil } -// BooleanSampleReduces implements a reservoir sampling to calculate a random subset of points +// BooleanSampleReducer implements a reservoir sampling to calculate a random subset of points type BooleanSampleReducer struct { count int // how many points we've iterated over rng *rand.Rand // random number generator for each reducer @@ -1644,7 +1644,7 @@ func (r *BooleanSampleReducer) AggregateBoolean(p *BooleanPoint) { r.count++ // Fill the reservoir with the first n points if r.count-1 < len(r.points) { - r.points[r.count-1] = *p + p.CopyTo(&r.points[r.count-1]) return } @@ -1653,7 +1653,7 @@ func (r *BooleanSampleReducer) AggregateBoolean(p *BooleanPoint) { // replace the point at that index rnd with p. rnd := r.rng.Intn(r.count) if rnd < len(r.points) { - r.points[rnd] = *p + p.CopyTo(&r.points[rnd]) } } diff --git a/influxql/functions.gen.go.tmpl b/influxql/functions.gen.go.tmpl index 2b9a244893..86a50862a6 100644 --- a/influxql/functions.gen.go.tmpl +++ b/influxql/functions.gen.go.tmpl @@ -170,7 +170,7 @@ func (r *{{$k.Name}}ElapsedReducer) Emit() []IntegerPoint { return nil } -// {{$k.Name}}SampleReduces implements a reservoir sampling to calculate a random subset of points +// {{$k.Name}}SampleReducer implements a reservoir sampling to calculate a random subset of points type {{$k.Name}}SampleReducer struct { count int // how many points we've iterated over rng *rand.Rand // random number generator for each reducer @@ -191,7 +191,7 @@ func (r *{{$k.Name}}SampleReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { r.count++ // Fill the reservoir with the first n points if r.count-1 < len(r.points) { - r.points[r.count-1] = *p + p.CopyTo(&r.points[r.count-1]) return } @@ -200,7 +200,7 @@ func (r *{{$k.Name}}SampleReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { // 
replace the point at that index rnd with p. rnd := r.rng.Intn(r.count) if rnd < len(r.points) { - r.points[rnd] = *p + p.CopyTo(&r.points[rnd]) } } diff --git a/influxql/internal/internal.pb.go b/influxql/internal/internal.pb.go index cfafbf4395..700a470650 100644 --- a/influxql/internal/internal.pb.go +++ b/influxql/internal/internal.pb.go @@ -188,6 +188,7 @@ type IteratorOptions struct { Sources []*Measurement `protobuf:"bytes,3,rep,name=Sources" json:"Sources,omitempty"` Interval *Interval `protobuf:"bytes,4,opt,name=Interval" json:"Interval,omitempty"` Dimensions []string `protobuf:"bytes,5,rep,name=Dimensions" json:"Dimensions,omitempty"` + GroupBy []string `protobuf:"bytes,19,rep,name=GroupBy" json:"GroupBy,omitempty"` Fill *int32 `protobuf:"varint,6,opt,name=Fill" json:"Fill,omitempty"` FillValue *float64 `protobuf:"fixed64,7,opt,name=FillValue" json:"FillValue,omitempty"` Condition *string `protobuf:"bytes,8,opt,name=Condition" json:"Condition,omitempty"` @@ -199,6 +200,8 @@ type IteratorOptions struct { SLimit *int64 `protobuf:"varint,14,opt,name=SLimit" json:"SLimit,omitempty"` SOffset *int64 `protobuf:"varint,15,opt,name=SOffset" json:"SOffset,omitempty"` Dedupe *bool `protobuf:"varint,16,opt,name=Dedupe" json:"Dedupe,omitempty"` + MaxSeriesN *int64 `protobuf:"varint,18,opt,name=MaxSeriesN" json:"MaxSeriesN,omitempty"` + Ordered *bool `protobuf:"varint,20,opt,name=Ordered" json:"Ordered,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -249,6 +252,13 @@ func (m *IteratorOptions) GetDimensions() []string { return nil } +func (m *IteratorOptions) GetGroupBy() []string { + if m != nil { + return m.GroupBy + } + return nil +} + func (m *IteratorOptions) GetFill() int32 { if m != nil && m.Fill != nil { return *m.Fill @@ -326,6 +336,20 @@ func (m *IteratorOptions) GetDedupe() bool { return false } +func (m *IteratorOptions) GetMaxSeriesN() int64 { + if m != nil && m.MaxSeriesN != nil { + return *m.MaxSeriesN + } + return 0 +} + +func (m *IteratorOptions) 
GetOrdered() bool { + if m != nil && m.Ordered != nil { + return *m.Ordered + } + return false +} + type Measurements struct { Items []*Measurement `protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -481,48 +505,51 @@ func init() { func init() { proto.RegisterFile("internal/internal.proto", fileDescriptorInternal) } var fileDescriptorInternal = []byte{ - // 685 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x54, 0xd1, 0x6a, 0xdc, 0x3a, - 0x10, 0xc5, 0xf6, 0x7a, 0x63, 0x6b, 0xb3, 0x37, 0xb9, 0x22, 0xf7, 0x46, 0x94, 0xd2, 0x1a, 0x3f, - 0x19, 0x4a, 0x37, 0x90, 0xd7, 0x42, 0x61, 0xdb, 0x24, 0xb0, 0xd0, 0x6e, 0x82, 0x1c, 0xf2, 0xae, - 0x66, 0x67, 0x8d, 0xc0, 0x2b, 0x6f, 0x65, 0xb9, 0x6c, 0xde, 0xfa, 0x1b, 0xfd, 0x86, 0x7e, 0x4c, - 0x7f, 0xa9, 0x68, 0x64, 0xaf, 0x9d, 0x14, 0x9a, 0x27, 0xcf, 0x39, 0x33, 0x92, 0x7c, 0x66, 0x8e, - 0x44, 0x4e, 0xa5, 0x32, 0xa0, 0x95, 0x28, 0xcf, 0xba, 0x60, 0xb6, 0xd5, 0x95, 0xa9, 0x68, 0x24, - 0xd5, 0xba, 0x6c, 0x76, 0x5f, 0xcb, 0xf4, 0x97, 0x4f, 0xc2, 0x9b, 0x4a, 0x2a, 0x43, 0x29, 0x19, - 0x2d, 0xc5, 0x06, 0x98, 0x97, 0xf8, 0x59, 0xcc, 0x31, 0xb6, 0xdc, 0xad, 0x28, 0x6a, 0xe6, 0x3b, - 0xce, 0xc6, 0xc8, 0xc9, 0x0d, 0xb0, 0x20, 0xf1, 0xb3, 0x80, 0x63, 0x4c, 0x8f, 0x49, 0xb0, 0x94, - 0x25, 0x1b, 0x25, 0x7e, 0x16, 0x71, 0x1b, 0xd2, 0xd7, 0x24, 0x98, 0x37, 0x3b, 0x16, 0x26, 0x41, - 0x36, 0x39, 0x9f, 0xce, 0xba, 0xf3, 0x66, 0xf3, 0x66, 0xc7, 0x6d, 0x86, 0xbe, 0x22, 0x64, 0x5e, - 0x14, 0x1a, 0x0a, 0x61, 0x60, 0xc5, 0xc6, 0x89, 0x97, 0x4d, 0xf9, 0x80, 0xb1, 0xf9, 0xab, 0xb2, - 0x12, 0xe6, 0x4e, 0x94, 0x0d, 0xb0, 0x83, 0xc4, 0xcb, 0x3c, 0x3e, 0x60, 0x68, 0x4a, 0x0e, 0x17, - 0xca, 0x40, 0x01, 0xda, 0x55, 0x44, 0x89, 0x97, 0x05, 0xfc, 0x11, 0x47, 0x13, 0x32, 0xc9, 0x8d, - 0x96, 0xaa, 0x70, 0x25, 0x71, 0xe2, 0x65, 0x31, 0x1f, 0x52, 0x76, 0x97, 0x0f, 0x55, 0x55, 0x82, - 0x50, 0xae, 0x84, 0x24, 0x5e, 0x16, 0xf1, 0x47, 0x1c, 0x7d, 0x4b, 0xc2, 0xdc, 
0x08, 0x53, 0xb3, - 0x49, 0xe2, 0x65, 0x93, 0xf3, 0xd3, 0x5e, 0xcc, 0xc2, 0x80, 0x16, 0xa6, 0xd2, 0x98, 0xe6, 0xae, - 0x2a, 0xfd, 0xe9, 0xa1, 0x74, 0xfa, 0x82, 0x44, 0x17, 0xc2, 0x88, 0xdb, 0x87, 0xad, 0xeb, 0x69, - 0xc8, 0xf7, 0xf8, 0x89, 0x38, 0xff, 0x59, 0x71, 0xc1, 0xf3, 0xe2, 0x46, 0xcf, 0x8b, 0x0b, 0xff, - 0x14, 0x97, 0x7e, 0x1f, 0x91, 0xa3, 0x4e, 0xc6, 0xf5, 0xd6, 0xc8, 0x4a, 0xe1, 0x84, 0x2f, 0x77, - 0x5b, 0xcd, 0x3c, 0xdc, 0x12, 0x63, 0x3b, 0x61, 0x3b, 0x4f, 0x3f, 0x09, 0xb2, 0xd8, 0x0d, 0x30, - 0x23, 0xe3, 0x2b, 0x09, 0xe5, 0xaa, 0x66, 0xff, 0xe2, 0x90, 0x8f, 0xfb, 0xbe, 0xdc, 0x09, 0xcd, - 0x61, 0xcd, 0xdb, 0x3c, 0x3d, 0x23, 0x07, 0x79, 0xd5, 0xe8, 0x7b, 0xa8, 0x59, 0x80, 0xa5, 0xff, - 0xf5, 0xa5, 0x9f, 0x41, 0xd4, 0x8d, 0x86, 0x0d, 0x28, 0xc3, 0xbb, 0x2a, 0x3a, 0x23, 0x91, 0x95, - 0xaa, 0xbf, 0x89, 0x12, 0x75, 0x4d, 0xce, 0xe9, 0xa0, 0xe9, 0x6d, 0x86, 0xef, 0x6b, 0x6c, 0x3b, - 0x2f, 0xe4, 0x06, 0x54, 0x6d, 0x7f, 0x1f, 0x3d, 0x17, 0xf3, 0x01, 0x63, 0x05, 0x5d, 0xc9, 0xb2, - 0x44, 0x97, 0x85, 0x1c, 0x63, 0xfa, 0x92, 0xc4, 0xf6, 0x3b, 0xb4, 0x57, 0x4f, 0xd8, 0xec, 0xc7, - 0x4a, 0xad, 0xa4, 0x6d, 0x08, 0x5a, 0x2b, 0xe6, 0x3d, 0x61, 0xb3, 0xb9, 0x11, 0xda, 0xe0, 0x3d, - 0x88, 0x71, 0x36, 0x3d, 0x41, 0x19, 0x39, 0xb8, 0x54, 0x2b, 0xcc, 0x11, 0xcc, 0x75, 0xd0, 0xae, - 0x9b, 0xd7, 0xf7, 0xa0, 0x56, 0x52, 0x15, 0xe8, 0xa6, 0x88, 0xf7, 0x04, 0x3d, 0x21, 0xe1, 0x27, - 0xb9, 0x91, 0x86, 0x1d, 0xe2, 0x2a, 0x07, 0xe8, 0xff, 0x64, 0x7c, 0xbd, 0x5e, 0xd7, 0x60, 0xd8, - 0x14, 0xe9, 0x16, 0x59, 0x3e, 0x77, 0xe5, 0xff, 0x38, 0xde, 0x21, 0x7b, 0x7a, 0xde, 0x2e, 0x38, - 0x72, 0xa7, 0xe7, 0xfd, 0x8a, 0x0b, 0x58, 0x35, 0x5b, 0x60, 0xc7, 0x78, 0x74, 0x8b, 0xd2, 0x77, - 0xe4, 0x70, 0x30, 0x85, 0x9a, 0xbe, 0x21, 0xe1, 0xc2, 0xc0, 0xa6, 0x66, 0xde, 0xdf, 0x86, 0xe5, - 0x6a, 0xd2, 0x1f, 0x1e, 0x99, 0x0c, 0xe8, 0xce, 0xf5, 0x5f, 0x44, 0x0d, 0xad, 0x7f, 0xf6, 0x98, - 0x66, 0xe4, 0x88, 0x83, 0x01, 0x65, 0x7b, 0x78, 0x53, 0x95, 0xf2, 0xfe, 0x01, 0xad, 0x1f, 0xf3, - 0xa7, 0xf4, 0xfe, 
0x2d, 0x0a, 0x9c, 0x03, 0xf1, 0x2d, 0x3a, 0x21, 0x21, 0x87, 0x02, 0x76, 0xad, - 0xd3, 0x1d, 0xb0, 0xe7, 0x2d, 0xea, 0x5b, 0xa1, 0x0b, 0x30, 0xad, 0xbf, 0xf7, 0x38, 0x7d, 0xdf, - 0xdb, 0x08, 0xff, 0xab, 0xd1, 0x02, 0xe7, 0xe9, 0x61, 0x5f, 0xf6, 0x78, 0xd0, 0x62, 0x7f, 0xd8, - 0xe2, 0x74, 0x4e, 0xa6, 0x8f, 0x6e, 0x38, 0xf6, 0x16, 0xb4, 0x84, 0x7a, 0xd9, 0xee, 0xd1, 0x41, - 0xbb, 0x05, 0xbe, 0xa2, 0xcb, 0x6e, 0x0b, 0x87, 0xd2, 0x19, 0x19, 0xbb, 0xcb, 0x60, 0x2f, 0xd0, - 0x9d, 0x28, 0xdb, 0xd7, 0xd5, 0x86, 0xf8, 0x90, 0xda, 0xc7, 0xc1, 0x77, 0xae, 0xb4, 0xf1, 0xef, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x01, 0x5d, 0x11, 0xb2, 0x05, 0x00, 0x00, + // 726 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x54, 0x51, 0x6b, 0xe3, 0x46, + 0x10, 0x46, 0x92, 0xe5, 0x58, 0xeb, 0xb8, 0x49, 0xb7, 0x69, 0xb2, 0x94, 0xd2, 0x0a, 0x3d, 0x09, + 0x4a, 0x1d, 0xc8, 0x6b, 0xa1, 0xe0, 0x34, 0x49, 0x31, 0x5c, 0x9c, 0xb0, 0x0a, 0x79, 0xdf, 0x8b, + 0xc6, 0x62, 0x41, 0x5e, 0xf9, 0x56, 0xab, 0xc3, 0xf9, 0x29, 0xf7, 0x1b, 0xee, 0xc7, 0xdc, 0xcb, + 0xfd, 0xa0, 0x63, 0x67, 0x25, 0x4b, 0xc9, 0xc1, 0xe5, 0x49, 0xf3, 0x7d, 0x33, 0xbb, 0xab, 0x99, + 0xf9, 0x66, 0xc8, 0x99, 0x54, 0x06, 0xb4, 0x12, 0xe5, 0x79, 0x67, 0xcc, 0xb7, 0xba, 0x32, 0x15, + 0x9d, 0x48, 0xb5, 0x2e, 0x9b, 0xdd, 0x87, 0x32, 0xf9, 0xe2, 0x93, 0xf0, 0xbe, 0x92, 0xca, 0x50, + 0x4a, 0x46, 0x2b, 0xb1, 0x01, 0xe6, 0xc5, 0x7e, 0x1a, 0x71, 0xb4, 0x2d, 0xf7, 0x20, 0x8a, 0x9a, + 0xf9, 0x8e, 0xb3, 0x36, 0x72, 0x72, 0x03, 0x2c, 0x88, 0xfd, 0x34, 0xe0, 0x68, 0xd3, 0x63, 0x12, + 0xac, 0x64, 0xc9, 0x46, 0xb1, 0x9f, 0x4e, 0xb8, 0x35, 0xe9, 0x9f, 0x24, 0x58, 0x34, 0x3b, 0x16, + 0xc6, 0x41, 0x3a, 0xbd, 0x98, 0xcd, 0xbb, 0xf7, 0xe6, 0x8b, 0x66, 0xc7, 0xad, 0x87, 0xfe, 0x41, + 0xc8, 0xa2, 0x28, 0x34, 0x14, 0xc2, 0x40, 0xce, 0xc6, 0xb1, 0x97, 0xce, 0xf8, 0x80, 0xb1, 0xfe, + 0x9b, 0xb2, 0x12, 0xe6, 0x51, 0x94, 0x0d, 0xb0, 0x83, 0xd8, 0x4b, 0x3d, 0x3e, 0x60, 0x68, 0x42, + 0x0e, 0x97, 0xca, 0x40, 
0x01, 0xda, 0x45, 0x4c, 0x62, 0x2f, 0x0d, 0xf8, 0x0b, 0x8e, 0xc6, 0x64, + 0x9a, 0x19, 0x2d, 0x55, 0xe1, 0x42, 0xa2, 0xd8, 0x4b, 0x23, 0x3e, 0xa4, 0xec, 0x2d, 0x97, 0x55, + 0x55, 0x82, 0x50, 0x2e, 0x84, 0xc4, 0x5e, 0x3a, 0xe1, 0x2f, 0x38, 0xfa, 0x37, 0x09, 0x33, 0x23, + 0x4c, 0xcd, 0xa6, 0xb1, 0x97, 0x4e, 0x2f, 0xce, 0xfa, 0x64, 0x96, 0x06, 0xb4, 0x30, 0x95, 0x46, + 0x37, 0x77, 0x51, 0xc9, 0x67, 0x0f, 0x53, 0xa7, 0xbf, 0x91, 0xc9, 0x95, 0x30, 0xe2, 0xe1, 0x79, + 0xeb, 0x6a, 0x1a, 0xf2, 0x3d, 0x7e, 0x95, 0x9c, 0xff, 0x66, 0x72, 0xc1, 0xdb, 0xc9, 0x8d, 0xde, + 0x4e, 0x2e, 0xfc, 0x3e, 0xb9, 0xe4, 0xeb, 0x88, 0x1c, 0x75, 0x69, 0xdc, 0x6d, 0x8d, 0xac, 0x14, + 0x76, 0xf8, 0x7a, 0xb7, 0xd5, 0xcc, 0xc3, 0x2b, 0xd1, 0xb6, 0x1d, 0xb6, 0xfd, 0xf4, 0xe3, 0x20, + 0x8d, 0x5c, 0x03, 0x53, 0x32, 0xbe, 0x91, 0x50, 0xe6, 0x35, 0xfb, 0x19, 0x9b, 0x7c, 0xdc, 0xd7, + 0xe5, 0x51, 0x68, 0x0e, 0x6b, 0xde, 0xfa, 0xe9, 0x39, 0x39, 0xc8, 0xaa, 0x46, 0x3f, 0x41, 0xcd, + 0x02, 0x0c, 0xfd, 0xb5, 0x0f, 0xbd, 0x05, 0x51, 0x37, 0x1a, 0x36, 0xa0, 0x0c, 0xef, 0xa2, 0xe8, + 0x9c, 0x4c, 0x6c, 0xaa, 0xfa, 0xa3, 0x28, 0x31, 0xaf, 0xe9, 0x05, 0x1d, 0x14, 0xbd, 0xf5, 0xf0, + 0x7d, 0x8c, 0x2d, 0xe7, 0x95, 0xdc, 0x80, 0xaa, 0xed, 0xef, 0xa3, 0xe6, 0x22, 0x3e, 0x60, 0x28, + 0x23, 0x07, 0xff, 0xeb, 0xaa, 0xd9, 0x5e, 0x3e, 0xb3, 0x5f, 0xd0, 0xd9, 0x41, 0x9b, 0xea, 0x8d, + 0x2c, 0x4b, 0xd4, 0x5f, 0xc8, 0xd1, 0xa6, 0xbf, 0x93, 0xc8, 0x7e, 0x87, 0xc2, 0xeb, 0x09, 0xeb, + 0xfd, 0xaf, 0x52, 0xb9, 0xb4, 0xa5, 0x42, 0xd1, 0x45, 0xbc, 0x27, 0xac, 0x37, 0x33, 0x42, 0x1b, + 0x9c, 0x90, 0x08, 0xbb, 0xd6, 0x13, 0xf6, 0x3f, 0xae, 0x55, 0x8e, 0x3e, 0x82, 0xbe, 0x0e, 0xda, + 0x73, 0x8b, 0xfa, 0x09, 0x54, 0x2e, 0x55, 0x81, 0x3a, 0x9b, 0xf0, 0x9e, 0xa0, 0x27, 0x24, 0x7c, + 0x27, 0x37, 0xd2, 0xb0, 0x43, 0x3c, 0xe5, 0x00, 0x3d, 0x25, 0xe3, 0xbb, 0xf5, 0xba, 0x06, 0xc3, + 0x66, 0x48, 0xb7, 0xc8, 0xf2, 0x99, 0x0b, 0xff, 0xc9, 0xf1, 0x0e, 0xd9, 0xd7, 0xb3, 0xf6, 0xc0, + 0x91, 0x7b, 0x3d, 0xeb, 0x4f, 0x5c, 0x41, 0xde, 0x6c, 0x81, 
0x1d, 0xe3, 0xd3, 0x2d, 0xb2, 0x75, + 0xbd, 0x15, 0xbb, 0x0c, 0xb4, 0x84, 0x7a, 0xc5, 0x28, 0x1e, 0x1a, 0x30, 0xf6, 0xc6, 0x3b, 0x9d, + 0x83, 0x86, 0x9c, 0x9d, 0xe0, 0xc1, 0x0e, 0x26, 0xff, 0x90, 0xc3, 0x41, 0x67, 0x6b, 0xfa, 0x17, + 0x09, 0x97, 0x06, 0x36, 0x35, 0xf3, 0x7e, 0x24, 0x00, 0x17, 0x93, 0x7c, 0xf2, 0xc8, 0x74, 0x40, + 0x77, 0x93, 0xf4, 0x5e, 0xd4, 0xd0, 0x6a, 0x72, 0x8f, 0x69, 0x4a, 0x8e, 0x38, 0x18, 0x50, 0xb6, + 0xfa, 0xf7, 0x55, 0x29, 0x9f, 0x9e, 0x71, 0x9c, 0x22, 0xfe, 0x9a, 0xde, 0xef, 0xb7, 0xc0, 0xa9, + 0x1a, 0xf7, 0xdb, 0x09, 0x09, 0x39, 0x14, 0xb0, 0x6b, 0xa7, 0xc7, 0x01, 0xfb, 0xde, 0xb2, 0x7e, + 0x10, 0xba, 0x00, 0xd3, 0xce, 0xcc, 0x1e, 0x27, 0xff, 0xf6, 0xd2, 0xc4, 0xff, 0x6a, 0xb4, 0x40, + 0x25, 0x78, 0x58, 0x9c, 0x3d, 0x1e, 0x34, 0xc7, 0x1f, 0x36, 0x27, 0x59, 0x90, 0xd9, 0x8b, 0xad, + 0x81, 0x5d, 0x69, 0x0b, 0xec, 0xb5, 0x5d, 0x69, 0xab, 0x7b, 0x4a, 0xc6, 0xb8, 0x99, 0x57, 0xdd, + 0x15, 0x0e, 0x25, 0x73, 0x32, 0x76, 0x03, 0x66, 0x87, 0xf2, 0x51, 0x94, 0xed, 0xc6, 0xb6, 0x26, + 0x2e, 0x67, 0xbb, 0x70, 0x7c, 0xa7, 0x67, 0x6b, 0x7f, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x2a, + 0x83, 0x14, 0x06, 0x06, 0x00, 0x00, } diff --git a/influxql/internal/internal.proto b/influxql/internal/internal.proto index 5f15c9358f..cbbc976d7d 100644 --- a/influxql/internal/internal.proto +++ b/influxql/internal/internal.proto @@ -1,3 +1,4 @@ +syntax = "proto2"; package influxql; message Point { @@ -31,6 +32,7 @@ message IteratorOptions { repeated Measurement Sources = 3; optional Interval Interval = 4; repeated string Dimensions = 5; + repeated string GroupBy = 19; optional int32 Fill = 6; optional double FillValue = 7; optional string Condition = 8; @@ -42,6 +44,8 @@ message IteratorOptions { optional int64 SLimit = 14; optional int64 SOffset = 15; optional bool Dedupe = 16; + optional int64 MaxSeriesN = 18; + optional bool Ordered = 20; } message Measurements { diff --git a/influxql/iterator.gen.go b/influxql/iterator.gen.go index f76a80e174..00be1c116a 100644 
--- a/influxql/iterator.gen.go +++ b/influxql/iterator.gen.go @@ -9,7 +9,6 @@ package influxql import ( "container/heap" "encoding/binary" - "errors" "fmt" "io" "sort" @@ -205,7 +204,8 @@ func (itr *floatMergeIterator) Next() (*FloatPoint, error) { if err != nil { return nil, err } - itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) return p, nil } @@ -226,7 +226,7 @@ func (itr *floatMergeIterator) Next() (*FloatPoint, error) { inWindow := true if window := itr.window; window.name != p.Name { inWindow = false - } else if window.tags != p.Tags.ID() { + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { inWindow = false } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false @@ -253,9 +253,9 @@ type floatMergeHeap struct { items []*floatMergeHeapItem } -func (h floatMergeHeap) Len() int { return len(h.items) } -func (h floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h floatMergeHeap) Less(i, j int) bool { +func (h *floatMergeHeap) Len() int { return len(h.items) } +func (h *floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *floatMergeHeap) Less(i, j int) bool { x, err := h.items[i].itr.peek() if err != nil { return true @@ -268,14 +268,14 @@ func (h floatMergeHeap) Less(i, j int) bool { if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() } } else { if x.Name != y.Name { return x.Name > y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := 
x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() } } @@ -307,8 +307,7 @@ type floatMergeHeapItem struct { // floatSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. type floatSortedMergeIterator struct { inputs []FloatIterator - opt IteratorOptions - heap floatSortedMergeHeap + heap *floatSortedMergeHeap init bool } @@ -316,14 +315,16 @@ type floatSortedMergeIterator struct { func newFloatSortedMergeIterator(inputs []FloatIterator, opt IteratorOptions) Iterator { itr := &floatSortedMergeIterator{ inputs: inputs, - heap: make(floatSortedMergeHeap, 0, len(inputs)), - opt: opt, + heap: &floatSortedMergeHeap{ + items: make([]*floatSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, } // Initialize heap items. for _, input := range inputs { // Append to the heap. - itr.heap = append(itr.heap, &floatSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + itr.heap.items = append(itr.heap.items, &floatSortedMergeHeapItem{itr: input}) } return itr @@ -354,8 +355,8 @@ func (itr *floatSortedMergeIterator) Next() (*FloatPoint, error) { return itr.po func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) { // Initialize the heap. See the MergeIterator to see why this has to be done lazily. 
if !itr.init { - items := itr.heap - itr.heap = make([]*floatSortedMergeHeapItem, 0, len(items)) + items := itr.heap.items + itr.heap.items = make([]*floatSortedMergeHeapItem, 0, len(items)) for _, item := range items { var err error if item.point, err = item.itr.Next(); err != nil { @@ -363,18 +364,18 @@ func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) { } else if item.point == nil { continue } - itr.heap = append(itr.heap, item) + itr.heap.items = append(itr.heap.items, item) } - heap.Init(&itr.heap) + heap.Init(itr.heap) itr.init = true } - if len(itr.heap) == 0 { + if len(itr.heap.items) == 0 { return nil, nil } // Read the next item from the heap. - item := heap.Pop(&itr.heap).(*floatSortedMergeHeapItem) + item := heap.Pop(itr.heap).(*floatSortedMergeHeapItem) if item.err != nil { return nil, item.err } else if item.point == nil { @@ -386,54 +387,56 @@ func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) { // Read the next item from the cursor. Push back to heap if one exists. if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(&itr.heap, item) + heap.Push(itr.heap, item) } return p, nil } // floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems. 
-type floatSortedMergeHeap []*floatSortedMergeHeapItem +type floatSortedMergeHeap struct { + opt IteratorOptions + items []*floatSortedMergeHeapItem +} -func (h floatSortedMergeHeap) Len() int { return len(h) } -func (h floatSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h floatSortedMergeHeap) Less(i, j int) bool { - x, y := h[i].point, h[j].point +func (h *floatSortedMergeHeap) Len() int { return len(h.items) } +func (h *floatSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *floatSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point - if h[i].ascending { + if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() } return x.Time < y.Time } if x.Name != y.Name { return x.Name > y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() } return x.Time > y.Time } func (h *floatSortedMergeHeap) Push(x interface{}) { - *h = append(*h, x.(*floatSortedMergeHeapItem)) + h.items = append(h.items, x.(*floatSortedMergeHeapItem)) } func (h *floatSortedMergeHeap) Pop() interface{} { - old := *h + old := h.items n := len(old) item := old[n-1] - *h = old[0 : n-1] + h.items = old[0 : n-1] return item } type floatSortedMergeHeapItem struct { - point *FloatPoint - err error - itr FloatIterator - ascending bool + point *FloatPoint + err error + itr FloatIterator } // floatParallelIterator represents an iterator that pulls data in a separate goroutine. @@ -550,10 +553,6 @@ func (itr *floatLimitIterator) Next() (*FloatPoint, error) { // Read next point if we're beyond the limit. 
if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - // If there's no interval, no groups, and a single source then simply exit. - if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil, nil - } continue } @@ -840,7 +839,7 @@ type auxFloatPoint struct { type floatAuxIterator struct { input *bufFloatIterator output chan auxFloatPoint - fields auxIteratorFields + fields *auxIteratorFields background bool } @@ -869,28 +868,6 @@ func (itr *floatAuxIterator) Iterator(name string, typ DataType) Iterator { return itr.fields.iterator(name, typ) } -func (itr *floatAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { - expr := opt.Expr - if expr == nil { - panic("unable to create an iterator with no expression from an aux iterator") - } - - switch expr := expr.(type) { - case *VarRef: - return itr.Iterator(expr.Val, expr.Type), nil - default: - panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) - } -} - -func (itr *floatAuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { - return nil, nil, errors.New("not implemented") -} - -func (itr *floatAuxIterator) ExpandSources(sources Sources) (Sources, error) { - return nil, errors.New("not implemented") -} - func (itr *floatAuxIterator) stream() { for { // Read next point. 
@@ -1015,10 +992,20 @@ func (itr *floatChanIterator) Next() (*FloatPoint, error) { type floatReduceFloatIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, FloatPointEmitter) + dims []string opt IteratorOptions points []FloatPoint } +func newFloatReduceFloatIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, FloatPointEmitter)) *floatReduceFloatIterator { + return &floatReduceFloatIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *floatReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -1054,11 +1041,20 @@ type floatReduceFloatPoint struct { // The previous value for the dimension is passed to fn. func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { // Calculate next window. - t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*floatReduceFloatPoint) @@ -1072,7 +1068,7 @@ func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -1103,6 +1099,8 @@ func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. 
+ sortedByTime := true a := make([]FloatPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -1113,11 +1111,18 @@ func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + return a, nil } @@ -1125,6 +1130,7 @@ func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { type floatStreamFloatIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, FloatPointEmitter) + dims []string opt IteratorOptions m map[string]*floatReduceFloatPoint points []FloatPoint @@ -1135,6 +1141,7 @@ func newFloatStreamFloatIterator(input FloatIterator, createFn func() (FloatPoin return &floatStreamFloatIterator{ input: newBufFloatIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*floatReduceFloatPoint), } @@ -1174,7 +1181,7 @@ func (itr *floatStreamFloatIterator) reduce() ([]FloatPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -1319,10 +1326,20 @@ type floatExprFunc func(a, b float64) float64 type floatReduceIntegerIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, IntegerPointEmitter) + dims []string opt IteratorOptions points []IntegerPoint } +func newFloatReduceIntegerIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, IntegerPointEmitter)) *floatReduceIntegerIterator { + return &floatReduceIntegerIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from 
the input iterator. func (itr *floatReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -1358,11 +1375,20 @@ type floatReduceIntegerPoint struct { // The previous value for the dimension is passed to fn. func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Calculate next window. - t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*floatReduceIntegerPoint) @@ -1376,7 +1402,7 @@ func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -1407,6 +1433,8 @@ func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]IntegerPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -1417,11 +1445,18 @@ func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + return a, nil } @@ -1429,6 +1464,7 @@ func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { type floatStreamIntegerIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, IntegerPointEmitter) + dims []string opt IteratorOptions m map[string]*floatReduceIntegerPoint points []IntegerPoint @@ -1439,6 +1475,7 @@ func newFloatStreamIntegerIterator(input FloatIterator, createFn func() (FloatPo return &floatStreamIntegerIterator{ input: newBufFloatIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*floatReduceIntegerPoint), } @@ -1478,7 +1515,7 @@ func (itr *floatStreamIntegerIterator) reduce() ([]IntegerPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -1627,10 +1664,20 @@ type floatIntegerExprFunc func(a, b float64) int64 type floatReduceStringIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, StringPointEmitter) + dims []string opt IteratorOptions points []StringPoint } +func newFloatReduceStringIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, StringPointEmitter)) *floatReduceStringIterator { + return &floatReduceStringIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *floatReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -1666,11 +1713,20 @@ type floatReduceStringPoint struct { // The previous value for the dimension is passed to fn. func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { // Calculate next window. 
- t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*floatReduceStringPoint) @@ -1684,7 +1740,7 @@ func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -1715,6 +1771,8 @@ func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]StringPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -1725,11 +1783,18 @@ func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + return a, nil } @@ -1737,6 +1802,7 @@ func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { type floatStreamStringIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, StringPointEmitter) + dims []string opt IteratorOptions m map[string]*floatReduceStringPoint points []StringPoint @@ -1747,6 +1813,7 @@ func newFloatStreamStringIterator(input FloatIterator, createFn func() (FloatPoi return &floatStreamStringIterator{ input: newBufFloatIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*floatReduceStringPoint), } @@ -1786,7 +1853,7 @@ func (itr *floatStreamStringIterator) reduce() ([]StringPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -1935,10 +2002,20 @@ type floatStringExprFunc func(a, b float64) string type floatReduceBooleanIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, BooleanPointEmitter) + dims []string opt IteratorOptions points []BooleanPoint } +func newFloatReduceBooleanIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, BooleanPointEmitter)) *floatReduceBooleanIterator { + return &floatReduceBooleanIterator{ + input: newBufFloatIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *floatReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -1974,11 +2051,20 @@ type floatReduceBooleanPoint struct { // The previous value for the dimension is passed to fn. func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Calculate next window. 
- t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*floatReduceBooleanPoint) @@ -1992,7 +2078,7 @@ func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -2023,6 +2109,8 @@ func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]BooleanPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -2033,11 +2121,18 @@ func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + return a, nil } @@ -2045,6 +2140,7 @@ func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { type floatStreamBooleanIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, BooleanPointEmitter) + dims []string opt IteratorOptions m map[string]*floatReduceBooleanPoint points []BooleanPoint @@ -2055,6 +2151,7 @@ func newFloatStreamBooleanIterator(input FloatIterator, createFn func() (FloatPo return &floatStreamBooleanIterator{ input: newBufFloatIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*floatReduceBooleanPoint), } @@ -2094,7 +2191,7 @@ func (itr *floatStreamBooleanIterator) reduce() ([]BooleanPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -2567,7 +2664,8 @@ func (itr *integerMergeIterator) Next() (*IntegerPoint, error) { if err != nil { return nil, err } - itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) return p, nil } @@ -2588,7 +2686,7 @@ func (itr *integerMergeIterator) Next() (*IntegerPoint, error) { inWindow := true if window := itr.window; window.name != p.Name { inWindow = false - } else if window.tags != p.Tags.ID() { + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { inWindow = false } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false @@ -2615,9 +2713,9 @@ type integerMergeHeap struct { items []*integerMergeHeapItem } -func (h integerMergeHeap) Len() int { return len(h.items) } -func (h integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h integerMergeHeap) 
Less(i, j int) bool { +func (h *integerMergeHeap) Len() int { return len(h.items) } +func (h *integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *integerMergeHeap) Less(i, j int) bool { x, err := h.items[i].itr.peek() if err != nil { return true @@ -2630,14 +2728,14 @@ func (h integerMergeHeap) Less(i, j int) bool { if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() } } else { if x.Name != y.Name { return x.Name > y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() } } @@ -2669,8 +2767,7 @@ type integerMergeHeapItem struct { // integerSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. type integerSortedMergeIterator struct { inputs []IntegerIterator - opt IteratorOptions - heap integerSortedMergeHeap + heap *integerSortedMergeHeap init bool } @@ -2678,14 +2775,16 @@ type integerSortedMergeIterator struct { func newIntegerSortedMergeIterator(inputs []IntegerIterator, opt IteratorOptions) Iterator { itr := &integerSortedMergeIterator{ inputs: inputs, - heap: make(integerSortedMergeHeap, 0, len(inputs)), - opt: opt, + heap: &integerSortedMergeHeap{ + items: make([]*integerSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, } // Initialize heap items. for _, input := range inputs { // Append to the heap. 
- itr.heap = append(itr.heap, &integerSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + itr.heap.items = append(itr.heap.items, &integerSortedMergeHeapItem{itr: input}) } return itr @@ -2716,8 +2815,8 @@ func (itr *integerSortedMergeIterator) Next() (*IntegerPoint, error) { return it func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) { // Initialize the heap. See the MergeIterator to see why this has to be done lazily. if !itr.init { - items := itr.heap - itr.heap = make([]*integerSortedMergeHeapItem, 0, len(items)) + items := itr.heap.items + itr.heap.items = make([]*integerSortedMergeHeapItem, 0, len(items)) for _, item := range items { var err error if item.point, err = item.itr.Next(); err != nil { @@ -2725,18 +2824,18 @@ func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) { } else if item.point == nil { continue } - itr.heap = append(itr.heap, item) + itr.heap.items = append(itr.heap.items, item) } - heap.Init(&itr.heap) + heap.Init(itr.heap) itr.init = true } - if len(itr.heap) == 0 { + if len(itr.heap.items) == 0 { return nil, nil } // Read the next item from the heap. - item := heap.Pop(&itr.heap).(*integerSortedMergeHeapItem) + item := heap.Pop(itr.heap).(*integerSortedMergeHeapItem) if item.err != nil { return nil, item.err } else if item.point == nil { @@ -2748,54 +2847,56 @@ func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) { // Read the next item from the cursor. Push back to heap if one exists. if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(&itr.heap, item) + heap.Push(itr.heap, item) } return p, nil } // integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems. 
-type integerSortedMergeHeap []*integerSortedMergeHeapItem +type integerSortedMergeHeap struct { + opt IteratorOptions + items []*integerSortedMergeHeapItem +} -func (h integerSortedMergeHeap) Len() int { return len(h) } -func (h integerSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h integerSortedMergeHeap) Less(i, j int) bool { - x, y := h[i].point, h[j].point +func (h *integerSortedMergeHeap) Len() int { return len(h.items) } +func (h *integerSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *integerSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point - if h[i].ascending { + if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() } return x.Time < y.Time } if x.Name != y.Name { return x.Name > y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() } return x.Time > y.Time } func (h *integerSortedMergeHeap) Push(x interface{}) { - *h = append(*h, x.(*integerSortedMergeHeapItem)) + h.items = append(h.items, x.(*integerSortedMergeHeapItem)) } func (h *integerSortedMergeHeap) Pop() interface{} { - old := *h + old := h.items n := len(old) item := old[n-1] - *h = old[0 : n-1] + h.items = old[0 : n-1] return item } type integerSortedMergeHeapItem struct { - point *IntegerPoint - err error - itr IntegerIterator - ascending bool + point *IntegerPoint + err error + itr IntegerIterator } // integerParallelIterator represents an iterator that pulls data in a separate goroutine. 
@@ -2912,10 +3013,6 @@ func (itr *integerLimitIterator) Next() (*IntegerPoint, error) { // Read next point if we're beyond the limit. if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - // If there's no interval, no groups, and a single source then simply exit. - if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil, nil - } continue } @@ -3202,7 +3299,7 @@ type auxIntegerPoint struct { type integerAuxIterator struct { input *bufIntegerIterator output chan auxIntegerPoint - fields auxIteratorFields + fields *auxIteratorFields background bool } @@ -3231,28 +3328,6 @@ func (itr *integerAuxIterator) Iterator(name string, typ DataType) Iterator { return itr.fields.iterator(name, typ) } -func (itr *integerAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { - expr := opt.Expr - if expr == nil { - panic("unable to create an iterator with no expression from an aux iterator") - } - - switch expr := expr.(type) { - case *VarRef: - return itr.Iterator(expr.Val, expr.Type), nil - default: - panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) - } -} - -func (itr *integerAuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { - return nil, nil, errors.New("not implemented") -} - -func (itr *integerAuxIterator) ExpandSources(sources Sources) (Sources, error) { - return nil, errors.New("not implemented") -} - func (itr *integerAuxIterator) stream() { for { // Read next point. 
@@ -3374,10 +3449,20 @@ func (itr *integerChanIterator) Next() (*IntegerPoint, error) { type integerReduceFloatIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, FloatPointEmitter) + dims []string opt IteratorOptions points []FloatPoint } +func newIntegerReduceFloatIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, FloatPointEmitter)) *integerReduceFloatIterator { + return &integerReduceFloatIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *integerReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -3413,11 +3498,20 @@ type integerReduceFloatPoint struct { // The previous value for the dimension is passed to fn. func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { // Calculate next window. - t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*integerReduceFloatPoint) @@ -3431,7 +3525,7 @@ func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -3462,6 +3556,8 @@ func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. 
+ sortedByTime := true a := make([]FloatPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -3472,11 +3568,18 @@ func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + return a, nil } @@ -3484,6 +3587,7 @@ func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { type integerStreamFloatIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, FloatPointEmitter) + dims []string opt IteratorOptions m map[string]*integerReduceFloatPoint points []FloatPoint @@ -3494,6 +3598,7 @@ func newIntegerStreamFloatIterator(input IntegerIterator, createFn func() (Integ return &integerStreamFloatIterator{ input: newBufIntegerIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*integerReduceFloatPoint), } @@ -3533,7 +3638,7 @@ func (itr *integerStreamFloatIterator) reduce() ([]FloatPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -3682,10 +3787,20 @@ type integerFloatExprFunc func(a, b int64) float64 type integerReduceIntegerIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, IntegerPointEmitter) + dims []string opt IteratorOptions points []IntegerPoint } +func newIntegerReduceIntegerIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, IntegerPointEmitter)) *integerReduceIntegerIterator { + return &integerReduceIntegerIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: 
opt, + } +} + // Stats returns stats from the input iterator. func (itr *integerReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -3721,11 +3836,20 @@ type integerReduceIntegerPoint struct { // The previous value for the dimension is passed to fn. func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Calculate next window. - t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*integerReduceIntegerPoint) @@ -3739,7 +3863,7 @@ func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -3770,6 +3894,8 @@ func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]IntegerPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -3780,11 +3906,18 @@ func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + return a, nil } @@ -3792,6 +3925,7 @@ func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { type integerStreamIntegerIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, IntegerPointEmitter) + dims []string opt IteratorOptions m map[string]*integerReduceIntegerPoint points []IntegerPoint @@ -3802,6 +3936,7 @@ func newIntegerStreamIntegerIterator(input IntegerIterator, createFn func() (Int return &integerStreamIntegerIterator{ input: newBufIntegerIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*integerReduceIntegerPoint), } @@ -3841,7 +3976,7 @@ func (itr *integerStreamIntegerIterator) reduce() ([]IntegerPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -3986,10 +4121,20 @@ type integerExprFunc func(a, b int64) int64 type integerReduceStringIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, StringPointEmitter) + dims []string opt IteratorOptions points []StringPoint } +func newIntegerReduceStringIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, StringPointEmitter)) *integerReduceStringIterator { + return &integerReduceStringIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *integerReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -4025,11 +4170,20 @@ type integerReduceStringPoint struct { // The previous value for the dimension is passed to fn. func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { // Calculate next window. 
- t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*integerReduceStringPoint) @@ -4043,7 +4197,7 @@ func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -4074,6 +4228,8 @@ func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]StringPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -4084,11 +4240,18 @@ func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + return a, nil } @@ -4096,6 +4259,7 @@ func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { type integerStreamStringIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, StringPointEmitter) + dims []string opt IteratorOptions m map[string]*integerReduceStringPoint points []StringPoint @@ -4106,6 +4270,7 @@ func newIntegerStreamStringIterator(input IntegerIterator, createFn func() (Inte return &integerStreamStringIterator{ input: newBufIntegerIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*integerReduceStringPoint), } @@ -4145,7 +4310,7 @@ func (itr *integerStreamStringIterator) reduce() ([]StringPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -4294,10 +4459,20 @@ type integerStringExprFunc func(a, b int64) string type integerReduceBooleanIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, BooleanPointEmitter) + dims []string opt IteratorOptions points []BooleanPoint } +func newIntegerReduceBooleanIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, BooleanPointEmitter)) *integerReduceBooleanIterator { + return &integerReduceBooleanIterator{ + input: newBufIntegerIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *integerReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -4333,11 +4508,20 @@ type integerReduceBooleanPoint struct { // The previous value for the dimension is passed to fn. func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Calculate next window. 
- t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*integerReduceBooleanPoint) @@ -4351,7 +4535,7 @@ func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -4382,6 +4566,8 @@ func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]BooleanPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -4392,11 +4578,18 @@ func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + return a, nil } @@ -4404,6 +4597,7 @@ func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { type integerStreamBooleanIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, BooleanPointEmitter) + dims []string opt IteratorOptions m map[string]*integerReduceBooleanPoint points []BooleanPoint @@ -4414,6 +4608,7 @@ func newIntegerStreamBooleanIterator(input IntegerIterator, createFn func() (Int return &integerStreamBooleanIterator{ input: newBufIntegerIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*integerReduceBooleanPoint), } @@ -4453,7 +4648,7 @@ func (itr *integerStreamBooleanIterator) reduce() ([]BooleanPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -4926,7 +5121,8 @@ func (itr *stringMergeIterator) Next() (*StringPoint, error) { if err != nil { return nil, err } - itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) return p, nil } @@ -4947,7 +5143,7 @@ func (itr *stringMergeIterator) Next() (*StringPoint, error) { inWindow := true if window := itr.window; window.name != p.Name { inWindow = false - } else if window.tags != p.Tags.ID() { + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { inWindow = false } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false @@ -4974,9 +5170,9 @@ type stringMergeHeap struct { items []*stringMergeHeapItem } -func (h stringMergeHeap) Len() int { return len(h.items) } -func (h stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h 
stringMergeHeap) Less(i, j int) bool { +func (h *stringMergeHeap) Len() int { return len(h.items) } +func (h *stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *stringMergeHeap) Less(i, j int) bool { x, err := h.items[i].itr.peek() if err != nil { return true @@ -4989,14 +5185,14 @@ func (h stringMergeHeap) Less(i, j int) bool { if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() } } else { if x.Name != y.Name { return x.Name > y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() } } @@ -5028,8 +5224,7 @@ type stringMergeHeapItem struct { // stringSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. type stringSortedMergeIterator struct { inputs []StringIterator - opt IteratorOptions - heap stringSortedMergeHeap + heap *stringSortedMergeHeap init bool } @@ -5037,14 +5232,16 @@ type stringSortedMergeIterator struct { func newStringSortedMergeIterator(inputs []StringIterator, opt IteratorOptions) Iterator { itr := &stringSortedMergeIterator{ inputs: inputs, - heap: make(stringSortedMergeHeap, 0, len(inputs)), - opt: opt, + heap: &stringSortedMergeHeap{ + items: make([]*stringSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, } // Initialize heap items. for _, input := range inputs { // Append to the heap. 
- itr.heap = append(itr.heap, &stringSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + itr.heap.items = append(itr.heap.items, &stringSortedMergeHeapItem{itr: input}) } return itr @@ -5075,8 +5272,8 @@ func (itr *stringSortedMergeIterator) Next() (*StringPoint, error) { return itr. func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) { // Initialize the heap. See the MergeIterator to see why this has to be done lazily. if !itr.init { - items := itr.heap - itr.heap = make([]*stringSortedMergeHeapItem, 0, len(items)) + items := itr.heap.items + itr.heap.items = make([]*stringSortedMergeHeapItem, 0, len(items)) for _, item := range items { var err error if item.point, err = item.itr.Next(); err != nil { @@ -5084,18 +5281,18 @@ func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) { } else if item.point == nil { continue } - itr.heap = append(itr.heap, item) + itr.heap.items = append(itr.heap.items, item) } - heap.Init(&itr.heap) + heap.Init(itr.heap) itr.init = true } - if len(itr.heap) == 0 { + if len(itr.heap.items) == 0 { return nil, nil } // Read the next item from the heap. - item := heap.Pop(&itr.heap).(*stringSortedMergeHeapItem) + item := heap.Pop(itr.heap).(*stringSortedMergeHeapItem) if item.err != nil { return nil, item.err } else if item.point == nil { @@ -5107,54 +5304,56 @@ func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) { // Read the next item from the cursor. Push back to heap if one exists. if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(&itr.heap, item) + heap.Push(itr.heap, item) } return p, nil } // stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems. 
-type stringSortedMergeHeap []*stringSortedMergeHeapItem +type stringSortedMergeHeap struct { + opt IteratorOptions + items []*stringSortedMergeHeapItem +} -func (h stringSortedMergeHeap) Len() int { return len(h) } -func (h stringSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h stringSortedMergeHeap) Less(i, j int) bool { - x, y := h[i].point, h[j].point +func (h *stringSortedMergeHeap) Len() int { return len(h.items) } +func (h *stringSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *stringSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point - if h[i].ascending { + if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() } return x.Time < y.Time } if x.Name != y.Name { return x.Name > y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() } return x.Time > y.Time } func (h *stringSortedMergeHeap) Push(x interface{}) { - *h = append(*h, x.(*stringSortedMergeHeapItem)) + h.items = append(h.items, x.(*stringSortedMergeHeapItem)) } func (h *stringSortedMergeHeap) Pop() interface{} { - old := *h + old := h.items n := len(old) item := old[n-1] - *h = old[0 : n-1] + h.items = old[0 : n-1] return item } type stringSortedMergeHeapItem struct { - point *StringPoint - err error - itr StringIterator - ascending bool + point *StringPoint + err error + itr StringIterator } // stringParallelIterator represents an iterator that pulls data in a separate goroutine. 
@@ -5271,10 +5470,6 @@ func (itr *stringLimitIterator) Next() (*StringPoint, error) { // Read next point if we're beyond the limit. if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - // If there's no interval, no groups, and a single source then simply exit. - if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil, nil - } continue } @@ -5546,7 +5741,7 @@ type auxStringPoint struct { type stringAuxIterator struct { input *bufStringIterator output chan auxStringPoint - fields auxIteratorFields + fields *auxIteratorFields background bool } @@ -5575,28 +5770,6 @@ func (itr *stringAuxIterator) Iterator(name string, typ DataType) Iterator { return itr.fields.iterator(name, typ) } -func (itr *stringAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { - expr := opt.Expr - if expr == nil { - panic("unable to create an iterator with no expression from an aux iterator") - } - - switch expr := expr.(type) { - case *VarRef: - return itr.Iterator(expr.Val, expr.Type), nil - default: - panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) - } -} - -func (itr *stringAuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { - return nil, nil, errors.New("not implemented") -} - -func (itr *stringAuxIterator) ExpandSources(sources Sources) (Sources, error) { - return nil, errors.New("not implemented") -} - func (itr *stringAuxIterator) stream() { for { // Read next point. 
@@ -5718,10 +5891,20 @@ func (itr *stringChanIterator) Next() (*StringPoint, error) { type stringReduceFloatIterator struct { input *bufStringIterator create func() (StringPointAggregator, FloatPointEmitter) + dims []string opt IteratorOptions points []FloatPoint } +func newStringReduceFloatIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, FloatPointEmitter)) *stringReduceFloatIterator { + return &stringReduceFloatIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *stringReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -5757,11 +5940,20 @@ type stringReduceFloatPoint struct { // The previous value for the dimension is passed to fn. func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { // Calculate next window. - t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*stringReduceFloatPoint) @@ -5775,7 +5967,7 @@ func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -5806,6 +5998,8 @@ func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. 
+ sortedByTime := true a := make([]FloatPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -5816,11 +6010,18 @@ func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + return a, nil } @@ -5828,6 +6029,7 @@ func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { type stringStreamFloatIterator struct { input *bufStringIterator create func() (StringPointAggregator, FloatPointEmitter) + dims []string opt IteratorOptions m map[string]*stringReduceFloatPoint points []FloatPoint @@ -5838,6 +6040,7 @@ func newStringStreamFloatIterator(input StringIterator, createFn func() (StringP return &stringStreamFloatIterator{ input: newBufStringIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*stringReduceFloatPoint), } @@ -5877,7 +6080,7 @@ func (itr *stringStreamFloatIterator) reduce() ([]FloatPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -6026,10 +6229,20 @@ type stringFloatExprFunc func(a, b string) float64 type stringReduceIntegerIterator struct { input *bufStringIterator create func() (StringPointAggregator, IntegerPointEmitter) + dims []string opt IteratorOptions points []IntegerPoint } +func newStringReduceIntegerIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, IntegerPointEmitter)) *stringReduceIntegerIterator { + return &stringReduceIntegerIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // 
Stats returns stats from the input iterator. func (itr *stringReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -6065,11 +6278,20 @@ type stringReduceIntegerPoint struct { // The previous value for the dimension is passed to fn. func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Calculate next window. - t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*stringReduceIntegerPoint) @@ -6083,7 +6305,7 @@ func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -6114,6 +6336,8 @@ func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]IntegerPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -6124,11 +6348,18 @@ func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + return a, nil } @@ -6136,6 +6367,7 @@ func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { type stringStreamIntegerIterator struct { input *bufStringIterator create func() (StringPointAggregator, IntegerPointEmitter) + dims []string opt IteratorOptions m map[string]*stringReduceIntegerPoint points []IntegerPoint @@ -6146,6 +6378,7 @@ func newStringStreamIntegerIterator(input StringIterator, createFn func() (Strin return &stringStreamIntegerIterator{ input: newBufStringIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*stringReduceIntegerPoint), } @@ -6185,7 +6418,7 @@ func (itr *stringStreamIntegerIterator) reduce() ([]IntegerPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -6334,10 +6567,20 @@ type stringIntegerExprFunc func(a, b string) int64 type stringReduceStringIterator struct { input *bufStringIterator create func() (StringPointAggregator, StringPointEmitter) + dims []string opt IteratorOptions points []StringPoint } +func newStringReduceStringIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, StringPointEmitter)) *stringReduceStringIterator { + return &stringReduceStringIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *stringReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -6373,11 +6616,20 @@ type stringReduceStringPoint struct { // The previous value for the dimension is passed to fn. func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { // Calculate next window. 
- t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*stringReduceStringPoint) @@ -6391,7 +6643,7 @@ func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -6422,6 +6674,8 @@ func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]StringPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -6432,11 +6686,18 @@ func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + return a, nil } @@ -6444,6 +6705,7 @@ func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { type stringStreamStringIterator struct { input *bufStringIterator create func() (StringPointAggregator, StringPointEmitter) + dims []string opt IteratorOptions m map[string]*stringReduceStringPoint points []StringPoint @@ -6454,6 +6716,7 @@ func newStringStreamStringIterator(input StringIterator, createFn func() (String return &stringStreamStringIterator{ input: newBufStringIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*stringReduceStringPoint), } @@ -6493,7 +6756,7 @@ func (itr *stringStreamStringIterator) reduce() ([]StringPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -6638,10 +6901,20 @@ type stringExprFunc func(a, b string) string type stringReduceBooleanIterator struct { input *bufStringIterator create func() (StringPointAggregator, BooleanPointEmitter) + dims []string opt IteratorOptions points []BooleanPoint } +func newStringReduceBooleanIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, BooleanPointEmitter)) *stringReduceBooleanIterator { + return &stringReduceBooleanIterator{ + input: newBufStringIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *stringReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -6677,11 +6950,20 @@ type stringReduceBooleanPoint struct { // The previous value for the dimension is passed to fn. func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Calculate next window. 
- t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*stringReduceBooleanPoint) @@ -6695,7 +6977,7 @@ func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -6726,6 +7008,8 @@ func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]BooleanPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -6736,11 +7020,18 @@ func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + return a, nil } @@ -6748,6 +7039,7 @@ func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { type stringStreamBooleanIterator struct { input *bufStringIterator create func() (StringPointAggregator, BooleanPointEmitter) + dims []string opt IteratorOptions m map[string]*stringReduceBooleanPoint points []BooleanPoint @@ -6758,6 +7050,7 @@ func newStringStreamBooleanIterator(input StringIterator, createFn func() (Strin return &stringStreamBooleanIterator{ input: newBufStringIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*stringReduceBooleanPoint), } @@ -6797,7 +7090,7 @@ func (itr *stringStreamBooleanIterator) reduce() ([]BooleanPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -7270,7 +7563,8 @@ func (itr *booleanMergeIterator) Next() (*BooleanPoint, error) { if err != nil { return nil, err } - itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) return p, nil } @@ -7291,7 +7585,7 @@ func (itr *booleanMergeIterator) Next() (*BooleanPoint, error) { inWindow := true if window := itr.window; window.name != p.Name { inWindow = false - } else if window.tags != p.Tags.ID() { + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { inWindow = false } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false @@ -7318,9 +7612,9 @@ type booleanMergeHeap struct { items []*booleanMergeHeapItem } -func (h booleanMergeHeap) Len() int { return len(h.items) } -func (h booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h 
booleanMergeHeap) Less(i, j int) bool { +func (h *booleanMergeHeap) Len() int { return len(h.items) } +func (h *booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *booleanMergeHeap) Less(i, j int) bool { x, err := h.items[i].itr.peek() if err != nil { return true @@ -7333,14 +7627,14 @@ func (h booleanMergeHeap) Less(i, j int) bool { if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() } } else { if x.Name != y.Name { return x.Name > y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() } } @@ -7372,8 +7666,7 @@ type booleanMergeHeapItem struct { // booleanSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. type booleanSortedMergeIterator struct { inputs []BooleanIterator - opt IteratorOptions - heap booleanSortedMergeHeap + heap *booleanSortedMergeHeap init bool } @@ -7381,14 +7674,16 @@ type booleanSortedMergeIterator struct { func newBooleanSortedMergeIterator(inputs []BooleanIterator, opt IteratorOptions) Iterator { itr := &booleanSortedMergeIterator{ inputs: inputs, - heap: make(booleanSortedMergeHeap, 0, len(inputs)), - opt: opt, + heap: &booleanSortedMergeHeap{ + items: make([]*booleanSortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, } // Initialize heap items. for _, input := range inputs { // Append to the heap. 
- itr.heap = append(itr.heap, &booleanSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + itr.heap.items = append(itr.heap.items, &booleanSortedMergeHeapItem{itr: input}) } return itr @@ -7419,8 +7714,8 @@ func (itr *booleanSortedMergeIterator) Next() (*BooleanPoint, error) { return it func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) { // Initialize the heap. See the MergeIterator to see why this has to be done lazily. if !itr.init { - items := itr.heap - itr.heap = make([]*booleanSortedMergeHeapItem, 0, len(items)) + items := itr.heap.items + itr.heap.items = make([]*booleanSortedMergeHeapItem, 0, len(items)) for _, item := range items { var err error if item.point, err = item.itr.Next(); err != nil { @@ -7428,18 +7723,18 @@ func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) { } else if item.point == nil { continue } - itr.heap = append(itr.heap, item) + itr.heap.items = append(itr.heap.items, item) } - heap.Init(&itr.heap) + heap.Init(itr.heap) itr.init = true } - if len(itr.heap) == 0 { + if len(itr.heap.items) == 0 { return nil, nil } // Read the next item from the heap. - item := heap.Pop(&itr.heap).(*booleanSortedMergeHeapItem) + item := heap.Pop(itr.heap).(*booleanSortedMergeHeapItem) if item.err != nil { return nil, item.err } else if item.point == nil { @@ -7451,54 +7746,56 @@ func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) { // Read the next item from the cursor. Push back to heap if one exists. if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(&itr.heap, item) + heap.Push(itr.heap, item) } return p, nil } // booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems. 
-type booleanSortedMergeHeap []*booleanSortedMergeHeapItem +type booleanSortedMergeHeap struct { + opt IteratorOptions + items []*booleanSortedMergeHeapItem +} -func (h booleanSortedMergeHeap) Len() int { return len(h) } -func (h booleanSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h booleanSortedMergeHeap) Less(i, j int) bool { - x, y := h[i].point, h[j].point +func (h *booleanSortedMergeHeap) Len() int { return len(h.items) } +func (h *booleanSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *booleanSortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point - if h[i].ascending { + if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() } return x.Time < y.Time } if x.Name != y.Name { return x.Name > y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() } return x.Time > y.Time } func (h *booleanSortedMergeHeap) Push(x interface{}) { - *h = append(*h, x.(*booleanSortedMergeHeapItem)) + h.items = append(h.items, x.(*booleanSortedMergeHeapItem)) } func (h *booleanSortedMergeHeap) Pop() interface{} { - old := *h + old := h.items n := len(old) item := old[n-1] - *h = old[0 : n-1] + h.items = old[0 : n-1] return item } type booleanSortedMergeHeapItem struct { - point *BooleanPoint - err error - itr BooleanIterator - ascending bool + point *BooleanPoint + err error + itr BooleanIterator } // booleanParallelIterator represents an iterator that pulls data in a separate goroutine. 
@@ -7615,10 +7912,6 @@ func (itr *booleanLimitIterator) Next() (*BooleanPoint, error) { // Read next point if we're beyond the limit. if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - // If there's no interval, no groups, and a single source then simply exit. - if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil, nil - } continue } @@ -7890,7 +8183,7 @@ type auxBooleanPoint struct { type booleanAuxIterator struct { input *bufBooleanIterator output chan auxBooleanPoint - fields auxIteratorFields + fields *auxIteratorFields background bool } @@ -7919,28 +8212,6 @@ func (itr *booleanAuxIterator) Iterator(name string, typ DataType) Iterator { return itr.fields.iterator(name, typ) } -func (itr *booleanAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { - expr := opt.Expr - if expr == nil { - panic("unable to create an iterator with no expression from an aux iterator") - } - - switch expr := expr.(type) { - case *VarRef: - return itr.Iterator(expr.Val, expr.Type), nil - default: - panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) - } -} - -func (itr *booleanAuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { - return nil, nil, errors.New("not implemented") -} - -func (itr *booleanAuxIterator) ExpandSources(sources Sources) (Sources, error) { - return nil, errors.New("not implemented") -} - func (itr *booleanAuxIterator) stream() { for { // Read next point. 
@@ -8062,10 +8333,20 @@ func (itr *booleanChanIterator) Next() (*BooleanPoint, error) { type booleanReduceFloatIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, FloatPointEmitter) + dims []string opt IteratorOptions points []FloatPoint } +func newBooleanReduceFloatIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, FloatPointEmitter)) *booleanReduceFloatIterator { + return &booleanReduceFloatIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *booleanReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -8101,11 +8382,20 @@ type booleanReduceFloatPoint struct { // The previous value for the dimension is passed to fn. func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { // Calculate next window. - t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*booleanReduceFloatPoint) @@ -8119,7 +8409,7 @@ func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -8150,6 +8440,8 @@ func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. 
+ sortedByTime := true a := make([]FloatPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -8160,11 +8452,18 @@ func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. + if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(floatPointsByTime(a))) + } + return a, nil } @@ -8172,6 +8471,7 @@ func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { type booleanStreamFloatIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, FloatPointEmitter) + dims []string opt IteratorOptions m map[string]*booleanReduceFloatPoint points []FloatPoint @@ -8182,6 +8482,7 @@ func newBooleanStreamFloatIterator(input BooleanIterator, createFn func() (Boole return &booleanStreamFloatIterator{ input: newBufBooleanIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*booleanReduceFloatPoint), } @@ -8221,7 +8522,7 @@ func (itr *booleanStreamFloatIterator) reduce() ([]FloatPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -8370,10 +8671,20 @@ type booleanFloatExprFunc func(a, b bool) float64 type booleanReduceIntegerIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, IntegerPointEmitter) + dims []string opt IteratorOptions points []IntegerPoint } +func newBooleanReduceIntegerIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, IntegerPointEmitter)) *booleanReduceIntegerIterator { + return &booleanReduceIntegerIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: 
opt, + } +} + // Stats returns stats from the input iterator. func (itr *booleanReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -8409,11 +8720,20 @@ type booleanReduceIntegerPoint struct { // The previous value for the dimension is passed to fn. func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Calculate next window. - t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*booleanReduceIntegerPoint) @@ -8427,7 +8747,7 @@ func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -8458,6 +8778,8 @@ func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]IntegerPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -8468,11 +8790,18 @@ func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(integerPointsByTime(a))) + } + return a, nil } @@ -8480,6 +8809,7 @@ func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { type booleanStreamIntegerIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, IntegerPointEmitter) + dims []string opt IteratorOptions m map[string]*booleanReduceIntegerPoint points []IntegerPoint @@ -8490,6 +8820,7 @@ func newBooleanStreamIntegerIterator(input BooleanIterator, createFn func() (Boo return &booleanStreamIntegerIterator{ input: newBufBooleanIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*booleanReduceIntegerPoint), } @@ -8529,7 +8860,7 @@ func (itr *booleanStreamIntegerIterator) reduce() ([]IntegerPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -8678,10 +9009,20 @@ type booleanIntegerExprFunc func(a, b bool) int64 type booleanReduceStringIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, StringPointEmitter) + dims []string opt IteratorOptions points []StringPoint } +func newBooleanReduceStringIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, StringPointEmitter)) *booleanReduceStringIterator { + return &booleanReduceStringIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *booleanReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -8717,11 +9058,20 @@ type booleanReduceStringPoint struct { // The previous value for the dimension is passed to fn. func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { // Calculate next window. 
- t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*booleanReduceStringPoint) @@ -8735,7 +9085,7 @@ func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -8766,6 +9116,8 @@ func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]StringPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -8776,11 +9128,18 @@ func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(stringPointsByTime(a))) + } + return a, nil } @@ -8788,6 +9147,7 @@ func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { type booleanStreamStringIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, StringPointEmitter) + dims []string opt IteratorOptions m map[string]*booleanReduceStringPoint points []StringPoint @@ -8798,6 +9158,7 @@ func newBooleanStreamStringIterator(input BooleanIterator, createFn func() (Bool return &booleanStreamStringIterator{ input: newBufBooleanIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*booleanReduceStringPoint), } @@ -8837,7 +9198,7 @@ func (itr *booleanStreamStringIterator) reduce() ([]StringPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -8986,10 +9347,20 @@ type booleanStringExprFunc func(a, b bool) string type booleanReduceBooleanIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, BooleanPointEmitter) + dims []string opt IteratorOptions points []BooleanPoint } +func newBooleanReduceBooleanIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, BooleanPointEmitter)) *booleanReduceBooleanIterator { + return &booleanReduceBooleanIterator{ + input: newBufBooleanIterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *booleanReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } @@ -9025,11 +9396,20 @@ type booleanReduceBooleanPoint struct { // The previous value for the dimension is passed to fn. func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Calculate next window. 
- t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*booleanReduceBooleanPoint) @@ -9043,7 +9423,7 @@ func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -9074,6 +9454,8 @@ func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]BooleanPoint, 0, len(m)) for _, k := range keys { rp := m[k] @@ -9084,11 +9466,18 @@ func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse(booleanPointsByTime(a))) + } + return a, nil } @@ -9096,6 +9485,7 @@ func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { type booleanStreamBooleanIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, BooleanPointEmitter) + dims []string opt IteratorOptions m map[string]*booleanReduceBooleanPoint points []BooleanPoint @@ -9106,6 +9496,7 @@ func newBooleanStreamBooleanIterator(input BooleanIterator, createFn func() (Boo return &booleanStreamBooleanIterator{ input: newBufBooleanIterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*booleanReduceBooleanPoint), } @@ -9145,7 +9536,7 @@ func (itr *booleanStreamBooleanIterator) reduce() ([]BooleanPoint, error) { } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { diff --git a/influxql/iterator.gen.go.tmpl b/influxql/iterator.gen.go.tmpl index 09dcbb5831..ec25c688a8 100644 --- a/influxql/iterator.gen.go.tmpl +++ b/influxql/iterator.gen.go.tmpl @@ -2,7 +2,6 @@ package influxql import ( "container/heap" - "errors" "encoding/binary" "fmt" "io" @@ -131,7 +130,7 @@ func new{{$k.Name}}MergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptio inputs: inputs, heap: &{{$k.name}}MergeHeap{ items: make([]*{{$k.name}}MergeHeapItem, 0, len(inputs)), - opt: opt, + opt: opt, }, } @@ -202,7 +201,8 @@ func (itr *{{$k.name}}MergeIterator) Next() (*{{$k.Name}}Point, error) { if err != nil { return nil, err } - itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + tags := p.Tags.Subset(itr.heap.opt.Dimensions) + itr.window.name, itr.window.tags = p.Name, tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) return p, nil } @@ -223,7 +223,7 @@ func (itr *{{$k.name}}MergeIterator) Next() (*{{$k.Name}}Point, error) { inWindow := true if window := itr.window; 
window.name != p.Name { inWindow = false - } else if window.tags != p.Tags.ID() { + } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { inWindow = false } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false @@ -250,9 +250,9 @@ type {{$k.name}}MergeHeap struct { items []*{{$k.name}}MergeHeapItem } -func (h {{$k.name}}MergeHeap) Len() int { return len(h.items) } -func (h {{$k.name}}MergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h {{$k.name}}MergeHeap) Less(i, j int) bool { +func (h *{{$k.name}}MergeHeap) Len() int { return len(h.items) } +func (h *{{$k.name}}MergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *{{$k.name}}MergeHeap) Less(i, j int) bool { x, err := h.items[i].itr.peek() if err != nil { return true @@ -265,14 +265,14 @@ func (h {{$k.name}}MergeHeap) Less(i, j int) bool { if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() < yTags.ID() } } else { if x.Name != y.Name { return x.Name > y.Name - } else if x.Tags.ID() != y.Tags.ID() { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { + return xTags.ID() > yTags.ID() } } @@ -305,8 +305,7 @@ type {{$k.name}}MergeHeapItem struct { // {{$k.name}}SortedMergeIterator is an iterator that sorts and merges multiple iterators into one. 
type {{$k.name}}SortedMergeIterator struct { inputs []{{$k.Name}}Iterator - opt IteratorOptions - heap {{$k.name}}SortedMergeHeap + heap *{{$k.name}}SortedMergeHeap init bool } @@ -314,14 +313,16 @@ type {{$k.name}}SortedMergeIterator struct { func new{{$k.Name}}SortedMergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) Iterator { itr := &{{$k.name}}SortedMergeIterator{ inputs: inputs, - heap: make({{$k.name}}SortedMergeHeap, 0, len(inputs)), - opt: opt, + heap: &{{$k.name}}SortedMergeHeap{ + items: make([]*{{$k.name}}SortedMergeHeapItem, 0, len(inputs)), + opt: opt, + }, } // Initialize heap items. for _, input := range inputs { // Append to the heap. - itr.heap = append(itr.heap, &{{$k.name}}SortedMergeHeapItem{itr: input, ascending: opt.Ascending}) + itr.heap.items = append(itr.heap.items, &{{$k.name}}SortedMergeHeapItem{itr: input}) } return itr @@ -352,8 +353,8 @@ func (itr *{{$k.name}}SortedMergeIterator) Next() (*{{$k.Name}}Point, error) { r func (itr *{{$k.name}}SortedMergeIterator) pop() (*{{$k.Name}}Point, error) { // Initialize the heap. See the MergeIterator to see why this has to be done lazily. if !itr.init { - items := itr.heap - itr.heap = make([]*{{$k.name}}SortedMergeHeapItem, 0, len(items)) + items := itr.heap.items + itr.heap.items = make([]*{{$k.name}}SortedMergeHeapItem, 0, len(items)) for _, item := range items { var err error if item.point, err = item.itr.Next(); err != nil { @@ -361,18 +362,18 @@ func (itr *{{$k.name}}SortedMergeIterator) pop() (*{{$k.Name}}Point, error) { } else if item.point == nil { continue } - itr.heap = append(itr.heap, item) + itr.heap.items = append(itr.heap.items, item) } - heap.Init(&itr.heap) + heap.Init(itr.heap) itr.init = true } - if len(itr.heap) == 0 { + if len(itr.heap.items) == 0 { return nil, nil } // Read the next item from the heap. 
- item := heap.Pop(&itr.heap).(*{{$k.name}}SortedMergeHeapItem) + item := heap.Pop(itr.heap).(*{{$k.name}}SortedMergeHeapItem) if item.err != nil { return nil, item.err } else if item.point == nil { @@ -384,46 +385,49 @@ func (itr *{{$k.name}}SortedMergeIterator) pop() (*{{$k.Name}}Point, error) { // Read the next item from the cursor. Push back to heap if one exists. if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(&itr.heap, item) + heap.Push(itr.heap, item) } return p, nil } // {{$k.name}}SortedMergeHeap represents a heap of {{$k.name}}SortedMergeHeapItems. -type {{$k.name}}SortedMergeHeap []*{{$k.name}}SortedMergeHeapItem +type {{$k.name}}SortedMergeHeap struct { + opt IteratorOptions + items []*{{$k.name}}SortedMergeHeapItem +} -func (h {{$k.name}}SortedMergeHeap) Len() int { return len(h) } -func (h {{$k.name}}SortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h {{$k.name}}SortedMergeHeap) Less(i, j int) bool { - x, y := h[i].point, h[j].point +func (h *{{$k.name}}SortedMergeHeap) Len() int { return len(h.items) } +func (h *{{$k.name}}SortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h *{{$k.name}}SortedMergeHeap) Less(i, j int) bool { + x, y := h.items[i].point, h.items[j].point - if h[i].ascending { + if h.opt.Ascending { if x.Name != y.Name { return x.Name < y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() < y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() < yTags.ID() } return x.Time < y.Time } if x.Name != y.Name { return x.Name > y.Name - } else if !x.Tags.Equals(&y.Tags) { - return x.Tags.ID() > y.Tags.ID() + } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { + return xTags.ID() > yTags.ID() } return x.Time > y.Time } func (h *{{$k.name}}SortedMergeHeap) Push(x interface{}) { - *h = 
append(*h, x.(*{{$k.name}}SortedMergeHeapItem)) + h.items = append(h.items, x.(*{{$k.name}}SortedMergeHeapItem)) } func (h *{{$k.name}}SortedMergeHeap) Pop() interface{} { - old := *h + old := h.items n := len(old) item := old[n-1] - *h = old[0 : n-1] + h.items = old[0 : n-1] return item } @@ -431,7 +435,6 @@ type {{$k.name}}SortedMergeHeapItem struct { point *{{$k.Name}}Point err error itr {{$k.Name}}Iterator - ascending bool } // {{$k.name}}ParallelIterator represents an iterator that pulls data in a separate goroutine. @@ -548,10 +551,6 @@ func (itr *{{$k.name}}LimitIterator) Next() (*{{$k.Name}}Point, error) { // Read next point if we're beyond the limit. if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - // If there's no interval, no groups, and a single source then simply exit. - if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil, nil - } continue } @@ -841,7 +840,7 @@ type aux{{$k.Name}}Point struct { type {{$k.name}}AuxIterator struct { input *buf{{$k.Name}}Iterator output chan aux{{$k.Name}}Point - fields auxIteratorFields + fields *auxIteratorFields background bool } @@ -868,28 +867,6 @@ func (itr *{{$k.name}}AuxIterator) Next() (*{{$k.Name}}Point, error) { } func (itr *{{$k.name}}AuxIterator) Iterator(name string, typ DataType) Iterator { return itr.fields.iterator(name, typ) } -func (itr *{{$k.name}}AuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { - expr := opt.Expr - if expr == nil { - panic("unable to create an iterator with no expression from an aux iterator") - } - - switch expr := expr.(type) { - case *VarRef: - return itr.Iterator(expr.Val, expr.Type), nil - default: - panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) - } -} - -func (itr *{{$k.name}}AuxIterator) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { - return nil, nil, errors.New("not implemented") -} - -func (itr 
*{{$k.name}}AuxIterator) ExpandSources(sources Sources) (Sources, error) { - return nil, errors.New("not implemented") -} - func (itr *{{.name}}AuxIterator) stream() { for { // Read next point. @@ -1016,10 +993,20 @@ func (itr *{{$k.name}}ChanIterator) Next() (*{{$k.Name}}Point, error) { type {{$k.name}}Reduce{{$v.Name}}Iterator struct { input *buf{{$k.Name}}Iterator create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + dims []string opt IteratorOptions points []{{$v.Name}}Point } +func new{{$k.Name}}Reduce{{$v.Name}}Iterator(input {{$k.Name}}Iterator, opt IteratorOptions, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter)) *{{$k.name}}Reduce{{$v.Name}}Iterator { + return &{{$k.name}}Reduce{{$v.Name}}Iterator{ + input: newBuf{{$k.Name}}Iterator(input), + create: createFn, + dims: opt.GetDimensions(), + opt: opt, + } +} + // Stats returns stats from the input iterator. func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() } @@ -1055,11 +1042,20 @@ type {{$k.name}}Reduce{{$v.Name}}Point struct { // The previous value for the dimension is passed to fn. func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { // Calculate next window. - t, err := itr.input.peekTime() - if err != nil { - return nil, err + var startTime, endTime int64 + for { + p, err := itr.input.Next() + if err != nil || p == nil { + return nil, err + } else if p.Nil { + continue + } + + // Unread the point so it can be processed. + itr.input.unread(p) + startTime, endTime = itr.opt.Window(p.Time) + break } - startTime, endTime := itr.opt.Window(t) // Create points by tags. 
m := make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point) @@ -1073,7 +1069,7 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { @@ -1084,7 +1080,7 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e rp := m[id] if rp == nil { aggregator, emitter := itr.create() - rp = &{{$k.name}}Reduce{{.Name}}Point{ + rp = &{{$k.name}}Reduce{{$v.Name}}Point{ Name: curr.Name, Tags: tags, Aggregator: aggregator, @@ -1104,6 +1100,8 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e sort.Sort(reverseStringSlice(keys)) } + // Assume the points are already sorted until proven otherwise. + sortedByTime := true a := make([]{{$v.Name}}Point, 0, len(m)) for _, k := range keys { rp := m[k] @@ -1114,11 +1112,18 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e // Set the points time to the interval time if the reducer didn't provide one. if points[i].Time == ZeroTime { points[i].Time = startTime + } else { + sortedByTime = false } a = append(a, points[i]) } } + // Points may be out of order. Perform a stable sort by time if requested. 
+ if !sortedByTime && itr.opt.Ordered { + sort.Stable(sort.Reverse({{$v.name}}PointsByTime(a))) + } + return a, nil } @@ -1126,6 +1131,7 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e type {{$k.name}}Stream{{$v.Name}}Iterator struct { input *buf{{$k.Name}}Iterator create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + dims []string opt IteratorOptions m map[string]*{{$k.name}}Reduce{{$v.Name}}Point points []{{$v.Name}}Point @@ -1136,6 +1142,7 @@ func new{{$k.Name}}Stream{{$v.Name}}Iterator(input {{$k.Name}}Iterator, createFn return &{{$k.name}}Stream{{$v.Name}}Iterator{ input: newBuf{{$k.Name}}Iterator(input), create: createFn, + dims: opt.GetDimensions(), opt: opt, m: make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point), } @@ -1175,7 +1182,7 @@ func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e } else if curr.Nil { continue } - tags := curr.Tags.Subset(itr.opt.Dimensions) + tags := curr.Tags.Subset(itr.dims) id := curr.Name if len(tags.m) > 0 { diff --git a/influxql/iterator.go b/influxql/iterator.go index f867e9a493..0669ce5ebb 100644 --- a/influxql/iterator.go +++ b/influxql/iterator.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "io" - "sort" "sync" "time" @@ -132,18 +131,23 @@ func (a Iterators) Merge(opt IteratorOptions) (Iterator, error) { return nil, nil } - if opt.Expr != nil { - if expr, ok := opt.Expr.(*Call); ok && expr.Name == "count" { - opt.Expr = &Call{ - Name: "sum", - Args: expr.Args, - } - } - } - if opt.InterruptCh != nil { itr = NewInterruptIterator(itr, opt.InterruptCh) } + + call, ok := opt.Expr.(*Call) + if !ok { + // This is not a call expression so do not use a call iterator. + return itr, nil + } + + // When merging the count() function, use sum() to sum the counted points. 
+ if call.Name == "count" { + opt.Expr = &Call{ + Name: "sum", + Args: call.Args, + } + } return NewCallIterator(itr, opt) } @@ -220,6 +224,8 @@ func NewSortedMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { inputs = Iterators(inputs).filterNonNil() if len(inputs) == 0 { return nil + } else if len(inputs) == 1 { + return inputs[0] } switch inputs := Iterators(inputs).cast().(type) { @@ -363,7 +369,6 @@ func NewCloseInterruptIterator(input Iterator, closing <-chan struct{}) Iterator // AuxIterator represents an iterator that can split off separate auxiliary iterators. type AuxIterator interface { Iterator - IteratorCreator // Auxilary iterator Iterator(name string, typ DataType) Iterator @@ -415,26 +420,32 @@ func (f *auxIteratorField) close() { } } -type auxIteratorFields []*auxIteratorField +type auxIteratorFields struct { + fields []*auxIteratorField + dimensions []string +} // newAuxIteratorFields returns a new instance of auxIteratorFields from a list of field names. -func newAuxIteratorFields(opt IteratorOptions) auxIteratorFields { - fields := make(auxIteratorFields, len(opt.Aux)) +func newAuxIteratorFields(opt IteratorOptions) *auxIteratorFields { + fields := make([]*auxIteratorField, len(opt.Aux)) for i, ref := range opt.Aux { fields[i] = &auxIteratorField{name: ref.Val, typ: ref.Type, opt: opt} } - return fields + return &auxIteratorFields{ + fields: fields, + dimensions: opt.GetDimensions(), + } } -func (a auxIteratorFields) close() { - for _, f := range a { +func (a *auxIteratorFields) close() { + for _, f := range a.fields { f.close() } } // iterator creates a new iterator for a named auxilary field. -func (a auxIteratorFields) iterator(name string, typ DataType) Iterator { - for _, f := range a { +func (a *auxIteratorFields) iterator(name string, typ DataType) Iterator { + for _, f := range a.fields { // Skip field if it's name doesn't match. // Exit if no points were received by the iterator. 
if f.name != name || (typ != Unknown && f.typ != typ) { @@ -468,13 +479,13 @@ func (a auxIteratorFields) iterator(name string, typ DataType) Iterator { } // send sends a point to all field iterators. -func (a auxIteratorFields) send(p Point) (ok bool) { +func (a *auxIteratorFields) send(p Point) (ok bool) { values := p.aux() - for i, f := range a { + for i, f := range a.fields { v := values[i] tags := p.tags() - tags = tags.Subset(f.opt.Dimensions) + tags = tags.Subset(a.dimensions) // Send new point for each aux iterator. // Primitive pointers represent nil values. @@ -496,8 +507,8 @@ func (a auxIteratorFields) send(p Point) (ok bool) { return ok } -func (a auxIteratorFields) sendError(err error) { - for _, f := range a { +func (a *auxIteratorFields) sendError(err error) { + for _, f := range a.fields { for _, itr := range f.itrs { switch itr := itr.(type) { case *floatChanIterator: @@ -591,112 +602,14 @@ func NewReaderIterator(r io.Reader, typ DataType, stats IteratorStats) Iterator // IteratorCreator is an interface to create Iterators. type IteratorCreator interface { // Creates a simple iterator for use in an InfluxQL query. - CreateIterator(opt IteratorOptions) (Iterator, error) - - // Returns the unique fields and dimensions across a list of sources. - FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) - - // Expands regex sources to all matching sources. - ExpandSources(sources Sources) (Sources, error) + CreateIterator(source *Measurement, opt IteratorOptions) (Iterator, error) } -// IteratorCreators represents a list of iterator creators. -type IteratorCreators []IteratorCreator +// FieldMapper returns the data type for the field inside of the measurement. +type FieldMapper interface { + FieldDimensions(m *Measurement) (fields map[string]DataType, dimensions map[string]struct{}, err error) -// Close closes all iterator creators that implement io.Closer. 
-func (a IteratorCreators) Close() error { - for _, ic := range a { - if ic, ok := ic.(io.Closer); ok { - ic.Close() - } - } - return nil -} - -// CreateIterator returns a single combined iterator from multiple iterator creators. -func (a IteratorCreators) CreateIterator(opt IteratorOptions) (Iterator, error) { - // Create iterators for each shard. - // Ensure that they are closed if an error occurs. - itrs := make([]Iterator, 0, len(a)) - if err := func() error { - for _, ic := range a { - itr, err := ic.CreateIterator(opt) - if err != nil { - return err - } else if itr == nil { - continue - } - itrs = append(itrs, itr) - } - return nil - }(); err != nil { - Iterators(itrs).Close() - return nil, err - } - - if len(itrs) == 0 { - return nil, nil - } - - return Iterators(itrs).Merge(opt) -} - -// FieldDimensions returns unique fields and dimensions from multiple iterator creators. -func (a IteratorCreators) FieldDimensions(sources Sources) (fields map[string]DataType, dimensions map[string]struct{}, err error) { - fields = make(map[string]DataType) - dimensions = make(map[string]struct{}) - - for _, ic := range a { - f, d, err := ic.FieldDimensions(sources) - if err != nil { - return nil, nil, err - } - for k, typ := range f { - if _, ok := fields[k]; typ != Unknown && (!ok || typ < fields[k]) { - fields[k] = typ - } - } - for k := range d { - dimensions[k] = struct{}{} - } - } - return -} - -// ExpandSources expands sources across all iterator creators and returns a unique result. -func (a IteratorCreators) ExpandSources(sources Sources) (Sources, error) { - m := make(map[string]Source) - - for _, ic := range a { - expanded, err := ic.ExpandSources(sources) - if err != nil { - return nil, err - } - - for _, src := range expanded { - switch src := src.(type) { - case *Measurement: - m[src.String()] = src - default: - return nil, fmt.Errorf("IteratorCreators.ExpandSources: unsupported source type: %T", src) - } - } - } - - // Convert set to sorted slice. 
- names := make([]string, 0, len(m)) - for name := range m { - names = append(names, name) - } - sort.Strings(names) - - // Convert set to a list of Sources. - sorted := make(Sources, 0, len(m)) - for _, name := range names { - sorted = append(sorted, m[name]) - } - - return sorted, nil + TypeMapper } // IteratorOptions is an object passed to CreateIterator to specify creation options. @@ -708,12 +621,14 @@ type IteratorOptions struct { // Auxilary tags or values to also retrieve for the point. Aux []VarRef - // Data sources from which to retrieve data. + // Data sources from which to receive data. This is only used for encoding + // measurements over RPC and is no longer used in the open source version. Sources []Source // Group by interval and tags. Interval Interval - Dimensions []string + Dimensions []string // The final dimensions of the query (stays the same even in subqueries). + GroupBy map[string]struct{} // Dimensions to group points by in intermediate iterators. // Fill options. Fill FillOption @@ -738,6 +653,12 @@ type IteratorOptions struct { // Removes duplicate rows from raw queries. Dedupe bool + // Determines if this is a query for raw data or an aggregate/selector. + Ordered bool + + // Limits on the creation of iterators. + MaxSeriesN int + // If this channel is set and is closed, the iterator should try to exit // and close as soon as possible. InterruptCh <-chan struct{} @@ -786,14 +707,18 @@ func newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt Ite } opt.Interval.Duration = interval + // Determine if the input for this select call must be ordered. + opt.Ordered = stmt.IsRawQuery + // Determine dimensions. 
+ opt.GroupBy = make(map[string]struct{}, len(opt.Dimensions)) for _, d := range stmt.Dimensions { if d, ok := d.Expr.(*VarRef); ok { opt.Dimensions = append(opt.Dimensions, d.Val) + opt.GroupBy[d.Val] = struct{}{} } } - opt.Sources = stmt.Sources opt.Condition = stmt.Condition opt.Ascending = stmt.TimeAscending() opt.Dedupe = stmt.Dedupe @@ -808,14 +733,52 @@ func newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt Ite opt.Limit, opt.Offset = stmt.Limit, stmt.Offset opt.SLimit, opt.SOffset = stmt.SLimit, stmt.SOffset if sopt != nil { + opt.MaxSeriesN = sopt.MaxSeriesN opt.InterruptCh = sopt.InterruptCh } return opt, nil } +func newIteratorOptionsSubstatement(stmt *SelectStatement, opt IteratorOptions) (IteratorOptions, error) { + subOpt, err := newIteratorOptionsStmt(stmt, nil) + if err != nil { + return IteratorOptions{}, err + } + + if subOpt.StartTime < opt.StartTime { + subOpt.StartTime = opt.StartTime + } + if subOpt.EndTime > opt.EndTime { + subOpt.EndTime = opt.EndTime + } + subOpt.Dimensions = opt.Dimensions + subOpt.InterruptCh = opt.InterruptCh + + // Propagate the SLIMIT and SOFFSET from the outer query. + subOpt.SLimit += opt.SLimit + subOpt.SOffset += opt.SOffset + + // If the inner query uses a null fill option, switch it to none so we + // don't hit an unnecessary penalty from the fill iterator. Null values + // will end up getting stripped by an outer query anyway so there's no + // point in having them here. We still need all other types of fill + // iterators because they can affect the result of the outer query. + if subOpt.Fill == NullFill { + subOpt.Fill = NoFill + } + + // Determine if the input to this iterator needs to be ordered so it outputs + // the correct order to the outer query. 
+ interval, err := stmt.GroupByInterval() + if err != nil { + return IteratorOptions{}, err + } + subOpt.Ordered = opt.Ordered && (interval == 0 && stmt.HasSelector()) + return subOpt, nil +} + // MergeSorted returns true if the options require a sorted merge. -// This is only needed when the expression is a variable reference or there is no expr. func (opt IteratorOptions) MergeSorted() bool { if opt.Expr == nil { return true @@ -882,6 +845,18 @@ func (opt IteratorOptions) ElapsedInterval() Interval { return Interval{Duration: time.Nanosecond} } +// GetDimensions retrieves the dimensions for this query. +func (opt IteratorOptions) GetDimensions() []string { + if len(opt.GroupBy) > 0 { + dimensions := make([]string, 0, len(opt.GroupBy)) + for dim := range opt.GroupBy { + dimensions = append(dimensions, dim) + } + return dimensions + } + return opt.Dimensions +} + // MarshalBinary encodes opt into a binary format. func (opt *IteratorOptions) MarshalBinary() ([]byte, error) { return proto.Marshal(encodeIteratorOptions(opt)) @@ -916,6 +891,8 @@ func encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions { SLimit: proto.Int64(int64(opt.SLimit)), SOffset: proto.Int64(int64(opt.SOffset)), Dedupe: proto.Bool(opt.Dedupe), + MaxSeriesN: proto.Int64(int64(opt.MaxSeriesN)), + Ordered: proto.Bool(opt.Ordered), } // Set expression, if set. @@ -931,13 +908,24 @@ func encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions { pb.Aux[i] = ref.Val } - // Convert and encode sources to measurements. - sources := make([]*internal.Measurement, len(opt.Sources)) - for i, source := range opt.Sources { - mm := source.(*Measurement) - sources[i] = encodeMeasurement(mm) + // Encode group by dimensions from a map. + if pb.GroupBy != nil { + dimensions := make([]string, 0, len(opt.GroupBy)) + for dim := range opt.GroupBy { + dimensions = append(dimensions, dim) + } + pb.GroupBy = dimensions + } + + // Convert and encode sources to measurements. 
+ if opt.Sources != nil { + sources := make([]*internal.Measurement, len(opt.Sources)) + for i, source := range opt.Sources { + mm := source.(*Measurement) + sources[i] = encodeMeasurement(mm) + } + pb.Sources = sources } - pb.Sources = sources // Fill value can only be a number. Set it if available. if v, ok := opt.FillValue.(float64); ok { @@ -966,6 +954,8 @@ func decodeIteratorOptions(pb *internal.IteratorOptions) (*IteratorOptions, erro SLimit: int(pb.GetSLimit()), SOffset: int(pb.GetSOffset()), Dedupe: pb.GetDedupe(), + MaxSeriesN: int(pb.GetMaxSeriesN()), + Ordered: pb.GetOrdered(), } // Set expression, if set. @@ -990,16 +980,27 @@ func decodeIteratorOptions(pb *internal.IteratorOptions) (*IteratorOptions, erro } } - // Convert and dencode sources to measurements. - sources := make([]Source, len(pb.GetSources())) - for i, source := range pb.GetSources() { - mm, err := decodeMeasurement(source) - if err != nil { - return nil, err + // Convert and decode sources to measurements. + if pb.Sources != nil { + sources := make([]Source, len(pb.GetSources())) + for i, source := range pb.GetSources() { + mm, err := decodeMeasurement(source) + if err != nil { + return nil, err + } + sources[i] = mm } - sources[i] = mm + opt.Sources = sources + } + + // Convert group by dimensions to a map. + if pb.GroupBy != nil { + dimensions := make(map[string]struct{}, len(pb.GroupBy)) + for _, dim := range pb.GetGroupBy() { + dimensions[dim] = struct{}{} + } + opt.GroupBy = dimensions } - opt.Sources = sources // Set condition, if set. if pb.Condition != nil { @@ -1041,6 +1042,22 @@ func (v *selectInfo) Visit(n Node) Visitor { return v } +// FindSelector returns a selector from the selectInfo. This will only +// return a selector if the Call is a selector and it's the only function +// in the selectInfo. 
+func (v *selectInfo) FindSelector() *Call { + if len(v.calls) != 1 { + return nil + } + + for s := range v.calls { + if IsSelector(s) { + return s + } + } + return nil +} + // Interval represents a repeating interval for a query. type Interval struct { Duration time.Duration diff --git a/influxql/iterator_mapper.go b/influxql/iterator_mapper.go new file mode 100644 index 0000000000..ff5702a52f --- /dev/null +++ b/influxql/iterator_mapper.go @@ -0,0 +1,49 @@ +package influxql + +type iteratorMapper struct { + e *Emitter + buf []interface{} + fields []int // which iterator to use for an aux field + auxFields []interface{} +} + +func NewIteratorMapper(itrs []Iterator, fields []int, opt IteratorOptions) Iterator { + e := NewEmitter(itrs, opt.Ascending, 0) + e.OmitTime = true + return &iteratorMapper{ + e: e, + buf: make([]interface{}, len(itrs)), + fields: fields, + auxFields: make([]interface{}, len(fields)), + } +} + +func (itr *iteratorMapper) Next() (*FloatPoint, error) { + t, name, tags, err := itr.e.loadBuf() + if err != nil || t == ZeroTime { + return nil, err + } + + itr.e.readInto(t, name, tags, itr.buf) + for i, f := range itr.fields { + itr.auxFields[i] = itr.buf[f] + } + return &FloatPoint{ + Name: name, + Tags: tags, + Time: t, + Aux: itr.auxFields, + }, nil +} + +func (itr *iteratorMapper) Stats() IteratorStats { + stats := IteratorStats{} + for _, itr := range itr.e.itrs { + stats.Add(itr.Stats()) + } + return stats +} + +func (itr *iteratorMapper) Close() error { + return itr.e.Close() +} diff --git a/influxql/iterator_mapper_test.go b/influxql/iterator_mapper_test.go new file mode 100644 index 0000000000..cded2a8375 --- /dev/null +++ b/influxql/iterator_mapper_test.go @@ -0,0 +1,58 @@ +package influxql_test + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +func TestIteratorMapper(t *testing.T) { + val1itr := &FloatIterator{Points: 
[]influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Value: 2}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Value: 8}, + }} + + val2itr := &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Value: "b"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Value: "h"}, + }} + inputs := []influxql.Iterator{val1itr, val2itr} + + opt := influxql.IteratorOptions{ + Ascending: true, + Aux: []influxql.VarRef{ + {Val: "val1", Type: influxql.Float}, + {Val: "val2", Type: influxql.String}, + }, + } + itr := influxql.NewIteratorMapper(inputs, []int{0, 1}, opt) + if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Aux: []interface{}{float64(1), "a"}}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Aux: []interface{}{float64(3), "c"}}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Aux: []interface{}{float64(2), "b"}}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Aux: []interface{}{float64(8), "h"}}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *StringIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} diff --git a/influxql/iterator_test.go b/influxql/iterator_test.go index 910efbc063..91a3562642 100644 --- a/influxql/iterator_test.go +++ b/influxql/iterator_test.go @@ -6,7 +6,6 
@@ import ( "math" "math/rand" "reflect" - "regexp" "testing" "time" @@ -39,7 +38,8 @@ func TestMergeIterator_Float(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) @@ -86,7 +86,8 @@ func TestMergeIterator_Integer(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { @@ -134,7 +135,8 @@ func TestMergeIterator_String(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { @@ -182,7 +184,8 @@ func TestMergeIterator_Boolean(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { @@ -236,7 +239,8 @@ func TestMergeIterator_Cast_Float(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) @@ -290,7 +294,8 @@ func TestSortedMergeIterator_Float(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) @@ -337,7 +342,8 @@ func TestSortedMergeIterator_Integer(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: 
true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) @@ -384,7 +390,8 @@ func TestSortedMergeIterator_String(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) @@ -431,7 +438,8 @@ func TestSortedMergeIterator_Boolean(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) @@ -484,7 +492,8 @@ func TestSortedMergeIterator_Cast_Float(t *testing.T) { Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, - Ascending: true, + Dimensions: []string{"host"}, + Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) @@ -832,26 +841,6 @@ func TestIteratorOptions_SeekTime_Descending(t *testing.T) { } } -func TestIteratorOptions_MergeSorted(t *testing.T) { - opt := influxql.IteratorOptions{} - sorted := opt.MergeSorted() - if !sorted { - t.Error("expected no expression to be sorted, got unsorted") - } - - opt.Expr = &influxql.VarRef{} - sorted = opt.MergeSorted() - if !sorted { - t.Error("expected expression with varref to be sorted, got unsorted") - } - - opt.Expr = &influxql.Call{} - sorted = opt.MergeSorted() - if sorted { - t.Error("expected expression without varref to be unsorted, got sorted") - } -} - func TestIteratorOptions_DerivativeInterval_Default(t *testing.T) { opt := influxql.IteratorOptions{} expected := influxql.Interval{Duration: time.Second} @@ -945,9 +934,6 @@ func TestIteratorOptions_MarshalBinary(t *testing.T) { opt := 
&influxql.IteratorOptions{ Expr: MustParseExpr("count(value)"), Aux: []influxql.VarRef{{Val: "a"}, {Val: "b"}, {Val: "c"}}, - Sources: []influxql.Source{ - &influxql.Measurement{Database: "db0", RetentionPolicy: "rp0", Name: "mm0"}, - }, Interval: influxql.Interval{ Duration: 1 * time.Hour, Offset: 20 * time.Minute, @@ -981,29 +967,6 @@ func TestIteratorOptions_MarshalBinary(t *testing.T) { } } -// Ensure iterator options with a regex measurement can be marshaled. -func TestIteratorOptions_MarshalBinary_Measurement_Regex(t *testing.T) { - opt := &influxql.IteratorOptions{ - Sources: []influxql.Source{ - &influxql.Measurement{Database: "db1", RetentionPolicy: "rp2", Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`series.+`)}}, - }, - } - - // Marshal to binary. - buf, err := opt.MarshalBinary() - if err != nil { - t.Fatal(err) - } - - // Unmarshal back to an object. - var other influxql.IteratorOptions - if err := other.UnmarshalBinary(buf); err != nil { - t.Fatal(err) - } else if v := other.Sources[0].(*influxql.Measurement).Regex.Val.String(); v != `series.+` { - t.Fatalf("unexpected measurement regex: %s", v) - } -} - // Ensure iterator can be encoded and decoded over a byte stream. func TestIterator_EncodeDecode(t *testing.T) { var buf bytes.Buffer @@ -1056,21 +1019,31 @@ func TestIterator_EncodeDecode(t *testing.T) { // IteratorCreator is a mockable implementation of SelectStatementExecutor.IteratorCreator. 
type IteratorCreator struct { - CreateIteratorFn func(opt influxql.IteratorOptions) (influxql.Iterator, error) - FieldDimensionsFn func(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) - ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) + CreateIteratorFn func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) + FieldDimensionsFn func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) } -func (ic *IteratorCreator) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { - return ic.CreateIteratorFn(opt) +func (ic *IteratorCreator) CreateIterator(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + return ic.CreateIteratorFn(m, opt) } -func (ic *IteratorCreator) FieldDimensions(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - return ic.FieldDimensionsFn(sources) +func (ic *IteratorCreator) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return ic.FieldDimensionsFn(m) } -func (ic *IteratorCreator) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { - return ic.ExpandSourcesFn(sources) +func (ic *IteratorCreator) MapType(m *influxql.Measurement, field string) influxql.DataType { + f, d, err := ic.FieldDimensions(m) + if err != nil { + return influxql.Unknown + } + + if typ, ok := f[field]; ok { + return typ + } + if _, ok := d[field]; ok { + return influxql.Tag + } + return influxql.Unknown } // Test implementation of influxql.FloatIterator diff --git a/influxql/parser.go b/influxql/parser.go index 82978b3435..3e1c49dfcf 100644 --- a/influxql/parser.go +++ b/influxql/parser.go @@ -928,7 +928,7 @@ func (p *Parser) parseSelectStatement(tr targetRequirement) (*SelectStatement, e if tok, pos, lit := 
p.scanIgnoreWhitespace(); tok != FROM { return nil, newParseError(tokstr(tok, lit), []string{"FROM"}, pos) } - if stmt.Sources, err = p.parseSources(); err != nil { + if stmt.Sources, err = p.parseSources(true); err != nil { return nil, err } @@ -993,6 +993,7 @@ type targetRequirement int const ( targetRequired targetRequirement = iota targetNotRequired + targetSubquery ) // parseTarget parses a string and returns a Target. @@ -1049,7 +1050,7 @@ func (p *Parser) parseDeleteStatement() (Statement, error) { if tok == FROM { // Parse source. - if stmt.Sources, err = p.parseSources(); err != nil { + if stmt.Sources, err = p.parseSources(false); err != nil { return nil, err } @@ -1107,7 +1108,7 @@ func (p *Parser) parseShowSeriesStatement() (*ShowSeriesStatement, error) { // Parse optional FROM. if tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM { - if stmt.Sources, err = p.parseSources(); err != nil { + if stmt.Sources, err = p.parseSources(false); err != nil { return nil, err } } else { @@ -1166,7 +1167,7 @@ func (p *Parser) parseShowMeasurementsStatement() (*ShowMeasurementsStatement, e switch tok { case EQ, EQREGEX: // Parse required source (measurement name or regex). - if stmt.Source, err = p.parseSource(); err != nil { + if stmt.Source, err = p.parseSource(false); err != nil { return nil, err } default: @@ -1245,7 +1246,7 @@ func (p *Parser) parseShowTagKeysStatement() (*ShowTagKeysStatement, error) { // Parse optional source. if tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM { - if stmt.Sources, err = p.parseSources(); err != nil { + if stmt.Sources, err = p.parseSources(false); err != nil { return nil, err } } else { @@ -1304,7 +1305,7 @@ func (p *Parser) parseShowTagValuesStatement() (*ShowTagValuesStatement, error) // Parse optional source. 
if tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM { - if stmt.Sources, err = p.parseSources(); err != nil { + if stmt.Sources, err = p.parseSources(false); err != nil { return nil, err } } else { @@ -1420,7 +1421,7 @@ func (p *Parser) parseShowFieldKeysStatement() (*ShowFieldKeysStatement, error) // Parse optional source. if tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM { - if stmt.Sources, err = p.parseSources(); err != nil { + if stmt.Sources, err = p.parseSources(false); err != nil { return nil, err } } else { @@ -1470,7 +1471,7 @@ func (p *Parser) parseDropSeriesStatement() (*DropSeriesStatement, error) { if tok == FROM { // Parse source. - if stmt.Sources, err = p.parseSources(); err != nil { + if stmt.Sources, err = p.parseSources(false); err != nil { return nil, err } @@ -2050,11 +2051,11 @@ func (p *Parser) parseAlias() (string, error) { } // parseSources parses a comma delimited list of sources. -func (p *Parser) parseSources() (Sources, error) { +func (p *Parser) parseSources(subqueries bool) (Sources, error) { var sources Sources for { - s, err := p.parseSource() + s, err := p.parseSource(subqueries) if err != nil { return nil, err } @@ -2079,7 +2080,7 @@ func (p *Parser) peekRune() rune { return r } -func (p *Parser) parseSource() (Source, error) { +func (p *Parser) parseSource(subqueries bool) (Source, error) { m := &Measurement{} // Attempt to parse a regex. @@ -2092,6 +2093,28 @@ func (p *Parser) parseSource() (Source, error) { return m, nil } + // If there is no regular expression, this might be a subquery. + // Parse the subquery if we are in a query that allows them as a source. 
+ if m.Regex == nil && subqueries { + if tok, _, _ := p.scanIgnoreWhitespace(); tok == LPAREN { + if err := p.parseTokens([]Token{SELECT}); err != nil { + return nil, err + } + + stmt, err := p.parseSelectStatement(targetSubquery) + if err != nil { + return nil, err + } + + if err := p.parseTokens([]Token{RPAREN}); err != nil { + return nil, err + } + return &SubQuery{Statement: stmt}, nil + } else { + p.unscan() + } + } + // Didn't find a regex so parse segmented identifiers. idents, err := p.parseSegmentedIdents() if err != nil { diff --git a/influxql/parser_test.go b/influxql/parser_test.go index 954205bacd..d6f82d4141 100644 --- a/influxql/parser_test.go +++ b/influxql/parser_test.go @@ -1103,6 +1103,151 @@ func TestParser_ParseStatement(t *testing.T) { }, }, + // SELECT statement with a subquery + { + s: `SELECT sum(derivative) FROM (SELECT derivative(value) FROM cpu GROUP BY host) WHERE time >= now() - 1d GROUP BY time(1h)`, + stmt: &influxql.SelectStatement{ + Fields: []*influxql.Field{{ + Expr: &influxql.Call{ + Name: "sum", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "derivative"}, + }}, + }}, + Dimensions: []*influxql.Dimension{{ + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: time.Hour}, + }, + }, + }}, + Sources: []influxql.Source{ + &influxql.SubQuery{ + Statement: &influxql.SelectStatement{ + Fields: []*influxql.Field{{ + Expr: &influxql.Call{ + Name: "derivative", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + }, + }, + }}, + Dimensions: []*influxql.Dimension{{ + Expr: &influxql.VarRef{Val: "host"}, + }}, + Sources: []influxql.Source{ + &influxql.Measurement{Name: "cpu"}, + }, + }, + }, + }, + Condition: &influxql.BinaryExpr{ + Op: influxql.GTE, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.BinaryExpr{ + Op: influxql.SUB, + LHS: &influxql.Call{Name: "now"}, + RHS: &influxql.DurationLiteral{Val: 24 * time.Hour}, + }, + }, + }, + }, + + { + s: `SELECT sum(mean) FROM (SELECT 
mean(value) FROM cpu GROUP BY time(1h)) WHERE time >= now() - 1d`, + stmt: &influxql.SelectStatement{ + Fields: []*influxql.Field{{ + Expr: &influxql.Call{ + Name: "sum", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "mean"}, + }}, + }}, + Sources: []influxql.Source{ + &influxql.SubQuery{ + Statement: &influxql.SelectStatement{ + Fields: []*influxql.Field{{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + }, + }, + }}, + Dimensions: []*influxql.Dimension{{ + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: time.Hour}, + }, + }, + }}, + Sources: []influxql.Source{ + &influxql.Measurement{Name: "cpu"}, + }, + }, + }, + }, + Condition: &influxql.BinaryExpr{ + Op: influxql.GTE, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.BinaryExpr{ + Op: influxql.SUB, + LHS: &influxql.Call{Name: "now"}, + RHS: &influxql.DurationLiteral{Val: 24 * time.Hour}, + }, + }, + }, + }, + + { + s: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu WHERE time >= now() - 1d GROUP BY time(1h))`, + stmt: &influxql.SelectStatement{ + Fields: []*influxql.Field{{ + Expr: &influxql.Call{ + Name: "sum", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "mean"}, + }}, + }}, + Sources: []influxql.Source{ + &influxql.SubQuery{ + Statement: &influxql.SelectStatement{ + Fields: []*influxql.Field{{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + }, + }, + }}, + Dimensions: []*influxql.Dimension{{ + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: time.Hour}, + }, + }, + }}, + Condition: &influxql.BinaryExpr{ + Op: influxql.GTE, + LHS: &influxql.VarRef{Val: "time"}, + RHS: &influxql.BinaryExpr{ + Op: influxql.SUB, + LHS: &influxql.Call{Name: "now"}, + RHS: &influxql.DurationLiteral{Val: 24 * time.Hour}, + }, + }, + Sources: []influxql.Source{ + &influxql.Measurement{Name: "cpu"}, + }, + }, + }, + }, + 
}, + }, + // See issues https://github.com/influxdata/influxdb/issues/1647 // and https://github.com/influxdata/influxdb/issues/4404 // DELETE statement @@ -2342,6 +2487,7 @@ func TestParser_ParseStatement(t *testing.T) { {s: `SELECT (count(foo + sum(bar))) FROM cpu`, err: `expected field argument in count()`}, {s: `SELECT sum(value) + count(foo + sum(bar)) FROM cpu`, err: `binary expressions cannot mix aggregates and raw fields`}, {s: `SELECT mean(value) FROM cpu FILL + value`, err: `fill must be a function call`}, + {s: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(1h))`, err: `aggregate functions with GROUP BY time require a WHERE time clause`}, // See issues https://github.com/influxdata/influxdb/issues/1647 // and https://github.com/influxdata/influxdb/issues/4404 //{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`}, @@ -2520,6 +2666,12 @@ func TestParser_ParseStatement(t *testing.T) { // We are memoizing a field so for testing we need to... if s, ok := tt.stmt.(*influxql.SelectStatement); ok { s.GroupByInterval() + for _, source := range s.Sources { + switch source := source.(type) { + case *influxql.SubQuery: + source.Statement.GroupByInterval() + } + } } else if st, ok := stmt.(*influxql.CreateContinuousQueryStatement); ok { // if it's a CQ, there is a non-exported field that gets memoized during parsing that needs to be set if st != nil && st.Source != nil { tt.stmt.(*influxql.CreateContinuousQueryStatement).Source.GroupByInterval() diff --git a/influxql/point.gen.go b/influxql/point.gen.go index 71c3e3ae41..605a6b3264 100644 --- a/influxql/point.gen.go +++ b/influxql/point.gen.go @@ -58,6 +58,15 @@ func (v *FloatPoint) Clone() *FloatPoint { return &other } +// CopyTo makes a deep copy into the point. 
+func (v *FloatPoint) CopyTo(other *FloatPoint) { + *other = *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } +} + func encodeFloatPoint(p *FloatPoint) *internal.Point { return &internal.Point{ Name: proto.String(p.Name), @@ -260,6 +269,15 @@ func (v *IntegerPoint) Clone() *IntegerPoint { return &other } +// CopyTo makes a deep copy into the point. +func (v *IntegerPoint) CopyTo(other *IntegerPoint) { + *other = *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } +} + func encodeIntegerPoint(p *IntegerPoint) *internal.Point { return &internal.Point{ Name: proto.String(p.Name), @@ -462,6 +480,15 @@ func (v *StringPoint) Clone() *StringPoint { return &other } +// CopyTo makes a deep copy into the point. +func (v *StringPoint) CopyTo(other *StringPoint) { + *other = *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } +} + func encodeStringPoint(p *StringPoint) *internal.Point { return &internal.Point{ Name: proto.String(p.Name), @@ -664,6 +691,15 @@ func (v *BooleanPoint) Clone() *BooleanPoint { return &other } +// CopyTo makes a deep copy into the point. +func (v *BooleanPoint) CopyTo(other *BooleanPoint) { + *other = *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } +} + func encodeBooleanPoint(p *BooleanPoint) *internal.Point { return &internal.Point{ Name: proto.String(p.Name), diff --git a/influxql/point.gen.go.tmpl b/influxql/point.gen.go.tmpl index 4e16441b9e..71791d35be 100644 --- a/influxql/point.gen.go.tmpl +++ b/influxql/point.gen.go.tmpl @@ -54,6 +54,15 @@ func (v *{{.Name}}Point) Clone() *{{.Name}}Point { return &other } +// CopyTo makes a deep copy into the point. 
+func (v *{{.Name}}Point) CopyTo(other *{{.Name}}Point) { + *other = *v + if v.Aux != nil { + other.Aux = make([]interface{}, len(v.Aux)) + copy(other.Aux, v.Aux) + } +} + func encode{{.Name}}Point(p *{{.Name}}Point) *internal.Point { return &internal.Point{ Name: proto.String(p.Name), diff --git a/influxql/point.go b/influxql/point.go index 9eba4e482f..d1bd292d16 100644 --- a/influxql/point.go +++ b/influxql/point.go @@ -283,6 +283,15 @@ func decodeAux(pb []*internal.Aux) []interface{} { return aux } +func cloneAux(src []interface{}) []interface{} { + if src == nil { + return src + } + dest := make([]interface{}, len(src)) + copy(dest, src) + return dest +} + // PointDecoder decodes generic points from a reader. type PointDecoder struct { r io.Reader diff --git a/influxql/select.go b/influxql/select.go index 49e8b689c7..a0a0c9be8a 100644 --- a/influxql/select.go +++ b/influxql/select.go @@ -37,7 +37,10 @@ func Select(stmt *SelectStatement, ic IteratorCreator, sopt *SelectOptions) ([]I if err != nil { return nil, err } + return buildIterators(stmt, ic, opt) +} +func buildIterators(stmt *SelectStatement, ic IteratorCreator, opt IteratorOptions) ([]Iterator, error) { // Retrieve refs for each call and var ref. info := newSelectInfo(stmt) if len(info.calls) > 1 && len(info.refs) > 0 { @@ -53,7 +56,7 @@ func Select(stmt *SelectStatement, ic IteratorCreator, sopt *SelectOptions) ([]I // If there are multiple auxilary fields and no calls then construct an aux iterator. 
if len(info.calls) == 0 && len(info.refs) > 0 { - return buildAuxIterators(stmt.Fields, ic, opt) + return buildAuxIterators(stmt.Fields, ic, stmt.Sources, opt) } // Include auxiliary fields from top() and bottom() @@ -89,21 +92,135 @@ func Select(stmt *SelectStatement, ic IteratorCreator, sopt *SelectOptions) ([]I selector := false if len(info.calls) == 1 { for call := range info.calls { - switch call.Name { - case "first", "last", "min", "max", "percentile": - selector = true - } + selector = IsSelector(call) } } - return buildFieldIterators(fields, ic, opt, selector) + return buildFieldIterators(fields, ic, stmt.Sources, opt, selector) } // buildAuxIterators creates a set of iterators from a single combined auxiliary iterator. -func buildAuxIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) ([]Iterator, error) { - // Create iterator to read auxiliary fields. - input, err := ic.CreateIterator(opt) +func buildAuxIterators(fields Fields, ic IteratorCreator, sources Sources, opt IteratorOptions) ([]Iterator, error) { + // Create the auxiliary iterators for each source. + inputs := make([]Iterator, 0, len(sources)) + if err := func() error { + for _, source := range sources { + switch source := source.(type) { + case *Measurement: + input, err := ic.CreateIterator(source, opt) + if err != nil { + return err + } + inputs = append(inputs, input) + case *SubQuery: + fields := make([]*Field, 0, len(opt.Aux)) + indexes := make([]int, len(opt.Aux)) + offset := 0 + for i, name := range opt.Aux { + // Search through the fields to find one that matches this auxiliary field. + var match *Field + FIELDS: + for _, f := range source.Statement.Fields { + if f.Name() == name.Val { + match = f + break + } else if call, ok := f.Expr.(*Call); ok && (call.Name == "top" || call.Name == "bottom") { + // We may match one of the arguments in "top" or "bottom". 
+ if len(call.Args) > 2 { + for j, arg := range call.Args[1 : len(call.Args)-1] { + if arg, ok := arg.(*VarRef); ok && arg.Val == name.Val { + match = f + // Increment the offset since we are looking for the tag + // associated with this value rather than the value itself. + offset += j + 1 + break FIELDS + } + } + } + } + } + + // Look within the dimensions and create a field if we find it. + if match == nil { + for _, d := range source.Statement.Dimensions { + if d, ok := d.Expr.(*VarRef); ok && name.Val == d.Val { + match = &Field{ + Expr: &VarRef{ + Val: d.Val, + Type: Tag, + }, + } + break + } + } + } + + // There is no field that matches this name so signal this + // should be a nil iterator. + if match == nil { + match = &Field{Expr: (*nilLiteral)(nil)} + } + fields = append(fields, match) + indexes[i] = i + offset + } + + // Check if we need any selectors within the selected fields. + // If we have an expression that relies on the selector, we + // need to include that even if it isn't referenced directly. + var selector *Field + for _, f := range source.Statement.Fields { + if IsSelector(f.Expr) { + selector = f + break + } + } + + // There is a selector in the inner query. Now check if we have that selector + // in the constructed fields list. + if selector != nil { + hasSelector := false + for _, f := range fields { + if _, ok := f.Expr.(*Call); ok { + hasSelector = true + break + } + } + + if !hasSelector { + // Append the selector to the statement fields. + fields = append(fields, selector) + } + } + + // Clone the statement and replace the fields with our custom ordering. + stmt := source.Statement.Clone() + stmt.Fields = fields + + subOpt, err := newIteratorOptionsSubstatement(stmt, opt) + if err != nil { + return err + } + + itrs, err := buildIterators(stmt, ic, subOpt) + if err != nil { + return err + } + + // Construct the iterators for the subquery. 
+ input := NewIteratorMapper(itrs, indexes, opt) + inputs = append(inputs, input) + } + } + return nil + }(); err != nil { + Iterators(inputs).Close() + return nil, err + } + + // Merge iterators to read auxilary fields. + input, err := Iterators(inputs).Merge(opt) if err != nil { + Iterators(inputs).Close() return nil, err } else if input == nil { input = &nilFloatIterator{} @@ -128,7 +245,7 @@ func buildAuxIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) ( input = NewLimitIterator(input, opt) } - // Wrap in an auxilary iterator to separate the fields. + // Wrap in an auxiliary iterator to separate the fields. aitr := NewAuxIterator(input, opt) // Generate iterators for each field. @@ -136,18 +253,11 @@ func buildAuxIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) ( if err := func() error { for i, f := range fields { expr := Reduce(f.Expr, nil) - switch expr := expr.(type) { - case *VarRef: - itrs[i] = aitr.Iterator(expr.Val, expr.Type) - case *BinaryExpr: - itr, err := buildExprIterator(expr, aitr, opt, false) - if err != nil { - return fmt.Errorf("error constructing iterator for field '%s': %s", f.String(), err) - } - itrs[i] = itr - default: - return fmt.Errorf("invalid expression type: %T", expr) + itr, err := buildAuxIterator(expr, aitr, opt) + if err != nil { + return err } + itrs[i] = itr } return nil }(); err != nil { @@ -162,8 +272,54 @@ func buildAuxIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) ( return itrs, nil } +// buildAuxIterator constructs an Iterator for an expression from an AuxIterator. +func buildAuxIterator(expr Expr, aitr AuxIterator, opt IteratorOptions) (Iterator, error) { + switch expr := expr.(type) { + case *VarRef: + return aitr.Iterator(expr.Val, expr.Type), nil + case *BinaryExpr: + if rhs, ok := expr.RHS.(Literal); ok { + // The right hand side is a literal. It is more common to have the RHS be a literal, + // so we check that one first and have this be the happy path. 
+ if lhs, ok := expr.LHS.(Literal); ok { + // We have two literals that couldn't be combined by Reduce. + return nil, fmt.Errorf("unable to construct an iterator from two literals: LHS: %T, RHS: %T", lhs, rhs) + } + + lhs, err := buildAuxIterator(expr.LHS, aitr, opt) + if err != nil { + return nil, err + } + return buildRHSTransformIterator(lhs, rhs, expr.Op, opt) + } else if lhs, ok := expr.LHS.(Literal); ok { + rhs, err := buildAuxIterator(expr.RHS, aitr, opt) + if err != nil { + return nil, err + } + return buildLHSTransformIterator(lhs, rhs, expr.Op, opt) + } else { + // We have two iterators. Combine them into a single iterator. + lhs, err := buildAuxIterator(expr.LHS, aitr, opt) + if err != nil { + return nil, err + } + rhs, err := buildAuxIterator(expr.RHS, aitr, opt) + if err != nil { + return nil, err + } + return buildTransformIterator(lhs, rhs, expr.Op, opt) + } + case *ParenExpr: + return buildAuxIterator(expr.Expr, aitr, opt) + case *nilLiteral: + return &nilFloatIterator{}, nil + default: + return nil, fmt.Errorf("invalid expression type: %T", expr) + } +} + // buildFieldIterators creates an iterator for each field expression. -func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions, selector bool) ([]Iterator, error) { +func buildFieldIterators(fields Fields, ic IteratorCreator, sources Sources, opt IteratorOptions, selector bool) ([]Iterator, error) { // Create iterators from fields against the iterator creator. 
itrs := make([]Iterator, len(fields)) @@ -181,9 +337,11 @@ func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions, } expr := Reduce(f.Expr, nil) - itr, err := buildExprIterator(expr, ic, opt, selector) + itr, err := buildExprIterator(expr, ic, sources, opt, selector) if err != nil { return err + } else if itr == nil { + itr = &nilFloatIterator{} } itrs[i] = itr input = itr @@ -203,9 +361,11 @@ func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions, } expr := Reduce(f.Expr, nil) - itr, err := buildExprIterator(expr, aitr, opt, false) + itr, err := buildAuxIterator(expr, aitr, opt) if err != nil { return err + } else if itr == nil { + itr = &nilFloatIterator{} } itrs[i] = itr } @@ -228,277 +388,574 @@ func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions, } // buildExprIterator creates an iterator for an expression. -func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions, selector bool) (Iterator, error) { +func buildExprIterator(expr Expr, ic IteratorCreator, sources Sources, opt IteratorOptions, selector bool) (Iterator, error) { opt.Expr = expr + b := exprIteratorBuilder{ + ic: ic, + sources: sources, + opt: opt, + selector: selector, + } switch expr := expr.(type) { case *VarRef: - itr, err := ic.CreateIterator(opt) - if err != nil { - return nil, err - } else if itr == nil { - itr = &nilFloatIterator{} - } - return itr, nil + return b.buildVarRefIterator(expr) case *Call: - // FIXME(benbjohnson): Validate that only calls with 1 arg are passed to IC. 
- - switch expr.Name { - case "distinct": - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, selector) - if err != nil { - return nil, err - } - input, err = NewDistinctIterator(input, opt) - if err != nil { - return nil, err - } - return NewIntervalIterator(input, opt), nil - case "sample": - input, err := buildExprIterator(expr.Args[0], ic, opt, selector) - if err != nil { - return nil, err - } - size := expr.Args[1].(*IntegerLiteral) - - return newSampleIterator(input, opt, int(size.Val)) - case "holt_winters", "holt_winters_with_fit": - input, err := buildExprIterator(expr.Args[0], ic, opt, selector) - if err != nil { - return nil, err - } - h := expr.Args[1].(*IntegerLiteral) - m := expr.Args[2].(*IntegerLiteral) - - includeFitData := "holt_winters_with_fit" == expr.Name - - interval := opt.Interval.Duration - // Redifine interval to be unbounded to capture all aggregate results - opt.StartTime = MinTime - opt.EndTime = MaxTime - opt.Interval = Interval{} - - return newHoltWintersIterator(input, opt, int(h.Val), int(m.Val), includeFitData, interval) - case "derivative", "non_negative_derivative", "difference", "moving_average", "elapsed": - if !opt.Interval.IsZero() { - if opt.Ascending { - opt.StartTime -= int64(opt.Interval.Duration) - } else { - opt.EndTime += int64(opt.Interval.Duration) - } - } - - input, err := buildExprIterator(expr.Args[0], ic, opt, selector) - if err != nil { - return nil, err - } - - switch expr.Name { - case "derivative", "non_negative_derivative": - interval := opt.DerivativeInterval() - isNonNegative := (expr.Name == "non_negative_derivative") - return newDerivativeIterator(input, opt, interval, isNonNegative) - case "elapsed": - interval := opt.ElapsedInterval() - return newElapsedIterator(input, opt, interval) - case "difference": - return newDifferenceIterator(input, opt) - case "moving_average": - n := expr.Args[1].(*IntegerLiteral) - if n.Val > 1 && !opt.Interval.IsZero() { - if opt.Ascending { - opt.StartTime 
-= int64(opt.Interval.Duration) * (n.Val - 1) - } else { - opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) - } - } - return newMovingAverageIterator(input, int(n.Val), opt) - } - panic(fmt.Sprintf("invalid series aggregate function: %s", expr.Name)) - case "cumulative_sum": - input, err := buildExprIterator(expr.Args[0], ic, opt, selector) - if err != nil { - return nil, err - } - return newCumulativeSumIterator(input, opt) - default: - itr, err := func() (Iterator, error) { - switch expr.Name { - case "count": - switch arg := expr.Args[0].(type) { - case *Call: - if arg.Name == "distinct" { - input, err := buildExprIterator(arg, ic, opt, selector) - if err != nil { - return nil, err - } - return newCountIterator(input, opt) - } - } - - itr, err := ic.CreateIterator(opt) - if err != nil { - return nil, err - } else if itr == nil { - itr = &nilFloatIterator{} - } - return itr, nil - case "min", "max", "sum", "first", "last", "mean": - itr, err := ic.CreateIterator(opt) - if err != nil { - return nil, err - } else if itr == nil { - itr = &nilFloatIterator{} - } - return itr, nil - case "median": - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) - if err != nil { - return nil, err - } - return newMedianIterator(input, opt) - case "mode": - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) - if err != nil { - return nil, err - } - return NewModeIterator(input, opt) - case "stddev": - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) - if err != nil { - return nil, err - } - return newStddevIterator(input, opt) - case "spread": - // OPTIMIZE(benbjohnson): convert to map/reduce - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) - if err != nil { - return nil, err - } - return newSpreadIterator(input, opt) - case "top": - var tags []int - if len(expr.Args) < 2 { - return nil, fmt.Errorf("top() requires 2 or more arguments, got %d", len(expr.Args)) - } else if 
len(expr.Args) > 2 { - // We need to find the indices of where the tag values are stored in Aux - // This section is O(n^2), but for what should be a low value. - for i := 1; i < len(expr.Args)-1; i++ { - ref := expr.Args[i].(*VarRef) - for index, aux := range opt.Aux { - if aux.Val == ref.Val { - tags = append(tags, index) - break - } - } - } - } - - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) - if err != nil { - return nil, err - } - n := expr.Args[len(expr.Args)-1].(*IntegerLiteral) - return newTopIterator(input, opt, n, tags) - case "bottom": - var tags []int - if len(expr.Args) < 2 { - return nil, fmt.Errorf("bottom() requires 2 or more arguments, got %d", len(expr.Args)) - } else if len(expr.Args) > 2 { - // We need to find the indices of where the tag values are stored in Aux - // This section is O(n^2), but for what should be a low value. - for i := 1; i < len(expr.Args)-1; i++ { - ref := expr.Args[i].(*VarRef) - for index, aux := range opt.Aux { - if aux.Val == ref.Val { - tags = append(tags, index) - break - } - } - } - } - - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) - if err != nil { - return nil, err - } - n := expr.Args[len(expr.Args)-1].(*IntegerLiteral) - return newBottomIterator(input, opt, n, tags) - case "percentile": - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) - if err != nil { - return nil, err - } - var percentile float64 - switch arg := expr.Args[1].(type) { - case *NumberLiteral: - percentile = arg.Val - case *IntegerLiteral: - percentile = float64(arg.Val) - } - return newPercentileIterator(input, opt, percentile) - default: - return nil, fmt.Errorf("unsupported call: %s", expr.Name) - } - }() - - if err != nil { - return nil, err - } - - if !selector || !opt.Interval.IsZero() { - if expr.Name != "top" && expr.Name != "bottom" { - itr = NewIntervalIterator(itr, opt) - } - if !opt.Interval.IsZero() && opt.Fill != NoFill { - itr = NewFillIterator(itr, expr, 
opt) - } - } - if opt.InterruptCh != nil { - itr = NewInterruptIterator(itr, opt.InterruptCh) - } - return itr, nil - } + return b.buildCallIterator(expr) case *BinaryExpr: - if rhs, ok := expr.RHS.(Literal); ok { - // The right hand side is a literal. It is more common to have the RHS be a literal, - // so we check that one first and have this be the happy path. - if lhs, ok := expr.LHS.(Literal); ok { - // We have two literals that couldn't be combined by Reduce. - return nil, fmt.Errorf("unable to construct an iterator from two literals: LHS: %T, RHS: %T", lhs, rhs) - } - - lhs, err := buildExprIterator(expr.LHS, ic, opt, false) - if err != nil { - return nil, err - } - return buildRHSTransformIterator(lhs, rhs, expr.Op, ic, opt) - } else if lhs, ok := expr.LHS.(Literal); ok { - rhs, err := buildExprIterator(expr.RHS, ic, opt, false) - if err != nil { - return nil, err - } - return buildLHSTransformIterator(lhs, rhs, expr.Op, ic, opt) - } else { - // We have two iterators. Combine them into a single iterator. 
- lhs, err := buildExprIterator(expr.LHS, ic, opt, false) - if err != nil { - return nil, err - } - rhs, err := buildExprIterator(expr.RHS, ic, opt, false) - if err != nil { - return nil, err - } - return buildTransformIterator(lhs, rhs, expr.Op, ic, opt) - } + return b.buildBinaryExprIterator(expr) case *ParenExpr: - return buildExprIterator(expr.Expr, ic, opt, selector) + return buildExprIterator(expr.Expr, ic, sources, opt, selector) + case *nilLiteral: + return &nilFloatIterator{}, nil default: return nil, fmt.Errorf("invalid expression type: %T", expr) } } -func buildRHSTransformIterator(lhs Iterator, rhs Literal, op Token, ic IteratorCreator, opt IteratorOptions) (Iterator, error) { +type exprIteratorBuilder struct { + ic IteratorCreator + sources Sources + opt IteratorOptions + selector bool +} + +func (b *exprIteratorBuilder) buildVarRefIterator(expr *VarRef) (Iterator, error) { + inputs := make([]Iterator, 0, len(b.sources)) + if err := func() error { + for _, source := range b.sources { + switch source := source.(type) { + case *Measurement: + input, err := b.ic.CreateIterator(source, b.opt) + if err != nil { + return err + } + inputs = append(inputs, input) + case *SubQuery: + info := newSelectInfo(source.Statement) + if len(info.calls) > 1 && len(info.refs) > 0 { + return errors.New("cannot select fields when selecting multiple aggregates from subquery") + } + + if input, err := func() (Iterator, error) { + // Look for the field that matches this name. + i, e := source.Statement.FieldExprByName(expr.Val) + if e == nil { + return nil, nil + } + f := source.Statement.Fields[i] + + // Retrieve the select info for the substatement. + info := newSelectInfo(source.Statement) + if len(info.calls) == 0 && len(info.refs) > 0 { + // There are no aggregates in the subquery, so + // it is just a raw query. Match the auxiliary + // fields to the other fields and pass as-is. 
+ subOpt, err := newIteratorOptionsSubstatement(source.Statement, b.opt) + if err != nil { + return nil, err + } + + subOpt.Aux = make([]VarRef, len(b.opt.Aux)) + for i, ref := range b.opt.Aux { + if ref.Type != Tag { + for _, f := range source.Statement.Fields { + if f.Name() == ref.Val { + subOpt.Aux[i] = *(e.(*VarRef)) + break + } + } + } + + // Look in the dimensions. + if subOpt.Aux[i].Val == "" && (ref.Type == Unknown || ref.Type == Tag) { + for _, d := range source.Statement.Dimensions { + if d, ok := d.Expr.(*VarRef); ok && ref.Val == d.Val { + subOpt.Aux[i] = VarRef{ + Val: d.Val, + Type: Tag, + } + break + } + } + } + } + return buildExprIterator(e, b.ic, source.Statement.Sources, subOpt, false) + } + + switch e := e.(type) { + case *VarRef: + // If the field we selected is a variable + // reference, then we need to find the associated + // selector (and ensure it is actually a selector) + // and build the iterator off of that. + selector := info.FindSelector() + if selector == nil { + return nil, nil + } + + subOpt, err := newIteratorOptionsSubstatement(source.Statement, b.opt) + if err != nil { + return nil, err + } + + // If we have top() or bottom(), we need to + // fill the aux fields with what is in the + // function even if we aren't using the result. + if call, ok := f.Expr.(*Call); ok && (call.Name == "top" || call.Name == "bottom") { + // Prepare the auxiliary fields for this call. + subOpt.Aux = make([]VarRef, 0, len(call.Args)-1) + + // Look for the auxiliary field inside of the call. + // If we can't find it, then add it to the end. + hasVarRef := false + for _, arg := range call.Args[1 : len(call.Args)-1] { + if arg, ok := arg.(*VarRef); ok { + subOpt.Aux = append(subOpt.Aux, *arg) + if arg.Val == e.Val { + hasVarRef = true + } + } + } + + // We need to attach the actual auxiliary field we're looking + // for if it wasn't in the argument list. + // This is for SELECT top(value, 1), host. 
+ if !hasVarRef { + subOpt.Aux = append(subOpt.Aux, *e) + } + } else { + subOpt.Aux = []VarRef{*e} + } + + // Construct the selector iterator. + input, err := buildExprIterator(selector, b.ic, source.Statement.Sources, subOpt, true) + if err != nil { + return nil, err + } + + // Create an auxiliary iterator. + aitr := NewAuxIterator(input, subOpt) + itr := aitr.Iterator(e.Val, e.Type) + aitr.Background() + return itr, nil + case *Call: + subOpt, err := newIteratorOptionsSubstatement(source.Statement, b.opt) + if err != nil { + return nil, err + } + + if len(b.opt.Aux) > 0 { + subOpt.Aux = make([]VarRef, len(b.opt.Aux)) + for i, ref := range b.opt.Aux { + _, expr := source.Statement.FieldExprByName(ref.Val) + if expr != nil { + v, ok := expr.(*VarRef) + if ok { + subOpt.Aux[i] = *v + continue + } + } + + if ref.Type == Unknown || ref.Type == Tag { + for _, d := range source.Statement.Dimensions { + if d, ok := d.Expr.(*VarRef); ok && ref.Val == d.Val { + subOpt.Aux[i] = VarRef{ + Val: d.Val, + Type: Tag, + } + break + } + } + } + } + } + + // Check if this is a selector or not and + // create the iterator directly. + selector := len(info.calls) == 1 && IsSelector(e) + return buildExprIterator(e, b.ic, source.Statement.Sources, subOpt, selector) + case *BinaryExpr: + // Retrieve the calls and references for this binary expression. + // There should be no mixing of calls and refs. + i := selectInfo{ + calls: make(map[*Call]struct{}), + refs: make(map[*VarRef]struct{}), + } + Walk(&i, e) + + opt, err := newIteratorOptionsSubstatement(source.Statement, b.opt) + if err != nil { + return nil, err + } + + if len(i.refs) > 0 { + if len(b.opt.Aux) > 0 { + // Catch this so we don't cause a panic. This + // is too difficult to implement now though. + // TODO(jsternberg): Implement this. + return nil, errors.New("unsupported") + } + + selector := info.FindSelector() + if selector == nil { + return nil, nil + } + + // Prepare the auxiliary iterators with the refs we care about. 
+ opt.Aux = make([]VarRef, 0, len(i.refs)) + for ref := range i.refs { + opt.Aux = append(opt.Aux, *ref) + } + + input, err := buildExprIterator(selector, b.ic, source.Statement.Sources, opt, true) + if err != nil { + return nil, err + } + + aitr := NewAuxIterator(input, opt) + itr, err := buildAuxIterator(e, aitr, opt) + if err != nil { + aitr.Close() + return nil, err + } + aitr.Background() + return itr, nil + } + + // Determine if this expression is a selector or not. + selector := len(i.calls) == 1 && len(info.calls) == 1 + // Prepare the auxiliary fields we need. + if len(b.opt.Aux) > 0 { + opt.Aux = make([]VarRef, len(b.opt.Aux)) + for i, ref := range b.opt.Aux { + _, expr := source.Statement.FieldExprByName(ref.Val) + if v, ok := expr.(*VarRef); ok { + opt.Aux[i] = *v + } + } + } + + // Build the iterator using the options we created. + return buildExprIterator(e, b.ic, source.Statement.Sources, opt, selector) + default: + panic(fmt.Sprintf("unsupported use of %T in a subquery", e)) + } + }(); err != nil { + return err + } else if input != nil { + inputs = append(inputs, input) + } + } + } + return nil + }(); err != nil { + Iterators(inputs).Close() + return nil, err + } + + // Variable references in this section will always go into some call + // iterator. Combine it with a merge iterator. + itr := NewMergeIterator(inputs, b.opt) + if itr == nil { + itr = &nilFloatIterator{} + } + + if b.opt.InterruptCh != nil { + itr = NewInterruptIterator(itr, b.opt.InterruptCh) + } + return itr, nil +} + +func (b *exprIteratorBuilder) buildCallIterator(expr *Call) (Iterator, error) { + // TODO(jsternberg): Refactor this. This section needs to die in a fire. 
+ switch expr.Name { + case "distinct": + input, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, b.opt, b.selector) + if err != nil { + return nil, err + } + input, err = NewDistinctIterator(input, b.opt) + if err != nil { + return nil, err + } + return NewIntervalIterator(input, b.opt), nil + case "sample": + input, err := buildExprIterator(expr.Args[0], b.ic, b.sources, b.opt, b.selector) + if err != nil { + return nil, err + } + size := expr.Args[1].(*IntegerLiteral) + + return newSampleIterator(input, b.opt, int(size.Val)) + case "holt_winters", "holt_winters_with_fit": + input, err := buildExprIterator(expr.Args[0], b.ic, b.sources, b.opt, b.selector) + if err != nil { + return nil, err + } + h := expr.Args[1].(*IntegerLiteral) + m := expr.Args[2].(*IntegerLiteral) + + includeFitData := "holt_winters_with_fit" == expr.Name + + interval := b.opt.Interval.Duration + // Redefine interval to be unbounded to capture all aggregate results + opt := b.opt + opt.StartTime = MinTime + opt.EndTime = MaxTime + opt.Interval = Interval{} + + return newHoltWintersIterator(input, opt, int(h.Val), int(m.Val), includeFitData, interval) + case "derivative", "non_negative_derivative", "difference", "moving_average", "elapsed": + opt := b.opt + if !opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) + } else { + opt.EndTime += int64(opt.Interval.Duration) + } + } + + input, err := buildExprIterator(expr.Args[0], b.ic, b.sources, opt, b.selector) + if err != nil { + return nil, err + } + + switch expr.Name { + case "derivative", "non_negative_derivative": + interval := opt.DerivativeInterval() + isNonNegative := (expr.Name == "non_negative_derivative") + return newDerivativeIterator(input, opt, interval, isNonNegative) + case "elapsed": + interval := opt.ElapsedInterval() + return newElapsedIterator(input, opt, interval) + case "difference": + return newDifferenceIterator(input, opt) + case "moving_average": + n := 
expr.Args[1].(*IntegerLiteral) + if n.Val > 1 && !b.opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) + } else { + opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) + } + } + return newMovingAverageIterator(input, int(n.Val), opt) + } + panic(fmt.Sprintf("invalid series aggregate function: %s", expr.Name)) + case "cumulative_sum": + input, err := buildExprIterator(expr.Args[0], b.ic, b.sources, b.opt, b.selector) + if err != nil { + return nil, err + } + return newCumulativeSumIterator(input, b.opt) + } + + itr, err := func() (Iterator, error) { + switch expr.Name { + case "count": + switch arg0 := expr.Args[0].(type) { + case *Call: + if arg0.Name == "distinct" { + input, err := buildExprIterator(arg0, b.ic, b.sources, b.opt, b.selector) + if err != nil { + return nil, err + } + return newCountIterator(input, b.opt) + } + } + fallthrough + case "min", "max", "sum", "first", "last", "mean": + inputs := make([]Iterator, 0, len(b.sources)) + if err := func() error { + for _, source := range b.sources { + switch source := source.(type) { + case *Measurement: + input, err := b.ic.CreateIterator(source, b.opt) + if err != nil { + return err + } + inputs = append(inputs, input) + case *SubQuery: + // Identify the name of the field we are using. + arg0 := expr.Args[0].(*VarRef) + + input, err := buildExprIterator(arg0, b.ic, []Source{source}, b.opt, b.selector) + if err != nil { + return err + } + + // Wrap the result in a call iterator. 
+ i, err := NewCallIterator(input, b.opt) + if err != nil { + input.Close() + return err + } + inputs = append(inputs, i) + } + } + return nil + }(); err != nil { + Iterators(inputs).Close() + return nil, err + } + + itr, err := Iterators(inputs).Merge(b.opt) + if err != nil { + Iterators(inputs).Close() + return nil, err + } else if itr == nil { + itr = &nilFloatIterator{} + } + return itr, nil + case "median": + opt := b.opt + opt.Ordered = true + input, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, opt, false) + if err != nil { + return nil, err + } + return newMedianIterator(input, opt) + case "mode": + input, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, b.opt, false) + if err != nil { + return nil, err + } + return NewModeIterator(input, b.opt) + case "stddev": + input, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, b.opt, false) + if err != nil { + return nil, err + } + return newStddevIterator(input, b.opt) + case "spread": + // OPTIMIZE(benbjohnson): convert to map/reduce + input, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, b.opt, false) + if err != nil { + return nil, err + } + return newSpreadIterator(input, b.opt) + case "top": + var tags []int + if len(expr.Args) < 2 { + return nil, fmt.Errorf("top() requires 2 or more arguments, got %d", len(expr.Args)) + } else if len(expr.Args) > 2 { + // We need to find the indices of where the tag values are stored in Aux + // This section is O(n^2), but for what should be a low value. 
+ for i := 1; i < len(expr.Args)-1; i++ { + ref := expr.Args[i].(*VarRef) + for index, aux := range b.opt.Aux { + if aux.Val == ref.Val { + tags = append(tags, index) + break + } + } + } + } + + input, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, b.opt, false) + if err != nil { + return nil, err + } + n := expr.Args[len(expr.Args)-1].(*IntegerLiteral) + return newTopIterator(input, b.opt, n, tags) + case "bottom": + var tags []int + if len(expr.Args) < 2 { + return nil, fmt.Errorf("bottom() requires 2 or more arguments, got %d", len(expr.Args)) + } else if len(expr.Args) > 2 { + // We need to find the indices of where the tag values are stored in Aux + // This section is O(n^2), but for what should be a low value. + for i := 1; i < len(expr.Args)-1; i++ { + ref := expr.Args[i].(*VarRef) + for index, aux := range b.opt.Aux { + if aux.Val == ref.Val { + tags = append(tags, index) + break + } + } + } + } + + input, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, b.opt, false) + if err != nil { + return nil, err + } + n := expr.Args[len(expr.Args)-1].(*IntegerLiteral) + return newBottomIterator(input, b.opt, n, tags) + case "percentile": + opt := b.opt + opt.Ordered = true + input, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, opt, false) + if err != nil { + return nil, err + } + var percentile float64 + switch arg := expr.Args[1].(type) { + case *NumberLiteral: + percentile = arg.Val + case *IntegerLiteral: + percentile = float64(arg.Val) + } + return newPercentileIterator(input, opt, percentile) + default: + return nil, fmt.Errorf("unsupported call: %s", expr.Name) + } + }() + + if err != nil { + return nil, err + } + + if !b.selector || !b.opt.Interval.IsZero() { + if expr.Name != "top" && expr.Name != "bottom" { + itr = NewIntervalIterator(itr, b.opt) + } + if !b.opt.Interval.IsZero() && b.opt.Fill != NoFill { + itr = NewFillIterator(itr, expr, b.opt) + } + } + if b.opt.InterruptCh != nil { + itr = 
NewInterruptIterator(itr, b.opt.InterruptCh) + } + return itr, nil +} + +func (b *exprIteratorBuilder) buildBinaryExprIterator(expr *BinaryExpr) (Iterator, error) { + if rhs, ok := expr.RHS.(Literal); ok { + // The right hand side is a literal. It is more common to have the RHS be a literal, + // so we check that one first and have this be the happy path. + if lhs, ok := expr.LHS.(Literal); ok { + // We have two literals that couldn't be combined by Reduce. + return nil, fmt.Errorf("unable to construct an iterator from two literals: LHS: %T, RHS: %T", lhs, rhs) + } + + lhs, err := buildExprIterator(expr.LHS, b.ic, b.sources, b.opt, IsSelector(expr.LHS)) + if err != nil { + return nil, err + } + return buildRHSTransformIterator(lhs, rhs, expr.Op, b.opt) + } else if lhs, ok := expr.LHS.(Literal); ok { + rhs, err := buildExprIterator(expr.RHS, b.ic, b.sources, b.opt, IsSelector(expr.RHS)) + if err != nil { + return nil, err + } + return buildLHSTransformIterator(lhs, rhs, expr.Op, b.opt) + } else { + // We have two iterators. Combine them into a single iterator. 
+ lhs, err := buildExprIterator(expr.LHS, b.ic, b.sources, b.opt, false) + if err != nil { + return nil, err + } + rhs, err := buildExprIterator(expr.RHS, b.ic, b.sources, b.opt, false) + if err != nil { + return nil, err + } + return buildTransformIterator(lhs, rhs, expr.Op, b.opt) + } +} + +func buildRHSTransformIterator(lhs Iterator, rhs Literal, op Token, opt IteratorOptions) (Iterator, error) { fn := binaryExprFunc(iteratorDataType(lhs), literalDataType(rhs), op) switch fn := fn.(type) { case func(float64, float64) float64: @@ -677,7 +1134,7 @@ func buildRHSTransformIterator(lhs Iterator, rhs Literal, op Token, ic IteratorC return nil, fmt.Errorf("unable to construct rhs transform iterator from %T and %T", lhs, rhs) } -func buildLHSTransformIterator(lhs Literal, rhs Iterator, op Token, ic IteratorCreator, opt IteratorOptions) (Iterator, error) { +func buildLHSTransformIterator(lhs Literal, rhs Iterator, op Token, opt IteratorOptions) (Iterator, error) { fn := binaryExprFunc(literalDataType(lhs), iteratorDataType(rhs), op) switch fn := fn.(type) { case func(float64, float64) float64: @@ -856,7 +1313,7 @@ func buildLHSTransformIterator(lhs Literal, rhs Iterator, op Token, ic IteratorC return nil, fmt.Errorf("unable to construct lhs transform iterator from %T and %T", lhs, rhs) } -func buildTransformIterator(lhs Iterator, rhs Iterator, op Token, ic IteratorCreator, opt IteratorOptions) (Iterator, error) { +func buildTransformIterator(lhs Iterator, rhs Iterator, op Token, opt IteratorOptions) (Iterator, error) { fn := binaryExprFunc(iteratorDataType(lhs), iteratorDataType(rhs), op) switch fn := fn.(type) { case func(float64, float64) float64: diff --git a/influxql/select_test.go b/influxql/select_test.go index 9c66c7c9a2..d738207a15 100644 --- a/influxql/select_test.go +++ b/influxql/select_test.go @@ -17,7 +17,10 @@ const Second = int64(time.Second) // Ensure a SELECT min() query can be executed. 
func TestSelect_Min(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } if !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value)`)) { t.Fatalf("unexpected expr: %s", spew.Sdump(opt.Expr)) } @@ -51,7 +54,10 @@ func TestSelect_Min(t *testing.T) { // Ensure a SELECT distinct() query can be executed. func TestSelect_Distinct_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, @@ -82,7 +88,10 @@ func TestSelect_Distinct_Float(t *testing.T) { // Ensure a SELECT distinct() query can be executed. func TestSelect_Distinct_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, @@ -113,7 +122,10 @@ func TestSelect_Distinct_Integer(t *testing.T) { // Ensure a SELECT distinct() query can be executed. 
func TestSelect_Distinct_String(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &StringIterator{Points: []influxql.StringPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: "b"}, @@ -144,7 +156,10 @@ func TestSelect_Distinct_String(t *testing.T) { // Ensure a SELECT distinct() query can be executed. func TestSelect_Distinct_Boolean(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &BooleanIterator{Points: []influxql.BooleanPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: false}, @@ -176,7 +191,10 @@ func TestSelect_Distinct_Boolean(t *testing.T) { // Ensure a SELECT mean() query can be executed. 
func TestSelect_Mean_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -213,7 +231,10 @@ func TestSelect_Mean_Float(t *testing.T) { // Ensure a SELECT mean() query can be executed. func TestSelect_Mean_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -250,7 +271,10 @@ func TestSelect_Mean_Integer(t *testing.T) { // Ensure a SELECT mean() query cannot be executed on strings. func TestSelect_Mean_String(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&StringIterator{}, opt) } @@ -268,7 +292,10 @@ func TestSelect_Mean_String(t *testing.T) { // Ensure a SELECT mean() query cannot be executed on booleans. 
func TestSelect_Mean_Boolean(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&BooleanIterator{}, opt) } @@ -286,7 +313,10 @@ func TestSelect_Mean_Boolean(t *testing.T) { // Ensure a SELECT median() query can be executed. func TestSelect_Median_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -323,7 +353,10 @@ func TestSelect_Median_Float(t *testing.T) { // Ensure a SELECT median() query can be executed. func TestSelect_Median_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -360,7 +393,10 @@ func TestSelect_Median_Integer(t *testing.T) { // Ensure a SELECT median() query cannot be executed on strings. 
func TestSelect_Median_String(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &StringIterator{}, nil } @@ -378,7 +414,10 @@ func TestSelect_Median_String(t *testing.T) { // Ensure a SELECT median() query cannot be executed on booleans. func TestSelect_Median_Boolean(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &BooleanIterator{}, nil } @@ -396,7 +435,10 @@ func TestSelect_Median_Boolean(t *testing.T) { // Ensure a SELECT mode() query can be executed. func TestSelect_Mode_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -433,7 +475,10 @@ func TestSelect_Mode_Float(t *testing.T) { // Ensure a SELECT mode() query can be executed. 
func TestSelect_Mode_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -470,7 +515,10 @@ func TestSelect_Mode_Integer(t *testing.T) { // Ensure a SELECT mode() query cannot be executed on strings. func TestSelect_Mode_String(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &StringIterator{Points: []influxql.StringPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: "a"}, @@ -503,7 +551,10 @@ func TestSelect_Mode_String(t *testing.T) { // Ensure a SELECT mode() query cannot be executed on booleans. 
func TestSelect_Mode_Boolean(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &BooleanIterator{Points: []influxql.BooleanPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: false}, @@ -535,7 +586,10 @@ func TestSelect_Mode_Boolean(t *testing.T) { // Ensure a SELECT top() query can be executed. func TestSelect_Top_NoTags_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -573,7 +627,10 @@ func TestSelect_Top_NoTags_Float(t *testing.T) { // Ensure a SELECT top() query can be executed. 
func TestSelect_Top_NoTags_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -611,7 +668,10 @@ func TestSelect_Top_NoTags_Integer(t *testing.T) { // Ensure a SELECT top() query can be executed with tags. func TestSelect_Top_Tags_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, @@ -659,7 +719,10 @@ func TestSelect_Top_Tags_Float(t *testing.T) { // Ensure a SELECT top() query can be executed with tags. 
func TestSelect_Top_Tags_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, @@ -707,7 +770,10 @@ func TestSelect_Top_Tags_Integer(t *testing.T) { // Ensure a SELECT top() query can be executed with tags and group by. func TestSelect_Top_GroupByTags_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, @@ -751,7 +817,10 @@ func TestSelect_Top_GroupByTags_Float(t *testing.T) { // Ensure a SELECT top() query can be executed with tags and group by. 
func TestSelect_Top_GroupByTags_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, @@ -795,7 +864,10 @@ func TestSelect_Top_GroupByTags_Integer(t *testing.T) { // Ensure a SELECT bottom() query can be executed. func TestSelect_Bottom_NoTags_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -833,7 +905,10 @@ func TestSelect_Bottom_NoTags_Float(t *testing.T) { // Ensure a SELECT bottom() query can be executed. 
func TestSelect_Bottom_NoTags_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -871,7 +946,10 @@ func TestSelect_Bottom_NoTags_Integer(t *testing.T) { // Ensure a SELECT bottom() query can be executed with tags. func TestSelect_Bottom_Tags_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, @@ -919,7 +997,10 @@ func TestSelect_Bottom_Tags_Float(t *testing.T) { // Ensure a SELECT bottom() query can be executed with tags. 
func TestSelect_Bottom_Tags_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, @@ -967,7 +1048,10 @@ func TestSelect_Bottom_Tags_Integer(t *testing.T) { // Ensure a SELECT bottom() query can be executed with tags and group by. func TestSelect_Bottom_GroupByTags_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, @@ -1011,7 +1095,10 @@ func TestSelect_Bottom_GroupByTags_Float(t *testing.T) { // Ensure a SELECT bottom() query can be executed with tags and group by. 
func TestSelect_Bottom_GroupByTags_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, @@ -1055,7 +1142,10 @@ func TestSelect_Bottom_GroupByTags_Integer(t *testing.T) { // Ensure a SELECT query with a fill(null) statement can be executed. func TestSelect_Fill_Null_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, }}, opt) @@ -1082,7 +1172,10 @@ func TestSelect_Fill_Null_Float(t *testing.T) { // Ensure a SELECT query with a fill() statement can be executed. 
func TestSelect_Fill_Number_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, }}, opt) @@ -1109,7 +1202,10 @@ func TestSelect_Fill_Number_Float(t *testing.T) { // Ensure a SELECT query with a fill(previous) statement can be executed. func TestSelect_Fill_Previous_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, }}, opt) @@ -1136,7 +1232,10 @@ func TestSelect_Fill_Previous_Float(t *testing.T) { // Ensure a SELECT query with a fill(linear) statement can be executed. 
func TestSelect_Fill_Linear_Float_One(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, @@ -1163,7 +1262,10 @@ func TestSelect_Fill_Linear_Float_One(t *testing.T) { func TestSelect_Fill_Linear_Float_Many(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 62 * Second, Value: 7}, @@ -1192,7 +1294,10 @@ func TestSelect_Fill_Linear_Float_Many(t *testing.T) { // Ensure a SELECT query with a fill(linear) statement can be executed for integers. 
func TestSelect_Fill_Linear_Integer_One(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, @@ -1219,7 +1324,10 @@ func TestSelect_Fill_Linear_Integer_One(t *testing.T) { func TestSelect_Fill_Linear_Integer_Many(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 72 * Second, Value: 10}, @@ -1249,7 +1357,10 @@ func TestSelect_Fill_Linear_Integer_Many(t *testing.T) { // Ensure a SELECT stddev() query can be executed. 
func TestSelect_Stddev_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -1286,7 +1397,10 @@ func TestSelect_Stddev_Float(t *testing.T) { // Ensure a SELECT stddev() query can be executed. func TestSelect_Stddev_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -1323,7 +1437,10 @@ func TestSelect_Stddev_Integer(t *testing.T) { // Ensure a SELECT spread() query can be executed. 
func TestSelect_Spread_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -1360,7 +1477,10 @@ func TestSelect_Spread_Float(t *testing.T) { // Ensure a SELECT spread() query can be executed. func TestSelect_Spread_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -1397,7 +1517,10 @@ func TestSelect_Spread_Integer(t *testing.T) { // Ensure a SELECT percentile() query can be executed. 
func TestSelect_Percentile_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -1439,7 +1562,10 @@ func TestSelect_Percentile_Float(t *testing.T) { // Ensure a SELECT percentile() query can be executed. func TestSelect_Percentile_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, @@ -1481,7 +1607,10 @@ func TestSelect_Percentile_Integer(t *testing.T) { // Ensure a SELECT sample() query can be executed. 
func TestSelect_Sample_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, @@ -1509,7 +1638,10 @@ func TestSelect_Sample_Float(t *testing.T) { // Ensure a SELECT sample() query can be executed. func TestSelect_Sample_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, @@ -1537,7 +1669,10 @@ func TestSelect_Sample_Integer(t *testing.T) { // Ensure a SELECT sample() query can be executed. 
func TestSelect_Sample_Boolean(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &BooleanIterator{Points: []influxql.BooleanPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: false}, @@ -1565,7 +1700,10 @@ func TestSelect_Sample_Boolean(t *testing.T) { // Ensure a SELECT sample() query can be executed. func TestSelect_Sample_String(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &StringIterator{Points: []influxql.StringPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: "b"}, @@ -1594,7 +1732,10 @@ func TestSelect_Sample_String(t *testing.T) { func TestSelect_Raw(t *testing.T) { // Mock two iterators -- one for each value in the query. 
var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } if !reflect.DeepEqual(opt.Aux, []influxql.VarRef{{Val: "v1", Type: influxql.Float}, {Val: "v2", Type: influxql.Float}}) { t.Fatalf("unexpected options: %s", spew.Sdump(opt.Expr)) @@ -1633,7 +1774,10 @@ func TestSelect_Raw(t *testing.T) { // Ensure a SELECT binary expr queries can be executed as floats. func TestSelect_BinaryExpr_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } makeAuxFields := func(value float64) []interface{} { aux := make([]interface{}, len(opt.Aux)) for i := range aux { @@ -1647,7 +1791,10 @@ func TestSelect_BinaryExpr_Float(t *testing.T) { {Name: "cpu", Time: 9 * Second, Value: 19, Aux: makeAuxFields(19)}, }}, nil } - ic.FieldDimensionsFn = func(sources influxql.Sources) (map[string]influxql.DataType, map[string]struct{}, error) { + ic.FieldDimensionsFn = func(m *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return map[string]influxql.DataType{"value": influxql.Float}, nil, nil } @@ -1856,7 +2003,10 @@ func TestSelect_BinaryExpr_Float(t *testing.T) { // Ensure a SELECT binary expr queries can be executed as integers. 
func TestSelect_BinaryExpr_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } makeAuxFields := func(value int64) []interface{} { aux := make([]interface{}, len(opt.Aux)) for i := range aux { @@ -1870,7 +2020,10 @@ func TestSelect_BinaryExpr_Integer(t *testing.T) { {Name: "cpu", Time: 9 * Second, Value: 19, Aux: makeAuxFields(19)}, }}, nil } - ic.FieldDimensionsFn = func(sources influxql.Sources) (map[string]influxql.DataType, map[string]struct{}, error) { + ic.FieldDimensionsFn = func(m *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return map[string]influxql.DataType{"value": influxql.Integer}, nil, nil } @@ -2079,14 +2232,20 @@ func TestSelect_BinaryExpr_Integer(t *testing.T) { // Ensure a SELECT binary expr queries can be executed on mixed iterators. 
func TestSelect_BinaryExpr_Mixed(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 0 * Second, Value: 20, Aux: []interface{}{float64(20), int64(10)}}, {Name: "cpu", Time: 5 * Second, Value: 10, Aux: []interface{}{float64(10), int64(15)}}, {Name: "cpu", Time: 9 * Second, Value: 19, Aux: []interface{}{float64(19), int64(5)}}, }}, nil } - ic.FieldDimensionsFn = func(sources influxql.Sources) (map[string]influxql.DataType, map[string]struct{}, error) { + ic.FieldDimensionsFn = func(m *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return map[string]influxql.DataType{ "total": influxql.Float, "value": influxql.Integer, @@ -2156,14 +2315,20 @@ func TestSelect_BinaryExpr_Mixed(t *testing.T) { // but not the other. 
func TestSelect_BinaryExpr_NilValues(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 0 * Second, Value: 20, Aux: []interface{}{float64(20), nil}}, {Name: "cpu", Time: 5 * Second, Value: 10, Aux: []interface{}{float64(10), float64(15)}}, {Name: "cpu", Time: 9 * Second, Value: 19, Aux: []interface{}{nil, float64(5)}}, }}, nil } - ic.FieldDimensionsFn = func(sources influxql.Sources) (map[string]influxql.DataType, map[string]struct{}, error) { + ic.FieldDimensionsFn = func(m *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return map[string]influxql.DataType{ "total": influxql.Float, "value": influxql.Float, @@ -2231,7 +2396,10 @@ func TestSelect_BinaryExpr_NilValues(t *testing.T) { // Ensure a SELECT (...) query can be executed. 
func TestSelect_ParenExpr(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } if !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value)`)) { t.Fatalf("unexpected expr: %s", spew.Sdump(opt.Expr)) } @@ -2261,7 +2429,10 @@ func TestSelect_ParenExpr(t *testing.T) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, @@ -2291,7 +2462,10 @@ func TestSelect_ParenExpr(t *testing.T) { func TestSelect_Derivative_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2317,7 +2491,10 @@ func TestSelect_Derivative_Float(t *testing.T) { func TestSelect_Derivative_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + 
t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2343,7 +2520,10 @@ func TestSelect_Derivative_Integer(t *testing.T) { func TestSelect_Derivative_Desc_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 12 * Second, Value: 3}, {Name: "cpu", Time: 8 * Second, Value: 19}, @@ -2369,7 +2549,10 @@ func TestSelect_Derivative_Desc_Float(t *testing.T) { func TestSelect_Derivative_Desc_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 12 * Second, Value: 3}, {Name: "cpu", Time: 8 * Second, Value: 19}, @@ -2395,7 +2578,10 @@ func TestSelect_Derivative_Desc_Integer(t *testing.T) { func TestSelect_Derivative_Duplicate_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 0 * Second, Value: 19}, @@ -2419,7 +2605,10 @@ func TestSelect_Derivative_Duplicate_Float(t *testing.T) { func 
TestSelect_Derivative_Duplicate_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 0 * Second, Value: 19}, @@ -2443,7 +2632,10 @@ func TestSelect_Derivative_Duplicate_Integer(t *testing.T) { func TestSelect_Difference_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2469,7 +2661,10 @@ func TestSelect_Difference_Float(t *testing.T) { func TestSelect_Difference_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2495,7 +2690,10 @@ func TestSelect_Difference_Integer(t *testing.T) { func TestSelect_Difference_Duplicate_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != 
"cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 0 * Second, Value: 19}, @@ -2519,7 +2717,10 @@ func TestSelect_Difference_Duplicate_Float(t *testing.T) { func TestSelect_Difference_Duplicate_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 0 * Second, Value: 19}, @@ -2543,7 +2744,10 @@ func TestSelect_Difference_Duplicate_Integer(t *testing.T) { func TestSelect_Elapsed_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2569,7 +2773,10 @@ func TestSelect_Elapsed_Float(t *testing.T) { func TestSelect_Elapsed_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2595,7 +2802,10 @@ func TestSelect_Elapsed_Integer(t *testing.T) { func 
TestSelect_Elapsed_String(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &StringIterator{Points: []influxql.StringPoint{ {Name: "cpu", Time: 0 * Second, Value: "a"}, {Name: "cpu", Time: 4 * Second, Value: "b"}, @@ -2621,7 +2831,10 @@ func TestSelect_Elapsed_String(t *testing.T) { func TestSelect_Elapsed_Boolean(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &BooleanIterator{Points: []influxql.BooleanPoint{ {Name: "cpu", Time: 0 * Second, Value: true}, {Name: "cpu", Time: 4 * Second, Value: false}, @@ -2647,7 +2860,10 @@ func TestSelect_Elapsed_Boolean(t *testing.T) { func TestSelect_MovingAverage_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2673,7 +2889,10 @@ func TestSelect_MovingAverage_Float(t *testing.T) { func TestSelect_MovingAverage_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + 
t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2699,7 +2918,10 @@ func TestSelect_MovingAverage_Integer(t *testing.T) { func TestSelect_CumulativeSum_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2726,7 +2948,10 @@ func TestSelect_CumulativeSum_Float(t *testing.T) { func TestSelect_CumulativeSum_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 4 * Second, Value: 10}, @@ -2753,7 +2978,10 @@ func TestSelect_CumulativeSum_Integer(t *testing.T) { func TestSelect_CumulativeSum_Duplicate_Float(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 0 * Second, Value: 19}, @@ -2780,7 +3008,10 @@ func TestSelect_CumulativeSum_Duplicate_Float(t *testing.T) { func 
TestSelect_CumulativeSum_Duplicate_Integer(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Time: 0 * Second, Value: 20}, {Name: "cpu", Time: 0 * Second, Value: 19}, @@ -2807,7 +3038,10 @@ func TestSelect_CumulativeSum_Duplicate_Integer(t *testing.T) { func TestSelect_HoltWinters_GroupBy_Agg(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Time: 10 * Second, Value: 4}, {Name: "cpu", Time: 11 * Second, Value: 6}, @@ -2842,7 +3076,10 @@ func TestSelect_HoltWinters_GroupBy_Agg(t *testing.T) { func TestSelect_UnsupportedCall(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{}, nil } @@ -2854,7 +3091,10 @@ func TestSelect_UnsupportedCall(t *testing.T) { func TestSelect_InvalidQueries(t *testing.T) { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { + if m.Name != "cpu" { + t.Fatalf("unexpected source: %s", m.Name) + } return &FloatIterator{}, nil } 
@@ -2907,7 +3147,7 @@ func benchmarkSelect(b *testing.B, stmt *influxql.SelectStatement, ic influxql.I // NewRawBenchmarkIteratorCreator returns a new mock iterator creator with generated fields. func NewRawBenchmarkIteratorCreator(pointN int) *IteratorCreator { var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { if opt.Expr != nil { panic("unexpected expression") } @@ -2939,7 +3179,7 @@ func benchmarkSelectDedupe(b *testing.B, seriesN, pointsPerSeries int) { stmt.Dedupe = true var ic IteratorCreator - ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + ic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) { if opt.Expr != nil { panic("unexpected expression") } diff --git a/internal/meta_client.go b/internal/meta_client.go index e7bac425ca..5faa605f76 100644 --- a/internal/meta_client.go +++ b/internal/meta_client.go @@ -34,16 +34,16 @@ type MetaClientMock struct { RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) - SetAdminPrivilegeFn func(username string, admin bool) error - SetDataFn func(*meta.Data) error - SetPrivilegeFn func(username, database string, p influxql.Privilege) error - ShardsByTimeRangeFn func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) - ShardOwnerFn func(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) - UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error - UpdateUserFn func(name, password string) error - UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) - UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) - UsersFn func() []meta.UserInfo + SetAdminPrivilegeFn func(username string, 
admin bool) error + SetDataFn func(*meta.Data) error + SetPrivilegeFn func(username, database string, p influxql.Privilege) error + ShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + ShardOwnerFn func(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) + UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error + UpdateUserFn func(name, password string) error + UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) + UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) + UsersFn func() []meta.UserInfo } func (c *MetaClientMock) Close() error { @@ -126,8 +126,8 @@ func (c *MetaClientMock) SetPrivilege(username, database string, p influxql.Priv return c.SetPrivilegeFn(username, database, p) } -func (c *MetaClientMock) ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { - return c.ShardsByTimeRangeFn(sources, tmin, tmax) +func (c *MetaClientMock) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { + return c.ShardGroupsByTimeRangeFn(database, policy, min, max) } func (c *MetaClientMock) ShardOwner(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) { diff --git a/services/meta/client.go b/services/meta/client.go index 59ca04ca27..5df83bd4ec 100644 --- a/services/meta/client.go +++ b/services/meta/client.go @@ -637,12 +637,7 @@ func (c *Client) ShardGroupsByTimeRange(database, policy string, min, max time.T // ShardsByTimeRange returns a slice of shards that may contain data in the time range. 
func (c *Client) ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []ShardInfo, err error) { m := make(map[*ShardInfo]struct{}) - for _, src := range sources { - mm, ok := src.(*influxql.Measurement) - if !ok { - return nil, fmt.Errorf("invalid source type: %#v", src) - } - + for _, mm := range sources.Measurements() { groups, err := c.ShardGroupsByTimeRange(mm.Database, mm.RetentionPolicy, tmin, tmax) if err != nil { return nil, err diff --git a/tsdb/engine.go b/tsdb/engine.go index 5ec979bce6..7d109fb8f2 100644 --- a/tsdb/engine.go +++ b/tsdb/engine.go @@ -34,7 +34,7 @@ type Engine interface { Backup(w io.Writer, basePath string, since time.Time) error Restore(r io.Reader, basePath string) error - CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) + CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) WritePoints(points []models.Point) error ContainsSeries(keys []string) (map[string]bool, error) DeleteSeries(keys []string) error diff --git a/tsdb/engine/tsm1/engine.go b/tsdb/engine/tsm1/engine.go index 37ba8ee2a3..3b0bed2a4b 100644 --- a/tsdb/engine/tsm1/engine.go +++ b/tsdb/engine/tsm1/engine.go @@ -1243,8 +1243,8 @@ func (e *Engine) KeyCursor(key string, t int64, ascending bool) *KeyCursor { return e.FileStore.KeyCursor(key, t, ascending) } -// CreateIterator returns an iterator based on opt. -func (e *Engine) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { +// CreateIterator returns an iterator for the measurement based on opt. 
+func (e *Engine) CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) { if call, ok := opt.Expr.(*influxql.Call); ok { refOpt := opt refOpt.Expr = call.Args[0].(*influxql.VarRef) @@ -1256,14 +1256,16 @@ func (e *Engine) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator aggregate = false refOpt.Limit = 1 refOpt.Ascending = true + refOpt.Ordered = true case "last": aggregate = false refOpt.Limit = 1 refOpt.Ascending = false + refOpt.Ordered = true } } - inputs, err := e.createVarRefIterator(refOpt, aggregate) + inputs, err := e.createVarRefIterator(measurement, refOpt, aggregate) if err != nil { return nil, err } else if len(inputs) == 0 { @@ -1286,7 +1288,7 @@ func (e *Engine) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator return influxql.NewParallelMergeIterator(inputs, opt, runtime.GOMAXPROCS(0)), nil } - itrs, err := e.createVarRefIterator(opt, false) + itrs, err := e.createVarRefIterator(measurement, opt, false) if err != nil { return nil, err } @@ -1301,35 +1303,43 @@ func (e *Engine) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator // createVarRefIterator creates an iterator for a variable reference. // The aggregate argument determines this is being created for an aggregate. // If this is an aggregate, the limit optimization is disabled temporarily. See #6661. -func (e *Engine) createVarRefIterator(opt influxql.IteratorOptions, aggregate bool) ([]influxql.Iterator, error) { +func (e *Engine) createVarRefIterator(measurement string, opt influxql.IteratorOptions, aggregate bool) ([]influxql.Iterator, error) { ref, _ := opt.Expr.(*influxql.VarRef) - var itrs []influxql.Iterator - if err := func() error { - mms := tsdb.Measurements(e.index.MeasurementsByName(influxql.Sources(opt.Sources).Names())) + mm := e.index.Measurement(measurement) + if mm == nil { + return nil, nil + } - for _, mm := range mms { - // Determine tagsets for this measurement based on dimensions and filters. 
- tagSets, err := mm.TagSets(e.id, opt.Dimensions, opt.Condition) + // Determine tagsets for this measurement based on dimensions and filters. + tagSets, err := mm.TagSets(e.id, opt.Dimensions, opt.Condition) + if err != nil { + return nil, err + } + + // Calculate tag sets and apply SLIMIT/SOFFSET. + tagSets = influxql.LimitTagSets(tagSets, opt.SLimit, opt.SOffset) + + itrs := make([]influxql.Iterator, 0, len(tagSets)) + if err := func() error { + for _, t := range tagSets { + inputs, err := e.createTagSetIterators(ref, mm, t, opt) if err != nil { return err + } else if len(inputs) == 0 { + continue + } + + itr, err := influxql.Iterators(inputs).Merge(opt) + if err != nil { + influxql.Iterators(inputs).Close() + return err } - // Calculate tag sets and apply SLIMIT/SOFFSET. - tagSets = influxql.LimitTagSets(tagSets, opt.SLimit, opt.SOffset) - - for _, t := range tagSets { - inputs, err := e.createTagSetIterators(ref, mm, t, opt) - if err != nil { - return err - } - - if !aggregate && len(inputs) > 0 && (opt.Limit > 0 || opt.Offset > 0) { - itrs = append(itrs, newLimitIterator(influxql.NewSortedMergeIterator(inputs, opt), opt)) - } else { - itrs = append(itrs, inputs...) - } + if !aggregate && len(inputs) > 0 && (opt.Limit > 0 || opt.Offset > 0) { + itr = newLimitIterator(itr, opt) } + itrs = append(itrs, itr) } return nil }(); err != nil { @@ -1526,7 +1536,8 @@ func (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, mm *tsdb.Measu condNames := influxql.VarRefs(conditionFields).Strings() // Limit tags to only the dimensions selected. - tags = tags.Subset(opt.Dimensions) + dimensions := opt.GetDimensions() + tags = tags.Subset(dimensions) // If it's only auxiliary fields then it doesn't matter what type of iterator we use. 
if ref == nil { diff --git a/tsdb/engine/tsm1/engine_test.go b/tsdb/engine/tsm1/engine_test.go index 0d7f61a636..e8d03c424c 100644 --- a/tsdb/engine/tsm1/engine_test.go +++ b/tsdb/engine/tsm1/engine_test.go @@ -234,10 +234,9 @@ func TestEngine_CreateIterator_Cache_Ascending(t *testing.T) { t.Fatalf("failed to write points: %s", err.Error()) } - itr, err := e.CreateIterator(influxql.IteratorOptions{ + itr, err := e.CreateIterator("cpu", influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Dimensions: []string{"host"}, - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, Ascending: true, @@ -289,10 +288,9 @@ func TestEngine_CreateIterator_Cache_Descending(t *testing.T) { t.Fatalf("failed to write points: %s", err.Error()) } - itr, err := e.CreateIterator(influxql.IteratorOptions{ + itr, err := e.CreateIterator("cpu", influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Dimensions: []string{"host"}, - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, Ascending: false, @@ -345,10 +343,9 @@ func TestEngine_CreateIterator_TSM_Ascending(t *testing.T) { } e.MustWriteSnapshot() - itr, err := e.CreateIterator(influxql.IteratorOptions{ + itr, err := e.CreateIterator("cpu", influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Dimensions: []string{"host"}, - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, Ascending: true, @@ -401,10 +398,9 @@ func TestEngine_CreateIterator_TSM_Descending(t *testing.T) { } e.MustWriteSnapshot() - itr, err := e.CreateIterator(influxql.IteratorOptions{ + itr, err := e.CreateIterator("cpu", influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Dimensions: []string{"host"}, - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, 
Ascending: false, @@ -459,11 +455,10 @@ func TestEngine_CreateIterator_Aux(t *testing.T) { t.Fatalf("failed to write points: %s", err.Error()) } - itr, err := e.CreateIterator(influxql.IteratorOptions{ + itr, err := e.CreateIterator("cpu", influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Aux: []influxql.VarRef{{Val: "F"}}, Dimensions: []string{"host"}, - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, Ascending: true, @@ -525,11 +520,10 @@ func TestEngine_CreateIterator_Condition(t *testing.T) { t.Fatalf("failed to write points: %s", err.Error()) } - itr, err := e.CreateIterator(influxql.IteratorOptions{ + itr, err := e.CreateIterator("cpu", influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Dimensions: []string{"host"}, Condition: influxql.MustParseExpr(`X = 10 OR Y > 150`), - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, Ascending: true, @@ -680,7 +674,6 @@ func BenchmarkEngine_CreateIterator_Count_1M(b *testing.B) { func benchmarkEngineCreateIteratorCount(b *testing.B, pointN int) { benchmarkIterator(b, influxql.IteratorOptions{ Expr: influxql.MustParseExpr("count(value)"), - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, @@ -700,7 +693,6 @@ func BenchmarkEngine_CreateIterator_First_1M(b *testing.B) { func benchmarkEngineCreateIteratorFirst(b *testing.B, pointN int) { benchmarkIterator(b, influxql.IteratorOptions{ Expr: influxql.MustParseExpr("first(value)"), - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, Dimensions: []string{"host"}, Ascending: true, StartTime: influxql.MinTime, @@ -721,7 +713,6 @@ func BenchmarkEngine_CreateIterator_Last_1M(b *testing.B) { func benchmarkEngineCreateIteratorLast(b *testing.B, pointN int) { benchmarkIterator(b, influxql.IteratorOptions{ Expr: 
influxql.MustParseExpr("last(value)"), - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, Dimensions: []string{"host"}, Ascending: true, StartTime: influxql.MinTime, @@ -849,7 +840,6 @@ func benchmarkEngine_WritePoints_Parallel(b *testing.B, batchSize int) { func benchmarkEngineCreateIteratorLimit(b *testing.B, pointN int) { benchmarkIterator(b, influxql.IteratorOptions{ Expr: influxql.MustParseExpr("value"), - Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, Dimensions: []string{"host"}, Ascending: true, StartTime: influxql.MinTime, @@ -864,7 +854,7 @@ func benchmarkIterator(b *testing.B, opt influxql.IteratorOptions, pointN int) { b.ReportAllocs() for i := 0; i < b.N; i++ { - itr, err := e.CreateIterator(opt) + itr, err := e.CreateIterator("cpu", opt) if err != nil { b.Fatal(err) } diff --git a/tsdb/shard.go b/tsdb/shard.go index 4ccf5d043d..802f826862 100644 --- a/tsdb/shard.go +++ b/tsdb/shard.go @@ -7,6 +7,7 @@ import ( "io" "os" "path/filepath" + "regexp" "sort" "strings" "sync" @@ -669,27 +670,20 @@ func (s *Shard) WriteTo(w io.Writer) (int64, error) { } // CreateIterator returns an iterator for the data in the shard. -func (s *Shard) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { +func (s *Shard) CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) { if err := s.ready(); err != nil { return nil, err } - if influxql.Sources(opt.Sources).HasSystemSource() { - return s.createSystemIterator(opt) + if strings.HasPrefix(measurement, "_") { + return s.createSystemIterator(measurement, opt) } - opt.Sources = influxql.Sources(opt.Sources).Filter(s.database, s.retentionPolicy) - return s.engine.CreateIterator(opt) + return s.engine.CreateIterator(measurement, opt) } // createSystemIterator returns an iterator for a system source. -func (s *Shard) createSystemIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { - // Only support a single system source. 
- if len(opt.Sources) > 1 { - return nil, errors.New("cannot select from multiple system sources") - } - - m := opt.Sources[0].(*influxql.Measurement) - switch m.Name { +func (s *Shard) createSystemIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) { + switch measurement { case "_fieldKeys": return NewFieldKeysIterator(s, opt) case "_series": @@ -697,71 +691,112 @@ func (s *Shard) createSystemIterator(opt influxql.IteratorOptions) (influxql.Ite case "_tagKeys": return NewTagKeysIterator(s, opt) default: - return nil, fmt.Errorf("unknown system source: %s", m.Name) + return nil, fmt.Errorf("unknown system source: %s", measurement) } } // FieldDimensions returns unique sets of fields and dimensions across a list of sources. -func (s *Shard) FieldDimensions(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { +func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { if err := s.ready(); err != nil { return nil, nil, err } - if sources.HasSystemSource() { - // Only support a single system source. - if len(sources) > 1 { - return nil, nil, errors.New("cannot select from multiple system sources") - } - - switch m := sources[0].(type) { - case *influxql.Measurement: - switch m.Name { - case "_fieldKeys": - return map[string]influxql.DataType{ - "fieldKey": influxql.String, - "fieldType": influxql.String, - }, nil, nil - case "_series": - return map[string]influxql.DataType{ - "key": influxql.String, - }, nil, nil - case "_tagKeys": - return map[string]influxql.DataType{ - "tagKey": influxql.String, - }, nil, nil - } - } - return nil, nil, nil - } - fields = make(map[string]influxql.DataType) dimensions = make(map[string]struct{}) - for _, src := range sources { - switch m := src.(type) { - case *influxql.Measurement: - // Retrieve measurement. 
- mm := s.index.Measurement(m.Name) - if mm == nil { - continue + for _, name := range measurements { + // Handle system sources. + if strings.HasPrefix(name, "_") { + var keys []string + switch name { + case "_fieldKeys": + keys = []string{"fieldKey", "fieldType"} + case "_series": + keys = []string{"key"} + case "_tagKeys": + keys = []string{"tagKey"} } - // Append fields and dimensions. - mf := s.engine.MeasurementFields(m.Name) - if mf != nil { - for name, typ := range mf.FieldSet() { - fields[name] = typ + for _, k := range keys { + if _, ok := fields[k]; !ok || influxql.String < fields[k] { + fields[k] = influxql.String } } - for _, key := range mm.TagKeys() { - dimensions[key] = struct{}{} + continue + } + + // Retrieve measurement. + mm := s.index.Measurement(name) + if mm == nil { + continue + } + + // Append fields and dimensions. + mf := s.engine.MeasurementFields(name) + if mf != nil { + for k, typ := range mf.FieldSet() { + if _, ok := fields[k]; !ok || typ < fields[k] { + fields[k] = typ + } } } + for _, key := range mm.TagKeys() { + dimensions[key] = struct{}{} + } } return } +func (s *Shard) MeasurementsByRegex(re *regexp.Regexp) []string { + mms := s.index.MeasurementsByRegex(re) + names := make([]string, len(mms)) + for i, mm := range mms { + names[i] = mm.Name + } + return names +} + +// MapType returns the data type for the field within the measurement. +func (s *Shard) MapType(measurement, field string) influxql.DataType { + // Process system measurements. 
+ if strings.HasPrefix(measurement, "_") { + switch measurement { + case "_fieldKeys": + if field == "fieldKey" || field == "fieldType" { + return influxql.String + } + case "_series": + if field == "key" { + return influxql.String + } + case "_tagKeys": + if field == "tagKey" { + return influxql.String + } + } + return influxql.Unknown + } + + mm := s.index.Measurement(measurement) + if mm == nil { + return influxql.Unknown + } + + mf := s.engine.MeasurementFields(measurement) + if mf != nil { + f := mf.Field(field) + if f != nil { + return f.Type + } + } + + if mm.HasTagKey(field) { + return influxql.Tag + } + return influxql.Unknown +} + // ExpandSources expands regex sources and removes duplicates. // NOTE: sources must be normalized (db and rp set) before calling this function. func (s *Shard) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { @@ -879,6 +914,14 @@ func (s *Shard) monitor() { } } +type ShardGroup interface { + MeasurementsByRegex(re *regexp.Regexp) []string + FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) + MapType(measurement, field string) influxql.DataType + CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) + ExpandSources(sources influxql.Sources) (influxql.Sources, error) +} + // Shards represents a sortable list of shards. type Shards []*Shard @@ -891,6 +934,119 @@ func (a Shards) Less(i, j int) bool { return a[i].id < a[j].id } // Swap implements sort.Interface. 
func (a Shards) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Shards) MeasurementsByRegex(re *regexp.Regexp) []string { + m := make(map[string]struct{}) + for _, sh := range a { + names := sh.MeasurementsByRegex(re) + for _, name := range names { + m[name] = struct{}{} + } + } + + if len(m) == 0 { + return nil + } + + names := make([]string, 0, len(m)) + for key := range m { + names = append(names, key) + } + sort.Strings(names) + return names +} + +func (a Shards) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + fields = make(map[string]influxql.DataType) + dimensions = make(map[string]struct{}) + + for _, sh := range a { + f, d, err := sh.FieldDimensions(measurements) + if err != nil { + return nil, nil, err + } + for k, typ := range f { + if _, ok := fields[k]; typ != influxql.Unknown && (!ok || typ < fields[k]) { + fields[k] = typ + } + } + for k := range d { + dimensions[k] = struct{}{} + } + } + return +} + +func (a Shards) MapType(measurement, field string) influxql.DataType { + var typ influxql.DataType + for _, sh := range a { + t := sh.MapType(measurement, field) + if typ == influxql.Unknown || (t != influxql.Unknown && t < typ) { + typ = t + } + } + return typ +} + +func (a Shards) CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) { + itrs := make([]influxql.Iterator, 0, len(a)) + for _, sh := range a { + itr, err := sh.CreateIterator(measurement, opt) + if err != nil { + influxql.Iterators(itrs).Close() + return nil, err + } else if itr == nil { + continue + } + itrs = append(itrs, itr) + + // Enforce series limit at creation time. 
+ if opt.MaxSeriesN > 0 { + stats := itr.Stats() + if stats.SeriesN > opt.MaxSeriesN { + influxql.Iterators(itrs).Close() + return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", stats.SeriesN, opt.MaxSeriesN) + } + } + } + return influxql.Iterators(itrs).Merge(opt) +} + +func (a Shards) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { + // Use a map as a set to prevent duplicates. + set := map[string]influxql.Source{} + + // Iterate through every shard and expand the sources. + for _, sh := range a { + expanded, err := sh.ExpandSources(sources) + if err != nil { + return nil, err + } + + for _, src := range expanded { + switch src := src.(type) { + case *influxql.Measurement: + set[src.String()] = src + default: + return nil, fmt.Errorf("Store.ExpandSources: unsupported source type: %T", src) + } + } + } + + // Convert set to sorted slice. + names := make([]string, 0, len(set)) + for name := range set { + names = append(names, name) + } + sort.Strings(names) + + // Convert set to a list of Sources. + sorted := make([]influxql.Source, 0, len(set)) + for _, name := range names { + sorted = append(sorted, set[name]) + } + return sorted, nil +} + // MeasurementFields holds the fields of a measurement and their codec. type MeasurementFields struct { mu sync.RWMutex @@ -1006,41 +1162,6 @@ type Field struct { Type influxql.DataType `json:"type,omitempty"` } -// shardIteratorCreator creates iterators for a local shard. -// This simply wraps the shard so that Close() does not close the underlying shard. -type shardIteratorCreator struct { - sh *Shard - maxSeriesN int -} - -func (ic *shardIteratorCreator) Close() error { return nil } - -func (ic *shardIteratorCreator) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { - itr, err := ic.sh.CreateIterator(opt) - if err != nil { - return nil, err - } else if itr == nil { - return nil, nil - } - - // Enforce series limit at creation time. 
- if ic.maxSeriesN > 0 { - stats := itr.Stats() - if stats.SeriesN > ic.maxSeriesN { - itr.Close() - return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", stats.SeriesN, ic.maxSeriesN) - } - } - - return itr, nil -} -func (ic *shardIteratorCreator) FieldDimensions(sources influxql.Sources) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - return ic.sh.FieldDimensions(sources) -} -func (ic *shardIteratorCreator) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { - return ic.sh.ExpandSources(sources) -} - // NewFieldKeysIterator returns an iterator that can be iterated over to // retrieve field keys. func NewFieldKeysIterator(sh *Shard, opt influxql.IteratorOptions) (influxql.Iterator, error) { diff --git a/tsdb/shard_test.go b/tsdb/shard_test.go index 61d4732fa2..341659f616 100644 --- a/tsdb/shard_test.go +++ b/tsdb/shard_test.go @@ -485,7 +485,7 @@ func TestShard_CreateIterator_Ascending(t *testing.T) { // Calling CreateIterator when the engine is not open will return // ErrEngineClosed. - _, got := sh.CreateIterator(influxql.IteratorOptions{}) + _, got := sh.CreateIterator("cpu", influxql.IteratorOptions{}) if exp := tsdb.ErrEngineClosed; got != exp { t.Fatalf("got %v, expected %v", got, exp) } @@ -502,18 +502,13 @@ cpu,host=serverB,region=uswest value=25 0 `) // Create iterator. 
- itr, err := sh.CreateIterator(influxql.IteratorOptions{ + itr, err := sh.CreateIterator("cpu", influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Aux: []influxql.VarRef{{Val: "val2"}}, Dimensions: []string{"host"}, - Sources: []influxql.Source{&influxql.Measurement{ - Name: "cpu", - Database: "db0", - RetentionPolicy: "rp0", - }}, - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, + Ascending: true, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, }) if err != nil { t.Fatal(err) @@ -565,7 +560,7 @@ func TestShard_CreateIterator_Descending(t *testing.T) { // Calling CreateIterator when the engine is not open will return // ErrEngineClosed. - _, got := sh.CreateIterator(influxql.IteratorOptions{}) + _, got := sh.CreateIterator("cpu", influxql.IteratorOptions{}) if exp := tsdb.ErrEngineClosed; got != exp { t.Fatalf("got %v, expected %v", got, exp) } @@ -582,18 +577,13 @@ cpu,host=serverB,region=uswest value=25 0 `) // Create iterator. - itr, err := sh.CreateIterator(influxql.IteratorOptions{ + itr, err := sh.CreateIterator("cpu", influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Aux: []influxql.VarRef{{Val: "val2"}}, Dimensions: []string{"host"}, - Sources: []influxql.Source{&influxql.Measurement{ - Name: "cpu", - Database: "db0", - RetentionPolicy: "rp0", - }}, - Ascending: false, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, + Ascending: false, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, }) if err != nil { t.Fatal(err) @@ -663,7 +653,7 @@ func TestShard_Disabled_WriteQuery(t *testing.T) { t.Fatalf(err.Error()) } - _, got := sh.CreateIterator(influxql.IteratorOptions{}) + _, got := sh.CreateIterator("cpu", influxql.IteratorOptions{}) if err == nil { t.Fatalf("expected shard disabled error") } @@ -678,11 +668,70 @@ func TestShard_Disabled_WriteQuery(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - if _, err = sh.CreateIterator(influxql.IteratorOptions{}); err 
!= nil { + if _, err = sh.CreateIterator("cpu", influxql.IteratorOptions{}); err != nil { t.Fatalf("unexpected error: %v", got) } } +func TestShard_FieldDimensions(t *testing.T) { + sh := NewShard() + + if err := sh.Open(); err != nil { + t.Fatal(err) + } + defer sh.Close() + + sh.MustWritePointsString(` +cpu,host=serverA,region=uswest value=100 0 +cpu,host=serverA,region=uswest value=50,val2=5 10 +cpu,host=serverB,region=uswest value=25 0 +mem,host=serverA value=25i 0 +mem,host=serverB value=50i,val3=t 10 +`) + + for i, tt := range []struct { + sources []string + f map[string]influxql.DataType + d map[string]struct{} + }{ + { + sources: []string{"cpu"}, + f: map[string]influxql.DataType{ + "value": influxql.Float, + "val2": influxql.Float, + }, + d: map[string]struct{}{ + "host": struct{}{}, + "region": struct{}{}, + }, + }, + { + sources: []string{"cpu", "mem"}, + f: map[string]influxql.DataType{ + "value": influxql.Float, + "val2": influxql.Float, + "val3": influxql.Boolean, + }, + d: map[string]struct{}{ + "host": struct{}{}, + "region": struct{}{}, + }, + }, + } { + f, d, err := sh.FieldDimensions(tt.sources) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !reflect.DeepEqual(f, tt.f) { + t.Errorf("%d. unexpected fields:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.f, f) + } + if !reflect.DeepEqual(d, tt.d) { + t.Errorf("%d. unexpected dimensions:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.d, d) + } + } +} + func BenchmarkWritePoints_NewSeries_1K(b *testing.B) { benchmarkWritePoints(b, 38, 3, 3, 1) } func BenchmarkWritePoints_NewSeries_100K(b *testing.B) { benchmarkWritePoints(b, 32, 5, 5, 1) } func BenchmarkWritePoints_NewSeries_250K(b *testing.B) { benchmarkWritePoints(b, 80, 5, 5, 1) } diff --git a/tsdb/store.go b/tsdb/store.go index 5b745c4653..b3e1b38317 100644 --- a/tsdb/store.go +++ b/tsdb/store.go @@ -272,6 +272,11 @@ func (s *Store) Shards(ids []uint64) []*Shard { return a } +// ShardGroup returns a ShardGroup with a list of shards by id. 
+func (s *Store) ShardGroup(ids []uint64) ShardGroup { + return Shards(s.Shards(ids)) +} + // ShardN returns the number of shards in the store. func (s *Store) ShardN() int { s.mu.RLock() @@ -379,18 +384,6 @@ func (s *Store) DeleteShard(shardID uint64) error { return nil } -// ShardIteratorCreator returns an iterator creator for a shard. -func (s *Store) ShardIteratorCreator(id uint64, opt *influxql.SelectOptions) influxql.IteratorCreator { - sh := s.Shard(id) - if sh == nil { - return nil - } - return &shardIteratorCreator{ - sh: sh, - maxSeriesN: opt.MaxSeriesN, - } -} - // DeleteDatabase will close all shards associated with a database and remove the directory and files from disk. func (s *Store) DeleteDatabase(name string) error { s.mu.RLock() @@ -769,41 +762,12 @@ func (s *Store) deleteSeries(database string, seriesKeys []string, min, max int6 // ExpandSources expands sources against all local shards. func (s *Store) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { - return s.IteratorCreators().ExpandSources(sources) -} - -// IteratorCreators returns a set of all local shards as iterator creators. -func (s *Store) IteratorCreators() influxql.IteratorCreators { - s.mu.RLock() - defer s.mu.RUnlock() - - a := make(influxql.IteratorCreators, 0, len(s.shards)) - for _, sh := range s.shards { - a = append(a, sh) - } - return a -} - -// IteratorCreator returns an iterator creator for all shards in the given shard IDs. -func (s *Store) IteratorCreator(shards []uint64, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) { - // Generate iterators for each node. 
- ics := make([]influxql.IteratorCreator, 0) - if err := func() error { - for _, id := range shards { - ic := s.ShardIteratorCreator(id, opt) - if ic == nil { - continue - } - ics = append(ics, ic) - } - - return nil - }(); err != nil { - influxql.IteratorCreators(ics).Close() - return nil, err - } - - return influxql.IteratorCreators(ics), nil + shards := func() Shards { + s.mu.RLock() + defer s.mu.RUnlock() + return Shards(s.shardsSlice()) + }() + return shards.ExpandSources(sources) } // WriteToShard writes a list of points to a shard identified by its ID. diff --git a/tsdb/store_test.go b/tsdb/store_test.go index 792fa79c29..deaabdaf0f 100644 --- a/tsdb/store_test.go +++ b/tsdb/store_test.go @@ -239,25 +239,16 @@ func TestShards_CreateIterator(t *testing.T) { `cpu,host=serverC value=3 60`, ) - // Retrieve shards and convert to iterator creators. - shards := s.Shards([]uint64{0, 1}) - ics := make(influxql.IteratorCreators, len(shards)) - for i := range ics { - ics[i] = shards[i] - } + // Retrieve shard group. + shards := s.ShardGroup([]uint64{0, 1}) // Create iterator. 
- itr, err := ics.CreateIterator(influxql.IteratorOptions{ + itr, err := shards.CreateIterator("cpu", influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Dimensions: []string{"host"}, - Sources: []influxql.Source{&influxql.Measurement{ - Name: "cpu", - Database: "db0", - RetentionPolicy: "rp0", - }}, - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, + Ascending: true, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, }) if err != nil { t.Fatal(err) @@ -332,13 +323,8 @@ func TestStore_BackupRestoreShard(t *testing.T) { } // Read data from - itr, err := s1.Shard(100).CreateIterator(influxql.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Sources: []influxql.Source{&influxql.Measurement{ - Name: "cpu", - Database: "db0", - RetentionPolicy: "rp0", - }}, + itr, err := s1.Shard(100).CreateIterator("cpu", influxql.IteratorOptions{ + Expr: influxql.MustParseExpr(`value`), Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime,