refactor of limit/offset

parent 56281fbfd5
commit a6171b3382
@@ -218,6 +218,15 @@ func queryAndWait(t *testing.T, nodes Cluster, urlDb, q, expected string, timeout
 		timer = time.NewTimer(time.Duration(math.MaxInt64))
 	)
 	defer timer.Stop()
+
+	// Check to see if they set the env for duration sleep
+	sleep := 10 * time.Millisecond
+	if d, e := time.ParseDuration(os.Getenv("TEST_SLEEP")); e == nil {
+		// this will limit the http log noise in the test output
+		sleep = d
+		timeout = d + 1
+	}
+
 	if timeout > 0 {
 		timer.Reset(time.Duration(timeout))
 		go func() {
@@ -232,7 +241,7 @@ func queryAndWait(t *testing.T, nodes Cluster, urlDb, q, expected string, timeout
 		} else if atomic.LoadInt32(&timedOut) == 1 {
 			return got, false
 		} else {
-			time.Sleep(10 * time.Millisecond)
+			time.Sleep(sleep)
 		}
 	}
 }
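The two hunks above make the test helper's polling loop tunable: TEST_SLEEP overrides both the per-iteration sleep and the overall timeout (note that `timeout = d + 1` adds a single nanosecond, since the untyped constant converts to 1ns). A self-contained sketch of the same pattern; the pollUntil helper and its signature are illustrative, not from the repo:

package main

import (
	"fmt"
	"os"
	"time"
)

// pollUntil retries cond every sleep interval until it returns true or the
// timeout elapses. As in the commit, TEST_SLEEP (e.g. "250ms") overrides the
// interval, and the timeout is bumped to just past one interval.
func pollUntil(cond func() bool, sleep, timeout time.Duration) bool {
	if d, err := time.ParseDuration(os.Getenv("TEST_SLEEP")); err == nil {
		sleep = d
		timeout = d + 1 // matches the diff; the untyped 1 is one nanosecond
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(sleep)
	}
	return false
}

func main() {
	start := time.Now()
	ok := pollUntil(func() bool { return time.Since(start) > 50*time.Millisecond },
		10*time.Millisecond, time.Second)
	fmt.Println("condition met:", ok)
}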
@@ -270,7 +279,7 @@ func runTests_Errors(t *testing.T, nodes Cluster) {
 }
 
 // runTests tests write and query of data. Setting testNumbers allows only a subset of tests to be run.
-func runTestsData(t *testing.T, testName string, nodes Cluster, database, retention string, testNums ...int) {
+func runTestsData(t *testing.T, testName string, nodes Cluster, database, retention string) {
 	t.Logf("Running tests against %d-node cluster", len(nodes))
 
 	// Start by ensuring database and retention policy exist.
@@ -574,12 +583,12 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retention string
 			expected: `{"results":[{"series":[{"name":"limit","columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`,
 		},
 		{
-			name:     "limit + offset higher than number of points",
+			name:     "limit + offset equal to total number of points",
 			query:    `select foo from "%DB%"."%RP%".limit LIMIT 3 OFFSET 3`,
 			expected: `{"results":[{"series":[{"name":"limit","columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`,
 		},
 		{
-			name:     "offset higher than number of points",
+			name:     "limit - offset higher than number of points",
 			query:    `select foo from "%DB%"."%RP%".limit LIMIT 2 OFFSET 20`,
 			expected: `{"results":[{"series":[{"name":"limit","columns":["time","foo"]}]}]}`,
 		},
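The renamed cases distinguish two boundary conditions: an offset landing at the tail of the series, and an offset beyond it, which returns the series header with no values. A reduced sketch of skip-then-cap slicing, one common reading of LIMIT/OFFSET; illustrative only, not the engine's code, and the sample data is made up:

package main

import "fmt"

// applyLimitOffset mirrors the semantics the tests assert: skip offset rows,
// then keep at most limit rows; an offset at or beyond the end leaves nothing.
func applyLimitOffset(points []int, limit, offset int) []int {
	if offset >= len(points) {
		return nil // e.g. LIMIT 2 OFFSET 20 over a handful of points
	}
	points = points[offset:]
	if limit > 0 && limit < len(points) {
		points = points[:limit]
	}
	return points
}

func main() {
	pts := []int{0, 1, 2, 3, 4, 5}
	fmt.Println(applyLimitOffset(pts, 2, 3))  // [3 4]
	fmt.Println(applyLimitOffset(pts, 2, 20)) // []
}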
@@ -599,14 +608,14 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retention string
 			expected: `{"results":[{"series":[{"name":"limit","columns":["time","mean"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`,
 		},
 		{
-			name:     "limit + offset higher than number of points with group by time",
+			name:     "limit + offset equal to the number of points with group by time",
 			query:    `select mean(foo) from "%DB%"."%RP%".limit WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY time(1s) LIMIT 3 OFFSET 3`,
 			expected: `{"results":[{"series":[{"name":"limit","columns":["time","mean"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`,
 		},
 		{
-			name:     "offset higher than number of points with group by time",
+			name:     "limit - offset higher than number of points with group by time",
 			query:    `select mean(foo) from "%DB%"."%RP%".limit WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY time(1s) LIMIT 2 OFFSET 20`,
-			expected: `{"results":[{"series":[{"name":"limit","columns":["time","mean"]}]}]}`,
+			expected: `{"results":[{}]}`,
 		},
 
 		// Fill tests
@@ -952,25 +961,23 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retention string
 		},
 	}
 
+	// See if we should run a subset of this test
+	testPrefix := os.Getenv("TEST_PREFIX")
+	if testPrefix != "" {
+		t.Logf("Skipping all tests that do not match the prefix of %q\n", testPrefix)
+	}
+
 	for i, tt := range tests {
-		// If tests were explicitly requested, only run those tests.
-		if len(testNums) > 0 {
-			var found bool
-			for _, t := range testNums {
-				if i == t {
-					found = true
-					break
-				}
-			}
-			if !found {
-				continue
-			}
-		}
-
 		name := tt.name
 		if name == "" {
 			name = tt.query
 		}
 
+		if testPrefix != "" && !strings.HasPrefix(name, testPrefix) {
+			continue
+		}
+
-		fmt.Printf("TEST: %d: %s\n", i, name)
+		t.Logf("Running test %d: %s", i, name)
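Index-based selection (the removed testNums variadic) is replaced by name-prefix filtering driven by an environment variable, which survives test reordering. The shape of that filter, reduced to a runnable sketch; the testCase type and the sample entries are illustrative:

package main

import (
	"fmt"
	"os"
	"strings"
)

type testCase struct {
	name  string
	query string
}

func main() {
	tests := []testCase{
		{name: "limit + offset equal to total number of points", query: `select foo from db.rp.limit LIMIT 3 OFFSET 3`},
		{name: "fill with 0", query: `select mean(foo) from db.rp.limit GROUP BY time(1s) fill(0)`},
	}

	// e.g. TEST_PREFIX=limit runs only tests whose name starts with "limit".
	prefix := os.Getenv("TEST_PREFIX")
	for i, tt := range tests {
		if prefix != "" && !strings.HasPrefix(tt.name, prefix) {
			continue // skipped, exactly like the loop in the diff
		}
		fmt.Printf("running test %d: %s\n", i, tt.name)
	}
}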
@@ -6,6 +6,8 @@ import (
 	"hash/fnv"
 	"sort"
 	"time"
+
+	"github.com/davecgh/go-spew/spew"
 )
 
 // DB represents an interface for creating transactions.
@@ -103,14 +105,9 @@ func (m *MapReduceJob) Execute(out chan *Row, filterEmptyResults bool) {
 
 	// For group by time queries, limit the number of data points returned by the limit and offset
 	// raw query limits are handled elsewhere
 	warn("> ", m.stmt.IsRawQuery, pointCountInResult)
 	if !m.stmt.IsRawQuery && (m.stmt.Limit > 0 || m.stmt.Offset > 0) {
 		// ensure that the offset isn't higher than the number of points we'd get
 		if m.stmt.Offset > pointCountInResult {
 			out <- &Row{
 				Name: m.MeasurementName,
 				Tags: m.TagSet.Tags,
 			}
 			return
 		}
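This is the guard behind the empty group-by results asserted in the tests above: when OFFSET points past every bucket a non-raw query could return, Execute short-circuits with a bare row carrying just the measurement name and tags. Reduced to a standalone sketch; row is a stand-in for the engine's Row type and the function name is invented:

package main

import "fmt"

// row stands in for the engine's Row type (illustrative only).
type row struct {
	Name string
	Tags map[string]string
}

// offsetPastEnd mirrors the guard in MapReduceJob.Execute: for a non-raw
// (group by time) query with a LIMIT or OFFSET, an OFFSET beyond the number
// of points short-circuits with a bare row carrying only name and tags.
func offsetPastEnd(out chan<- *row, name string, tags map[string]string,
	pointCountInResult, limit, offset int, isRawQuery bool) bool {
	if !isRawQuery && (limit > 0 || offset > 0) {
		if offset > pointCountInResult {
			out <- &row{Name: name, Tags: tags}
			return true
		}
	}
	return false
}

func main() {
	out := make(chan *row, 1)
	// 4 one-second buckets in the window, but OFFSET 20: nothing to return.
	if offsetPastEnd(out, "limit", nil, 4, 2, 20, false) {
		fmt.Printf("%+v\n", <-out) // &{Name:limit Tags:map[]}
	}
}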
@@ -120,7 +117,6 @@ func (m *MapReduceJob) Execute(out chan *Row, filterEmptyResults bool) {
 			pointCountInResult = m.stmt.Limit
 		}
 	}
-	warn("< ", m.stmt.Limit)
 
 	// If we are exceeding our MaxGroupByPoints and we aren't a raw query, error out
 	if !m.stmt.IsRawQuery && pointCountInResult > MaxGroupByPoints {
@@ -140,17 +136,28 @@ func (m *MapReduceJob) Execute(out chan *Row, filterEmptyResults bool) {
 	startTimeBucket := m.TMin / m.interval * m.interval
 
 	for i, _ := range resultTimes {
-		t := startTimeBucket + (int64(i+1) * m.interval * int64(m.stmt.Offset+1)) - m.interval
+		var t int64
+		if m.stmt.Offset > 0 {
+			t = startTimeBucket + (int64(i+1) * m.interval * int64(m.stmt.Offset))
+		} else {
+			t = startTimeBucket + (int64(i+1) * m.interval) - m.interval
+		}
+
 		// If we start getting out of our max time range, then truncate values and return
 		if t > m.TMax && !isRaw {
 			resultValues = resultValues[:i]
 			break
 		}
 		resultTimes[i] = t
 		// we always include time so we need one more column than we have aggregates
 		vals := make([]interface{}, 0, len(aggregates)+1)
 		resultValues[i] = append(vals, time.Unix(0, t).UTC())
 	}
+	spew.Dump(resultValues)
 
 	// This just makes sure that if they specify a start time less than what the start time would be with the offset,
 	// we just reset the start time to the later time to avoid going over data that won't show up in the result.
+	if m.stmt.Offset > 0 && !m.stmt.IsRawQuery {
+		warn(". setting tmin: ", resultTimes[0])
+		m.TMin = resultTimes[0]
+	}
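Evaluating the old and new bucket-timestamp expressions side by side shows what changed: with a nonzero offset, the old formula advanced by (Offset+1)*interval per bucket and then pulled back one interval, while the new branch advances by Offset*interval with no pull-back (for Offset of zero the two reduce to the same i*interval). A by-hand check under assumed values: an interval of one second, startTimeBucket of zero, OFFSET 3, all illustrative:

package main

import "fmt"

func main() {
	const interval = int64(1e9) // assume a 1s GROUP BY interval, in nanoseconds
	start := int64(0)           // assume startTimeBucket = 0
	offset := 3                 // OFFSET 3

	for i := 0; i < 3; i++ {
		// old: t := start + (int64(i+1) * interval * int64(offset+1)) - interval
		oldT := start + (int64(i+1) * interval * int64(offset+1)) - interval
		// new (Offset > 0 branch): t = start + (int64(i+1) * interval * int64(offset))
		newT := start + (int64(i+1) * interval * int64(offset))
		fmt.Printf("i=%d  old=%ds  new=%ds\n", i, oldT/int64(1e9), newT/int64(1e9))
	}
	// i=0  old=3s   new=3s
	// i=1  old=7s   new=6s
	// i=2  old=11s  new=9s
}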
@@ -61,6 +61,7 @@ func TestParser_ParseStatement(t *testing.T) {
 		{
 			s: `SELECT * FROM myseries`,
 			stmt: &influxql.SelectStatement{
+				IsRawQuery: true,
 				Fields: []*influxql.Field{
 					{Expr: &influxql.Wildcard{}},
 				},
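This hunk, and the matching one-line additions in the later hunks, assert the new IsRawQuery flag on every SELECT without aggregates; it is the same flag the engine branches on above (`!m.stmt.IsRawQuery`). What "raw" means can be sketched with stand-in types; this illustrates the concept as the tests imply it, not the parser's actual code:

package main

import "fmt"

// Stand-ins for the influxql AST node types (illustrative only).
type Expr interface{}
type VarRef struct{ Val string }
type Wildcard struct{}
type Call struct {
	Name string
	Args []Expr
}

// isRawQuery reports whether a SELECT projects only plain fields or
// wildcards, i.e. contains no aggregate calls such as mean(...).
func isRawQuery(fields []Expr) bool {
	for _, f := range fields {
		if _, ok := f.(*Call); ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isRawQuery([]Expr{&Wildcard{}}))                                         // true:  SELECT * FROM myseries
	fmt.Println(isRawQuery([]Expr{&Call{Name: "mean", Args: []Expr{&VarRef{"value"}}}})) // false: SELECT mean(value) ...
}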
@@ -70,7 +71,7 @@ func TestParser_ParseStatement(t *testing.T) {
 
 		// SELECT statement
 		{
-			s: `SELECT field1, field2 ,field3 AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' GROUP BY 10h ORDER BY ASC LIMIT 20 OFFSET 10;`,
+			s: `SELECT field1, field2 ,field3 AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' GROUP BY time(10h) ORDER BY ASC LIMIT 20 OFFSET 10;`,
 			stmt: &influxql.SelectStatement{
 				Fields: []*influxql.Field{
 					{Expr: &influxql.VarRef{Val: "field1"}},

@@ -83,9 +84,7 @@ func TestParser_ParseStatement(t *testing.T) {
 					LHS: &influxql.VarRef{Val: "host"},
 					RHS: &influxql.StringLiteral{Val: "hosta.influxdb.org"},
 				},
-				Dimensions: []*influxql.Dimension{
-					{Expr: &influxql.DurationLiteral{Val: 10 * time.Hour}},
-				},
+				Dimensions: influxql.Dimensions{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 10 * time.Hour}}}}},
 				SortFields: []*influxql.SortField{
 					{Ascending: true},
 				},
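These two hunks move the test grammar from a bare duration (`GROUP BY 10h`) to an explicit `time(...)` call, so the dimension's AST node becomes a Call wrapping the DurationLiteral rather than the literal itself. The before/after shapes, with simplified stand-in types rather than the real influxql package:

package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the influxql AST node types (illustrative only).
type Expr interface{}
type DurationLiteral struct{ Val time.Duration }
type Call struct {
	Name string
	Args []Expr
}
type Dimension struct{ Expr Expr }

func main() {
	// Before: `GROUP BY 10h` parsed to a bare duration dimension.
	before := Dimension{Expr: &DurationLiteral{Val: 10 * time.Hour}}

	// After: `GROUP BY time(10h)` parses to a time() call wrapping the duration.
	after := Dimension{Expr: &Call{
		Name: "time",
		Args: []Expr{&DurationLiteral{Val: 10 * time.Hour}},
	}}

	fmt.Printf("before: %#v\nafter:  %#v\n", before.Expr, after.Expr)
}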
@@ -98,6 +97,7 @@ func TestParser_ParseStatement(t *testing.T) {
 		{
 			s: `select my_field from myseries`,
 			stmt: &influxql.SelectStatement{
+				IsRawQuery: true,
 				Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "my_field"}}},
 				Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
 			},

@@ -107,6 +107,7 @@ func TestParser_ParseStatement(t *testing.T) {
 		{
 			s: `SELECT field1 FROM myseries ORDER BY ASC, field1, field2 DESC LIMIT 10`,
 			stmt: &influxql.SelectStatement{
+				IsRawQuery: true,
 				Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "field1"}}},
 				Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
 				SortFields: []*influxql.SortField{

@@ -122,6 +123,7 @@ func TestParser_ParseStatement(t *testing.T) {
 		{
 			s: `SELECT field1 FROM myseries SLIMIT 10 SOFFSET 5`,
 			stmt: &influxql.SelectStatement{
+				IsRawQuery: true,
 				Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "field1"}}},
 				Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}},
 				SLimit: 10,

@@ -133,6 +135,7 @@ func TestParser_ParseStatement(t *testing.T) {
 		{
 			s: `SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/`,
 			stmt: &influxql.SelectStatement{
+				IsRawQuery: true,
 				Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
 				Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
 				Condition: &influxql.BinaryExpr{

@@ -155,6 +158,7 @@ func TestParser_ParseStatement(t *testing.T) {
 		{
 			s: `SELECT * FROM /cpu.*/`,
 			stmt: &influxql.SelectStatement{
+				IsRawQuery: true,
 				Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}},
 				Sources: []influxql.Source{&influxql.Measurement{
 					Regex: &influxql.RegexLiteral{Val: regexp.MustCompile("cpu.*")}}},
@@ -170,14 +174,7 @@ func TestParser_ParseStatement(t *testing.T) {
 				Name: "mean",
 				Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
 			Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
-			Dimensions: []*influxql.Dimension{
-				{Expr: &influxql.Call{
-					Name: "time",
-					Args: []influxql.Expr{
-						&influxql.DurationLiteral{Val: 5 * time.Minute},
-					},
-				}},
-			},
+			Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},
 			Fill:      influxql.NumberFill,
 			FillValue: float64(1),
 		},

@@ -192,14 +189,7 @@ func TestParser_ParseStatement(t *testing.T) {
 				Name: "mean",
 				Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
 			Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
-			Dimensions: []*influxql.Dimension{
-				{Expr: &influxql.Call{
-					Name: "time",
-					Args: []influxql.Expr{
-						&influxql.DurationLiteral{Val: 5 * time.Minute},
-					},
-				}},
-			},
+			Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},
 			Fill: influxql.PreviousFill,
 		},
 	},