Add Slimit and Soffset. Update tests and behavior to use those.
parent
0f98a605e2
commit
8d2abc5c86
|
@ -97,8 +97,8 @@ INNER INSERT INTO KEY KEYS LIMIT
|
|||
SHOW MEASUREMENT MEASUREMENTS OFFSET ON ORDER
|
||||
PASSWORD POLICY POLICIES PRIVILEGES QUERIES QUERY
|
||||
READ REPLICATION RETENTION REVOKE SELECT SERIES
|
||||
TAG TO USER USERS VALUES WHERE
|
||||
WITH WRITE
|
||||
SLIMIT SOFFSET TAG TO USER USERS
|
||||
VALUES WHERE WITH WRITE
|
||||
```
|
||||
|
||||
## Literals
|
||||
|
@ -569,7 +569,7 @@ REVOKE READ ON mydb FROM jdoe;
|
|||
```
|
||||
select_stmt = fields from_clause [ into_clause ] [ where_clause ]
|
||||
[ group_by_clause ] [ order_by_clause ] [ limit_clause ]
|
||||
[ offset_clause ] .
|
||||
[ offset_clause ] [ slimit_clause ] [ soffset_clause ] .
|
||||
```
|
||||
|
||||
#### Examples:
|
||||
|
@ -590,6 +590,10 @@ limit_clause = "LIMIT" int_lit .
|
|||
|
||||
offset_clause = "OFFSET" int_lit .
|
||||
|
||||
slimit_clause = "SLIMIT" int_lit .
|
||||
|
||||
soffset_clause = "SOFFSET" int_lit .
|
||||
|
||||
on_clause = db_name .
|
||||
|
||||
order_by_clause = "ORDER BY" sort_fields .
|
||||
|
|
|
@ -547,13 +547,18 @@ type SelectStatement struct {
|
|||
// Fields to sort results by
|
||||
SortFields SortFields
|
||||
|
||||
// Maximum number of rows to be returned.
|
||||
// Unlimited if zero.
|
||||
// Maximum number of rows to be returned. Unlimited if zero.
|
||||
Limit int
|
||||
|
||||
// Returns rows starting at an offset from the first row.
|
||||
Offset int
|
||||
|
||||
// Maximum number of series to be returned. Unlimited if zero.
|
||||
Slimit int
|
||||
|
||||
// Returns series starting at an offset from the first one.
|
||||
Soffset int
|
||||
|
||||
// memoize the group by interval
|
||||
groupByInterval time.Duration
|
||||
|
||||
|
@ -571,6 +576,8 @@ func (s *SelectStatement) Clone() *SelectStatement {
|
|||
Condition: CloneExpr(s.Condition),
|
||||
Limit: s.Limit,
|
||||
Offset: s.Offset,
|
||||
Slimit: s.Slimit,
|
||||
Soffset: s.Soffset,
|
||||
}
|
||||
if s.Target != nil {
|
||||
other.Target = &Target{Measurement: s.Target.Measurement, Database: s.Target.Database}
|
||||
|
|
|
@ -494,15 +494,15 @@ func (p *Planner) Plan(stmt *SelectStatement) (*Executor, error) {
|
|||
}
|
||||
|
||||
// LIMIT and OFFSET the unique series
|
||||
if stmt.Limit > 0 || stmt.Offset > 0 {
|
||||
if stmt.Offset > len(jobs) {
|
||||
if stmt.Slimit > 0 || stmt.Soffset > 0 {
|
||||
if stmt.Soffset > len(jobs) {
|
||||
jobs = nil
|
||||
} else {
|
||||
if stmt.Offset+stmt.Limit > len(jobs) {
|
||||
stmt.Limit = len(jobs) - stmt.Offset
|
||||
if stmt.Soffset+stmt.Slimit > len(jobs) {
|
||||
stmt.Slimit = len(jobs) - stmt.Soffset
|
||||
}
|
||||
|
||||
jobs = jobs[stmt.Offset : stmt.Offset+stmt.Limit]
|
||||
jobs = jobs[stmt.Soffset : stmt.Soffset+stmt.Slimit]
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -566,6 +566,16 @@ func (p *Parser) parseSelectStatement(tr targetRequirement) (*SelectStatement, e
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Parse series limit: "SLIMIT <n>".
|
||||
if stmt.Slimit, err = p.parseOptionalTokenAndInt(SLIMIT); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse series offset: "SOFFSET <n>".
|
||||
if stmt.Soffset, err = p.parseOptionalTokenAndInt(SOFFSET); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return stmt, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -137,6 +137,17 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
},
|
||||
},
|
||||
|
||||
// SELECT statement with SLIMIT and SOFFSET
|
||||
{
|
||||
s: `SELECT field1 FROM myseries SLIMIT 10 SOFFSET 5`,
|
||||
stmt: &influxql.SelectStatement{
|
||||
Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "field1"}}},
|
||||
Source: &influxql.Measurement{Name: "myseries"},
|
||||
Slimit: 10,
|
||||
Soffset: 5,
|
||||
},
|
||||
},
|
||||
|
||||
// SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/
|
||||
{
|
||||
s: `SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/`,
|
||||
|
|
|
@ -86,7 +86,6 @@ const (
|
|||
KEY
|
||||
KEYS
|
||||
LIMIT
|
||||
SHOW
|
||||
MEASUREMENT
|
||||
MEASUREMENTS
|
||||
OFFSET
|
||||
|
@ -104,6 +103,9 @@ const (
|
|||
REVOKE
|
||||
SELECT
|
||||
SERIES
|
||||
SHOW
|
||||
SLIMIT
|
||||
SOFFSET
|
||||
TAG
|
||||
TO
|
||||
USER
|
||||
|
@ -184,7 +186,6 @@ var tokens = [...]string{
|
|||
KEY: "KEY",
|
||||
KEYS: "KEYS",
|
||||
LIMIT: "LIMIT",
|
||||
SHOW: "SHOW",
|
||||
MEASUREMENT: "MEASUREMENT",
|
||||
MEASUREMENTS: "MEASUREMENTS",
|
||||
OFFSET: "OFFSET",
|
||||
|
@ -202,6 +203,9 @@ var tokens = [...]string{
|
|||
REVOKE: "REVOKE",
|
||||
SELECT: "SELECT",
|
||||
SERIES: "SERIES",
|
||||
SHOW: "SHOW",
|
||||
SLIMIT: "SLIMIT",
|
||||
SOFFSET: "SOFFSET",
|
||||
TAG: "TAG",
|
||||
TO: "TO",
|
||||
USER: "USER",
|
||||
|
|
|
@ -1494,14 +1494,14 @@ func TestServer_LimitAndOffset(t *testing.T) {
|
|||
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east", "host": host}, Timestamp: time.Unix(int64(i), 0), Fields: map[string]interface{}{"value": float64(i)}}})
|
||||
}
|
||||
|
||||
results := s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * LIMIT 20`), "foo", nil)
|
||||
results := s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 20`), "foo", nil)
|
||||
if res := results.Results[0]; res.Err != nil {
|
||||
t.Fatalf("unexpected error during COUNT: %s", res.Err)
|
||||
} else if len(res.Series) != 9 {
|
||||
t.Fatalf("unexpected 9 series back but got %d", len(res.Series))
|
||||
}
|
||||
|
||||
results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * LIMIT 2 OFFSET 1`), "foo", nil)
|
||||
results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 2 SOFFSET 1`), "foo", nil)
|
||||
expected := `{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}`
|
||||
if res := results.Results[0]; res.Err != nil {
|
||||
t.Fatalf("unexpected error during COUNT: %s", res.Err)
|
||||
|
@ -1509,7 +1509,7 @@ func TestServer_LimitAndOffset(t *testing.T) {
|
|||
t.Fatalf("unexpected row(0) during COUNT:\n exp: %s\n got: %s", expected, s)
|
||||
}
|
||||
|
||||
results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * LIMIT 2 OFFSET 3`), "foo", nil)
|
||||
results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 2 SOFFSET 3`), "foo", nil)
|
||||
expected = `{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}`
|
||||
if res := results.Results[0]; res.Err != nil {
|
||||
t.Fatalf("unexpected error during COUNT: %s", res.Err)
|
||||
|
@ -1517,14 +1517,14 @@ func TestServer_LimitAndOffset(t *testing.T) {
|
|||
t.Fatalf("unexpected row(0) during COUNT:\n exp: %s\n got: %s", expected, s)
|
||||
}
|
||||
|
||||
results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * LIMIT 3 OFFSET 8`), "foo", nil)
|
||||
results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 3 SOFFSET 8`), "foo", nil)
|
||||
if res := results.Results[0]; res.Err != nil {
|
||||
t.Fatalf("unexpected error during COUNT: %s", res.Err)
|
||||
} else if s := mustMarshalJSON(res); s != `{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}` {
|
||||
t.Fatalf("unexpected row(0) during COUNT: %s", s)
|
||||
}
|
||||
|
||||
results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * LIMIT 3 OFFSET 20`), "foo", nil)
|
||||
results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 3 SOFFSET 20`), "foo", nil)
|
||||
if res := results.Results[0]; res.Err != nil {
|
||||
t.Fatalf("unexpected error during COUNT: %s", res.Err)
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue