// influxdb/cmd/influxd/run/server_test.go

package run_test
import (
"fmt"
"net/url"
"strconv"
"strings"
"testing"
"time"
)
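// Test, Query, and the server helpers used below (OpenServer, NewConfig,
// now, yesterday, mustParseTime, maxFloat64) are defined in this package's
// shared test harness, not in this file. A minimal sketch of the Query type,
// with fields inferred from its usage in this file (the real definition may
// differ):
//
//	type Query struct {
//		name    string     // human-readable test case name
//		command string     // InfluxQL statement to execute
//		params  url.Values // extra query-string parameters (db, epoch, precision)
//		exp     string     // expected JSON response (a regexp when pattern is true)
//		pattern bool       // treat exp as a regular expression
//		skip    bool       // skip this case, logging its name
//	}
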
// Ensure the database commands work.
func TestServer_DatabaseCommands(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
test := Test{
queries: []*Query{
&Query{
name: "create database should succeed",
command: `CREATE DATABASE db0`,
exp: `{"results":[{}]}`,
},
&Query{
name: "create database should error with bad name",
command: `CREATE DATABASE 0xdb0`,
exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 17"}`,
},
&Query{
name: "show database should succeed",
command: `SHOW DATABASES`,
exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"]]}]}]}`,
},
&Query{
name: "create database should error if it already exists",
command: `CREATE DATABASE db0`,
exp: `{"results":[{"error":"database already exists"}]}`,
},
&Query{
skip: true,
name: "drop database should succeed - FIXME pauldix",
command: `DROP DATABASE db0`,
exp: `{"results":[{}]}`,
},
&Query{
skip: true,
name: "show database should have no results - FIXME pauldix",
command: `SHOW DATABASES`,
exp: `FIXME`,
},
&Query{
skip: true,
name: "drop database should error if it doesn't exist - FIXME pauldix",
command: `DROP DATABASE db0`,
exp: `FIXME`,
},
},
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
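// The skip/Execute/success loop above recurs verbatim in every test in this
// file. A sketch of a helper that could consolidate it (runQueries is
// hypothetical, not part of the original harness):
//
//	func runQueries(t *testing.T, s Server, queries []*Query) {
//		for _, query := range queries {
//			if query.skip {
//				t.Logf("SKIP:: %s", query.name)
//				continue
//			}
//			if err := query.Execute(s); err != nil {
//				t.Error(query.Error(err))
//			} else if !query.success() {
//				t.Error(query.failureMessage())
//			}
//		}
//	}
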
// Ensure retention policy commands work.
func TestServer_RetentionPolicyCommands(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
// Create a database.
if _, err := s.MetaStore.CreateDatabase("db0"); err != nil {
t.Fatal(err)
}
test := Test{
queries: []*Query{
&Query{
name: "create retention policy should succeed",
command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`,
exp: `{"results":[{}]}`,
},
&Query{
name: "create retention policy should error if it already exists",
command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`,
exp: `{"results":[{"error":"retention policy already exists"}]}`,
},
&Query{
name: "show retention policy should succeed",
command: `SHOW RETENTION POLICIES db0`,
exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","1h0m0s",1,false]]}]}]}`,
},
&Query{
name: "alter retention policy should succeed",
command: `ALTER RETENTION POLICY rp0 ON db0 DURATION 2h REPLICATION 3 DEFAULT`,
exp: `{"results":[{}]}`,
},
&Query{
name: "show retention policy should have new altered information",
command: `SHOW RETENTION POLICIES db0`,
exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`,
},
&Query{
name: "drop retention policy should succeed",
command: `DROP RETENTION POLICY rp0 ON db0`,
exp: `{"results":[{}]}`,
},
&Query{
name: "show retention policy should be empty after dropping them",
command: `SHOW RETENTION POLICIES db0`,
exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"]}]}]}`,
},
},
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure user commands work.
func TestServer_UserCommands(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
// Create a database.
if _, err := s.MetaStore.CreateDatabase("db0"); err != nil {
t.Fatal(err)
}
test := Test{
queries: []*Query{
&Query{
name: "show users, no actual users",
command: `SHOW USERS`,
exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`,
},
&Query{
name: `create user`,
command: "CREATE USER jdoe WITH PASSWORD '1337'",
exp: `{"results":[{}]}`,
},
&Query{
name: "show users, 1 existing user",
command: `SHOW USERS`,
exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",false]]}]}]}`,
},
&Query{
name: "grant all priviledges to jdoe",
command: `GRANT ALL PRIVILEGES TO jdoe`,
exp: `{"results":[{}]}`,
},
&Query{
skip: true,
name: "show users, existing user as admin - FIXME",
command: `SHOW USERS`,
exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",true]]}]}]}`,
},
&Query{
name: "grant DB privileges to user",
command: `GRANT READ ON db0 TO jdoe`,
exp: `{"results":[{}]}`,
},
&Query{
name: "revoke all privileges",
command: `REVOKE ALL PRIVILEGES FROM jdoe`,
exp: `{"results":[{}]}`,
},
&Query{
name: "bad create user request",
command: `CREATE USER 0xBAD WITH PASSWORD pwd1337`,
exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 13"}`,
},
&Query{
name: "bad create user request, no name",
command: `CREATE USER WITH PASSWORD pwd1337`,
exp: `{"error":"error parsing query: found WITH, expected identifier at line 1, char 13"}`,
},
&Query{
name: "bad create user request, no password",
command: `CREATE USER jdoe`,
exp: `{"error":"error parsing query: found EOF, expected WITH at line 1, char 18"}`,
},
&Query{
name: "drop user",
command: `DROP USER jdoe`,
exp: `{"results":[{}]}`,
},
&Query{
name: "make sure user was dropped",
command: `SHOW USERS`,
exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`,
},
&Query{
name: "delete non existing user",
command: `DROP USER noone`,
exp: `{"results":[{"error":"user not found"}]}`,
},
},
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(fmt.Sprintf("command: %s - err: %s", query.command, query.Error(err)))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure the server can create a single point via the JSON protocol and read it back.
func TestServer_Write_JSON(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
if res, err := s.Write("", "", fmt.Sprintf(`{"database" : "db0", "retentionPolicy" : "rp0", "points": [{"measurement": "cpu", "tags": {"host": "server02"},"fields": {"value": 1.0}}],"time":"%s"} `, now.Format(time.RFC3339Nano)), nil); err != nil {
t.Fatal(err)
} else if exp := ``; exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
// Verify the data was written.
if res, err := s.Query(`SELECT * FROM db0.rp0.cpu`); err != nil {
t.Fatal(err)
} else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
}
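// Unlike the line-protocol tests that follow, the JSON write above carries
// the database and retention policy inside the request body, so s.Write is
// called with empty db and rp arguments.
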
// Ensure the server can create a single point via line protocol with float type and read it back.
func TestServer_Write_LineProtocol_Float(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=1.0 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {
t.Fatal(err)
} else if exp := ``; exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
// Verify the data was written.
if res, err := s.Query(`SELECT * FROM db0.rp0.cpu`); err != nil {
t.Fatal(err)
} else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
}
// Ensure the server can create a single point via line protocol with bool type and read it back.
func TestServer_Write_LineProtocol_Bool(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=true `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {
t.Fatal(err)
} else if exp := ``; exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
// Verify the data was written.
if res, err := s.Query(`SELECT * FROM db0.rp0.cpu`); err != nil {
t.Fatal(err)
} else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",true]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
}
// Ensure the server can create a single point via line protocol with string type and read it back.
func TestServer_Write_LineProtocol_String(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
if res, err := s.Write("db0", "rp0", `cpu,host=server01 value="disk full" `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {
t.Fatal(err)
} else if exp := ``; exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
// Verify the data was written.
if res, err := s.Query(`SELECT * FROM db0.rp0.cpu`); err != nil {
t.Fatal(err)
} else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s","disk full"]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
}
// Ensure the server can create a single point via line protocol with integer type and read it back.
func TestServer_Write_LineProtocol_Integer(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=100 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {
t.Fatal(err)
} else if exp := ``; exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
// Verify the data was written.
if res, err := s.Query(`SELECT * FROM db0.rp0.cpu`); err != nil {
t.Fatal(err)
} else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {
t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
}
}
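// Together, the four tests above exercise the line protocol's field value
// syntax - floats (value=1.0), bools (value=true), quoted strings
// (value="disk full"), and bare integers (value=100) - each written as
// measurement,tag=value field=value timestamp-in-nanoseconds.
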
// Ensure the server can query with a default database (via the db param) and default retention policy.
func TestServer_Query_DefaultDBAndRP(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}
test := NewTest("db0", "rp0")
test.write = fmt.Sprintf(`cpu value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano())
test.addQueries([]*Query{
&Query{
name: "default db and rp",
params: url.Values{"db": []string{"db0"}},
command: `SELECT * FROM cpu`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`,
},
&Query{
skip: true,
name: "default rp - FIXME pauldix",
command: `SELECT * FROM db0..cpu`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`,
},
&Query{
name: "default dp",
params: url.Values{"db": []string{"db0"}},
command: `SELECT * FROM rp0.cpu`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`,
},
}...)
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure the server can query with the count aggregate function
func TestServer_Query_Count(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
test := NewTest("db0", "rp0")
test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)
test.addQueries([]*Query{
&Query{
name: "selecting count(value) should succeed",
command: `SELECT count(value) FROM db0.rp0.cpu`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
&Query{
name: "selecting count(*) should error",
command: `SELECT count(*) FROM db0.rp0.cpu`,
exp: `{"results":[{"error":"expected field argument in count()"}]}`,
},
}...)
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure the server can query with Now().
func TestServer_Query_Now(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
test := NewTest("db0", "rp0")
test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)
test.addQueries([]*Query{
&Query{
name: "where with time < now() should work",
command: `SELECT * FROM db0.rp0.cpu where time < now()`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)),
},
&Query{
skip: true,
name: "where with time > now() should return an empty result - FIXME pauldix",
command: `SELECT * FROM db0.rp0.cpu where time > now()`,
exp: `{"results":[{}]}`,
},
}...)
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure the server can query with epoch precisions.
func TestServer_Query_EpochPrecision(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
test := NewTest("db0", "rp0")
test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)
test.addQueries([]*Query{
&Query{
name: "nanosecond precision",
command: `SELECT * FROM db0.rp0.cpu`,
params: url.Values{"epoch": []string{"n"}},
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()),
},
&Query{
name: "microsecond precision",
command: `SELECT * FROM db0.rp0.cpu`,
params: url.Values{"epoch": []string{"u"}},
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Microsecond)),
},
&Query{
name: "millisecond precision",
command: `SELECT * FROM db0.rp0.cpu`,
params: url.Values{"epoch": []string{"ms"}},
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Millisecond)),
},
&Query{
name: "second precision",
command: `SELECT * FROM db0.rp0.cpu`,
params: url.Values{"epoch": []string{"s"}},
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Second)),
},
&Query{
name: "minute precision",
command: `SELECT * FROM db0.rp0.cpu`,
params: url.Values{"epoch": []string{"m"}},
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Minute)),
},
&Query{
name: "hour precision",
command: `SELECT * FROM db0.rp0.cpu`,
params: url.Values{"epoch": []string{"h"}},
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Hour)),
},
}...)
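// Each query above expects the same instant converted from nanoseconds to the
// requested epoch unit by integer division, e.g. milliseconds are
// now.UnixNano() / int64(time.Millisecond).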
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure the server works with tag queries.
func TestServer_Query_Tags(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
test := NewTest("db0", "rp0")
test.write = fmt.Sprintf("cpu,host=server01 value=100,core=4 %s\ncpu,host=server02 value=50,core=2 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))
test.addQueries([]*Query{
&Query{
name: "tag without field should return error",
command: `SELECT host FROM db0.rp0.cpu`,
exp: `{"results":[{"error":"select statement must include at least one field or function call"}]}`,
},
&Query{
name: "field with tag should succeed",
command: `SELECT host, value FROM db0.rp0.cpu`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),
},
&Query{
name: "field with two tags should succeed",
command: `SELECT host, value, core FROM db0.rp0.cpu`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value","core"],"values":[["%s",100,4]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value","core"],"values":[["%s",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),
},
&Query{
name: "select * with tags should succeed",
command: `SELECT * FROM db0.rp0.cpu`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","core","value"],"values":[["%s",4,100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","core","value"],"values":[["%s",2,50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),
},
&Query{
name: "group by tag",
command: `SELECT value FROM db0.rp0.cpu GROUP by host`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),
},
}...)
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure the server will succeed and error for common scenarios.
func TestServer_Query_Common(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
test := NewTest("db0", "rp0")
test.write = fmt.Sprintf("cpu,host=server01 value=1 %s", strconv.FormatInt(now.UnixNano(), 10))
test.addQueries([]*Query{
&Query{
name: "selecting a from a non-existent database should error",
command: `SELECT value FROM db1.rp0.cpu`,
exp: `{"results":[{"error":"database not found"}]}`,
},
&Query{
name: "selecting a from a non-existent retention policy should error",
command: `SELECT value FROM db0.rp1.cpu`,
exp: `{"results":[{"error":"retention policy not found"}]}`,
},
&Query{
name: "selecting a valid measurement and field should succeed",
command: `SELECT value FROM db0.rp0.cpu`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)),
},
&Query{
name: "selecting a measurement that doesn't exist should error",
command: `SELECT value FROM db0.rp0.idontexist`,
exp: `.*measurement not found.*`,
pattern: true,
},
&Query{
name: "selecting a field that doesn't exist should error",
command: `SELECT idontexist FROM db0.rp0.cpu`,
exp: `{"results":[{"error":"unknown field or tag name in select clause: idontexist"}]}`,
},
&Query{
skip: true,
name: "no results should return an empty result - FIXME pauldix",
command: `SELECT value FROM db0.rp0.cpu where time > now()`,
exp: `{"results":[{}]}`,
},
}...)
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure the server can query two points.
func TestServer_Query_SelectTwoPoints(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
test := NewTest("db0", "rp0")
test.write = fmt.Sprintf("cpu value=100 %s\ncpu value=200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))
test.addQueries(&Query{
name: "selecting two points should result in two points",
command: `SELECT * FROM db0.rp0.cpu`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),
})
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure the server can query two negative points.
func TestServer_Query_SelectTwoNegativePoints(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
test := NewTest("db0", "rp0")
test.write = fmt.Sprintf("cpu value=-100 %s\ncpu value=-200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))
test.addQueries(&Query{
name: "selecting two negative points should succeed",
command: `SELECT * FROM db0.rp0.cpu`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",-100],["%s",-200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),
})
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure the server can query with relative time.
func TestServer_Query_SelectRelativeTime(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
t.Fatal(err)
}
now := now()
yesterday := yesterday()
test := NewTest("db0", "rp0")
test.write = fmt.Sprintf("cpu,host=server01 value=100 %s\ncpu,host=server01 value=200 %s", strconv.FormatInt(yesterday.UnixNano(), 10), strconv.FormatInt(now.UnixNano(), 10))
test.addQueries([]*Query{
&Query{
name: "single point with time pre-calculated for past time queries yesterday",
command: `SELECT * FROM db0.rp0.cpu where time >= '` + yesterday.Add(-1*time.Minute).Format(time.RFC3339Nano) + `'`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, yesterday.Format(time.RFC3339Nano), now.Format(time.RFC3339Nano)),
},
&Query{
name: "single point with time pre-calculated for relative time queries now",
command: `SELECT * FROM db0.rp0.cpu where time >= now() - 1m`,
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",200]]}]}]}`, now.Format(time.RFC3339Nano)),
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Ensure that when merging many series together, where some have a different
// number of points than others in a GROUP BY interval, the results are correct.
func TestServer_Query_MergeMany(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
// Set an infinite retention policy, as we are inserting data in the past and don't want retention policy enforcement to make this test racy.
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
test := NewTest("db0", "rp0")
writes := []string{}
for i := 1; i < 11; i++ {
for j := 1; j < 5+i%3; j++ {
data := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano())
writes = append(writes, data)
}
}
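// Series i gets 4+i%3 points at seconds 1..(4+i%3): all ten series cover
// seconds 1-4, the seven series with 5 or 6 points reach second 5, and the
// three with 6 points reach second 6, matching the GROUP BY time(1s) counts
// of 10,10,10,10,7,3 expected below.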
test.write = strings.Join(writes, "\n")
test.addQueries([]*Query{
&Query{
name: "GROUP by time",
command: `SELECT count(value) FROM db0.rp0.cpu WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`,
},
&Query{
skip: true,
name: "GROUP by tag - FIXME pauldix",
command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`,
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server03"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`,
},
&Query{
name: "GROUP by field",
command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by value`,
exp: `{"results":[{"error":"can not use field in group by clause: value"}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
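// Ensure the server applies SLIMIT and SOFFSET to the series returned by
// GROUP BY queries.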
func TestServer_Query_LimitAndOffset(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
// Set an infinite retention policy, as we are inserting data in the past and don't want retention policy enforcement to make this test racy.
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
test := NewTest("db0", "rp0")
writes := []string{}
for i := 1; i < 10; i++ {
data := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano())
writes = append(writes, data)
}
test.write = strings.Join(writes, "\n")
test.addQueries([]*Query{
&Query{
name: "SLIMIT 2 SOFFSET 1",
command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`,
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
&Query{
name: "SLIMIT 2 SOFFSET 3",
command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`,
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
&Query{
name: "SLIMIT 3 SOFFSET 8",
command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 SOFFSET 8`,
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
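// Ensure the server can query measurements matched by a regular expression.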
func TestServer_Query_Regex(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}
writes := []string{
fmt.Sprintf(`cpu1,host=server01 value=10 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()),
fmt.Sprintf(`cpu2,host=server01 value=20 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()),
fmt.Sprintf(`cpu3,host=server01 value=30 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()),
}
test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")
test.addQueries([]*Query{
&Query{
name: "default db and rp",
command: `SELECT * FROM /cpu[13]/`,
params: url.Values{"db": []string{"db0"}},
exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`,
},
&Query{
name: "specifying db and rp",
command: `SELECT * FROM db0.rp0./cpu[13]/`,
exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`,
},
&Query{
name: "default db and specified rp",
command: `SELECT * FROM rp0./cpu[13]/`,
params: url.Values{"db": []string{"db0"}},
exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`,
},
&Query{
skip: true,
name: "specified db and default rp - FIXME pauldix",
command: `SELECT * FROM db0../cpu[13]/`,
exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
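// Ensure the server computes aggregate functions correctly.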
func TestServer_Query_Aggregates(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}
writes := []string{
fmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
fmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`stringdata value="first" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
fmt.Sprintf(`stringdata value="last" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()),
}
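// The floatmany values {2,4,4,4,5,5,7,9} drive several expectations below:
// mean 40/8 = 5, sample stddev sqrt(32/7) = 2.138..., spread 9-2 = 7,
// even-count median (4+5)/2 = 4.5, and distinct values [2,4,5,7,9].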
test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")
// FIXME: add all of the int-style tests once int aggregate support is fixed.
test.addQueries([]*Query{
&Query{
skip: true,
name: "stddev with just one point - int FIXME currently panics the server",
params: url.Values{"db": []string{"db0"}},
command: `SELECT STDDEV(value) FROM int where time = '2000-01-01T00:00:00Z' and time < '2000-01-01T01:00:00Z'`,
exp: `{"results":[{"series":[{"name":"int","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`,
},
&Query{
name: "stddev with just one point - float",
params: url.Values{"db": []string{"db0"}},
command: `SELECT STDDEV(value) FROM floatsingle`,
exp: `{"results":[{"series":[{"name":"floatsingle","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`,
},
&Query{
name: "large mean and stddev - float",
params: url.Values{"db": []string{"db0"}},
command: `SELECT MEAN(value), STDDEV(value) FROM floatmax`,
exp: `{"results":[{"series":[{"name":"floatmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxFloat64() + `,0]]}]}]}`,
},
&Query{
name: "mean and stddev - float",
params: url.Values{"db": []string{"db0"}},
command: `SELECT MEAN(value), STDDEV(value) FROM floatmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`,
},
&Query{
name: "first",
params: url.Values{"db": []string{"db0"}},
command: `SELECT FIRST(value) FROM floatmany`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`,
},
&Query{
name: "last",
params: url.Values{"db": []string{"db0"}},
command: `SELECT LAST(value) FROM floatmany`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",9]]}]}]}`,
},
&Query{
name: "spread",
params: url.Values{"db": []string{"db0"}},
command: `SELECT SPREAD(value) FROM floatmany`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`,
},
&Query{
name: "median - even count",
params: url.Values{"db": []string{"db0"}},
command: `SELECT MEDIAN(value) FROM floatmany`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`,
},
&Query{
name: "median - odd count",
params: url.Values{"db": []string{"db0"}},
command: `SELECT MEDIAN(value) FROM floatmany where time < '2000-01-01T00:01:10Z'`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`,
},
&Query{
name: "distinct as call",
params: url.Values{"db": []string{"db0"}},
command: `SELECT DISTINCT(value) FROM floatmany`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`,
},
&Query{
name: "distinct alt syntax",
params: url.Values{"db": []string{"db0"}},
command: `SELECT DISTINCT value FROM floatmany`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`,
},
&Query{
name: "distinct select tag",
params: url.Values{"db": []string{"db0"}},
command: `SELECT DISTINCT(host) FROM floatmany`,
exp: `{"results":[{"error":"host isn't a field on measurement floatmany; to query the unique values for a tag use SHOW TAG VALUES FROM floatmany WITH KEY = \"host\""}]}`,
},
&Query{
name: "distinct alt select tag",
params: url.Values{"db": []string{"db0"}},
command: `SELECT DISTINCT host FROM floatmany`,
exp: `{"results":[{"error":"host isn't a field on measurement floatmany; to query the unique values for a tag use SHOW TAG VALUES FROM floatmany WITH KEY = \"host\""}]}`,
},
&Query{
name: "count distinct",
params: url.Values{"db": []string{"db0"}},
command: `SELECT COUNT(DISTINCT value) FROM floatmany`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`,
},
&Query{
name: "count distinct as call",
params: url.Values{"db": []string{"db0"}},
command: `SELECT COUNT(DISTINCT(value)) FROM floatmany`,
exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`,
},
&Query{
name: "count distinct select tag",
params: url.Values{"db": []string{"db0"}},
command: `SELECT COUNT(DISTINCT host) FROM floatmany`,
exp: `{"results":[{"error":"host isn't a field on measurement floatmany; count(distinct) on tags isn't yet supported"}]}`,
},
&Query{
name: "count distinct as call select tag",
params: url.Values{"db": []string{"db0"}},
command: `SELECT COUNT(DISTINCT host) FROM floatmany`,
exp: `{"results":[{"error":"host isn't a field on measurement floatmany; count(distinct) on tags isn't yet supported"}]}`,
},
&Query{
name: "aggregation with no interval",
params: url.Values{"db": []string{"db0"}},
command: `SELECT count(value) FROM floatoverlap WHERE time = '2000-01-01 00:00:00'`,
exp: `{"results":[{"series":[{"name":"floatoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`,
},
&Query{
name: "sum",
params: url.Values{"db": []string{"db0"}},
command: `SELECT SUM(value) FROM floatoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`,
exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`,
},
&Query{
name: "aggregation with a null field value",
params: url.Values{"db": []string{"db0"}},
command: `SELECT SUM(value) FROM floatoverlap GROUP BY region`,
exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`,
},
&Query{
name: "multiple aggregations",
params: url.Values{"db": []string{"db0"}},
command: `SELECT SUM(value), MEAN(value) FROM floatoverlap GROUP BY region`,
exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`,
},
&Query{
name: "multiple aggregations with division",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value) / mean(value) as div FROM floatoverlap GROUP BY region`,
exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",2]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
&Query{
name: "group by multiple dimensions",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value) FROM load GROUP BY region, host`,
exp: `{"results":[{"series":[{"name":"load","tags":{"host":"serverA","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",20]]},{"name":"load","tags":{"host":"serverB","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",30]]},{"name":"load","tags":{"host":"serverC","region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`,
},
&Query{
name: "aggregation with WHERE and AND",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`,
},
&Query{
skip: true,
name: "STDDEV on string data. FIXME pauldix I panic!",
params: url.Values{"db": []string{"db0"}},
command: `SELECT STDDEV(value) FROM stringdata`,
exp: `{"results":[{"error":"aggregate 'stddev' requires numerical field values. Field 'value' is of type string"}]}`,
},
&Query{
skip: true,
name: "MEAN on string data. FIXME pauldix I panic!",
params: url.Values{"db": []string{"db0"}},
command: `SELECT MEAN(value) FROM stringdata`,
exp: `{"results":[{"error":"aggregate 'mean' requires numerical field values. Field 'value' is of type string"}]}`,
},
&Query{
skip: true,
name: "MEDIAN on string data. FIXME pauldix I panic!",
params: url.Values{"db": []string{"db0"}},
command: `SELECT MEDIAN(value) FROM stringdata`,
exp: `{"results":[{"error":"aggregate 'median' requires numerical field values. Field 'value' is of type string"}]}`,
},
&Query{
name: "COUNT on string data.",
params: url.Values{"db": []string{"db0"}},
command: `SELECT COUNT(value) FROM stringdata`,
exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`,
},
&Query{
name: "FIRST on string data.",
params: url.Values{"db": []string{"db0"}},
command: `SELECT FIRST(value) FROM stringdata`,
exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","first"],"values":[["1970-01-01T00:00:00Z","first"]]}]}]}`,
},
&Query{
name: "LAST on string data.",
params: url.Values{"db": []string{"db0"}},
command: `SELECT LAST(value) FROM stringdata`,
exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","last"],"values":[["1970-01-01T00:00:00Z","last"]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
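// Ensure the server honors the precision parameter (n, u, ms, s, m, h) on writes.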
func TestServer_Write_Precision(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}
writes := []struct {
write string
params url.Values
}{
{
write: fmt.Sprintf("cpu_n0_precision value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()),
},
{
write: fmt.Sprintf("cpu_n1_precision value=1.1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()),
params: url.Values{"precision": []string{"n"}},
},
{
write: fmt.Sprintf("cpu_u_precision value=100 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Microsecond).UnixNano()/int64(time.Microsecond)),
params: url.Values{"precision": []string{"u"}},
},
{
write: fmt.Sprintf("cpu_ms_precision value=200 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Millisecond).UnixNano()/int64(time.Millisecond)),
params: url.Values{"precision": []string{"ms"}},
},
{
write: fmt.Sprintf("cpu_s_precision value=300 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Second).UnixNano()/int64(time.Second)),
params: url.Values{"precision": []string{"s"}},
},
{
write: fmt.Sprintf("cpu_m_precision value=400 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Minute).UnixNano()/int64(time.Minute)),
params: url.Values{"precision": []string{"m"}},
},
{
write: fmt.Sprintf("cpu_h_precision value=500 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Hour).UnixNano()/int64(time.Hour)),
params: url.Values{"precision": []string{"h"}},
},
}
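// Each write above truncates the reference time to the target unit and then
// divides the nanosecond timestamp by that unit, so every point should read
// back at the truncated time in the queries below.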
test := NewTest("db0", "rp0")
test.addQueries([]*Query{
&Query{
name: "point with nanosecond precision time - no precision specified on write",
command: `SELECT * FROM cpu_n0_precision`,
params: url.Values{"db": []string{"db0"}},
exp: `{"results":[{"series":[{"name":"cpu_n0_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1]]}]}]}`,
},
&Query{
name: "point with nanosecond precision time",
command: `SELECT * FROM cpu_n1_precision`,
params: url.Values{"db": []string{"db0"}},
exp: `{"results":[{"series":[{"name":"cpu_n1_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1.1]]}]}]}`,
},
&Query{
name: "point with microsecond precision time",
command: `SELECT * FROM cpu_u_precision`,
params: url.Values{"db": []string{"db0"}},
exp: `{"results":[{"series":[{"name":"cpu_u_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012Z",100]]}]}]}`,
},
&Query{
name: "point with millisecond precision time",
command: `SELECT * FROM cpu_ms_precision`,
params: url.Values{"db": []string{"db0"}},
exp: `{"results":[{"series":[{"name":"cpu_ms_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789Z",200]]}]}]}`,
},
&Query{
name: "point with second precision time",
command: `SELECT * FROM cpu_s_precision`,
params: url.Values{"db": []string{"db0"}},
exp: `{"results":[{"series":[{"name":"cpu_s_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56Z",300]]}]}]}`,
},
&Query{
name: "point with minute precision time",
command: `SELECT * FROM cpu_m_precision`,
params: url.Values{"db": []string{"db0"}},
exp: `{"results":[{"series":[{"name":"cpu_m_precision","columns":["time","value"],"values":[["2000-01-01T12:34:00Z",400]]}]}]}`,
},
&Query{
name: "point with hour precision time",
command: `SELECT * FROM cpu_h_precision`,
params: url.Values{"db": []string{"db0"}},
exp: `{"results":[{"series":[{"name":"cpu_h_precision","columns":["time","value"],"values":[["2000-01-01T12:00:00Z",500]]}]}]}`,
},
}...)
// These writes require per-write parameter changes, so we work around the test harness a little to apply them properly.
for _, w := range writes {
test.write = w.write
test.params = w.params
test.initialized = false
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
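// Ensure the server expands wildcards in the select clause and GROUP BY.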
func TestServer_Query_Wildcards(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}
writes := []string{
fmt.Sprintf(`wildcard,region=us-east value=10 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`wildcard,region=us-east val-x=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`wildcard,region=us-east value=30,val-x=40 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
fmt.Sprintf(`wgroup,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`wgroup,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
fmt.Sprintf(`wgroup,region=us-west value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
}
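// The wildcard points interleave two fields (value and val-x), so SELECT *
// is expected to null-fill whichever field is absent at each timestamp.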
test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")
test.addQueries([]*Query{
&Query{
name: "wildcard",
params: url.Values{"db": []string{"db0"}},
command: `SELECT * FROM wildcard`,
exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","val-x","value"],"values":[["2000-01-01T00:00:00Z",null,10],["2000-01-01T00:00:10Z",20,null],["2000-01-01T00:00:20Z",40,30]]}]}]}`,
},
&Query{
name: "GROUP BY queries",
params: url.Values{"db": []string{"db0"}},
command: `SELECT mean(value) FROM wgroup GROUP BY *`,
exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",30]]}]}]}`,
},
&Query{
name: "GROUP BY queries with time",
params: url.Values{"db": []string{"db0"}},
command: `SELECT mean(value) FROM wgroup WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY *,TIME(1m)`,
exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`,
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}