Merge branch 'master' of https://github.com/influxdb/influxdb into stateless-broker

pull/1935/head
Ben Johnson 2015-03-12 11:59:55 -06:00
commit 7ab19b9f91
16 changed files with 630 additions and 61 deletions

View File

@ -1,7 +1,16 @@
## v0.9.0-rc11 [unreleased]
### Bugfixes
- [#1917](https://github.com/influxdb/influxdb/pull/1902): Creating Infinite Retention Policy Failed.
- [#1758](https://github.com/influxdb/influxdb/pull/1758): Add Graphite Integration Test.
- [#1929](https://github.com/influxdb/influxdb/pull/1929): Default Retention Policy incorrectly auto created.
- [#1930](https://github.com/influxdb/influxdb/pull/1930): Auto create database for graphite if not specified.
- [#1931](https://github.com/influxdb/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES.
### Features
- [#1902](https://github.com/influxdb/influxdb/pull/1902): Enforce retention policies to have a minimum duraton.
- [#1902](https://github.com/influxdb/influxdb/pull/1902): Enforce retention policies to have a minimum duration.
- [#1906](https://github.com/influxdb/influxdb/pull/1906): Add show servers to query language.
- [#1925](https://github.com/influxdb/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill(<num>)` to queries.
## v0.9.0-rc10 [2015-03-09]

View File

@ -67,9 +67,14 @@ func Run(config *Config, join, version string, logWriter *os.File) (*messaging.B
// We want to make sure we are spun up before we exit this function, so we manually listen and serve
listener, err := net.Listen("tcp", config.BrokerAddr())
if err != nil {
log.Fatal(err)
log.Fatalf("Broker failed to listen on %s. %s ", config.BrokerAddr(), err)
}
go func() { log.Fatal(http.Serve(listener, h)) }()
go func() {
err := http.Serve(listener, h)
if err != nil {
log.Fatalf("Broker failed to server on %s.: %s", config.BrokerAddr(), err)
}
}()
log.Printf("broker listening on %s", config.BrokerAddr())
// have it occasionally tell a data node in the cluster to run continuous queries
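The hunk above replaces the one-line `go func() { log.Fatal(http.Serve(listener, h)) }()` with an explicit goroutine so a serve failure reports the broker address. A minimal standalone sketch of the same listen-then-serve pattern; the address and handler here are illustrative, not from the commit:

```go
package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	addr := "127.0.0.1:8086" // hypothetical broker address

	// Bind synchronously so startup fails immediately if the port is taken.
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("Broker failed to listen on %s: %s", addr, err)
	}

	// Serve in the background, including the address in any fatal error.
	go func() {
		if err := http.Serve(listener, http.NotFoundHandler()); err != nil {
			log.Fatalf("Broker failed to serve on %s: %s", addr, err)
		}
	}()
	log.Printf("broker listening on %s", addr)

	select {} // the real function continues with the rest of startup here
}
```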
@ -162,6 +167,7 @@ func Run(config *Config, join, version string, logWriter *os.File) (*messaging.B
if strings.ToLower(c.Protocol) == "tcp" {
g := graphite.NewTCPServer(parser, s)
g.Database = c.Database
g.SetLogOutput(logWriter)
err := g.ListenAndServe(c.ConnectionString(config.BindAddress))
if err != nil {
log.Printf("failed to start TCP Graphite Server: %v\n", err.Error())
@ -169,12 +175,13 @@ func Run(config *Config, join, version string, logWriter *os.File) (*messaging.B
} else if strings.ToLower(c.Protocol) == "udp" {
g := graphite.NewUDPServer(parser, s)
g.Database = c.Database
g.SetLogOutput(logWriter)
err := g.ListenAndServe(c.ConnectionString(config.BindAddress))
if err != nil {
log.Printf("failed to start UDP Graphite Server: %v\n", err.Error())
}
} else {
log.Fatalf("unrecognized Graphite Server prototcol %s", c.Protocol)
log.Fatalf("unrecognized Graphite Server protocol %s", c.Protocol)
}
}
}

View File

@ -5,12 +5,15 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"math"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"testing"
"time"
@ -66,7 +69,7 @@ type Cluster []*Node
// the testing is marked as failed.
//
// This function returns a slice of nodes, the first of which will be the leader.
func createCombinedNodeCluster(t *testing.T, testName, tmpDir string, nNodes, basePort int) Cluster {
func createCombinedNodeCluster(t *testing.T, testName, tmpDir string, nNodes, basePort int, baseConfig *main.Config) Cluster {
t.Logf("Creating cluster of %d nodes for test %s", nNodes, testName)
if nNodes < 1 {
t.Fatalf("Test %s: asked to create nonsense cluster", testName)
@ -85,7 +88,10 @@ func createCombinedNodeCluster(t *testing.T, testName, tmpDir string, nNodes, ba
_ = os.RemoveAll(tmpDataDir)
// Create the first node, special case.
c := main.NewConfig()
c := baseConfig
if c == nil {
c = main.NewConfig()
}
c.Broker.Dir = filepath.Join(tmpBrokerDir, strconv.Itoa(basePort))
c.Data.Dir = filepath.Join(tmpDataDir, strconv.Itoa(basePort))
c.Broker.Port = basePort
@ -167,9 +173,6 @@ func write(t *testing.T, node *Node, data string) {
body, _ := ioutil.ReadAll(resp.Body)
t.Fatalf("Write to database failed. Unexpected status code. expected: %d, actual %d, %s", http.StatusOK, resp.StatusCode, string(body))
}
// Until races are solved.
time.Sleep(3 * time.Second)
}
// query executes the given query against all nodes in the cluster, and verifies no errors occurred, and
@ -202,6 +205,38 @@ func query(t *testing.T, nodes Cluster, urlDb, query, expected string) (string,
return "", true
}
// queryAndWait executes the given query against all nodes in the cluster, and verifies no errors occurred, and
// ensures the returned data is as expected until the timeout occurs
func queryAndWait(t *testing.T, nodes Cluster, urlDb, q, expected string, timeout time.Duration) (string, bool) {
v := url.Values{"q": []string{q}}
if urlDb != "" {
v.Set("db", urlDb)
}
var (
timedOut int32
timer = time.NewTimer(time.Duration(math.MaxInt64))
)
defer timer.Stop()
if timeout > 0 {
timer.Reset(time.Duration(timeout))
go func() {
<-timer.C
atomic.StoreInt32(&timedOut, 1)
}()
}
for {
if got, ok := query(t, nodes, urlDb, q, expected); ok {
return got, ok
} else if atomic.LoadInt32(&timedOut) == 1 {
return fmt.Sprintf("timed out before expected result was found: got: %s", got), false
} else {
time.Sleep(10 * time.Millisecond)
}
}
}
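queryAndWait polls `query` every 10ms and signals expiry with a timer goroutine plus an atomic flag rather than a `select` in the hot loop. A condensed, self-contained sketch of the same poll-until-timeout pattern; the `waitFor` name and the condition are illustrative:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// waitFor polls check every 10ms until it returns true or timeout elapses,
// mirroring the timer-plus-atomic-flag pattern in queryAndWait above.
func waitFor(check func() bool, timeout time.Duration) bool {
	var timedOut int32
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	go func() {
		<-timer.C
		atomic.StoreInt32(&timedOut, 1)
	}()
	for {
		if check() {
			return true
		}
		if atomic.LoadInt32(&timedOut) == 1 {
			return false
		}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	start := time.Now()
	ok := waitFor(func() bool { return time.Since(start) > 50*time.Millisecond }, time.Second)
	fmt.Println("condition met:", ok)
}
```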
// runTests_Errors tests some basic error cases.
func runTests_Errors(t *testing.T, nodes Cluster) {
t.Logf("Running tests against %d-node cluster", len(nodes))
@ -260,6 +295,11 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
query: `SELECT * FROM "%DB%"."%RP%".cpu`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`,
},
{
name: "single point count query with timestamp",
query: `SELECT count(value) FROM "%DB%"."%RP%".cpu`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
{
name: "single string point with timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "logs", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": "disk full"}}]}`,
@ -290,6 +330,30 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
expected: `{"results":[{"error":"unknown field or tag name in select clause: abc"}]}`,
},
{
name: "single string point with second precision timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu_s_precision", "timestamp": 1, "precision": "s", "fields": {"value": 100}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu_s_precision`,
expected: `{"results":[{"series":[{"name":"cpu_s_precision","columns":["time","value"],"values":[["1970-01-01T00:00:01Z",100]]}]}]}`,
},
{
name: "single string point with millisecond precision timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu_ms_precision", "timestamp": 1000, "precision": "ms", "fields": {"value": 100}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu_ms_precision`,
expected: `{"results":[{"series":[{"name":"cpu_ms_precision","columns":["time","value"],"values":[["1970-01-01T00:00:01Z",100]]}]}]}`,
},
{
name: "single string point with nanosecond precision timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu_n_precision", "timestamp": 2000000000, "precision": "n", "fields": {"value": 100}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu_n_precision`,
expected: `{"results":[{"series":[{"name":"cpu_n_precision","columns":["time","value"],"values":[["1970-01-01T00:00:02Z",100]]}]}]}`,
},
{
name: "single point count query with nanosecond precision timestamp",
query: `SELECT count(value) FROM "%DB%"."%RP%".cpu_n_precision`,
expected: `{"results":[{"series":[{"name":"cpu_n_precision","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
// WHERE fields queries
{
reset: true,
@ -431,6 +495,34 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
expected: `{"results":[{"series":[{"name":"limit","columns":["time","foo"]}]}]}`,
},
// Fill tests
{
name: "fill with value",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "fills", "timestamp": "2009-11-10T23:00:02Z","fields": {"val": 3}},
{"name": "fills", "timestamp": "2009-11-10T23:00:03Z","fields": {"val": 5}},
{"name": "fills", "timestamp": "2009-11-10T23:00:06Z","fields": {"val": 4}},
{"name": "fills", "timestamp": "2009-11-10T23:00:16Z","fields": {"val": 10}}
]}`,
query: `select mean(val) from "%DB%"."%RP%".fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(1)`,
expected: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`,
},
{
name: "fill with previous",
query: `select mean(val) from "%DB%"."%RP%".fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(previous)`,
expected: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`,
},
{
name: "fill with none, i.e. clear out nulls",
query: `select mean(val) from "%DB%"."%RP%".fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(none)`,
expected: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`,
},
{
name: "fill defaults to null",
query: `select mean(val) from "%DB%"."%RP%".fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`,
expected: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",10]]}]}]}`,
},
// Metadata display tests
{
@ -594,7 +686,22 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
name: "Check for default retention policy",
query: `SHOW RETENTION POLICIES mydatabase`,
expected: `{"results":[{"series":[{"columns":["name","duration","replicaN"],"values":[["default","0",1]]}]}]}`,
expected: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,true]]}]}]}`,
},
{
name: "Ensure retention policy with infinite retention can be created",
query: `CREATE RETENTION POLICY rp1 ON mydatabase DURATION INF REPLICATION 1`,
expected: `{"results":[{}]}`,
},
{
name: "Ensure retention policy with acceptable retention can be created",
query: `CREATE RETENTION POLICY rp2 ON mydatabase DURATION 30d REPLICATION 1`,
expected: `{"results":[{}]}`,
},
{
name: "Ensure retention policy with unacceptable retention cannot be created",
query: `CREATE RETENTION POLICY rp3 ON mydatabase DURATION 1s REPLICATION 1`,
expected: `{"results":[{"error":"retention policy duration needs to be at least 1h0m0s"}]}`,
},
{
name: "Ensure database with default retention policy can be deleted",
@ -710,7 +817,7 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
if tt.queryDb != "" {
urlDb = tt.queryDb
}
got, ok := query(t, nodes, rewriteDbRp(urlDb, database, retention), rewriteDbRp(tt.query, database, retention), rewriteDbRp(tt.expected, database, retention))
got, ok := queryAndWait(t, nodes, rewriteDbRp(urlDb, database, retention), rewriteDbRp(tt.query, database, retention), rewriteDbRp(tt.expected, database, retention), 3*time.Second)
if !ok {
t.Errorf("Test \"%s\" failed\n exp: %s\n got: %s\n", name, rewriteDbRp(tt.expected, database, retention), got)
}
@ -728,7 +835,7 @@ func TestSingleServer(t *testing.T) {
os.RemoveAll(dir)
}()
nodes := createCombinedNodeCluster(t, testName, dir, 1, 8090)
nodes := createCombinedNodeCluster(t, testName, dir, 1, 8090, nil)
runTestsData(t, testName, nodes, "mydb", "myrp")
}
@ -744,7 +851,7 @@ func Test3NodeServer(t *testing.T) {
os.RemoveAll(dir)
}()
nodes := createCombinedNodeCluster(t, testName, dir, 3, 8190)
nodes := createCombinedNodeCluster(t, testName, dir, 3, 8190, nil)
runTestsData(t, testName, nodes, "mydb", "myrp")
}
@ -763,7 +870,7 @@ func TestClientLibrary(t *testing.T) {
retentionPolicy := "myrp"
now := time.Now().UTC()
nodes := createCombinedNodeCluster(t, testName, dir, 1, 8290)
nodes := createCombinedNodeCluster(t, testName, dir, 1, 8290, nil)
createDatabase(t, testName, nodes, database)
createRetentionPolicy(t, testName, nodes, database, retentionPolicy)
@ -834,6 +941,117 @@ func TestClientLibrary(t *testing.T) {
}
}
func Test_ServerSingleGraphiteIntegration(t *testing.T) {
if testing.Short() {
t.Skip()
}
nNodes := 1
basePort := 8390
testName := "graphite integration"
dir := tempfile()
now := time.Now().UTC().Round(time.Millisecond)
c := main.NewConfig()
g := main.Graphite{
Enabled: true,
Database: "graphite",
Protocol: "TCP",
}
c.Graphites = append(c.Graphites, g)
t.Logf("Graphite Connection String: %s\n", g.ConnectionString(c.BindAddress))
nodes := createCombinedNodeCluster(t, testName, dir, nNodes, basePort, c)
createDatabase(t, testName, nodes, "graphite")
createRetentionPolicy(t, testName, nodes, "graphite", "raw")
// Connect to the graphite endpoint we just spun up
conn, err := net.Dial("tcp", g.ConnectionString(c.BindAddress))
if err != nil {
t.Fatal(err)
return
}
t.Log("Writing data")
data := []byte(`cpu 23.456 `)
data = append(data, []byte(fmt.Sprintf("%d", now.UnixNano()/1000000))...)
data = append(data, '\n')
_, err = conn.Write(data)
conn.Close()
if err != nil {
t.Fatal(err)
return
}
expected := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","cpu"],"values":[["%s",23.456]]}]}]}`, now.Format(time.RFC3339Nano))
// query and wait for results
got, ok := queryAndWait(t, nodes, "graphite", `select * from "graphite"."raw".cpu`, expected, 2*time.Second)
if !ok {
t.Errorf(`Test "%s" failed, expected: %s, got: %s`, testName, expected, got)
}
}
func Test_ServerSingleGraphiteIntegration_NoDatabase(t *testing.T) {
if testing.Short() {
t.Skip()
}
nNodes := 1
basePort := 8490
testName := "graphite integration"
dir := tempfile()
now := time.Now().UTC().Round(time.Millisecond)
c := main.NewConfig()
g := main.Graphite{
Enabled: true,
Port: 2103,
Protocol: "TCP",
}
c.Graphites = append(c.Graphites, g)
c.Logging.WriteTracing = true
t.Logf("Graphite Connection String: %s\n", g.ConnectionString(c.BindAddress))
nodes := createCombinedNodeCluster(t, testName, dir, nNodes, basePort, c)
// Connect to the graphite endpoint we just spun up
conn, err := net.Dial("tcp", g.ConnectionString(c.BindAddress))
if err != nil {
t.Fatal(err)
return
}
// Need to wait for the database to be created
expected := `{"results":[{"series":[{"columns":["name"],"values":[["graphite"]]}]}]}`
got, ok := queryAndWait(t, nodes, "graphite", `show databases`, expected, 2*time.Second)
if !ok {
t.Errorf(`Test "%s" failed, expected: %s, got: %s`, testName, expected, got)
}
// Need to wait for the database to get a default retention policy
expected = `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,true]]}]}]}`
got, ok = queryAndWait(t, nodes, "graphite", `show retention policies graphite`, expected, 2*time.Second)
if !ok {
t.Errorf(`Test "%s" failed, expected: %s, got: %s`, testName, expected, got)
}
t.Log("Writing data")
data := []byte(`cpu 23.456 `)
data = append(data, []byte(fmt.Sprintf("%d", now.UnixNano()/1000000))...)
data = append(data, '\n')
_, err = conn.Write(data)
conn.Close()
if err != nil {
t.Fatal(err)
return
}
// Wait for data to show up
expected = fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","cpu"],"values":[["%s",23.456]]}]}]}`, now.Format(time.RFC3339Nano))
got, ok = queryAndWait(t, nodes, "graphite", `select * from "graphite"."default".cpu`, expected, 2*time.Second)
if !ok {
t.Errorf(`Test "%s" failed, expected: %s, got: %s`, testName, expected, got)
}
}
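Both integration tests write a single metric in the Graphite plaintext format, `<name> <value> <timestamp>\n`, using a millisecond-resolution timestamp that matches the RFC3339Nano time expected back from the query. A minimal sender sketch following the same wire format as the tests; the endpoint address is hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	addr := "localhost:2003" // hypothetical Graphite endpoint
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Same wire format the tests build: "cpu 23.456 <unix-ms>\n".
	now := time.Now().UTC().Round(time.Millisecond)
	line := fmt.Sprintf("cpu 23.456 %d\n", now.UnixNano()/1000000)
	if _, err := conn.Write([]byte(line)); err != nil {
		log.Fatal(err)
	}
}
```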
// helper funcs
func errToString(err error) string {
@ -849,4 +1067,5 @@ func mustMarshalJSON(v interface{}) string {
panic(e)
}
return string(b)
}

View File

@ -16,6 +16,9 @@ const (
// DefaultGraphiteNameSeparator represents the default Graphite field separator.
DefaultGraphiteNameSeparator = "."
// DefaultDatabaseName is the default database that is created if none is specified
DefaultDatabaseName = "graphite"
)
var (
@ -26,16 +29,16 @@ var (
// ErrServerClosed is returned when closing an already closed graphite server.
ErrServerClosed = errors.New("server already closed")
// ErrDatabaseNotSpecified retuned when no database was specified in the config file
ErrDatabaseNotSpecified = errors.New("database was not specified in config")
// ErrServerNotSpecified returned when Server is not specified.
ErrServerNotSpecified = errors.New("server not present")
)
// SeriesWriter defines the interface for the destination of the data.
type SeriesWriter interface {
WriteSeries(database, retentionPolicy string, points []influxdb.Point) (uint64, error)
type Server interface {
WriteSeries(string, string, []influxdb.Point) (uint64, error)
CreateDatabase(string) error
CreateRetentionPolicy(string, *influxdb.RetentionPolicy) error
DatabaseExists(string) bool
}
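Widening `SeriesWriter` into this four-method `Server` interface is what lets the Graphite listeners create the `graphite` database on their own instead of failing when none is configured. A hypothetical in-memory fake, the kind of test double a unit test for the auto-create path might use; none of these names come from the commit:

```go
package graphite_test

import (
	"github.com/influxdb/influxdb"
)

// fakeServer is an illustrative in-memory stand-in for the Server interface.
type fakeServer struct {
	databases map[string]bool
	points    []influxdb.Point
}

func newFakeServer() *fakeServer {
	return &fakeServer{databases: make(map[string]bool)}
}

func (f *fakeServer) WriteSeries(db, rp string, p []influxdb.Point) (uint64, error) {
	f.points = append(f.points, p...) // record everything written
	return 0, nil
}

func (f *fakeServer) CreateDatabase(name string) error {
	f.databases[name] = true
	return nil
}

func (f *fakeServer) CreateRetentionPolicy(db string, rp *influxdb.RetentionPolicy) error {
	return nil
}

func (f *fakeServer) DatabaseExists(name string) bool {
	return f.databases[name]
}
```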
// Parser encapsulates a Graphite Parser.

View File

@ -2,6 +2,7 @@ package graphite
import (
"bufio"
"io"
"log"
"net"
"strings"
@ -11,27 +12,40 @@ import (
// TCPServer processes Graphite data received over TCP connections.
type TCPServer struct {
writer SeriesWriter
server Server
parser *Parser
Database string
Logger *log.Logger
}
// NewTCPServer returns a new instance of a TCPServer.
func NewTCPServer(p *Parser, w SeriesWriter) *TCPServer {
func NewTCPServer(p *Parser, s Server) *TCPServer {
return &TCPServer{
parser: p,
writer: w,
server: s,
}
}
// SetLogOutput sets the writer for all Graphite log output.
func (s *TCPServer) SetLogOutput(w io.Writer) {
s.Logger = log.New(w, "[graphite] ", log.LstdFlags)
}
// ListenAndServe instructs the TCPServer to start processing Graphite data
// on the given interface. iface must be in the form host:port
func (t *TCPServer) ListenAndServe(iface string) error {
if iface == "" { // Make sure we have an address
return ErrBindAddressRequired
} else if t.Database == "" { // Make sure they have a database
return ErrDatabaseNotSpecified
} else if t.Database == "" {
// If they didn't specify a database, create one and set a default retention policy.
if !t.server.DatabaseExists(DefaultDatabaseName) {
t.Logger.Printf("default database %q does not exist. creating.\n", DefaultDatabaseName)
if e := t.server.CreateDatabase(DefaultDatabaseName); e != nil {
return e
}
t.Database = DefaultDatabaseName
}
}
ln, err := net.Listen("tcp", iface)
@ -42,7 +56,7 @@ func (t *TCPServer) ListenAndServe(iface string) error {
for {
conn, err := ln.Accept()
if err != nil {
log.Println("error accepting TCP connection", err.Error())
t.Logger.Println("error accepting TCP connection", err.Error())
continue
}
go t.handleConnection(conn)
@ -69,11 +83,14 @@ func (t *TCPServer) handleConnection(conn net.Conn) {
// Parse it.
point, err := t.parser.Parse(line)
if err != nil {
log.Printf("unable to parse data: %s", err)
t.Logger.Printf("unable to parse data: %s", err)
continue
}
// Send the data to database
t.writer.WriteSeries(t.Database, "", []influxdb.Point{point})
_, e := t.server.WriteSeries(t.Database, "", []influxdb.Point{point})
if e != nil {
t.Logger.Printf("failed to write data point to database %q: %s\n", t.Database, e)
}
}
}

View File

@ -1,6 +1,8 @@
package graphite
import (
"io"
"log"
"net"
"strings"
@ -13,28 +15,41 @@ const (
// UDPServer processes Graphite data received via UDP.
type UDPServer struct {
writer SeriesWriter
server Server
parser *Parser
Database string
Logger *log.Logger
}
// NewUDPServer returns a new instance of a UDPServer
func NewUDPServer(p *Parser, w SeriesWriter) *UDPServer {
func NewUDPServer(p *Parser, s Server) *UDPServer {
u := UDPServer{
parser: p,
writer: w,
server: s,
}
return &u
}
// SetLogOutput sets the writer for all Graphite log output.
func (s *UDPServer) SetLogOutput(w io.Writer) {
s.Logger = log.New(w, "[graphite] ", log.LstdFlags)
}
// ListenAndServe instructs the UDPServer to start processing Graphite data
// on the given interface. iface must be in the form host:port.
func (u *UDPServer) ListenAndServe(iface string) error {
if iface == "" { // Make sure we have an address
return ErrBindAddressRequired
} else if u.Database == "" { // Make sure they have a database
return ErrDatabaseNotSpecified
// If they didn't specify a database, create one and set a default retention policy.
if !u.server.DatabaseExists(DefaultDatabaseName) {
u.Logger.Printf("default database %q does not exist. creating.\n", DefaultDatabaseName)
if e := u.server.CreateDatabase(DefaultDatabaseName); e != nil {
return e
}
u.Database = DefaultDatabaseName
}
}
addr, err := net.ResolveUDPAddr("udp", iface)
@ -61,7 +76,10 @@ func (u *UDPServer) ListenAndServe(iface string) error {
}
// Send the data to database
u.writer.WriteSeries(u.Database, "", []influxdb.Point{point})
_, e := u.server.WriteSeries(u.Database, "", []influxdb.Point{point})
if e != nil {
u.Logger.Printf("failed to write data point: %s\n", e)
}
}
}
}()

View File

@ -278,7 +278,7 @@ func TestHandler_RetentionPolicies(t *testing.T) {
if status != http.StatusOK {
t.Fatalf("unexpected status: %d", status)
} else if body != `{"results":[{"series":[{"columns":["name","duration","replicaN"],"values":[["bar","168h0m0s",1]]}]}]}` {
} else if body != `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["bar","168h0m0s",1,false]]}]}]}` {
t.Fatalf("unexpected body: %s", body)
}
}

View File

@ -576,7 +576,7 @@ select_stmt = fields from_clause [ into_clause ] [ where_clause ]
```sql
-- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals
SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m);
SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m) fill(0);
```
## Clauses
@ -584,7 +584,7 @@ SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m);
```
from_clause = "FROM" measurements .
group_by_clause = "GROUP BY" dimensions .
group_by_clause = "GROUP BY" dimensions fill(<option>).
limit_clause = "LIMIT" int_lit .

View File

@ -71,6 +71,7 @@ func (*DropSeriesStatement) node() {}
func (*DropUserStatement) node() {}
func (*GrantStatement) node() {}
func (*ShowContinuousQueriesStatement) node() {}
func (*ShowServersStatement) node() {}
func (*ShowDatabasesStatement) node() {}
func (*ShowFieldKeysStatement) node() {}
func (*ShowRetentionPoliciesStatement) node() {}
@ -161,6 +162,7 @@ func (*DropSeriesStatement) stmt() {}
func (*DropUserStatement) stmt() {}
func (*GrantStatement) stmt() {}
func (*ShowContinuousQueriesStatement) stmt() {}
func (*ShowServersStatement) stmt() {}
func (*ShowDatabasesStatement) stmt() {}
func (*ShowFieldKeysStatement) stmt() {}
func (*ShowMeasurementsStatement) stmt() {}
@ -527,6 +529,19 @@ func (s *AlterRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges
return ExecutionPrivileges{{Name: "", Privilege: AllPrivileges}}
}
type FillOption int
const (
// NullFill means that empty aggregate windows will just have null values.
NullFill FillOption = iota
// NoFill means that empty aggregate windows will be purged from the result.
NoFill
// NumberFill means that empty aggregate windows will be filled with the given number.
NumberFill
// PreviousFill means that empty aggregate windows will be filled with whatever the previous aggregate window had.
PreviousFill
)
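A quick sketch of how the new `Fill`/`FillValue` fields surface after parsing, assuming the `influxql.NewParser(...).ParseStatement()` entry point used by the package's tests:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/influxdb/influxdb/influxql"
)

func main() {
	// Parse a SELECT with a fill() clause and inspect the new fields.
	q := `SELECT mean(value) FROM cpu GROUP BY time(5m) fill(1)`
	stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		panic(err)
	}
	s := stmt.(*influxql.SelectStatement)
	fmt.Println(s.Fill == influxql.NumberFill) // true
	fmt.Println(s.FillValue)                   // 1 (a float64, per the parser tests)
}
```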
// SelectStatement represents a command for extracting data from the database.
type SelectStatement struct {
// Expressions returned from the selection.
@ -564,6 +579,12 @@ type SelectStatement struct {
// if it's a query for raw data values (i.e. not an aggregate)
RawQuery bool
// What fill option the select statement uses, if any
Fill FillOption
// The value to fill empty aggregate buckets with, if any
FillValue interface{}
}
// Clone returns a deep copy of the statement.
@ -578,6 +599,8 @@ func (s *SelectStatement) Clone() *SelectStatement {
Offset: s.Offset,
SLimit: s.SLimit,
SOffset: s.SOffset,
Fill: s.Fill,
FillValue: s.FillValue,
}
if s.Target != nil {
other.Target = &Target{Measurement: s.Target.Measurement, Database: s.Target.Database}
@ -673,6 +696,14 @@ func (s *SelectStatement) String() string {
_, _ = buf.WriteString(" GROUP BY ")
_, _ = buf.WriteString(s.Dimensions.String())
}
switch s.Fill {
case NoFill:
_, _ = buf.WriteString(" fill(none)")
case NumberFill:
_, _ = buf.WriteString(fmt.Sprintf(" fill(%v)", s.FillValue))
case PreviousFill:
_, _ = buf.WriteString(" fill(previous)")
}
if len(s.SortFields) > 0 {
_, _ = buf.WriteString(" ORDER BY ")
_, _ = buf.WriteString(s.SortFields.String())
@ -1174,6 +1205,17 @@ func (s *ShowContinuousQueriesStatement) RequiredPrivileges() ExecutionPrivilege
return ExecutionPrivileges{{Name: "", Privilege: ReadPrivilege}}
}
// ShowServersStatement represents a command for listing all servers.
type ShowServersStatement struct{}
// String returns a string representation of the show servers command.
func (s *ShowServersStatement) String() string { return "SHOW SERVERS" }
// RequiredPrivileges returns the privilege required to execute a ShowServersStatement
func (s *ShowServersStatement) RequiredPrivileges() ExecutionPrivileges {
return ExecutionPrivileges{{Name: "", Privilege: AllPrivileges}}
}
// ShowDatabasesStatement represents a command for listing all databases in the cluster.
type ShowDatabasesStatement struct{}

View File

@ -373,6 +373,12 @@ func TestSelectStatement_RewriteWildcards(t *testing.T) {
rewrite: `SELECT value FROM cpu GROUP BY host, region, time(1m)`,
},
// GROUP BY wildcard with fill
{
stmt: `SELECT value FROM cpu GROUP BY *,time(1m) fill(0)`,
rewrite: `SELECT value FROM cpu GROUP BY host, region, time(1m) fill(0)`,
},
// GROUP BY wildcard with explicit
{
stmt: `SELECT value FROM cpu GROUP BY *,host`,

View File

@ -182,6 +182,9 @@ func (m *MapReduceJob) Execute(out chan *Row, filterEmptyResults bool) {
// processes the result values if there's any math in there
resultValues = m.processResults(resultValues)
// handle any fill options
resultValues = m.processFill(resultValues)
row := &Row{
Name: m.MeasurementName,
Tags: m.TagSet.Tags,
@ -226,6 +229,52 @@ func (m *MapReduceJob) processResults(results [][]interface{}) [][]interface{} {
return mathResults
}
// processFill will take the results and return new results (or the same if no fill modifications are needed) with whatever fill options the query has.
func (m *MapReduceJob) processFill(results [][]interface{}) [][]interface{} {
// don't do anything if it's raw query results or we're supposed to leave the nulls
if m.stmt.RawQuery || m.stmt.Fill == NullFill {
return results
}
if m.stmt.Fill == NoFill {
// remove any rows that have even one nil value. This one is tricky because they could have multiple
// aggregates, but this option means that any row that has even one nil gets purged.
newResults := make([][]interface{}, 0, len(results))
for _, vals := range results {
hasNil := false
// start at 1 because the first value is always time
for j := 1; j < len(vals); j++ {
if vals[j] == nil {
hasNil = true
break
}
}
if !hasNil {
newResults = append(newResults, vals)
}
}
return newResults
}
// they're either filling with previous values or a specific number
for i, vals := range results {
// start at 1 because the first value is always time
for j := 1; j < len(vals); j++ {
if vals[j] == nil {
switch m.stmt.Fill {
case PreviousFill:
if i != 0 {
vals[j] = results[i-1][j]
}
case NumberFill:
vals[j] = m.stmt.FillValue
}
}
}
}
return results
}
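Note that `processFill` treats column 0 as time, so only columns 1..n are candidates for filling, and `previous` reads from `results[i-1]`, which has itself already been filled. A standalone sketch of the same logic reduced to a single aggregate column; the names are illustrative, not the engine's API:

```go
package main

import "fmt"

// fill applies one of the three fill behaviors to rows of [time, value]
// pairs; mode is "none", "previous", or anything else for a number fill.
func fill(rows [][]interface{}, mode string, value interface{}) [][]interface{} {
	switch mode {
	case "none":
		// Purge rows whose aggregate value is nil.
		out := make([][]interface{}, 0, len(rows))
		for _, r := range rows {
			if r[1] != nil {
				out = append(out, r)
			}
		}
		return out
	case "previous":
		// Carry forward the (possibly already filled) previous value.
		for i := range rows {
			if rows[i][1] == nil && i > 0 {
				rows[i][1] = rows[i-1][1]
			}
		}
	default:
		// Number fill: substitute the given value for every nil.
		for i := range rows {
			if rows[i][1] == nil {
				rows[i][1] = value
			}
		}
	}
	return rows
}

func main() {
	rows := [][]interface{}{{"t0", 4}, {"t1", nil}, {"t2", 10}}
	fmt.Println(fill(rows, "previous", nil)) // [[t0 4] [t1 4] [t2 10]]
}
```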
func getProcessor(expr Expr, startIndex int) (processor, int) {
switch expr := expr.(type) {
case *VarRef:

View File

@ -101,6 +101,8 @@ func (p *Parser) parseShowStatement() (Statement, error) {
return p.parseShowContinuousQueriesStatement()
case DATABASES:
return p.parseShowDatabasesStatement()
case SERVERS:
return p.parseShowServersStatement()
case FIELD:
tok, pos, lit := p.scanIgnoreWhitespace()
if tok == KEYS {
@ -129,7 +131,7 @@ func (p *Parser) parseShowStatement() (Statement, error) {
return p.parseShowUsersStatement()
}
return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "DATABASES", "FIELD", "MEASUREMENTS", "RETENTION", "SERIES", "TAG", "USERS"}, pos)
return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "DATABASES", "FIELD", "MEASUREMENTS", "RETENTION", "SERIES", "SERVERS", "TAG", "USERS"}, pos)
}
// parseCreateStatement parses a string and returns a create statement.
@ -551,6 +553,11 @@ func (p *Parser) parseSelectStatement(tr targetRequirement) (*SelectStatement, e
return nil, err
}
// Parse fill options: "fill(<option>)"
if stmt.Fill, stmt.FillValue, err = p.parseFill(); err != nil {
return nil, err
}
// Parse sort: "ORDER BY FIELD+".
if stmt.SortFields, err = p.parseOrderBy(); err != nil {
return nil, err
@ -947,6 +954,13 @@ func (p *Parser) parseShowContinuousQueriesStatement() (*ShowContinuousQueriesSt
return stmt, nil
}
// parseShowServersStatement parses a string and returns a ShowServersStatement.
// This function assumes the "SHOW SERVERS" tokens have already been consumed.
func (p *Parser) parseShowServersStatement() (*ShowServersStatement, error) {
stmt := &ShowServersStatement{}
return stmt, nil
}
// parseShowDatabasesStatement parses a string and returns a ShowDatabasesStatement.
// This function assumes the "SHOW DATABASE" tokens have already been consumed.
func (p *Parser) parseShowDatabasesStatement() (*ShowDatabasesStatement, error) {
@ -1361,6 +1375,42 @@ func (p *Parser) parseDimension() (*Dimension, error) {
return &Dimension{Expr: expr}, nil
}
// parseFill parses the fill call and its options.
func (p *Parser) parseFill() (FillOption, interface{}, error) {
// Parse the expression first.
expr, err := p.ParseExpr()
if err != nil {
p.unscan()
return NullFill, nil, nil
}
if lit, ok := expr.(*Call); !ok {
p.unscan()
return NullFill, nil, nil
} else {
if lit.Name != "fill" {
p.unscan()
return NullFill, nil, nil
}
if len(lit.Args) != 1 {
return NullFill, nil, errors.New("fill requires an argument, e.g.: 0, null, none, previous")
}
switch lit.Args[0].String() {
case "null":
return NullFill, nil, nil
case "none":
return NoFill, nil, nil
case "previous":
return PreviousFill, nil, nil
default:
num, ok := lit.Args[0].(*NumberLiteral)
if !ok {
return NullFill, nil, fmt.Errorf("expected number argument in fill()")
}
return NumberFill, num.Val, nil
}
}
}
// parseOptionalTokenAndInt parses the specified token followed
// by an int, if it exists.
func (p *Parser) parseOptionalTokenAndInt(t Token) (int, error) {

View File

@ -170,6 +170,49 @@ func TestParser_ParseStatement(t *testing.T) {
},
},
// SELECT statement with fill
{
s: `SELECT mean(value) FROM cpu GROUP BY time(5m) fill(1)`,
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
Source: &influxql.Measurement{Name: "cpu"},
Dimensions: []*influxql.Dimension{
{Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 5 * time.Minute},
},
}},
},
Fill: influxql.NumberFill,
FillValue: float64(1),
},
},
// SELECT statement with previous fill
{
s: `SELECT mean(value) FROM cpu GROUP BY time(5m) fill(previous)`,
stmt: &influxql.SelectStatement{
Fields: []*influxql.Field{{
Expr: &influxql.Call{
Name: "mean",
Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}},
Source: &influxql.Measurement{Name: "cpu"},
Dimensions: []*influxql.Dimension{
{Expr: &influxql.Call{
Name: "time",
Args: []influxql.Expr{
&influxql.DurationLiteral{Val: 5 * time.Minute},
},
}},
},
Fill: influxql.PreviousFill,
},
},
// DELETE statement
{
s: `DELETE FROM myseries WHERE host = 'hosta.influxdb.org'`,
@ -183,6 +226,12 @@ func TestParser_ParseStatement(t *testing.T) {
},
},
// SHOW SERVERS
{
s: `SHOW SERVERS`,
stmt: &influxql.ShowServersStatement{},
},
// SHOW DATABASES
{
s: `SHOW DATABASES`,
@ -738,7 +787,7 @@ func TestParser_ParseStatement(t *testing.T) {
{s: `SHOW CONTINUOUS`, err: `found EOF, expected QUERIES at line 1, char 17`},
{s: `SHOW RETENTION`, err: `found EOF, expected POLICIES at line 1, char 16`},
{s: `SHOW RETENTION POLICIES`, err: `found EOF, expected identifier at line 1, char 25`},
{s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, FIELD, MEASUREMENTS, RETENTION, SERIES, TAG, USERS at line 1, char 6`},
{s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, FIELD, MEASUREMENTS, RETENTION, SERIES, SERVERS, TAG, USERS at line 1, char 6`},
{s: `DROP CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 17`},
{s: `DROP CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 23`},
{s: `CREATE CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 19`},

View File

@ -103,6 +103,7 @@ const (
REVOKE
SELECT
SERIES
SERVERS
SHOW
SLIMIT
SOFFSET
@ -203,6 +204,7 @@ var tokens = [...]string{
REVOKE: "REVOKE",
SELECT: "SELECT",
SERIES: "SERIES",
SERVERS: "SERVERS",
SHOW: "SHOW",
SLIMIT: "SLIMIT",
SOFFSET: "SOFFSET",

View File

@ -770,10 +770,11 @@ func (s *Server) applyCreateDatabase(m *messaging.Message) (err error) {
if s.RetentionAutoCreate {
// Create the default retention policy.
db.policies[c.Name] = &RetentionPolicy{
Name: DefaultRetentionPolicyName,
Duration: 0,
ReplicaN: 1,
db.policies[DefaultRetentionPolicyName] = &RetentionPolicy{
Name: DefaultRetentionPolicyName,
Duration: 0,
ShardGroupDuration: calculateShardGroupDuration(0),
ReplicaN: 1,
}
db.defaultRetentionPolicy = DefaultRetentionPolicyName
s.Logger.Printf("retention policy '%s' auto-created for database '%s'", DefaultRetentionPolicyName, c.Name)
@ -1262,36 +1263,37 @@ func (s *Server) RetentionPolicies(database string) ([]*RetentionPolicy, error)
// CreateRetentionPolicy creates a retention policy for a database.
func (s *Server) CreateRetentionPolicy(database string, rp *RetentionPolicy) error {
// Enforce duration of at least retentionPolicyMinDuration
if rp.Duration < retentionPolicyMinDuration {
if rp.Duration < retentionPolicyMinDuration && rp.Duration != 0 {
return ErrRetentionPolicyMinDuration
}
const (
day = time.Hour * 24
month = day * 30
)
var sgd time.Duration
switch {
case rp.Duration > 6*month || rp.Duration == 0:
sgd = 7 * day
case rp.Duration > 2*day:
sgd = 1 * day
default:
sgd = 1 * time.Hour
}
c := &createRetentionPolicyCommand{
Database: database,
Name: rp.Name,
Duration: rp.Duration,
ShardGroupDuration: sgd,
ShardGroupDuration: calculateShardGroupDuration(rp.Duration),
ReplicaN: rp.ReplicaN,
}
_, err := s.broadcast(createRetentionPolicyMessageType, c)
return err
}
func calculateShardGroupDuration(d time.Duration) time.Duration {
const (
day = time.Hour * 24
month = day * 30
)
switch {
case d > 6*month || d == 0:
return 7 * day
case d > 2*day:
return 1 * day
default:
return 1 * time.Hour
}
}
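Extracting `calculateShardGroupDuration` lets the auto-created default policy (duration 0, i.e. infinite) get the same 7-day shard groups as an explicitly created infinite policy. The tiers it encodes, restated as a self-contained sketch:

```go
package main

import (
	"fmt"
	"time"
)

// Mirrors calculateShardGroupDuration above: infinite (0) or >6-month
// retention gets 7-day shard groups, >2-day retention gets 1-day groups,
// and anything shorter gets 1-hour groups.
func shardGroupDuration(d time.Duration) time.Duration {
	const (
		day   = 24 * time.Hour
		month = 30 * day
	)
	switch {
	case d == 0 || d > 6*month:
		return 7 * day
	case d > 2*day:
		return day
	default:
		return time.Hour
	}
}

func main() {
	fmt.Println(shardGroupDuration(0))                   // 168h0m0s
	fmt.Println(shardGroupDuration(30 * 24 * time.Hour)) // 24h0m0s
	fmt.Println(shardGroupDuration(90 * time.Minute))    // 1h0m0s
}
```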
func (s *Server) applyCreateRetentionPolicy(m *messaging.Message) error {
var c createRetentionPolicyCommand
mustUnmarshalJSON(m.Data, &c)
@ -1333,7 +1335,7 @@ type RetentionPolicyUpdate struct {
// UpdateRetentionPolicy updates an existing retention policy on a database.
func (s *Server) UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate) error {
// Enforce duration of at least retentionPolicyMinDuration
if *rpu.Duration < retentionPolicyMinDuration {
if *rpu.Duration < retentionPolicyMinDuration && *rpu.Duration != 0 {
return ErrRetentionPolicyMinDuration
}
@ -1895,6 +1897,8 @@ func (s *Server) ExecuteQuery(q *influxql.Query, database string, user *User) Re
res = s.executeDropDatabaseStatement(stmt, user)
case *influxql.ShowDatabasesStatement:
res = s.executeShowDatabasesStatement(stmt, user)
case *influxql.ShowServersStatement:
res = s.executeShowServersStatement(stmt, user)
case *influxql.CreateUserStatement:
res = s.executeCreateUserStatement(stmt, user)
case *influxql.DropUserStatement:
@ -2044,6 +2048,14 @@ func (s *Server) executeShowDatabasesStatement(q *influxql.ShowDatabasesStatemen
return &Result{Series: []*influxql.Row{row}}
}
func (s *Server) executeShowServersStatement(q *influxql.ShowServersStatement, user *User) *Result {
row := &influxql.Row{Columns: []string{"id", "url"}}
for _, node := range s.DataNodes() {
row.Values = append(row.Values, []interface{}{node.ID, node.URL.String()})
}
return &Result{Series: []*influxql.Row{row}}
}
func (s *Server) executeCreateUserStatement(q *influxql.CreateUserStatement, user *User) *Result {
isAdmin := false
if q.Privilege != nil {
@ -2593,9 +2605,11 @@ func (s *Server) executeShowRetentionPoliciesStatement(q *influxql.ShowRetention
return &Result{Err: err}
}
row := &influxql.Row{Columns: []string{"name", "duration", "replicaN"}}
d := s.databases[q.Database]
row := &influxql.Row{Columns: []string{"name", "duration", "replicaN", "default"}}
for _, rp := range a {
row.Values = append(row.Values, []interface{}{rp.Name, rp.Duration.String(), rp.ReplicaN})
row.Values = append(row.Values, []interface{}{rp.Name, rp.Duration.String(), rp.ReplicaN, d.defaultRetentionPolicy == rp.Name})
}
return &Result{Series: []*influxql.Row{row}}
}

View File

@ -594,6 +594,71 @@ func TestServer_CreateRetentionPolicy(t *testing.T) {
}
}
// Ensure the database can create a new retention policy with infinite duration.
func TestServer_CreateRetentionPolicyInfinite(t *testing.T) {
s := OpenServer(NewMessagingClient())
defer s.Close()
// Create a database.
if err := s.CreateDatabase("foo"); err != nil {
t.Fatal(err)
}
// Create a retention policy on the database.
rp := &influxdb.RetentionPolicy{
Name: "bar",
Duration: 0,
ShardGroupDuration: time.Hour * 24 * 7,
ReplicaN: 2,
}
if err := s.CreateRetentionPolicy("foo", rp); err != nil {
t.Fatal(err)
}
s.Restart()
// Verify that the policy exists.
if o, err := s.RetentionPolicy("foo", "bar"); err != nil {
t.Fatalf("unexpected error: %s", err)
} else if o == nil {
t.Fatalf("retention policy not found")
} else if !reflect.DeepEqual(rp, o) {
t.Logf("expected: %#v\n", rp)
t.Fatalf("retention policy mismatch: %#v", o)
}
}
// Ensure the database can create a default retention policy.
func TestServer_CreateRetentionPolicyDefault(t *testing.T) {
s := OpenServer(NewMessagingClient())
defer s.Close()
s.RetentionAutoCreate = true
// Create a database.
if err := s.CreateDatabase("foo"); err != nil {
t.Fatal(err)
}
s.Restart()
rp := &influxdb.RetentionPolicy{
Name: "default",
Duration: 0,
ShardGroupDuration: time.Hour * 24 * 7,
ReplicaN: 1,
}
// Verify that the policy exists.
if o, err := s.RetentionPolicy("foo", "default"); err != nil {
t.Fatalf("unexpected error: %s", err)
} else if o == nil {
t.Fatalf("retention policy not found")
} else if !reflect.DeepEqual(rp, o) {
t.Logf("expected: %#v\n", rp)
t.Fatalf("retention policy mismatch: %#v", o)
}
}
// Ensure the server returns an error when creating a retention policy with an invalid db.
func TestServer_CreateRetentionPolicy_ErrDatabaseNotFound(t *testing.T) {
c := test.NewMessagingClient()
@ -706,6 +771,25 @@ func TestServer_AlterRetentionPolicy(t *testing.T) {
} else if o.ReplicaN != *rp2.ReplicaN {
t.Fatalf("retention policy mismatch:\n\texp ReplicaN = %d\n\tgot ReplicaN = %d\n", rp2.ReplicaN, o.ReplicaN)
}
// set duration to infinite to catch edge case.
duration = 0
results = s.ExecuteQuery(MustParseQuery(`ALTER RETENTION POLICY bar ON foo DURATION INF`), "foo", nil)
if results.Error() != nil {
t.Fatalf("unexpected error: %s", results.Error())
}
// Verify results
if o, err := s.RetentionPolicy("foo", "bar"); err != nil {
t.Fatalf("unexpected error: %s", err)
} else if o == nil {
t.Fatalf("retention policy not found")
} else if o.Duration != duration {
t.Fatalf("retention policy mismatch:\n\texp Duration = %s\n\tgot Duration = %s\n", duration, o.Duration)
} else if o.ReplicaN != *rp2.ReplicaN {
t.Fatalf("retention policy mismatch:\n\texp ReplicaN = %d\n\tgot ReplicaN = %d\n", rp2.ReplicaN, o.ReplicaN)
}
}
// Ensure the server returns an error if trying to alter a retention policy with a duration that is too small.