2015-02-02 21:01:35 +00:00
|
|
|
package main_test
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
|
|
|
"io/ioutil"
|
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2015-02-09 23:16:25 +00:00
|
|
|
"strconv"
|
2015-02-28 02:05:30 +00:00
|
|
|
"strings"
|
2015-02-02 21:01:35 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2015-02-10 00:35:28 +00:00
|
|
|
"github.com/influxdb/influxdb"
|
|
|
|
"github.com/influxdb/influxdb/messaging"
|
2015-02-02 21:01:35 +00:00
|
|
|
|
|
|
|
main "github.com/influxdb/influxdb/cmd/influxd"
|
|
|
|
)
|
|
|
|
|
2015-02-21 19:54:05 +00:00
|
|
|
const (
|
|
|
|
// Use a prime batch size, so that internal batching code, which most likely
|
|
|
|
// uses nice round batches, has to deal with leftover.
|
|
|
|
batchSize = 4217
|
|
|
|
)
|
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
// tempfile returns a temporary path that does not yet exist on disk.
// The file created to reserve the name is removed before returning, so the
// caller can use the path for a fresh directory or file.
func tempfile() string {
	f, err := ioutil.TempFile("", "influxdb-")
	if err != nil {
		// The original ignored this error and would nil-deref below; without a
		// usable temp location no test can proceed, so fail loudly instead.
		panic(err)
	}
	path := f.Name()
	f.Close()
	os.Remove(path)
	return path
}
|
|
|
|
|
2015-02-10 03:58:17 +00:00
|
|
|
// urlFor returns a URL with the path and query params correctly appended and set.
|
|
|
|
func urlFor(u *url.URL, path string, params url.Values) *url.URL {
|
2015-02-26 00:01:06 +00:00
|
|
|
v, _ := url.Parse(u.String())
|
|
|
|
v.Path = path
|
|
|
|
v.RawQuery = params.Encode()
|
|
|
|
return v
|
2015-02-10 03:58:17 +00:00
|
|
|
}
|
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
// rewriteDbRp returns a copy of old with every occurrence of %DB% replaced by
// the given database, and every occurrence of %RP% replaced by the given
// retention policy name.
func rewriteDbRp(old, database, retention string) string {
	// Replace %DB% first, then %RP%, matching the original nested-call order.
	s := strings.Replace(old, "%DB%", database, -1)
	s = strings.Replace(s, "%RP%", retention, -1)
	return s
}
|
|
|
|
|
|
|
|
// Node represents a node under test, which is both a broker and data node.
type Node struct {
	broker *messaging.Broker // messaging broker half of the combined node
	server *influxdb.Server  // data-node server half of the combined node
	url    *url.URL          // base HTTP URL (scheme + host:port) for API calls
	leader bool              // true only for the first node created in a cluster
}
|
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
// Cluster represents a multi-node cluster. The first element is, by
// construction in createCombinedNodeCluster, the leader node.
type Cluster []*Node
|
2015-02-20 23:11:51 +00:00
|
|
|
|
2015-02-09 23:16:25 +00:00
|
|
|
// createCombinedNodeCluster creates a cluster of nServers nodes, each of which
|
|
|
|
// runs as both a Broker and Data node. If any part cluster creation fails,
|
|
|
|
// the testing is marked as failed.
|
2015-02-10 00:35:28 +00:00
|
|
|
//
|
|
|
|
// This function returns a slice of nodes, the first of which will be the leader.
|
2015-02-28 02:05:30 +00:00
|
|
|
func createCombinedNodeCluster(t *testing.T, testName, tmpDir string, nNodes, basePort int) Cluster {
|
2015-02-09 23:46:55 +00:00
|
|
|
t.Logf("Creating cluster of %d nodes for test %s", nNodes, testName)
|
|
|
|
if nNodes < 1 {
|
2015-02-09 23:51:10 +00:00
|
|
|
t.Fatalf("Test %s: asked to create nonsense cluster", testName)
|
2015-02-09 23:16:25 +00:00
|
|
|
}
|
2015-02-02 21:01:35 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
nodes := make([]*Node, 0)
|
2015-02-10 00:35:28 +00:00
|
|
|
|
2015-02-09 23:16:25 +00:00
|
|
|
tmpBrokerDir := filepath.Join(tmpDir, "broker-integration-test")
|
|
|
|
tmpDataDir := filepath.Join(tmpDir, "data-integration-test")
|
2015-02-09 23:27:59 +00:00
|
|
|
t.Logf("Test %s: using tmp directory %q for brokers\n", testName, tmpBrokerDir)
|
|
|
|
t.Logf("Test %s: using tmp directory %q for data nodes\n", testName, tmpDataDir)
|
2015-02-09 23:16:25 +00:00
|
|
|
// Sometimes if a test fails, it's because of a log.Fatal() in the program.
|
2015-02-05 21:06:18 +00:00
|
|
|
// This prevents the defer from cleaning up directories.
|
|
|
|
// To be safe, nuke them always before starting
|
|
|
|
_ = os.RemoveAll(tmpBrokerDir)
|
|
|
|
_ = os.RemoveAll(tmpDataDir)
|
2015-02-02 21:01:35 +00:00
|
|
|
|
2015-02-09 23:16:25 +00:00
|
|
|
// Create the first node, special case.
|
2015-02-02 21:01:35 +00:00
|
|
|
c := main.NewConfig()
|
2015-02-09 23:16:25 +00:00
|
|
|
c.Broker.Dir = filepath.Join(tmpBrokerDir, strconv.Itoa(basePort))
|
|
|
|
c.Data.Dir = filepath.Join(tmpDataDir, strconv.Itoa(basePort))
|
|
|
|
c.Broker.Port = basePort
|
|
|
|
c.Data.Port = basePort
|
2015-02-12 00:33:21 +00:00
|
|
|
c.Admin.Enabled = false
|
2015-02-12 19:23:10 +00:00
|
|
|
c.ReportingDisabled = true
|
2015-02-09 23:40:21 +00:00
|
|
|
|
2015-02-10 00:35:28 +00:00
|
|
|
b, s := main.Run(c, "", "x.x", os.Stderr)
|
|
|
|
if b == nil {
|
|
|
|
t.Fatalf("Test %s: failed to create broker on port %d", testName, basePort)
|
|
|
|
}
|
2015-02-09 23:16:25 +00:00
|
|
|
if s == nil {
|
2015-02-10 00:35:28 +00:00
|
|
|
t.Fatalf("Test %s: failed to create leader data node on port %d", testName, basePort)
|
2015-02-09 23:40:21 +00:00
|
|
|
}
|
2015-02-28 02:05:30 +00:00
|
|
|
nodes = append(nodes, &Node{
|
2015-02-10 01:55:07 +00:00
|
|
|
broker: b,
|
|
|
|
server: s,
|
|
|
|
url: &url.URL{Scheme: "http", Host: "localhost:" + strconv.Itoa(basePort)},
|
2015-02-10 01:59:19 +00:00
|
|
|
leader: true,
|
2015-02-10 01:55:07 +00:00
|
|
|
})
|
2015-02-09 23:40:21 +00:00
|
|
|
|
|
|
|
// Create subsequent nodes, which join to first node.
|
2015-02-09 23:46:55 +00:00
|
|
|
for i := 1; i < nNodes; i++ {
|
2015-02-09 23:40:21 +00:00
|
|
|
nextPort := basePort + i
|
|
|
|
c.Broker.Dir = filepath.Join(tmpBrokerDir, strconv.Itoa(nextPort))
|
|
|
|
c.Data.Dir = filepath.Join(tmpDataDir, strconv.Itoa(nextPort))
|
|
|
|
c.Broker.Port = nextPort
|
|
|
|
c.Data.Port = nextPort
|
|
|
|
|
2015-02-10 00:35:28 +00:00
|
|
|
b, s := main.Run(c, "http://localhost:"+strconv.Itoa(basePort), "x.x", os.Stderr)
|
|
|
|
if b == nil {
|
|
|
|
t.Fatalf("Test %s: failed to create following broker on port %d", testName, basePort)
|
|
|
|
}
|
2015-02-09 23:40:21 +00:00
|
|
|
if s == nil {
|
2015-02-10 00:35:28 +00:00
|
|
|
t.Fatalf("Test %s: failed to create following data node on port %d", testName, basePort)
|
2015-02-09 23:40:21 +00:00
|
|
|
}
|
2015-02-10 01:55:07 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
nodes = append(nodes, &Node{
|
2015-02-10 01:55:07 +00:00
|
|
|
broker: b,
|
|
|
|
server: s,
|
|
|
|
url: &url.URL{Scheme: "http", Host: "localhost:" + strconv.Itoa(nextPort)},
|
|
|
|
})
|
2015-02-09 23:16:25 +00:00
|
|
|
}
|
2015-02-10 00:35:28 +00:00
|
|
|
|
|
|
|
return nodes
|
2015-02-09 23:16:25 +00:00
|
|
|
}
|
2015-02-02 21:01:35 +00:00
|
|
|
|
2015-02-10 01:37:17 +00:00
|
|
|
// createDatabase creates a database, and verifies that the creation was successful.
|
2015-02-28 02:05:30 +00:00
|
|
|
func createDatabase(t *testing.T, testName string, nodes Cluster, database string) {
|
2015-02-10 01:37:17 +00:00
|
|
|
t.Logf("Test: %s: creating database %s", testName, database)
|
2015-02-28 02:05:30 +00:00
|
|
|
query(t, nodes[:1], "CREATE DATABASE "+database, `{"results":[{}]}`)
|
2015-02-10 01:37:17 +00:00
|
|
|
}
|
2015-02-02 21:01:35 +00:00
|
|
|
|
2015-02-10 01:37:17 +00:00
|
|
|
// createRetentionPolicy creates a retetention policy and verifies that the creation was successful.
|
2015-02-28 02:05:30 +00:00
|
|
|
// Replication factor is set to equal the number nodes in the cluster.
|
|
|
|
func createRetentionPolicy(t *testing.T, testName string, nodes Cluster, database, retention string) {
|
|
|
|
t.Logf("Creating retention policy %s for database %s", retention, database)
|
|
|
|
command := fmt.Sprintf("CREATE RETENTION POLICY %s ON %s DURATION 1h REPLICATION %d DEFAULT", retention, database, len(nodes))
|
|
|
|
query(t, nodes[:1], command, `{"results":[{}]}`)
|
|
|
|
}
|
2015-02-02 21:01:35 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
// deleteDatabase delete a database, and verifies that the deletion was successful.
|
|
|
|
func deleteDatabase(t *testing.T, testName string, nodes Cluster, database string) {
|
|
|
|
t.Logf("Test: %s: deleting database %s", testName, database)
|
|
|
|
query(t, nodes[:1], "DROP DATABASE "+database, `{"results":[{}]}`)
|
2015-02-10 01:37:17 +00:00
|
|
|
}
|
2015-02-02 21:01:35 +00:00
|
|
|
|
2015-02-10 03:38:06 +00:00
|
|
|
// writes writes the provided data to the cluster. It verfies that a 200 OK is returned by the server.
|
2015-02-28 02:05:30 +00:00
|
|
|
func write(t *testing.T, node *Node, data string) {
|
|
|
|
u := urlFor(node.url, "write", url.Values{})
|
2015-02-02 21:01:35 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
resp, err := http.Post(u.String(), "application/json", bytes.NewReader([]byte(data)))
|
2015-02-02 21:01:35 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Couldn't write data: %s", err)
|
|
|
|
}
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
2015-02-28 02:05:30 +00:00
|
|
|
body, _ := ioutil.ReadAll(resp.Body)
|
|
|
|
t.Fatalf("Write to database failed. Unexpected status code. expected: %d, actual %d, %s", http.StatusOK, resp.StatusCode, string(body))
|
2015-02-02 21:01:35 +00:00
|
|
|
}
|
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
// Until races are solved.
|
|
|
|
time.Sleep(3 * time.Second)
|
2015-02-10 03:38:06 +00:00
|
|
|
}
|
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
// query executes the given query against all nodes in the cluster, and verifies no errors occured, and
|
|
|
|
// ensures the returned data is as expected
|
|
|
|
func query(t *testing.T, nodes Cluster, query, expected string) (string, bool) {
|
2015-02-02 21:01:35 +00:00
|
|
|
// Query the data exists
|
2015-02-10 03:56:54 +00:00
|
|
|
for _, n := range nodes {
|
2015-02-26 00:35:06 +00:00
|
|
|
u := urlFor(n.url, "query", url.Values{"q": []string{query}})
|
2015-02-10 03:56:54 +00:00
|
|
|
resp, err := http.Get(u.String())
|
|
|
|
if err != nil {
|
2015-02-28 02:05:30 +00:00
|
|
|
t.Fatalf("Failed to execute query '%s': %s", query, err.Error())
|
2015-02-10 03:56:54 +00:00
|
|
|
}
|
|
|
|
defer resp.Body.Close()
|
2015-02-02 21:01:35 +00:00
|
|
|
|
2015-02-10 03:56:54 +00:00
|
|
|
body, err := ioutil.ReadAll(resp.Body)
|
|
|
|
if err != nil {
|
2015-02-28 02:05:30 +00:00
|
|
|
t.Fatalf("Couldn't read body of response: %s", err.Error())
|
2015-02-10 03:56:54 +00:00
|
|
|
}
|
2015-02-02 21:01:35 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
if expected != string(body) {
|
|
|
|
return string(body), false
|
2015-02-10 03:56:54 +00:00
|
|
|
}
|
2015-02-10 03:38:06 +00:00
|
|
|
}
|
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
return "", true
|
2015-02-23 18:01:21 +00:00
|
|
|
}
|
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
// runTests_Errors tests some basic error cases.
|
|
|
|
func runTests_Errors(t *testing.T, nodes Cluster) {
|
|
|
|
t.Logf("Running tests against %d-node cluster", len(nodes))
|
|
|
|
|
|
|
|
tests := []struct {
|
|
|
|
name string
|
|
|
|
write string // If equal to the empty string, no data is written.
|
|
|
|
query string // If equal to the blank string, no query is executed.
|
|
|
|
expected string // If 'query' is equal to the blank string, this is ignored.
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
name: "simple SELECT from non-existent database",
|
|
|
|
write: "",
|
|
|
|
query: `SELECT * FROM "qux"."bar".cpu`,
|
|
|
|
expected: `{"results":[{"error":"database not found: qux"}]}`,
|
|
|
|
},
|
|
|
|
}
|
2015-02-21 19:54:05 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
for _, tt := range tests {
|
|
|
|
if tt.write != "" {
|
|
|
|
write(t, nodes[0], tt.write)
|
2015-02-21 19:54:05 +00:00
|
|
|
}
|
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
if tt.query != "" {
|
|
|
|
got, ok := query(t, nodes, tt.query, tt.expected)
|
|
|
|
if !ok {
|
|
|
|
t.Errorf("Test '%s' failed, expected: %s, got: %s", tt.name, tt.expected, got)
|
|
|
|
}
|
2015-02-21 19:54:05 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
// runTests tests write and query of data.
|
|
|
|
func runTestsData(t *testing.T, testName string, nodes Cluster, database, retention string) {
|
|
|
|
t.Logf("Running tests against %d-node cluster", len(nodes))
|
|
|
|
|
|
|
|
// Start by ensuring database and retention policy exist.
|
|
|
|
createDatabase(t, testName, nodes, database)
|
|
|
|
createRetentionPolicy(t, testName, nodes, database, retention)
|
|
|
|
|
|
|
|
// The tests. Within these tests %DB% and %RP% will be replaced with the database and retention passed into
|
|
|
|
// this function.
|
|
|
|
tests := []struct {
|
|
|
|
reset bool // Delete and recreate the database.
|
|
|
|
name string // Test name, for easy-to-read test log output.
|
|
|
|
write string // If equal to the empty string, no data is written.
|
|
|
|
query string // If equal to the blank string, no query is executed.
|
|
|
|
expected string // If 'query' is equal to the blank string, this is ignored.
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
reset: true,
|
|
|
|
name: "single point with timestamp",
|
|
|
|
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 100}}]}`,
|
|
|
|
query: `SELECT * FROM "%DB%"."myrp".cpu`,
|
|
|
|
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "single point, select with now()",
|
|
|
|
query: `SELECT * FROM "%DB%"."%RP%".cpu WHERE time < now()`,
|
|
|
|
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`,
|
2015-02-02 21:01:35 +00:00
|
|
|
},
|
|
|
|
}
|
2015-02-09 23:27:59 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
for _, tt := range tests {
|
|
|
|
if tt.reset {
|
|
|
|
t.Logf(`reseting for test "%s"`, tt.name)
|
|
|
|
deleteDatabase(t, testName, nodes, database)
|
|
|
|
createDatabase(t, testName, nodes, database)
|
|
|
|
createRetentionPolicy(t, testName, nodes, database, retention)
|
2015-02-10 03:42:56 +00:00
|
|
|
}
|
2015-02-10 03:38:06 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
if tt.write != "" {
|
|
|
|
write(t, nodes[0], rewriteDbRp(tt.write, database, retention))
|
|
|
|
}
|
2015-02-09 23:46:55 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
if tt.query != "" {
|
|
|
|
got, ok := query(t, nodes, rewriteDbRp(tt.query, database, retention), rewriteDbRp(tt.expected, database, retention))
|
|
|
|
if !ok {
|
|
|
|
t.Errorf(`Test "%s" failed, expected: %s, got: %s`, tt.name, rewriteDbRp(tt.expected, database, retention), got)
|
|
|
|
}
|
|
|
|
}
|
2015-02-10 03:38:06 +00:00
|
|
|
}
|
2015-02-09 23:40:21 +00:00
|
|
|
}
|
2015-02-20 23:11:51 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
func TestSingleServer(t *testing.T) {
|
|
|
|
testName := "single server integration"
|
2015-02-20 23:11:51 +00:00
|
|
|
if testing.Short() {
|
2015-02-28 02:05:30 +00:00
|
|
|
t.Skip(fmt.Sprintf("skipping '%s'", testName))
|
2015-02-20 23:11:51 +00:00
|
|
|
}
|
2015-02-28 02:05:30 +00:00
|
|
|
dir := tempfile()
|
|
|
|
defer func() {
|
|
|
|
os.RemoveAll(dir)
|
|
|
|
}()
|
2015-02-21 20:07:19 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
nodes := createCombinedNodeCluster(t, testName, dir, 1, 8090)
|
2015-02-21 20:07:19 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
runTestsData(t, testName, nodes, "mydb", "myrp")
|
2015-02-21 20:07:19 +00:00
|
|
|
}
|
2015-02-21 20:21:34 +00:00
|
|
|
|
2015-02-28 02:05:30 +00:00
|
|
|
func Test3NodeServer(t *testing.T) {
|
|
|
|
testName := "3-node server integration"
|
2015-02-21 20:21:34 +00:00
|
|
|
if testing.Short() {
|
2015-02-28 02:05:30 +00:00
|
|
|
t.Skip(fmt.Sprintf("skipping '%s'", testName))
|
2015-02-21 20:21:34 +00:00
|
|
|
}
|
2015-02-28 02:05:30 +00:00
|
|
|
dir := tempfile()
|
|
|
|
defer func() {
|
|
|
|
os.RemoveAll(dir)
|
|
|
|
}()
|
|
|
|
|
|
|
|
nodes := createCombinedNodeCluster(t, testName, dir, 3, 8190)
|
|
|
|
|
|
|
|
runTestsData(t, testName, nodes, "mydb", "myrp")
|
2015-02-21 20:21:34 +00:00
|
|
|
}
|