Merge pull request #2405 from neonstalwart/time

change timestamp to time
Todd Persen 2015-05-11 12:38:00 -07:00
commit 730e8cdfd4
33 changed files with 1402 additions and 1402 deletions

@@ -46,7 +46,7 @@ curl -H "Content-Type: application/json" http://localhost:8086/write -d '
"retentionPolicy": "default",
"points": [
{
"timestamp": "2014-11-10T23:00:00Z",
"time": "2014-11-10T23:00:00Z",
"name": "cpu",
"tags": {
"region":"uswest",

@@ -84,12 +84,12 @@ The mechanism is to create one or more points and then create a batch aka *batch
and write these to a given database and series. A series is a combination of a
measurement (time/values) and a set of tags.
In this sample we will create a batch of 1,000 points. Each point has a timestamp and
In this sample we will create a batch of 1,000 points. Each point has a time and
a single value as well as 2 tags indicating a shape and color. We write these points
to a database called _square_holes_ using a measurement named _shapes_.
NOTE: In this example, we are specifically assigning timestamp, tags and precision
to each point. Alternatively, you can specify a timestamp, tags and precision at
NOTE: In this example, we are specifically assigning time, tags and precision
to each point. Alternatively, you can specify a time, tags and precision at
the batch point level that could be used as defaults if an associated point
does not provide these metrics.
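
A minimal sketch of those batch-level defaults, assuming the Point and BatchPoints types introduced later in this diff and the era's import path github.com/influxdb/influxdb/client (the database, measurement, and tag names follow the README example above; the field value is invented):

package main

import (
    "time"

    "github.com/influxdb/influxdb/client"
)

func main() {
    // Time and Precision set on the batch act as defaults for any
    // point that leaves them unset.
    bp := client.BatchPoints{
        Database:        "square_holes",
        RetentionPolicy: "default",
        Time:            time.Now(),
        Precision:       "s",
        Points: []client.Point{
            {
                Name:   "shapes",
                Tags:   map[string]string{"shape": "circle", "color": "red"},
                Fields: map[string]interface{}{"value": 42},
                // No Time or Precision here; the batch-level values apply.
            },
        },
    }
    _ = bp // in real code, pass bp to the client's Write method
}
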
@@ -117,7 +117,7 @@ func writePoints(con *client.Client) {
Fields: map[string]interface{}{
"value": rand.Intn(sampleSize),
},
Timestamp: time.Now(),
Time: time.Now(),
Precision: "s",
}
}

@@ -96,7 +96,7 @@ func ExampleClient_Write() {
Fields: map[string]interface{}{
"value": rand.Intn(sampleSize),
},
Timestamp: time.Now(),
Time: time.Now(),
Precision: "s",
}
}

@@ -302,13 +302,13 @@ func (r Response) Error() error {
}
// Point defines the fields that will be written to the database
// Name, Timestamp, and Fields are required
// Precision can be specified if the timestamp is in epoch format (integer).
// Name, Time, and Fields are required
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type Point struct {
Name string
Tags map[string]string
Timestamp time.Time
Time time.Time
Fields map[string]interface{}
Precision string
}
@@ -320,7 +320,7 @@ func (p *Point) MarshalJSON() ([]byte, error) {
point := struct {
Name string `json:"name,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Time string `json:"time,omitempty"`
Fields map[string]interface{} `json:"fields,omitempty"`
Precision string `json:"precision,omitempty"`
}{
@@ -330,8 +330,8 @@ func (p *Point) MarshalJSON() ([]byte, error) {
Precision: p.Precision,
}
// Let it omit empty if it's really zero
if !p.Timestamp.IsZero() {
point.Timestamp = p.Timestamp.UTC().Format(time.RFC3339Nano)
if !p.Time.IsZero() {
point.Time = p.Time.UTC().Format(time.RFC3339Nano)
}
return json.Marshal(&point)
}
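
To see this marshaling path in action — a short sketch, assuming the same client package; the printed JSON matches the expectation in the omitempty test further down:

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/influxdb/influxdb/client"
)

func main() {
    p := client.Point{
        Name:   "cpu",
        Fields: map[string]interface{}{"value": 1.1},
        Time:   time.Date(2015, 2, 28, 1, 3, 36, 703820946, time.UTC),
    }
    // MarshalJSON writes the "time" key in RFC3339Nano form and omits
    // it entirely when Time is the zero value.
    b, err := json.Marshal(&p)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
    // {"name":"cpu","time":"2015-02-28T01:03:36.703820946Z","fields":{"value":1.1}}
}
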
@@ -341,14 +341,14 @@ func (p *Point) UnmarshalJSON(b []byte) error {
var normal struct {
Name string `json:"name"`
Tags map[string]string `json:"tags"`
Timestamp time.Time `json:"timestamp"`
Time time.Time `json:"time"`
Precision string `json:"precision"`
Fields map[string]interface{} `json:"fields"`
}
var epoch struct {
Name string `json:"name"`
Tags map[string]string `json:"tags"`
Timestamp *int64 `json:"timestamp"`
Time *int64 `json:"time"`
Precision string `json:"precision"`
Fields map[string]interface{} `json:"fields"`
}
@@ -360,18 +360,18 @@ func (p *Point) UnmarshalJSON(b []byte) error {
if err = dec.Decode(&epoch); err != nil {
return err
}
// Convert from epoch to time.Time, but only if Timestamp
// Convert from epoch to time.Time, but only if Time
// was actually set.
var ts time.Time
if epoch.Timestamp != nil {
ts, err = EpochToTime(*epoch.Timestamp, epoch.Precision)
if epoch.Time != nil {
ts, err = EpochToTime(*epoch.Time, epoch.Precision)
if err != nil {
return err
}
}
p.Name = epoch.Name
p.Tags = epoch.Tags
p.Timestamp = ts
p.Time = ts
p.Precision = epoch.Precision
p.Fields = normalizeFields(epoch.Fields)
return nil
@@ -384,10 +384,10 @@ func (p *Point) UnmarshalJSON(b []byte) error {
if err := dec.Decode(&normal); err != nil {
return err
}
normal.Timestamp = SetPrecision(normal.Timestamp, normal.Precision)
normal.Time = SetPrecision(normal.Time, normal.Precision)
p.Name = normal.Name
p.Tags = normal.Tags
p.Timestamp = normal.Timestamp
p.Time = normal.Time
p.Precision = normal.Precision
p.Fields = normalizeFields(normal.Fields)
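
The two decode branches above, exercised side by side — a sketch mirroring the unmarshal tests below (the epoch value is invented):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/influxdb/influxdb/client"
)

func main() {
    // An integer "time" plus "precision" takes the epoch branch.
    var p1 client.Point
    if err := json.Unmarshal([]byte(`{"name":"cpu","time":1434055562,"precision":"s","fields":{"value":100}}`), &p1); err != nil {
        panic(err)
    }
    fmt.Println(p1.Time.UTC())

    // A string "time" fails the epoch decode and falls through to the
    // RFC3339 branch.
    var p2 client.Point
    if err := json.Unmarshal([]byte(`{"name":"cpu","time":"2015-02-28T01:03:36.703820946Z","fields":{"value":100}}`), &p2); err != nil {
        panic(err)
    }
    fmt.Println(p2.Time.UTC())
}
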
@@ -417,15 +417,15 @@ func normalizeFields(fields map[string]interface{}) map[string]interface{} {
// Database and Points are required
// If no retention policy is specified, it will use the database's default retention policy.
// If tags are specified, they will be "merged" with all points. If a point already has that tag, it is ignored.
// If timestamp is specified, it will be applied to any point with an empty timestamp.
// Precision can be specified if the timestamp is in epoch format (integer).
// If time is specified, it will be applied to any point with an empty time.
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type BatchPoints struct {
Points []Point `json:"points,omitempty"`
Database string `json:"database,omitempty"`
RetentionPolicy string `json:"retentionPolicy,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Timestamp time.Time `json:"timestamp,omitempty"`
Time time.Time `json:"time,omitempty"`
Precision string `json:"precision,omitempty"`
}
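
A companion sketch for the batch-level defaults documented above: decoding a write body whose single point omits "time" and relies on the batch value when applied (the database and tag names are invented):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/influxdb/influxdb/client"
)

func main() {
    data := []byte(`{
        "database": "mydb",
        "retentionPolicy": "default",
        "tags": {"datacenter": "us-east"},
        "time": "2015-02-28T01:03:36Z",
        "points": [{"name": "cpu", "fields": {"value": 100}}]
    }`)
    var bp client.BatchPoints
    if err := json.Unmarshal(data, &bp); err != nil {
        panic(err)
    }
    // bp.Time and bp.Tags are later merged into points that lack them.
    fmt.Println(bp.Time.UTC(), bp.Tags["datacenter"], len(bp.Points))
}
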
@@ -436,7 +436,7 @@ func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
Database string `json:"database"`
RetentionPolicy string `json:"retentionPolicy"`
Tags map[string]string `json:"tags"`
Timestamp time.Time `json:"timestamp"`
Time time.Time `json:"time"`
Precision string `json:"precision"`
}
var epoch struct {
@@ -444,7 +444,7 @@ func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
Database string `json:"database"`
RetentionPolicy string `json:"retentionPolicy"`
Tags map[string]string `json:"tags"`
Timestamp *int64 `json:"timestamp"`
Time *int64 `json:"time"`
Precision string `json:"precision"`
}
@@ -455,8 +455,8 @@ func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
}
// Convert from epoch to time.Time
var ts time.Time
if epoch.Timestamp != nil {
ts, err = EpochToTime(*epoch.Timestamp, epoch.Precision)
if epoch.Time != nil {
ts, err = EpochToTime(*epoch.Time, epoch.Precision)
if err != nil {
return err
}
@@ -465,7 +465,7 @@ func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
bp.Database = epoch.Database
bp.RetentionPolicy = epoch.RetentionPolicy
bp.Tags = epoch.Tags
bp.Timestamp = ts
bp.Time = ts
bp.Precision = epoch.Precision
return nil
}(); err == nil {
@@ -475,12 +475,12 @@ func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
if err := json.Unmarshal(b, &normal); err != nil {
return err
}
normal.Timestamp = SetPrecision(normal.Timestamp, normal.Precision)
normal.Time = SetPrecision(normal.Time, normal.Precision)
bp.Points = normal.Points
bp.Database = normal.Database
bp.RetentionPolicy = normal.RetentionPolicy
bp.Tags = normal.Tags
bp.Timestamp = normal.Timestamp
bp.Time = normal.Time
bp.Precision = normal.Precision
return nil

@@ -251,15 +251,15 @@ func TestPoint_UnmarshalEpoch(t *testing.T) {
for _, test := range tests {
t.Logf("testing %q\n", test.name)
data := []byte(fmt.Sprintf(`{"timestamp": %d, "precision":"%s"}`, test.epoch, test.precision))
data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision))
t.Logf("json: %s", string(data))
var p client.Point
err := json.Unmarshal(data, &p)
if err != nil {
t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err)
}
if !p.Timestamp.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Timestamp)
if !p.Time.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time)
}
}
}
@@ -289,15 +289,15 @@ func TestPoint_UnmarshalRFC(t *testing.T) {
for _, test := range tests {
t.Logf("testing %q\n", test.name)
ts := test.now.Format(test.rfc)
data := []byte(fmt.Sprintf(`{"timestamp": %q}`, ts))
data := []byte(fmt.Sprintf(`{"time": %q}`, ts))
t.Logf("json: %s", string(data))
var p client.Point
err := json.Unmarshal(data, &p)
if err != nil {
t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err)
}
if !p.Timestamp.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Timestamp)
if !p.Time.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time)
}
}
}
@@ -318,9 +318,9 @@ func TestPoint_MarshalOmitempty(t *testing.T) {
},
{
name: "with time",
point: client.Point{Name: "cpu", Fields: map[string]interface{}{"value": 1.1}, Timestamp: now},
point: client.Point{Name: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now},
now: now,
expected: fmt.Sprintf(`{"name":"cpu","timestamp":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)),
expected: fmt.Sprintf(`{"name":"cpu","time":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)),
},
{
name: "with tags",
@@ -386,7 +386,7 @@ func emptyTestServer() *httptest.Server {
}))
}
// Ensure that data with epoch timestamps can be decoded.
// Ensure that data with epoch times can be decoded.
func TestBatchPoints_Normal(t *testing.T) {
var bp client.BatchPoints
data := []byte(`
@@ -399,7 +399,7 @@ func TestBatchPoints_Normal(t *testing.T) {
"tags": {
"host": "server01"
},
"timestamp": 14244733039069373,
"time": 14244733039069373,
"precision": "n",
"values": {
"value": 4541770385657154000
@@ -410,7 +410,7 @@ func TestBatchPoints_Normal(t *testing.T) {
"tags": {
"host": "server01"
},
"timestamp": 14244733039069380,
"time": 14244733039069380,
"precision": "n",
"values": {
"value": 7199311900554737000

@@ -54,7 +54,7 @@ func TestRestoreCommand(t *testing.T) {
if err := s.CreateDatabase("db"); err != nil {
t.Fatalf("cannot create database: %s", err)
}
if index, err := s.WriteSeries("db", "default", []influxdb.Point{{Name: "cpu", Timestamp: now, Fields: map[string]interface{}{"value": float64(100)}}}); err != nil {
if index, err := s.WriteSeries("db", "default", []influxdb.Point{{Name: "cpu", Time: now, Fields: map[string]interface{}{"value": float64(100)}}}); err != nil {
t.Fatalf("cannot write series: %s", err)
} else if err = s.Sync(1, index); err != nil {
t.Fatalf("shard sync: %s", err)
@@ -106,7 +106,7 @@ func TestRestoreCommand(t *testing.T) {
if err := s.CreateDatabase("newdb"); err != nil {
t.Fatalf("cannot create new database: %s", err)
}
if index, err := s.WriteSeries("newdb", "default", []influxdb.Point{{Name: "mem", Timestamp: now, Fields: map[string]interface{}{"value": float64(1000)}}}); err != nil {
if index, err := s.WriteSeries("newdb", "default", []influxdb.Point{{Name: "mem", Time: now, Fields: map[string]interface{}{"value": float64(1000)}}}); err != nil {
t.Fatalf("cannot write new series: %s", err)
} else if err = s.Sync(2, index); err != nil {
t.Fatalf("shard sync: %s", err)

@@ -313,7 +313,7 @@ func queryAndWait(t *testing.T, nodes Cluster, urlDb, q, expected, expectPattern
var mergeMany = func(t *testing.T, node *TestNode, database, retention string) {
for i := 1; i < 11; i++ {
for j := 1; j < 5+i%3; j++ {
data := fmt.Sprintf(`{"database": "%s", "retentionPolicy": "%s", "points": [{"name": "cpu", "timestamp": "%s", "tags": {"host": "server_%d"}, "fields": {"value": 22}}]}`,
data := fmt.Sprintf(`{"database": "%s", "retentionPolicy": "%s", "points": [{"name": "cpu", "time": "%s", "tags": {"host": "server_%d"}, "fields": {"value": 22}}]}`,
database, retention, time.Unix(int64(j), int64(0)).UTC().Format(time.RFC3339), i)
write(t, node, data)
}
@@ -322,7 +322,7 @@ var mergeMany = func(t *testing.T, node *TestNode, database, retention string) {
var limitAndOffset = func(t *testing.T, node *TestNode, database, retention string) {
for i := 1; i < 10; i++ {
data := fmt.Sprintf(`{"database": "%s", "retentionPolicy": "%s", "points": [{"name": "cpu", "timestamp": "%s", "tags": {"region": "us-east", "host": "server-%d"}, "fields": {"value": %d}}]}`,
data := fmt.Sprintf(`{"database": "%s", "retentionPolicy": "%s", "points": [{"name": "cpu", "time": "%s", "tags": {"region": "us-east", "host": "server-%d"}, "fields": {"value": %d}}]}`,
database, retention, time.Unix(int64(i), int64(0)).Format(time.RFC3339), i, i)
write(t, node, data)
}
@@ -342,7 +342,7 @@ func runTest_rawDataReturnsInOrder(t *testing.T, testName string, nodes Cluster,
var expected string
for i := 1; i < numPoints; i++ {
data := fmt.Sprintf(`{"database": "%s", "retentionPolicy": "%s", "points": [{"name": "cpu", "timestamp": "%s", "tags": {"region": "us-east", "host": "server-%d"}, "fields": {"value": %d}}]}`,
data := fmt.Sprintf(`{"database": "%s", "retentionPolicy": "%s", "points": [{"name": "cpu", "time": "%s", "tags": {"region": "us-east", "host": "server-%d"}, "fields": {"value": %d}}]}`,
database, retention, time.Unix(int64(i), int64(0)).Format(time.RFC3339), i%10, i)
write(t, nodes[0], data)
}
@@ -430,25 +430,25 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
// Data read and write tests
{
reset: true,
name: "single point with timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 100}}]}`,
name: "single point with time",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 100}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`,
},
{
name: "single point count query with timestamp",
name: "single point count query with time",
query: `SELECT count(value) FROM "%DB%"."%RP%".cpu`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
{
name: "single string point with timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "logs", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": "disk full"}}]}`,
name: "single string point with time",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "logs", "time": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": "disk full"}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".logs`,
expected: `{"results":[{"series":[{"name":"logs","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z","disk full"]]}]}]}`,
},
{
name: "single bool point with timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "status", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": "true"}}]}`,
name: "single bool point with time",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "status", "time": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": "true"}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".status`,
expected: `{"results":[{"series":[{"name":"status","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z","true"]]}]}]}`,
},
@@ -482,9 +482,9 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
name: "two points with timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "2015-02-28T01:04:36.703820946Z", "fields": {"value": 100}},
{"name": "cpu", "timestamp": "2015-02-28T01:03:36.703820946Z", "fields": {"value": 100}}
name: "two points with time",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "2015-02-28T01:04:36.703820946Z", "fields": {"value": 100}},
{"name": "cpu", "time": "2015-02-28T01:03:36.703820946Z", "fields": {"value": 100}}
]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100],["2015-02-28T01:04:36.703820946Z",100]]}]}]}`,
@@ -493,8 +493,8 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
name: "two points with negative values",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "2015-02-28T01:04:36.703820946Z", "fields": {"value": -200}},
{"name": "cpu", "timestamp": "2015-02-28T01:03:36.703820946Z", "fields": {"value": -100}}
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "2015-02-28T01:04:36.703820946Z", "fields": {"value": -200}},
{"name": "cpu", "time": "2015-02-28T01:03:36.703820946Z", "fields": {"value": -100}}
]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",-100],["2015-02-28T01:04:36.703820946Z",-200]]}]}]}`,
@@ -503,15 +503,15 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
// Data read and write tests using relative time
{
reset: true,
name: "single point with timestamp pre-calculated for past time queries yesterday",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "` + yesterday.Format(time.RFC3339Nano) + `", "tags": {"host": "server01"}, "fields": {"value": 100}}]}`,
name: "single point with time pre-calculated for past time queries yesterday",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "` + yesterday.Format(time.RFC3339Nano) + `", "tags": {"host": "server01"}, "fields": {"value": 100}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu where time >= '` + yesterday.Add(-1*time.Minute).Format(time.RFC3339Nano) + `'`,
expected: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100]]}]}]}`, yesterday.Format(time.RFC3339Nano)),
},
{
reset: true,
name: "single point with timestamp pre-calculated for relative time queries now",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "` + now.Format(time.RFC3339Nano) + `", "tags": {"host": "server01"}, "fields": {"value": 100}}]}`,
name: "single point with time pre-calculated for relative time queries now",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "` + now.Format(time.RFC3339Nano) + `", "tags": {"host": "server01"}, "fields": {"value": 100}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu where time >= now() - 1m`,
expected: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)),
},
@@ -551,9 +551,9 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
reset: true,
name: "FROM regex using default db and rp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu1", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 10}},
{"name": "cpu2", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 20}},
{"name": "cpu3", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 30}}
{"name": "cpu1", "time": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 10}},
{"name": "cpu2", "time": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 20}},
{"name": "cpu3", "time": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 30}}
]}`,
query: `SELECT * FROM /cpu[13]/`,
queryDb: "%DB%",
@@ -583,7 +583,7 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
reset: true,
name: "stddev with just one point",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "timestamp": "2015-04-20T14:27:41Z", "fields": {"value": 45}}
{"name": "cpu", "time": "2015-04-20T14:27:41Z", "fields": {"value": 45}}
]}`,
query: `SELECT STDDEV(value) FROM cpu`,
queryDb: "%DB%",
@@ -593,8 +593,8 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
reset: true,
name: "large mean and stddev",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "timestamp": "2015-04-20T14:27:40Z", "fields": {"value": ` + string(maxFloat64) + `}},
{"name": "cpu", "timestamp": "2015-04-20T14:27:41Z", "fields": {"value": ` + string(maxFloat64) + `}}
{"name": "cpu", "time": "2015-04-20T14:27:40Z", "fields": {"value": ` + string(maxFloat64) + `}},
{"name": "cpu", "time": "2015-04-20T14:27:41Z", "fields": {"value": ` + string(maxFloat64) + `}}
]}`,
query: `SELECT MEAN(value), STDDEV(value) FROM cpu`,
queryDb: "%DB%",
@@ -604,14 +604,14 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
reset: true,
name: "mean and stddev",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "timestamp": "2000-01-01T00:00:00Z", "fields": {"value": 2}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:10Z", "fields": {"value": 4}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:20Z", "fields": {"value": 4}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:30Z", "fields": {"value": 4}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:40Z", "fields": {"value": 5}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:50Z", "fields": {"value": 5}},
{"name": "cpu", "timestamp": "2000-01-01T00:01:00Z", "fields": {"value": 7}},
{"name": "cpu", "timestamp": "2000-01-01T00:01:10Z", "fields": {"value": 9}}
{"name": "cpu", "time": "2000-01-01T00:00:00Z", "fields": {"value": 2}},
{"name": "cpu", "time": "2000-01-01T00:00:10Z", "fields": {"value": 4}},
{"name": "cpu", "time": "2000-01-01T00:00:20Z", "fields": {"value": 4}},
{"name": "cpu", "time": "2000-01-01T00:00:30Z", "fields": {"value": 4}},
{"name": "cpu", "time": "2000-01-01T00:00:40Z", "fields": {"value": 5}},
{"name": "cpu", "time": "2000-01-01T00:00:50Z", "fields": {"value": 5}},
{"name": "cpu", "time": "2000-01-01T00:01:00Z", "fields": {"value": 7}},
{"name": "cpu", "time": "2000-01-01T00:01:10Z", "fields": {"value": 9}}
]}`,
query: `SELECT MEAN(value), STDDEV(value) FROM cpu WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`,
queryDb: "%DB%",
@@ -641,10 +641,10 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
name: "median - even sample size",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu-even", "timestamp": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 200}},
{"name": "cpu-even", "timestamp": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"value": 30}},
{"name": "cpu-even", "timestamp": "2000-01-01T00:00:20Z", "tags": {"region": "us-east"}, "fields": {"value": 40}},
{"name": "cpu-even", "timestamp": "2000-01-01T00:00:30Z", "tags": {"region": "us-west"}, "fields": {"value": 100}}
{"name": "cpu-even", "time": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 200}},
{"name": "cpu-even", "time": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"value": 30}},
{"name": "cpu-even", "time": "2000-01-01T00:00:20Z", "tags": {"region": "us-east"}, "fields": {"value": 40}},
{"name": "cpu-even", "time": "2000-01-01T00:00:30Z", "tags": {"region": "us-west"}, "fields": {"value": 100}}
]}`,
query: `SELECT median(value) FROM "cpu-even"`,
queryDb: "%DB%",
@@ -653,9 +653,9 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
name: "median - odd sample size",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu-odd", "timestamp": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 200}},
{"name": "cpu-odd", "timestamp": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"value": 30}},
{"name": "cpu-odd", "timestamp": "2000-01-01T00:00:20Z", "tags": {"region": "us-west"}, "fields": {"value": 100}}
{"name": "cpu-odd", "time": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 200}},
{"name": "cpu-odd", "time": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"value": 30}},
{"name": "cpu-odd", "time": "2000-01-01T00:00:20Z", "tags": {"region": "us-west"}, "fields": {"value": 100}}
]}`,
query: `SELECT median(value) FROM "cpu-odd"`,
queryDb: "%DB%",
@@ -665,9 +665,9 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
reset: true,
name: "aggregations",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "timestamp": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 20}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"value": 30}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:00Z", "tags": {"region": "us-west"}, "fields": {"value": 100}}
{"name": "cpu", "time": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 20}},
{"name": "cpu", "time": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"value": 30}},
{"name": "cpu", "time": "2000-01-01T00:00:00Z", "tags": {"region": "us-west"}, "fields": {"value": 100}}
]}`,
query: `SELECT value FROM cpu WHERE time >= '2000-01-01 00:00:05'`,
queryDb: "%DB%",
@@ -681,7 +681,7 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
},
{
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "timestamp": "2000-01-01T00:00:03Z", "tags": {"region": "us-east"}, "fields": {"otherVal": 20}}
{"name": "cpu", "time": "2000-01-01T00:00:03Z", "tags": {"region": "us-east"}, "fields": {"otherVal": 20}}
]}`,
name: "aggregation with a null field value",
query: `SELECT SUM(value) FROM cpu GROUP BY region`,
@@ -702,9 +702,9 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
name: "group by multiple dimensions",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "load", "timestamp": "2000-01-01T00:00:00Z", "tags": {"region": "us-east", "host": "serverA"}, "fields": {"value": 20}},
{"name": "load", "timestamp": "2000-01-01T00:00:10Z", "tags": {"region": "us-east", "host": "serverB"}, "fields": {"value": 30}},
{"name": "load", "timestamp": "2000-01-01T00:00:00Z", "tags": {"region": "us-west", "host": "serverC"}, "fields": {"value": 100}}
{"name": "load", "time": "2000-01-01T00:00:00Z", "tags": {"region": "us-east", "host": "serverA"}, "fields": {"value": 20}},
{"name": "load", "time": "2000-01-01T00:00:10Z", "tags": {"region": "us-east", "host": "serverB"}, "fields": {"value": 30}},
{"name": "load", "time": "2000-01-01T00:00:00Z", "tags": {"region": "us-west", "host": "serverC"}, "fields": {"value": 100}}
]}`,
query: `SELECT sum(value) FROM load GROUP BY region, host`,
queryDb: "%DB%",
@@ -713,8 +713,8 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
name: "WHERE with AND",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "timestamp": "2000-01-01T00:00:03Z", "tags": {"region": "uk", "host": "serverZ", "service": "redis"}, "fields": {"value": 20}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:03Z", "tags": {"region": "uk", "host": "serverZ", "service": "mysql"}, "fields": {"value": 30}}
{"name": "cpu", "time": "2000-01-01T00:00:03Z", "tags": {"region": "uk", "host": "serverZ", "service": "redis"}, "fields": {"value": 20}},
{"name": "cpu", "time": "2000-01-01T00:00:03Z", "tags": {"region": "uk", "host": "serverZ", "service": "mysql"}, "fields": {"value": 30}}
]}`,
query: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`,
queryDb: "%DB%",
@@ -723,25 +723,25 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
// Precision-specified writes
{
name: "single string point with second precision timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu_s_precision", "timestamp": 1, "precision": "s", "fields": {"value": 100}}]}`,
name: "single string point with second precision time",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu_s_precision", "time": 1, "precision": "s", "fields": {"value": 100}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu_s_precision`,
expected: `{"results":[{"series":[{"name":"cpu_s_precision","columns":["time","value"],"values":[["1970-01-01T00:00:01Z",100]]}]}]}`,
},
{
name: "single string point with millisecond precision timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu_ms_precision", "timestamp": 1000, "precision": "ms", "fields": {"value": 100}}]}`,
name: "single string point with millisecond precision time",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu_ms_precision", "time": 1000, "precision": "ms", "fields": {"value": 100}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu_ms_precision`,
expected: `{"results":[{"series":[{"name":"cpu_ms_precision","columns":["time","value"],"values":[["1970-01-01T00:00:01Z",100]]}]}]}`,
},
{
name: "single string point with nanosecond precision timestamp",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu_n_precision", "timestamp": 2000000000, "precision": "n", "fields": {"value": 100}}]}`,
name: "single string point with nanosecond precision time",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu_n_precision", "time": 2000000000, "precision": "n", "fields": {"value": 100}}]}`,
query: `SELECT * FROM "%DB%"."%RP%".cpu_n_precision`,
expected: `{"results":[{"series":[{"name":"cpu_n_precision","columns":["time","value"],"values":[["1970-01-01T00:00:02Z",100]]}]}]}`,
},
{
name: "single point count query with nanosecond precision timestamp",
name: "single point count query with nanosecond precision time",
query: `SELECT count(value) FROM "%DB%"."%RP%".cpu_n_precision`,
expected: `{"results":[{"series":[{"name":"cpu_n_precision","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
@@ -751,9 +751,9 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
reset: true,
name: "wildcard queries",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "timestamp": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 10}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"val-x": 20}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:20Z", "tags": {"region": "us-east"}, "fields": {"value": 30, "val-x": 40}}
{"name": "cpu", "time": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 10}},
{"name": "cpu", "time": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"val-x": 20}},
{"name": "cpu", "time": "2000-01-01T00:00:20Z", "tags": {"region": "us-east"}, "fields": {"value": 30, "val-x": 40}}
]}`,
query: `SELECT * FROM cpu`,
queryDb: "%DB%",
@@ -763,9 +763,9 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
reset: true,
name: "wildcard GROUP BY queries",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "timestamp": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 10}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"value": 20}},
{"name": "cpu", "timestamp": "2000-01-01T00:00:20Z", "tags": {"region": "us-west"}, "fields": {"value": 30}}
{"name": "cpu", "time": "2000-01-01T00:00:00Z", "tags": {"region": "us-east"}, "fields": {"value": 10}},
{"name": "cpu", "time": "2000-01-01T00:00:10Z", "tags": {"region": "us-east"}, "fields": {"value": 20}},
{"name": "cpu", "time": "2000-01-01T00:00:20Z", "tags": {"region": "us-west"}, "fields": {"value": 30}}
]}`,
query: `SELECT mean(value) FROM cpu GROUP BY *`,
queryDb: "%DB%",
@@ -782,9 +782,9 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
name: "WHERE tags SELECT single field (EQ tag value1)",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01", "region": "us-west"}, "fields": {"value": 100}},
{"name": "cpu", "timestamp": "2010-02-28T01:03:37.703820946Z", "tags": {"host": "server02"}, "fields": {"value": 200}},
{"name": "cpu", "timestamp": "2012-02-28T01:03:38.703820946Z", "tags": {"host": "server03"}, "fields": {"value": 300}}]}`,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01", "region": "us-west"}, "fields": {"value": 100}},
{"name": "cpu", "time": "2010-02-28T01:03:37.703820946Z", "tags": {"host": "server02"}, "fields": {"value": 200}},
{"name": "cpu", "time": "2012-02-28T01:03:38.703820946Z", "tags": {"host": "server03"}, "fields": {"value": 300}}]}`,
query: `SELECT value FROM "%DB%"."%RP%".cpu WHERE host = 'server01'`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`,
},
@@ -846,16 +846,16 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
name: "WHERE tags SELECT single field (NEQ tag value1, point without any tags)",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 100}},
{"name": "cpu", "timestamp": "2012-02-28T01:03:38.703820946Z", "fields": {"value": 200}}]}`,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 100}},
{"name": "cpu", "time": "2012-02-28T01:03:38.703820946Z", "fields": {"value": 200}}]}`,
query: `SELECT value FROM "%DB%"."%RP%".cpu WHERE host != 'server01'`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`,
},
{
reset: true,
name: "WHERE tags SELECT single field (regex tag no match)",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 100}},
{"name": "cpu", "timestamp": "2012-02-28T01:03:38.703820946Z", "fields": {"value": 200}}]}`,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "2015-02-28T01:03:36.703820946Z", "tags": {"host": "server01"}, "fields": {"value": 100}},
{"name": "cpu", "time": "2012-02-28T01:03:38.703820946Z", "fields": {"value": 200}}]}`,
query: `SELECT value FROM "%DB%"."%RP%".cpu WHERE host !~ /server01/`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`,
},
@@ -874,7 +874,7 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
name: "WHERE fields SELECT single field",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "2015-02-28T01:03:36.703820946Z", "fields": {"alert_id": "alert", "tenant_id": "tenant", "_cust": "acme"}}]}`,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "2015-02-28T01:03:36.703820946Z", "fields": {"alert_id": "alert", "tenant_id": "tenant", "_cust": "acme"}}]}`,
query: `SELECT alert_id FROM "%DB%"."%RP%".cpu WHERE alert_id='alert'`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`,
},
@@ -895,8 +895,8 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
},
{
name: "select where field greater than some value",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "timestamp": "2009-11-10T23:00:02Z", "fields": {"load": 100}},
{"name": "cpu", "timestamp": "2009-11-10T23:01:02Z", "fields": {"load": 80}}]}`,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "cpu", "time": "2009-11-10T23:00:02Z", "fields": {"load": 100}},
{"name": "cpu", "time": "2009-11-10T23:01:02Z", "fields": {"load": 80}}]}`,
query: `select load from "%DB%"."%RP%".cpu where load > 100`,
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","load"]}]}]}`,
},
@@ -933,13 +933,13 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`,
},
{
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "logs", "timestamp": "2009-11-10T23:00:02Z","fields": {"event": "disk full"}},
{"name": "logs", "timestamp": "2009-11-10T23:02:02Z","fields": {"event": "disk not full"}}]}`,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "logs", "time": "2009-11-10T23:00:02Z","fields": {"event": "disk full"}},
{"name": "logs", "time": "2009-11-10T23:02:02Z","fields": {"event": "disk not full"}}]}`,
query: `select event from "%DB%"."%RP%".logs where event = 'disk full'`,
expected: `{"results":[{"series":[{"name":"logs","columns":["time","event"],"values":[["2009-11-10T23:00:02Z","disk full"]]}]}]}`,
},
{
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "logs", "timestamp": "2009-11-10T23:00:02Z","fields": {"event": "disk full"}}]}`,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [{"name": "logs", "time": "2009-11-10T23:00:02Z","fields": {"event": "disk full"}}]}`,
query: `select event from "%DB%"."%RP%".logs where event = 'nonsense'`,
expected: `{"results":[{"series":[{"name":"logs","columns":["time","event"]}]}]}`,
},
@@ -951,10 +951,10 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
name: "where on a tag, field and time",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "where_events", "timestamp": "2009-11-10T23:00:02Z","fields": {"foo": "bar"}, "tags": {"tennant": "paul"}},
{"name": "where_events", "timestamp": "2009-11-10T23:00:03Z","fields": {"foo": "baz"}, "tags": {"tennant": "paul"}},
{"name": "where_events", "timestamp": "2009-11-10T23:00:04Z","fields": {"foo": "bat"}, "tags": {"tennant": "paul"}},
{"name": "where_events", "timestamp": "2009-11-10T23:00:05Z","fields": {"foo": "bar"}, "tags": {"tennant": "todd"}}
{"name": "where_events", "time": "2009-11-10T23:00:02Z","fields": {"foo": "bar"}, "tags": {"tennant": "paul"}},
{"name": "where_events", "time": "2009-11-10T23:00:03Z","fields": {"foo": "baz"}, "tags": {"tennant": "paul"}},
{"name": "where_events", "time": "2009-11-10T23:00:04Z","fields": {"foo": "bat"}, "tags": {"tennant": "paul"}},
{"name": "where_events", "time": "2009-11-10T23:00:05Z","fields": {"foo": "bar"}, "tags": {"tennant": "todd"}}
]}`,
query: `select foo from "%DB%"."%RP%".where_events where tennant = 'paul' AND time > 1s AND (foo = 'bar' OR foo = 'baz')`,
expected: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"]]}]}]}`,
@@ -971,10 +971,10 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
name: "limit1 on points",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "limit", "timestamp": "2009-11-10T23:00:02Z","fields": {"foo": 2}, "tags": {"tennant": "paul"}},
{"name": "limit", "timestamp": "2009-11-10T23:00:03Z","fields": {"foo": 3}, "tags": {"tennant": "paul"}},
{"name": "limit", "timestamp": "2009-11-10T23:00:04Z","fields": {"foo": 4}, "tags": {"tennant": "paul"}},
{"name": "limit", "timestamp": "2009-11-10T23:00:05Z","fields": {"foo": 5}, "tags": {"tennant": "todd"}}
{"name": "limit", "time": "2009-11-10T23:00:02Z","fields": {"foo": 2}, "tags": {"tennant": "paul"}},
{"name": "limit", "time": "2009-11-10T23:00:03Z","fields": {"foo": 3}, "tags": {"tennant": "paul"}},
{"name": "limit", "time": "2009-11-10T23:00:04Z","fields": {"foo": 4}, "tags": {"tennant": "paul"}},
{"name": "limit", "time": "2009-11-10T23:00:05Z","fields": {"foo": 5}, "tags": {"tennant": "todd"}}
]}`,
query: `select foo from "%DB%"."%RP%"."limit" LIMIT 2`,
expected: `{"results":[{"series":[{"name":"limit","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`,
@@ -1039,10 +1039,10 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
name: "fill with value",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "fills", "timestamp": "2009-11-10T23:00:02Z","fields": {"val": 3}},
{"name": "fills", "timestamp": "2009-11-10T23:00:03Z","fields": {"val": 5}},
{"name": "fills", "timestamp": "2009-11-10T23:00:06Z","fields": {"val": 4}},
{"name": "fills", "timestamp": "2009-11-10T23:00:16Z","fields": {"val": 10}}
{"name": "fills", "time": "2009-11-10T23:00:02Z","fields": {"val": 3}},
{"name": "fills", "time": "2009-11-10T23:00:03Z","fields": {"val": 5}},
{"name": "fills", "time": "2009-11-10T23:00:06Z","fields": {"val": 4}},
{"name": "fills", "time": "2009-11-10T23:00:16Z","fields": {"val": 10}}
]}`,
query: `select mean(val) from "%DB%"."%RP%".fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(1)`,
expected: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`,
@@ -1078,8 +1078,8 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
reset: true,
name: "Drop Measurement, series tags preserved tests",
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "timestamp": "2000-01-01T00:00:00Z", "tags": {"host": "serverA", "region": "uswest"}, "fields": {"val": 23.2}},
{"name": "memory", "timestamp": "2000-01-01T00:00:01Z", "tags": {"host": "serverB", "region": "uswest"}, "fields": {"val": 33.2}}
{"name": "cpu", "time": "2000-01-01T00:00:00Z", "tags": {"host": "serverA", "region": "uswest"}, "fields": {"val": 23.2}},
{"name": "memory", "time": "2000-01-01T00:00:01Z", "tags": {"host": "serverB", "region": "uswest"}, "fields": {"val": 33.2}}
]}`,
query: `SHOW MEASUREMENTS`,
queryDb: "%DB%",
@@ -1148,12 +1148,12 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server03", "region": "caeast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}
{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server03", "region": "caeast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}
]}`,
query: "SHOW SERIES",
queryDb: "%DB%",
@@ -1188,13 +1188,13 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "caeast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "other", "tags": {"host": "server03", "region": "caeast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}
{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "caeast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "other", "tags": {"host": "server03", "region": "caeast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}
]}`,
query: "SHOW MEASUREMENTS LIMIT 2",
queryDb: "%DB%",
@@ -1214,12 +1214,12 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server03", "region": "caeast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}
{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server03", "region": "caeast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}
]}`,
query: "SHOW TAG KEYS",
queryDb: "%DB%",
@@ -1239,12 +1239,12 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server03", "region": "caeast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}
{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server02", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}},
{"name": "gpu", "tags": {"host": "server03", "region": "caeast"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}
]}`,
query: "SHOW TAG VALUES WITH KEY = host",
queryDb: "%DB%",
@@ -1279,12 +1279,12 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
{
reset: true,
write: `{"database" : "%DB%", "retentionPolicy" : "%RP%", "points": [
{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"field1": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"timestamp": "2009-11-10T23:00:00Z","fields": {"field1": 200, "field2": 300, "field3": 400}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"field1": 200, "field2": 300, "field3": 400}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"field1": 200, "field2": 300, "field3": 400}},
{"name": "gpu", "tags": {"host": "server01", "region": "useast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"field4": 200, "field5": 300}},
{"name": "gpu", "tags": {"host": "server03", "region": "caeast"},"timestamp": "2009-11-10T23:00:00Z","fields": {"field6": 200, "field7": 300}}
{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"field1": 100}},
{"name": "cpu", "tags": {"host": "server01", "region": "uswest"},"time": "2009-11-10T23:00:00Z","fields": {"field1": 200, "field2": 300, "field3": 400}},
{"name": "cpu", "tags": {"host": "server01", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"field1": 200, "field2": 300, "field3": 400}},
{"name": "cpu", "tags": {"host": "server02", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"field1": 200, "field2": 300, "field3": 400}},
{"name": "gpu", "tags": {"host": "server01", "region": "useast"},"time": "2009-11-10T23:00:00Z","fields": {"field4": 200, "field5": 300}},
{"name": "gpu", "tags": {"host": "server03", "region": "caeast"},"time": "2009-11-10T23:00:00Z","fields": {"field6": 200, "field7": 300}}
]}`,
query: `SHOW FIELD KEYS`,
queryDb: "%DB%",
@@ -1673,7 +1673,7 @@ func TestClientLibrary(t *testing.T) {
bp: client.BatchPoints{
Database: "mydb",
Points: []client.Point{
{Name: "cpu", Fields: map[string]interface{}{"value": 1.1}, Timestamp: now},
{Name: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now},
},
},
expected: `null`,
@@ -1689,9 +1689,9 @@ func TestClientLibrary(t *testing.T) {
{
name: "mulitple points, multiple values",
writes: []write{
{bp: client.BatchPoints{Database: "mydb", Points: []client.Point{{Name: "network", Fields: map[string]interface{}{"rx": 1.1, "tx": 2.1}, Timestamp: now}}}, expected: `null`},
{bp: client.BatchPoints{Database: "mydb", Points: []client.Point{{Name: "network", Fields: map[string]interface{}{"rx": 1.2, "tx": 2.2}, Timestamp: now.Add(time.Nanosecond)}}}, expected: `null`},
{bp: client.BatchPoints{Database: "mydb", Points: []client.Point{{Name: "network", Fields: map[string]interface{}{"rx": 1.3, "tx": 2.3}, Timestamp: now.Add(2 * time.Nanosecond)}}}, expected: `null`},
{bp: client.BatchPoints{Database: "mydb", Points: []client.Point{{Name: "network", Fields: map[string]interface{}{"rx": 1.1, "tx": 2.1}, Time: now}}}, expected: `null`},
{bp: client.BatchPoints{Database: "mydb", Points: []client.Point{{Name: "network", Fields: map[string]interface{}{"rx": 1.2, "tx": 2.2}, Time: now.Add(time.Nanosecond)}}}, expected: `null`},
{bp: client.BatchPoints{Database: "mydb", Points: []client.Point{{Name: "network", Fields: map[string]interface{}{"rx": 1.3, "tx": 2.3}, Time: now.Add(2 * time.Nanosecond)}}}, expected: `null`},
},
queries: []query{
{

@@ -186,10 +186,10 @@ func Unmarshal(data *gollectd.Packet) []influxdb.Point {
tags["type_instance"] = data.TypeInstance
}
p := influxdb.Point{
Name: name,
Tags: tags,
Timestamp: timestamp,
Fields: fields,
Name: name,
Tags: tags,
Time: timestamp,
Fields: fields,
}
points = append(points, p)

@@ -319,7 +319,7 @@ func TestUnmarshal_Time(t *testing.T) {
},
},
points: []influxdb.Point{
{Timestamp: testTime},
{Time: testTime},
},
},
{
@@ -333,7 +333,7 @@ func TestUnmarshal_Time(t *testing.T) {
},
},
points: []influxdb.Point{
{Timestamp: testTime.Round(time.Second)},
{Time: testTime.Round(time.Second)},
},
},
}
@@ -346,10 +346,10 @@ func TestUnmarshal_Time(t *testing.T) {
}
for _, p := range points {
if test.packet.TimeHR > 0 {
if p.Timestamp.Format(time.RFC3339Nano) != testTime.Format(time.RFC3339Nano) {
t.Errorf("timestamp mis-match, got %v, expected %v", p.Timestamp.Format(time.RFC3339Nano), testTime.Format(time.RFC3339Nano))
} else if p.Timestamp.Format(time.RFC3339) != testTime.Format(time.RFC3339) {
t.Errorf("timestamp mis-match, got %v, expected %v", p.Timestamp.Format(time.RFC3339), testTime.Format(time.RFC3339))
if p.Time.Format(time.RFC3339Nano) != testTime.Format(time.RFC3339Nano) {
t.Errorf("time mis-match, got %v, expected %v", p.Time.Format(time.RFC3339Nano), testTime.Format(time.RFC3339Nano))
} else if p.Time.Format(time.RFC3339) != testTime.Format(time.RFC3339) {
t.Errorf("time mis-match, got %v, expected %v", p.Time.Format(time.RFC3339), testTime.Format(time.RFC3339))
}
}
}

@@ -66,9 +66,9 @@ type dropDatabaseCommand struct {
}
type createShardGroupIfNotExistsCommand struct {
Database string `json:"database"`
Policy string `json:"policy"`
Timestamp time.Time `json:"timestamp"`
Database string `json:"database"`
Policy string `json:"policy"`
Time time.Time `json:"time"`
}
type deleteShardGroupCommand struct {

@@ -92,17 +92,17 @@ func (p *Parser) Parse(line string) (influxdb.Point, error) {
// Parse timestamp.
unixTime, err := strconv.ParseFloat(fields[2], 64)
if err != nil {
return influxdb.Point{}, fmt.Errorf("field \"%s\" timestamp: %s", fields[0], err)
return influxdb.Point{}, fmt.Errorf("field \"%s\" time: %s", fields[0], err)
}
// Check if we have fractional seconds
timestamp := time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))
point := influxdb.Point{
Name: name,
Tags: tags,
Fields: fieldValues,
Timestamp: timestamp,
Name: name,
Tags: tags,
Fields: fieldValues,
Time: timestamp,
}
return point, nil
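
The fractional-second conversion above, in isolation — a self-contained sketch (the input value is invented):

package main

import (
    "fmt"
    "math"
    "strconv"
    "time"
)

func main() {
    // A Graphite timestamp field may carry fractional seconds, e.g.
    // "1419972457.825": the integer part becomes seconds and the
    // remainder nanoseconds (subject to float rounding).
    unixTime, err := strconv.ParseFloat("1419972457.825", 64)
    if err != nil {
        panic(err)
    }
    ts := time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))
    fmt.Println(ts.UTC())
}
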

@@ -62,61 +62,61 @@ func Test_DecodeMetric(t *testing.T) {
name string
tags map[string]string
value float64
timestamp time.Time
time time.Time
position, separator string
err string
}{
{
test: "position first by default",
line: `cpu.foo.bar 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
timestamp: testTime,
test: "position first by default",
line: `cpu.foo.bar 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
time: testTime,
},
{
test: "position first if unable to determine",
position: "foo",
line: `cpu.foo.bar 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
timestamp: testTime,
test: "position first if unable to determine",
position: "foo",
line: `cpu.foo.bar 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
time: testTime,
},
{
test: "position last if specified",
position: "last",
line: `foo.bar.cpu 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
timestamp: testTime,
test: "position last if specified",
position: "last",
line: `foo.bar.cpu 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
time: testTime,
},
{
test: "position first if specified with no series",
position: "first",
line: `cpu 50 ` + strTime,
name: "cpu",
tags: map[string]string{},
value: 50,
timestamp: testTime,
test: "position first if specified with no series",
position: "first",
line: `cpu 50 ` + strTime,
name: "cpu",
tags: map[string]string{},
value: 50,
time: testTime,
},
{
test: "position last if specified with no series",
position: "last",
line: `cpu 50 ` + strTime,
name: "cpu",
tags: map[string]string{},
value: 50,
timestamp: testTime,
test: "position last if specified with no series",
position: "last",
line: `cpu 50 ` + strTime,
name: "cpu",
tags: map[string]string{},
value: 50,
time: testTime,
},
{
test: "separator is . by default",
line: `cpu.foo.bar 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
timestamp: testTime,
test: "separator is . by default",
line: `cpu.foo.bar 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
time: testTime,
},
{
test: "separator is . if specified",
@ -125,7 +125,7 @@ func Test_DecodeMetric(t *testing.T) {
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
timestamp: testTime,
time: testTime,
},
{
test: "separator is - if specified",
@ -134,7 +134,7 @@ func Test_DecodeMetric(t *testing.T) {
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
timestamp: testTime,
time: testTime,
},
{
test: "separator is boo if specified",
@ -143,23 +143,23 @@ func Test_DecodeMetric(t *testing.T) {
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
timestamp: testTime,
time: testTime,
},
{
test: "series + metric + integer value",
line: `cpu.foo.bar 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
timestamp: testTime,
test: "series + metric + integer value",
line: `cpu.foo.bar 50 ` + strTime,
name: "cpu",
tags: map[string]string{"foo": "bar"},
value: 50,
time: testTime,
},
{
test: "metric only with float value",
line: `cpu 50.554 ` + strTime,
name: "cpu",
value: 50.554,
timestamp: testTime,
test: "metric only with float value",
line: `cpu 50.554 ` + strTime,
name: "cpu",
value: 50.554,
time: testTime,
},
{
test: "missing metric",
@ -184,7 +184,7 @@ func Test_DecodeMetric(t *testing.T) {
{
test: "should fail parsing invalid time",
line: `cpu 50.554 14199724z57825`,
err: `field "cpu" timestamp: strconv.ParseFloat: parsing "14199724z57825": invalid syntax`,
err: `field "cpu" time: strconv.ParseFloat: parsing "14199724z57825": invalid syntax`,
},
}
@ -215,8 +215,8 @@ func Test_DecodeMetric(t *testing.T) {
if point.Fields[point.Name] != f {
t.Fatalf("floatValue value mismatch. expected %v, got %v", test.value, f)
}
if point.Timestamp.UnixNano()/1000000 != test.timestamp.UnixNano()/1000000 {
t.Fatalf("timestamp value mismatch. expected %v, got %v", test.timestamp.UnixNano(), point.Timestamp.UnixNano())
if point.Time.UnixNano()/1000000 != test.time.UnixNano()/1000000 {
t.Fatalf("time value mismatch. expected %v, got %v", test.time.UnixNano(), point.Time.UnixNano())
}
}
}

View File

@ -337,10 +337,10 @@ func interfaceToString(v interface{}) string {
}
type Point struct {
Name string `json:"name"`
Timestamp time.Time `json:"timestamp"`
Tags map[string]string `json:"tags"`
Fields map[string]interface{} `json:"fields"`
Name string `json:"name"`
Time time.Time `json:"time"`
Tags map[string]string `json:"tags"`
Fields map[string]interface{} `json:"fields"`
}
type Batch struct {
@ -426,7 +426,7 @@ func (h *Handler) serveDump(w http.ResponseWriter, r *http.Request, user *influx
for _, tuple := range row.Values {
for subscript, cell := range tuple {
if row.Columns[subscript] == "time" {
point.Timestamp, _ = cell.(time.Time)
point.Time, _ = cell.(time.Time)
continue
}
point.Fields[row.Columns[subscript]] = cell

View File

@ -87,15 +87,15 @@ func TestBatchWrite_UnmarshalEpoch(t *testing.T) {
for _, test := range tests {
t.Logf("testing %q\n", test.name)
data := []byte(fmt.Sprintf(`{"timestamp": %d, "precision":"%s"}`, test.epoch, test.precision))
data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision))
t.Logf("json: %s", string(data))
var bp client.BatchPoints
err := json.Unmarshal(data, &bp)
if err != nil {
t.Fatalf("unexpected error. expected: %v, actual: %v", nil, err)
}
if !bp.Timestamp.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, bp.Timestamp)
if !bp.Time.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, bp.Time)
}
}
}
@ -125,15 +125,15 @@ func TestBatchWrite_UnmarshalRFC(t *testing.T) {
for _, test := range tests {
t.Logf("testing %q\n", test.name)
ts := test.now.Format(test.rfc)
data := []byte(fmt.Sprintf(`{"timestamp": %q}`, ts))
data := []byte(fmt.Sprintf(`{"time": %q}`, ts))
t.Logf("json: %s", string(data))
var bp client.BatchPoints
err := json.Unmarshal(data, &bp)
if err != nil {
t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err)
}
if !bp.Timestamp.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, bp.Timestamp)
if !bp.Time.Equal(test.expected) {
t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, bp.Time)
}
}
}
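
These tests exercise the two accepted encodings of time: an integer epoch paired with a precision, and an RFC3339 string. A minimal sketch of both, assuming the 2015-era import path for the client package:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/influxdb/influxdb/client"
)

func main() {
	epochJSON := []byte(`{"time": 1257894000, "precision": "s"}`)
	rfcJSON := []byte(`{"time": "2009-11-10T23:00:00Z"}`)

	var a, b client.BatchPoints
	if err := json.Unmarshal(epochJSON, &a); err != nil {
		log.Fatal(err)
	}
	if err := json.Unmarshal(rfcJSON, &b); err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.Time.Equal(b.Time)) // true: both decode to the same instant
}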
@ -173,7 +173,7 @@ func TestHandler_SelectTagNotFound(t *testing.T) {
defer s.Close()
// Write some data
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "default", "points": [{"name": "bin", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "default", "points": [{"name": "bin", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
if status != http.StatusOK {
t.Fatalf("unexpected status: %d", status)
}
@ -625,7 +625,7 @@ func TestHandler_WaitIncrement(t *testing.T) {
status, _ := MustHTTP("GET", s.URL+`/data/wait/2`, map[string]string{"timeout": "200"}, nil, "")
// Write some data
_, _ = MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
_, _ = MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
if status != http.StatusOK {
t.Fatalf("unexpected status, expected: %d, actual: %d", http.StatusOK, status)
@ -1096,7 +1096,7 @@ func TestHandler_DropSeries(t *testing.T) {
s := NewAPIServer(srvr)
defer s.Close()
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
if status != http.StatusOK {
t.Fatalf("unexpected status: %d", status)
@ -1118,7 +1118,7 @@ func TestHandler_serveWriteSeries(t *testing.T) {
s := NewAPIServer(srvr)
defer s.Close()
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "default", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "default", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
if status != http.StatusOK {
t.Fatalf("unexpected status for post: %d", status)
@ -1141,7 +1141,7 @@ func TestHandler_serveDump(t *testing.T) {
s := NewAPIServer(srvr)
defer s.Close()
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "default", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "default", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
if status != http.StatusOK {
t.Fatalf("unexpected status for post: %d", status)
@ -1173,7 +1173,7 @@ func TestHandler_serveWriteSeriesWithNoFields(t *testing.T) {
s := NewAPIServer(srvr)
defer s.Close()
status, body := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z"}]}`)
status, body := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z"}]}`)
expected := fmt.Sprintf(`{"error":"%s"}`, influxdb.ErrFieldsRequired.Error())
@ -1213,7 +1213,7 @@ func TestHandler_serveWriteSeriesWithAuthNilUser(t *testing.T) {
s := NewAuthenticatedAPIServer(srvr)
defer s.Close()
status, body := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
status, body := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
if status != http.StatusUnauthorized {
t.Fatalf("unexpected status: %d", status)
@ -1232,7 +1232,7 @@ func TestHandler_serveWriteSeries_noDatabaseExists(t *testing.T) {
s := NewAPIServer(srvr)
defer s.Close()
status, body := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
status, body := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
expectedStatus := http.StatusNotFound
if status != expectedStatus {
@ -1281,7 +1281,7 @@ func TestHandler_serveWriteSeries_queryHasJsonContentType(t *testing.T) {
s := NewAPIServer(srvr)
defer s.Close()
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z", "fields": {"value": 100}}]}`)
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z", "fields": {"value": 100}}]}`)
if status != http.StatusOK {
t.Fatalf("unexpected status: %d", status)
}
@ -1335,7 +1335,7 @@ func TestHandler_serveWriteSeries_invalidJSON(t *testing.T) {
s := NewAPIServer(srvr)
defer s.Close()
status, body := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
status, body := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z","fields": {"value": 100}}]}`)
if status != http.StatusInternalServerError {
t.Fatalf("unexpected status: expected: %d, actual: %d", http.StatusInternalServerError, status)
@ -1377,7 +1377,7 @@ func TestHandler_serveWriteSeriesNonZeroTime(t *testing.T) {
s := NewAPIServer(srvr)
defer s.Close()
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z", "fields": {"value": 100}}]}`)
status, _ := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z", "fields": {"value": 100}}]}`)
if status != http.StatusOK {
t.Fatalf("unexpected status: %d", status)
}
@ -1482,7 +1482,7 @@ func TestHandler_serveWriteSeriesBatch(t *testing.T) {
"points": [
{
"name": "disk",
"timestamp": "2009-11-10T23:00:00Z",
"time": "2009-11-10T23:00:00Z",
"tags": {
"host": "server01"
},
@ -1492,7 +1492,7 @@ func TestHandler_serveWriteSeriesBatch(t *testing.T) {
},
{
"name": "disk",
"timestamp": "2009-11-10T23:00:01Z",
"time": "2009-11-10T23:00:01Z",
"tags": {
"host": "server01"
},
@ -1502,7 +1502,7 @@ func TestHandler_serveWriteSeriesBatch(t *testing.T) {
},
{
"name": "disk",
"timestamp": "2009-11-10T23:00:02Z",
"time": "2009-11-10T23:00:02Z",
"tags": {
"host": "server02"
},
@ -1667,8 +1667,8 @@ func TestHandler_ChunkedResponses(t *testing.T) {
defer s.Close()
status, errString := MustHTTP("POST", s.URL+`/write`, nil, nil, `{"database" : "foo", "retentionPolicy" : "bar", "points": [
{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2009-11-10T23:00:00Z", "fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02"},"timestamp": "2009-11-10T23:30:00Z", "fields": {"value": 25}}]}`)
{"name": "cpu", "tags": {"host": "server01"},"time": "2009-11-10T23:00:00Z", "fields": {"value": 100}},
{"name": "cpu", "tags": {"host": "server02"},"time": "2009-11-10T23:30:00Z", "fields": {"value": 25}}]}`)
if status != http.StatusOK {
t.Fatalf("unexpected status: %d - %s", status, errString)
}

View File

@ -220,22 +220,22 @@ func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
// NormalizeBatchPoints returns a slice of Points, created by populating individual
// points within the batch, which do not have timestamps or tags, with the top-level
// points within the batch, which do not have times or tags, with the top-level
// values.
func NormalizeBatchPoints(bp client.BatchPoints) ([]Point, error) {
points := []Point{}
for _, p := range bp.Points {
if p.Timestamp.IsZero() {
if bp.Timestamp.IsZero() {
p.Timestamp = time.Now()
if p.Time.IsZero() {
if bp.Time.IsZero() {
p.Time = time.Now()
} else {
p.Timestamp = bp.Timestamp
p.Time = bp.Time
}
}
if p.Precision == "" && bp.Precision != "" {
p.Precision = bp.Precision
}
p.Timestamp = client.SetPrecision(p.Timestamp, p.Precision)
p.Time = client.SetPrecision(p.Time, p.Precision)
if len(bp.Tags) > 0 {
if p.Tags == nil {
p.Tags = make(map[string]string)
@ -248,10 +248,10 @@ func NormalizeBatchPoints(bp client.BatchPoints) ([]Point, error) {
}
// Need to convert from a client.Point to an influxdb.Point
points = append(points, Point{
Name: p.Name,
Tags: p.Tags,
Timestamp: p.Timestamp,
Fields: p.Fields,
Name: p.Name,
Tags: p.Tags,
Time: p.Time,
Fields: p.Fields,
})
}
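
As a usage sketch of the defaulting behavior described in the comment above (assuming NormalizeBatchPoints is exported from the influxdb package, as the surrounding code suggests), a point that omits Time and Tags inherits both from the batch:

bp := client.BatchPoints{
	Database: "mydb",
	Time:     time.Now(),                      // batch-level default
	Tags:     map[string]string{"dc": "east"}, // merged into every point
	Points: []client.Point{
		// No Time or Tags here: both are filled in from the batch.
		{Name: "cpu", Fields: map[string]interface{}{"value": 1.0}},
	},
}
points, err := influxdb.NormalizeBatchPoints(bp)
if err != nil {
	log.Fatal(err)
}
fmt.Println(points[0].Time, points[0].Tags) // batch Time and {"dc": "east"}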

View File

@ -21,23 +21,23 @@ func TestNormalizeBatchPoints(t *testing.T) {
name: "default",
bp: client.BatchPoints{
Points: []client.Point{
{Name: "cpu", Tags: map[string]string{"region": "useast"}, Timestamp: now, Fields: map[string]interface{}{"value": 1.0}},
{Name: "cpu", Tags: map[string]string{"region": "useast"}, Time: now, Fields: map[string]interface{}{"value": 1.0}},
},
},
p: []influxdb.Point{
{Name: "cpu", Tags: map[string]string{"region": "useast"}, Timestamp: now, Fields: map[string]interface{}{"value": 1.0}},
{Name: "cpu", Tags: map[string]string{"region": "useast"}, Time: now, Fields: map[string]interface{}{"value": 1.0}},
},
},
{
name: "merge timestamp",
name: "merge time",
bp: client.BatchPoints{
Timestamp: now,
Time: now,
Points: []client.Point{
{Name: "cpu", Tags: map[string]string{"region": "useast"}, Fields: map[string]interface{}{"value": 1.0}},
},
},
p: []influxdb.Point{
{Name: "cpu", Tags: map[string]string{"region": "useast"}, Timestamp: now, Fields: map[string]interface{}{"value": 1.0}},
{Name: "cpu", Tags: map[string]string{"region": "useast"}, Time: now, Fields: map[string]interface{}{"value": 1.0}},
},
},
{
@ -45,13 +45,13 @@ func TestNormalizeBatchPoints(t *testing.T) {
bp: client.BatchPoints{
Tags: map[string]string{"day": "monday"},
Points: []client.Point{
{Name: "cpu", Tags: map[string]string{"region": "useast"}, Timestamp: now, Fields: map[string]interface{}{"value": 1.0}},
{Name: "memory", Timestamp: now, Fields: map[string]interface{}{"value": 2.0}},
{Name: "cpu", Tags: map[string]string{"region": "useast"}, Time: now, Fields: map[string]interface{}{"value": 1.0}},
{Name: "memory", Time: now, Fields: map[string]interface{}{"value": 2.0}},
},
},
p: []influxdb.Point{
{Name: "cpu", Tags: map[string]string{"day": "monday", "region": "useast"}, Timestamp: now, Fields: map[string]interface{}{"value": 1.0}},
{Name: "memory", Tags: map[string]string{"day": "monday"}, Timestamp: now, Fields: map[string]interface{}{"value": 2.0}},
{Name: "cpu", Tags: map[string]string{"day": "monday", "region": "useast"}, Time: now, Fields: map[string]interface{}{"value": 1.0}},
{Name: "memory", Tags: map[string]string{"day": "monday"}, Time: now, Fields: map[string]interface{}{"value": 2.0}},
},
},
}

View File

@ -94,7 +94,7 @@ func (m *MapReduceJob) Execute(out chan *Row, filterEmptyResults bool) {
reduceFuncs[i] = reduceFunc
}
// we'll have a fixed number of points with timestamps in buckets. Initialize those times and a slice to hold the associated values
// we'll have a fixed number of points with times in buckets. Initialize those times and a slice to hold the associated values
var pointCountInResult int
// if the user didn't specify a start time or a group by interval, we're returning a single point that describes the entire range
@ -255,7 +255,7 @@ func (m *MapReduceJob) processRawQuery(out chan *Row, filterEmptyResults bool) {
}
// find the min of the last point in each mapper
t := o[len(o)-1].Timestamp
t := o[len(o)-1].Time
if t < min {
min = t
}
@ -267,7 +267,7 @@ func (m *MapReduceJob) processRawQuery(out chan *Row, filterEmptyResults bool) {
// find the index of the point up to the min
ind := len(o)
for i, mo := range o {
if mo.Timestamp > min {
if mo.Time > min {
ind = i
break
}
@ -587,13 +587,13 @@ func (m *MapReduceJob) processRawResults(values []*rawQueryMapOutput) *Row {
vals := make([]interface{}, len(selectNames))
if singleValue {
vals[0] = time.Unix(0, v.Timestamp).UTC()
vals[0] = time.Unix(0, v.Time).UTC()
vals[1] = v.Values.(interface{})
} else {
fields := v.Values.(map[string]interface{})
// time is always the first value
vals[0] = time.Unix(0, v.Timestamp).UTC()
vals[0] = time.Unix(0, v.Time).UTC()
// populate the other values
for i := 1; i < len(selectNames); i++ {

View File

@ -17,7 +17,7 @@ import (
// Iterator represents a forward-only iterator over a set of points.
// These are used by the MapFunctions in this file
type Iterator interface {
Next() (seriesID uint64, timestamp int64, value interface{})
Next() (seriesID uint64, time int64, value interface{})
}
// MapFunc represents a function used for mapping over a sequential series of data.
@ -760,12 +760,12 @@ func MapRawQuery(itr Iterator) interface{} {
}
type rawQueryMapOutput struct {
Timestamp int64
Values interface{}
Time int64
Values interface{}
}
type rawOutputs []*rawQueryMapOutput
func (a rawOutputs) Len() int { return len(a) }
func (a rawOutputs) Less(i, j int) bool { return a[i].Timestamp < a[j].Timestamp }
func (a rawOutputs) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a rawOutputs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
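
For illustration, a short sketch of sorting mapper output with the rawOutputs sort.Interface defined above (assumes `import "sort"`):

outs := rawOutputs{
	{Time: 30, Values: 3.0},
	{Time: 10, Values: 1.0},
	{Time: 20, Values: 2.0},
}
sort.Sort(outs)
// outs is now ordered by Time: 10, 20, 30.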

View File

@ -4,9 +4,9 @@ import "testing"
import "sort"
type point struct {
seriesID uint64
timestamp int64
value interface{}
seriesID uint64
time int64
value interface{}
}
type testIterator struct {
@ -17,7 +17,7 @@ func (t *testIterator) Next() (seriesID uint64, timestamp int64, value interface
if len(t.values) > 0 {
v := t.values[0]
t.values = t.values[1:]
return v.seriesID, v.timestamp, v.value
return v.seriesID, v.time, v.value
}
return 0, 0, nil
}

View File

@ -189,7 +189,7 @@ func (s *Server) HandleTelnet(conn net.Conn) {
var t time.Time
ts, err := strconv.ParseInt(tsStr, 10, 64)
if err != nil {
log.Println("TSDBServer: malformed timestamp, skipping: ", tsStr)
log.Println("TSDBServer: malformed time, skipping: ", tsStr)
}
switch len(tsStr) {
@ -200,7 +200,7 @@ func (s *Server) HandleTelnet(conn net.Conn) {
t = time.Unix(ts/1000, (ts%1000)*1000)
break
default:
log.Println("TSDBServer: timestamp must be 10 or 13 chars, skipping: ", tsStr)
log.Println("TSDBServer: time must be 10 or 13 chars, skipping: ", tsStr)
continue
}
@ -224,10 +224,10 @@ func (s *Server) HandleTelnet(conn net.Conn) {
}
p := influxdb.Point{
Name: name,
Tags: tags,
Timestamp: t,
Fields: fields,
Name: name,
Tags: tags,
Time: t,
Fields: fields,
}
_, err = s.writer.WriteSeries(s.database, s.retentionpolicy, []influxdb.Point{p})
@ -252,10 +252,10 @@ func (s *Server) HandleTelnet(conn net.Conn) {
}
*/
type tsdbDP struct {
Metric string `json:"metric"`
Timestamp int64 `json:"timestamp"`
Value float64 `json:"value"`
Tags map[string]string `json:"tags,omitempty"`
Metric string `json:"metric"`
Time int64 `json:"timestamp"`
Value float64 `json:"value"`
Tags map[string]string `json:"tags,omitempty"`
}
// ServeHTTP implements OpenTSDB's HTTP /api/put endpoint
@ -321,10 +321,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
dp := dps[dpi]
var ts time.Time
if dp.Timestamp < 10000000000 {
ts = time.Unix(dp.Timestamp, 0)
if dp.Time < 10000000000 {
ts = time.Unix(dp.Time, 0)
} else {
ts = time.Unix(dp.Timestamp/1000, (dp.Timestamp%1000)*1000)
ts = time.Unix(dp.Time/1000, (dp.Time%1000)*1000)
}
fields := make(map[string]interface{})
@ -333,10 +333,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
continue
}
p := influxdb.Point{
Name: dp.Metric,
Tags: dp.Tags,
Timestamp: ts,
Fields: fields,
Name: dp.Metric,
Tags: dp.Tags,
Time: ts,
Fields: fields,
}
idps = append(idps, p)
}
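
A standalone sketch of the seconds-versus-milliseconds heuristic above: OpenTSDB clients send either 10-digit second or 13-digit millisecond epochs, and time.Unix takes nanoseconds as its second argument, so the millisecond remainder is scaled up here (assumes `import "time"`):

func epochToTime(v int64) time.Time {
	if v < 10000000000 { // fewer than 11 digits: whole seconds
		return time.Unix(v, 0)
	}
	// Milliseconds: split into seconds plus a remainder scaled to nanoseconds.
	return time.Unix(v/1000, (v%1000)*int64(time.Millisecond))
}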

View File

@ -361,10 +361,10 @@ func (s *Server) StartSelfMonitoring(database, retention string, interval time.D
now := time.Now()
st.Walk(func(k string, v int64) {
point := Point{
Timestamp: now,
Name: st.name + "_" + k,
Tags: make(map[string]string),
Fields: map[string]interface{}{"value": int(v)},
Time: now,
Name: st.name + "_" + k,
Tags: make(map[string]string),
Fields: map[string]interface{}{"value": int(v)},
}
// Specifically create a new map.
for k, v := range tags {
@ -511,7 +511,7 @@ func (s *Server) ShardGroupPreCreate(checkInterval time.Duration) {
Database string
Retention string
ID uint64
Timestamp time.Time
Time time.Time
}
var groups []group
@ -530,7 +530,7 @@ func (s *Server) ShardGroupPreCreate(checkInterval time.Duration) {
// Check to see if it is going to end before our interval
if g.EndTime.Before(cutoff) {
log.Printf("pre-creating shard group for %d, retention policy %s, database %s", g.ID, rp.Name, db.name)
groups = append(groups, group{Database: db.name, Retention: rp.Name, ID: g.ID, Timestamp: g.EndTime.Add(1 * time.Nanosecond)})
groups = append(groups, group{Database: db.name, Retention: rp.Name, ID: g.ID, Time: g.EndTime.Add(1 * time.Nanosecond)})
}
}
}
@ -538,8 +538,8 @@ func (s *Server) ShardGroupPreCreate(checkInterval time.Duration) {
}()
for _, g := range groups {
if err := s.CreateShardGroupIfNotExists(g.Database, g.Retention, g.Timestamp); err != nil {
log.Printf("failed to request pre-creation of shard group %d for time %s: %s", g.ID, g.Timestamp, err.Error())
if err := s.CreateShardGroupIfNotExists(g.Database, g.Retention, g.Time); err != nil {
log.Printf("failed to request pre-creation of shard group %d for time %s: %s", g.ID, g.Time, err.Error())
}
}
}
@ -1127,7 +1127,7 @@ func (s *Server) ShardGroups(database string) ([]*ShardGroup, error) {
// CreateShardGroupIfNotExists creates the shard group for a retention policy for the interval a timestamp falls into.
func (s *Server) CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) error {
c := &createShardGroupIfNotExistsCommand{Database: database, Policy: policy, Timestamp: timestamp}
c := &createShardGroupIfNotExistsCommand{Database: database, Policy: policy, Time: timestamp}
_, err := s.broadcast(createShardGroupIfNotExistsMessageType, c)
return err
}
@ -1149,7 +1149,7 @@ func (s *Server) applyCreateShardGroupIfNotExists(m *messaging.Message) error {
}
// If we can match to an existing shard group date range then just ignore request.
if g := rp.shardGroupByTimestamp(c.Timestamp); g != nil {
if g := rp.shardGroupByTimestamp(c.Time); g != nil {
return nil
}
@ -1173,7 +1173,7 @@ func (s *Server) applyCreateShardGroupIfNotExists(m *messaging.Message) error {
// replicated the correct number of times.
shardN := len(nodes) / replicaN
g := newShardGroup(c.Timestamp, rp.ShardGroupDuration)
g := newShardGroup(c.Time, rp.ShardGroupDuration)
// Create and initialize shards based on the node count and replication factor.
if err := g.initialize(m.Index, shardN, replicaN, db, rp, nodes, s.meta); err != nil {
@ -1750,10 +1750,10 @@ func (s *Server) DropSeries(database string, seriesByMeasurement map[string][]ui
// Point defines the values that will be written to the database
type Point struct {
Name string
Tags map[string]string
Timestamp time.Time
Fields map[string]interface{}
Name string
Tags map[string]string
Time time.Time
Fields map[string]interface{}
}
// WriteSeries writes series data to the database.
@ -1832,7 +1832,7 @@ func (s *Server) WriteSeries(database, retentionPolicy string, points []Point) (
}
// Retrieve shard group.
g, err := s.shardGroupByTimestamp(database, retentionPolicy, p.Timestamp)
g, err := s.shardGroupByTimestamp(database, retentionPolicy, p.Time)
if err != nil {
return err
}
@ -1861,7 +1861,7 @@ func (s *Server) WriteSeries(database, retentionPolicy string, points []Point) (
}
// Encode point header, followed by point data, and add to shard's batch.
data := marshalPointHeader(series.ID, uint32(len(encodedFields)), p.Timestamp.UnixNano())
data := marshalPointHeader(series.ID, uint32(len(encodedFields)), p.Time.UnixNano())
data = append(data, encodedFields...)
if shardData[sh.ID] == nil {
shardData[sh.ID] = make([]byte, 0)
@ -2069,16 +2069,16 @@ func (s *Server) createShardGroupsIfNotExists(database, retentionPolicy string,
defer s.mu.RUnlock()
for _, p := range points {
// Check if shard group exists first.
g, err := s.shardGroupByTimestamp(database, retentionPolicy, p.Timestamp)
g, err := s.shardGroupByTimestamp(database, retentionPolicy, p.Time)
if err != nil {
return err
} else if g != nil {
continue
}
commands = append(commands, &createShardGroupIfNotExistsCommand{
Database: database,
Policy: retentionPolicy,
Timestamp: p.Timestamp,
Database: database,
Policy: retentionPolicy,
Time: p.Time,
})
}
return nil
@ -2089,9 +2089,9 @@ func (s *Server) createShardGroupsIfNotExists(database, retentionPolicy string,
// Create any required shard groups across the cluster. Must be done without holding the lock.
for _, c := range commands {
err = s.CreateShardGroupIfNotExists(c.Database, c.Policy, c.Timestamp)
err = s.CreateShardGroupIfNotExists(c.Database, c.Policy, c.Time)
if err != nil {
return fmt.Errorf("create shard(%s:%s/%s): %s", c.Database, c.Policy, c.Timestamp.Format(time.RFC3339Nano), err)
return fmt.Errorf("create shard(%s:%s/%s): %s", c.Database, c.Policy, c.Time.Format(time.RFC3339Nano), err)
}
}
@ -4056,10 +4056,10 @@ func (s *Server) convertRowToPoints(measurementName string, row *influxql.Row) (
}
p := &Point{
Name: measurementName,
Tags: row.Tags,
Timestamp: v[timeIndex].(time.Time),
Fields: vals,
Name: measurementName,
Tags: row.Tags,
Time: v[timeIndex].(time.Time),
Fields: vals,
}
points = append(points, *p)
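
Tying the renamed Time field together with the CreateShardGroupIfNotExists signature shown above, a hedged sketch of pre-creating a shard group for the interval a point's Time falls into (s is a *Server; the database, policy, and error handling are illustrative):

p := Point{
	Name:   "cpu",
	Tags:   map[string]string{"host": "server01"},
	Time:   time.Now(),
	Fields: map[string]interface{}{"value": 100},
}
if err := s.CreateShardGroupIfNotExists("foo", "default", p.Time); err != nil {
	log.Printf("shard group pre-create failed: %s", err)
}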

View File

@ -1008,10 +1008,10 @@ func TestServer_WriteAllDataTypes(t *testing.T) {
s.SetDefaultRetentionPolicy("foo", "raw")
// Write series with one point to the database.
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "series1", Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(20)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "series2", Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": int64(30)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "series3", Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": "baz"}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "series4", Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": true}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "series1", Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(20)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "series2", Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": int64(30)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "series3", Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": "baz"}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "series4", Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": true}}})
time.Sleep(time.Millisecond * 100)
f := func(t *testing.T, database, query, expected string) {
@ -1079,7 +1079,7 @@ func TestServer_DropMeasurement(t *testing.T) {
// Write series with one point to the database.
tags := map[string]string{"host": "serverA", "region": "uswest"}
index, err := s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23.2)}}})
index, err := s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23.2)}}})
if err != nil {
t.Fatal(err)
}
@ -1142,7 +1142,7 @@ func TestServer_DropSeries(t *testing.T) {
// Write series with one point to the database.
tags := map[string]string{"host": "serverA", "region": "uswest"}
index, err := s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23.2)}}})
index, err := s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23.2)}}})
if err != nil {
t.Fatal(err)
}
@ -1186,14 +1186,14 @@ func TestServer_DropSeriesFromMeasurement(t *testing.T) {
// Write series with one point to the database.
tags := map[string]string{"host": "serverA", "region": "uswest"}
index, err := s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23.2)}}})
index, err := s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23.2)}}})
if err != nil {
t.Fatal(err)
}
c.Sync(index)
tags = map[string]string{"host": "serverb", "region": "useast"}
index, err = s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "memory", Tags: tags, Timestamp: mustParseTime("2000-01-02T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23465432423)}}})
index, err = s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "memory", Tags: tags, Time: mustParseTime("2000-01-02T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23465432423)}}})
if err != nil {
t.Fatal(err)
}
@ -1232,14 +1232,14 @@ func TestServer_DropSeriesTagsPreserved(t *testing.T) {
// Write series with one point to the database.
tags := map[string]string{"host": "serverA", "region": "uswest"}
index, err := s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23.2)}}})
index, err := s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(23.2)}}})
if err != nil {
t.Fatal(err)
}
c.Sync(index)
tags = map[string]string{"host": "serverB", "region": "uswest"}
index, err = s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Timestamp: mustParseTime("2000-01-01T00:00:01Z"), Fields: map[string]interface{}{"value": float64(33.2)}}})
index, err = s.WriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: tags, Time: mustParseTime("2000-01-01T00:00:01Z"), Fields: map[string]interface{}{"value": float64(33.2)}}})
if err != nil {
t.Fatal(err)
}
@ -1307,11 +1307,11 @@ func TestServer_ShowSeriesLimitOffset(t *testing.T) {
s.SetDefaultRetentionPolicy("foo", "raw")
// Write series with one point to the database.
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east", "host": "serverA"}, Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(20)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east", "host": "serverB"}, Timestamp: mustParseTime("2000-01-01T00:00:10Z"), Fields: map[string]interface{}{"value": float64(30)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-west", "host": "serverC"}, Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(100)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "memory", Tags: map[string]string{"region": "us-west", "host": "serverB"}, Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(100)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "memory", Tags: map[string]string{"region": "us-east", "host": "serverA"}, Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(100)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east", "host": "serverA"}, Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(20)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east", "host": "serverB"}, Time: mustParseTime("2000-01-01T00:00:10Z"), Fields: map[string]interface{}{"value": float64(30)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-west", "host": "serverC"}, Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(100)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "memory", Tags: map[string]string{"region": "us-west", "host": "serverB"}, Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(100)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "memory", Tags: map[string]string{"region": "us-east", "host": "serverA"}, Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(100)}}})
// Select data from the server.
results := s.executeQuery(MustParseQuery(`SHOW SERIES LIMIT 3 OFFSET 1`), "foo", nil)
@ -1463,7 +1463,7 @@ func TestServer_Measurements(t *testing.T) {
tags := map[string]string{"host": "servera.influx.com", "region": "uswest"}
values := map[string]interface{}{"value": 23.2}
index, err := s.WriteSeries("foo", "mypolicy", []influxdb.Point{influxdb.Point{Name: "cpu_load", Tags: tags, Timestamp: timestamp, Fields: values}})
index, err := s.WriteSeries("foo", "mypolicy", []influxdb.Point{influxdb.Point{Name: "cpu_load", Tags: tags, Time: timestamp, Fields: values}})
if err != nil {
t.Fatal(err)
} else if err = s.Sync(index); err != nil {
@ -1860,9 +1860,9 @@ func TestServer_RunContinuousQueries(t *testing.T) {
}
testTime.Add(time.Millisecond * 2)
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east"}, Timestamp: testTime, Fields: map[string]interface{}{"value": float64(30)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east"}, Timestamp: testTime.Add(-time.Millisecond * 5), Fields: map[string]interface{}{"value": float64(20)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-west"}, Timestamp: testTime, Fields: map[string]interface{}{"value": float64(100)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east"}, Time: testTime, Fields: map[string]interface{}{"value": float64(30)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east"}, Time: testTime.Add(-time.Millisecond * 5), Fields: map[string]interface{}{"value": float64(20)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-west"}, Time: testTime, Fields: map[string]interface{}{"value": float64(100)}}})
// Run CQs after a period of time
time.Sleep(time.Millisecond * 50)
@ -1892,7 +1892,7 @@ func TestServer_RunContinuousQueries(t *testing.T) {
// ensure that data written into a previous window is picked up and the result recomputed.
time.Sleep(time.Millisecond * 2)
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-west"}, Timestamp: testTime.Add(-time.Millisecond), Fields: map[string]interface{}{"value": float64(50)}}})
s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-west"}, Time: testTime.Add(-time.Millisecond), Fields: map[string]interface{}{"value": float64(50)}}})
s.RunContinuousQueries()
// give CQs time to run
time.Sleep(time.Millisecond * 100)
@ -1956,7 +1956,7 @@ func TestServer_CreateSnapshotWriter(t *testing.T) {
s.CreateUser("susy", "pass", false)
// Write one point.
index, err := s.WriteSeries("db", "raw", []influxdb.Point{{Name: "cpu", Timestamp: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(100)}}})
index, err := s.WriteSeries("db", "raw", []influxdb.Point{{Name: "cpu", Time: mustParseTime("2000-01-01T00:00:00Z"), Fields: map[string]interface{}{"value": float64(100)}}})
if err != nil {
t.Fatal(err)
}
@ -2378,7 +2378,7 @@ func TestbatchWrite_UnmarshalEpoch(t *testing.T) {
}
for _, test := range tests {
json := fmt.Sprintf(`"points": [{timestamp: "%d"}`, test.epoch)
json := fmt.Sprintf(`"points": [{time: "%d"}`, test.epoch)
log.Println(json)
t.Fatal("foo")
}

View File

@ -8,10 +8,10 @@ echo "creating retention policy"
curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY bar ON foo DURATION INF REPLICATION 1 DEFAULT"
echo "inserting data"
curl -d "{\"database\" : \"foo\", \"retentionPolicy\" : \"bar\", \"points\": [{\"name\": \"cpu\", \"tags\": {\"region\":\"uswest\",\"host\": \"server01\"},\"timestamp\": \"$now\",\"fields\": {\"value\": 100}}]}" -H "Content-Type: application/json" http://localhost:8086/write
curl -d "{\"database\" : \"foo\", \"retentionPolicy\" : \"bar\", \"points\": [{\"name\": \"cpu\", \"tags\": {\"region\":\"uswest\",\"host\": \"server01\"},\"time\": \"$now\",\"fields\": {\"value\": 100}}]}" -H "Content-Type: application/json" http://localhost:8086/write
echo "inserting data"
curl -d "{\"database\" : \"foo\", \"retentionPolicy\" : \"bar\", \"points\": [{\"name\": \"cpu\", \"tags\": {\"region\":\"uswest\",\"host\": \"server01\"},\"timestamp\": \"$tomorrow\",\"fields\": {\"value\": 200}}]}" -H "Content-Type: application/json" http://localhost:8086/write
curl -d "{\"database\" : \"foo\", \"retentionPolicy\" : \"bar\", \"points\": [{\"name\": \"cpu\", \"tags\": {\"region\":\"uswest\",\"host\": \"server01\"},\"time\": \"$tomorrow\",\"fields\": {\"value\": 200}}]}" -H "Content-Type: application/json" http://localhost:8086/write
sleep 1

View File

@ -5,10 +5,10 @@ echo "creating retention policy"
curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY bar ON foo DURATION 1h REPLICATION 3 DEFAULT"
echo "inserting data"
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2015-01-26T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2015-01-27T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2015-01-28T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "mem", "tags": {"host": "server01"},"timestamp": "2015-01-29T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2015-01-26T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2015-01-27T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2015-01-28T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "mem", "tags": {"host": "server01"},"time": "2015-01-29T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
echo "querying data"
curl -G http://localhost:8086/query --data-urlencode "db=foo" --data-urlencode "q=SELECT sum(value) FROM \"foo\".\"bar\".cpu GROUP BY time(1h)"

View File

@ -5,7 +5,7 @@ echo "creating retention policy"
curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY bar ON foo DURATION 1h REPLICATION 3 DEFAULT"
echo "inserting data"
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2015-01-26T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2015-01-26T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
echo "querying data"
curl -G http://localhost:8086/query --data-urlencode "db=foo" --data-urlencode "q=SELECT sum(value) FROM \"foo\".\"bar\".cpu GROUP BY time(1h)"

View File

@ -5,19 +5,19 @@ echo "creating retention policy"
curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY bar ON foo DURATION 300d REPLICATION 3 DEFAULT"
echo "inserting data"
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "timestamp": "2015-02-26T22:01:11.703Z","fields": {"value": 8.9}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "timestamp": "2015-02-27T22:01:11.703Z","fields": {"value": 1.3}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "timestamp": "2015-02-28T22:01:11.703Z","fields": {"value": 50.4}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "time": "2015-02-26T22:01:11.703Z","fields": {"value": 8.9}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "time": "2015-02-27T22:01:11.703Z","fields": {"value": 1.3}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "time": "2015-02-28T22:01:11.703Z","fields": {"value": 50.4}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "mem", "tags": {"host": "server01"},"timestamp": "2015-02-26T22:01:11.703Z","fields": {"value": 16432}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "mem", "tags": {"host": "server01"},"timestamp": "2015-02-27T22:01:11.703Z","fields": {"value": 23453}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "mem", "tags": {"host": "server02"},"timestamp": "2015-02-28T22:01:11.703Z","fields": {"value": 90234}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "mem", "tags": {"host": "server01"},"time": "2015-02-26T22:01:11.703Z","fields": {"value": 16432}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "mem", "tags": {"host": "server01"},"time": "2015-02-27T22:01:11.703Z","fields": {"value": 23453}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "mem", "tags": {"host": "server02"},"time": "2015-02-28T22:01:11.703Z","fields": {"value": 90234}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "temp", "tags": {"host": "server01","region":"uswest"}, "timestamp": "2015-02-26T22:01:11.703Z","fields": {"value": 98.6}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "temp", "tags": {"host": "server01","region":"useast"}, "timestamp": "2015-02-27T22:01:11.703Z","fields": {"value": 101.1}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "temp", "tags": {"host": "server02","region":"useast"}, "timestamp": "2015-02-28T22:01:11.703Z","fields": {"value": 105.4}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "temp", "tags": {"host": "server01","region":"uswest"}, "time": "2015-02-26T22:01:11.703Z","fields": {"value": 98.6}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "temp", "tags": {"host": "server01","region":"useast"}, "time": "2015-02-27T22:01:11.703Z","fields": {"value": 101.1}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "temp", "tags": {"host": "server02","region":"useast"}, "time": "2015-02-28T22:01:11.703Z","fields": {"value": 105.4}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "network", "tags": {"host": "server01","region":"uswest"},"timestamp": "2015-02-26T22:01:11.703Z","fields": {"rx": 2342,"tx": 9804}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "network", "tags": {"host": "server01","region":"useast"},"timestamp": "2015-02-27T22:01:11.703Z","fields": {"rx": 4324,"tx": 7930}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "network", "tags": {"host": "server02","region":"useast"},"timestamp": "2015-02-28T22:01:11.703Z","fields": {"rx": 2342,"tx": 8234}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "network", "tags": {"host": "server01","region":"uswest"},"time": "2015-02-26T22:01:11.703Z","fields": {"rx": 2342,"tx": 9804}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "network", "tags": {"host": "server01","region":"useast"},"time": "2015-02-27T22:01:11.703Z","fields": {"rx": 4324,"tx": 7930}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "network", "tags": {"host": "server02","region":"useast"},"time": "2015-02-28T22:01:11.703Z","fields": {"rx": 2342,"tx": 8234}}]}' -H "Content-Type: application/json" http://localhost:8086/write

View File

@ -5,7 +5,7 @@ echo "creating retention policy"
curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY bar ON foo DURATION 1h REPLICATION 3 DEFAULT"
echo "inserting data"
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"region":"uswest","host": "server01"},"timestamp": "2015-01-26T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"region":"uswest","host": "server01"},"time": "2015-01-26T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
echo "querying data"
curl -G http://localhost:8086/query --data-urlencode "db=foo" --data-urlencode "q=SELECT sum(value) FROM \"foo\".\"bar\".cpu GROUP BY time(1h)"

View File

@ -4,7 +4,7 @@ curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE foo"
echo "creating retention policy"
curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY bar ON foo DURATION 1h REPLICATION 3 DEFAULT"
echo '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2015-01-26T22:01:11.703Z","fields": {"value": 123}}]}' | gzip > foo.json.gz
echo '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2015-01-26T22:01:11.703Z","fields": {"value": 123}}]}' | gzip > foo.json.gz
echo "inserting data"
curl -v -i -H "Content-encoding: gzip" -H "Content-Type: application/json" -X POST -T foo.json.gz http://localhost:8086/write

View File

@ -95,7 +95,7 @@ do
fi
# Append the point.
POINTS=$POINTS'{"name": "cpu", "tags": {"host": "server'$series'"}, "timestamp": "'$TIMESTAMP'","fields": {"value": 100}}'
POINTS=$POINTS'{"name": "cpu", "tags": {"host": "server'$series'"}, "time": "'$TIMESTAMP'","fields": {"value": 100}}'
done
# Write out point.

View File

@ -14,7 +14,7 @@ Use [http://www.json-generator.com/](http://www.json-generator.com/)
{
"name": "cpu",
"tags": {"host": "server01"},
"timestamp": "{{date(new Date(2015, 15, 1), new Date(), 'YYYY-MM-ddThh:mm:ss Z')}}",
"time": "{{date(new Date(2015, 15, 1), new Date(), 'YYYY-MM-ddThh:mm:ss Z')}}",
"fields": {
"value": '{{integer(1, 1000)}}'
}

File diff suppressed because it is too large

View File

@ -5,9 +5,9 @@ echo "creating retention policy"
curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY bar ON foo DURATION 1h REPLICATION 3 DEFAULT"
echo "inserting data"
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2015-01-26T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2015-01-27T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"timestamp": "2015-01-28T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2015-01-26T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2015-01-27T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
curl -d '{"database" : "foo", "retentionPolicy" : "bar", "points": [{"name": "cpu", "tags": {"host": "server01"},"time": "2015-01-28T22:01:11.703Z","fields": {"value": 100}}]}' -H "Content-Type: application/json" http://localhost:8086/write
echo "querying data"
curl -G http://localhost:8086/query --data-urlencode "db=foo" --data-urlencode "q=SELECT sum(value) FROM \"foo\".\"bar\".cpu GROUP BY time(1h)"

View File

@ -26,10 +26,10 @@ func main() {
for k := 0; k < *intervalN; k++ {
t = t.Add(1 * time.Second)
points = append(points, &Point{
Name: "cpu",
Timestamp: t,
Tags: map[string]string{"host": fmt.Sprintf("server%d", j+1)},
Fields: map[string]interface{}{"value": 100},
Name: "cpu",
Time: t,
Tags: map[string]string{"host": fmt.Sprintf("server%d", j+1)},
Fields: map[string]interface{}{"value": 100},
})
}
batch := &Batch{
@ -51,8 +51,8 @@ type Batch struct {
}
type Point struct {
Name string `json:"name"`
Timestamp time.Time `json:"timestamp"`
Tags map[string]string `json:"tags"`
Fields map[string]interface{} `json:"fields"`
Name string `json:"name"`
Time time.Time `json:"time"`
Tags map[string]string `json:"tags"`
Fields map[string]interface{} `json:"fields"`
}