2015-07-20 19:59:46 +00:00
|
|
|
package tsdb_test
|
2015-05-22 20:08:43 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
2015-06-02 20:57:39 +00:00
|
|
|
"path"
|
2015-06-18 15:07:51 +00:00
|
|
|
"path/filepath"
|
2015-05-22 20:08:43 +00:00
|
|
|
"reflect"
|
2015-11-04 21:06:06 +00:00
|
|
|
"strings"
|
2015-05-22 20:08:43 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
2015-07-20 19:59:46 +00:00
|
|
|
|
2015-11-04 21:06:06 +00:00
|
|
|
"github.com/davecgh/go-spew/spew"
|
|
|
|
"github.com/influxdb/influxdb/influxql"
|
2015-09-16 20:33:08 +00:00
|
|
|
"github.com/influxdb/influxdb/models"
|
2015-11-04 21:06:06 +00:00
|
|
|
"github.com/influxdb/influxdb/pkg/deep"
|
2015-07-20 19:59:46 +00:00
|
|
|
"github.com/influxdb/influxdb/tsdb"
|
2015-11-04 21:06:06 +00:00
|
|
|
_ "github.com/influxdb/influxdb/tsdb/engine"
|
2015-05-22 20:08:43 +00:00
|
|
|
)
|
|
|
|
|
2015-11-04 21:06:06 +00:00
|
|
|
// DefaultPrecision is the precision used by the MustWritePointsString() function.
const DefaultPrecision = "s"
|
|
|
|
|
2015-05-22 20:08:43 +00:00
|
|
|
func TestShardWriteAndIndex(t *testing.T) {
|
2015-11-04 21:06:06 +00:00
|
|
|
t.Skip("pending tsm1 iterator impl")
|
|
|
|
|
2015-06-03 14:09:50 +00:00
|
|
|
tmpDir, _ := ioutil.TempDir("", "shard_test")
|
2015-06-02 20:57:39 +00:00
|
|
|
defer os.RemoveAll(tmpDir)
|
|
|
|
tmpShard := path.Join(tmpDir, "shard")
|
2015-08-21 15:22:04 +00:00
|
|
|
tmpWal := path.Join(tmpDir, "wal")
|
2015-05-22 20:08:43 +00:00
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
index := tsdb.NewDatabaseIndex()
|
2015-08-18 20:59:54 +00:00
|
|
|
opts := tsdb.NewEngineOptions()
|
|
|
|
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
|
|
|
|
|
2015-08-21 15:22:04 +00:00
|
|
|
sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
|
2015-05-26 15:41:15 +00:00
|
|
|
if err := sh.Open(); err != nil {
|
2015-05-22 20:08:43 +00:00
|
|
|
t.Fatalf("error openeing shard: %s", err.Error())
|
|
|
|
}
|
|
|
|
|
2015-10-27 16:21:54 +00:00
|
|
|
pt := models.MustNewPoint(
|
2015-05-22 21:00:51 +00:00
|
|
|
"cpu",
|
|
|
|
map[string]string{"host": "server"},
|
|
|
|
map[string]interface{}{"value": 1.0},
|
|
|
|
time.Unix(1, 2),
|
|
|
|
)
|
2015-05-22 20:08:43 +00:00
|
|
|
|
2015-09-16 20:33:08 +00:00
|
|
|
err := sh.WritePoints([]models.Point{pt})
|
2015-05-22 20:08:43 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf(err.Error())
|
|
|
|
}
|
|
|
|
|
2015-05-22 21:00:51 +00:00
|
|
|
pt.SetTime(time.Unix(2, 3))
|
2015-09-16 20:33:08 +00:00
|
|
|
err = sh.WritePoints([]models.Point{pt})
|
2015-05-22 20:08:43 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf(err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
validateIndex := func() {
|
2015-07-20 19:59:46 +00:00
|
|
|
if index.SeriesN() != 1 {
|
2015-05-22 20:08:43 +00:00
|
|
|
t.Fatalf("series wasn't in index")
|
|
|
|
}
|
2015-07-20 19:59:46 +00:00
|
|
|
|
|
|
|
seriesTags := index.Series(string(pt.Key())).Tags
|
2015-05-22 21:12:34 +00:00
|
|
|
if len(seriesTags) != len(pt.Tags()) || pt.Tags()["host"] != seriesTags["host"] {
|
2015-07-20 19:59:46 +00:00
|
|
|
t.Fatalf("tags weren't properly saved to series index: %v, %v", pt.Tags(), seriesTags)
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
2015-07-20 19:59:46 +00:00
|
|
|
if !reflect.DeepEqual(index.Measurement("cpu").TagKeys(), []string{"host"}) {
|
2015-05-22 20:08:43 +00:00
|
|
|
t.Fatalf("tag key wasn't saved to measurement index")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
validateIndex()
|
|
|
|
|
|
|
|
// ensure the index gets loaded after closing and opening the shard
|
|
|
|
sh.Close()
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
index = tsdb.NewDatabaseIndex()
|
2015-08-21 15:22:04 +00:00
|
|
|
sh = tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
|
2015-05-26 15:41:15 +00:00
|
|
|
if err := sh.Open(); err != nil {
|
2015-05-22 20:08:43 +00:00
|
|
|
t.Fatalf("error openeing shard: %s", err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
validateIndex()
|
|
|
|
|
|
|
|
// and ensure that we can still write data
|
2015-05-22 21:00:51 +00:00
|
|
|
pt.SetTime(time.Unix(2, 6))
|
2015-09-16 20:33:08 +00:00
|
|
|
err = sh.WritePoints([]models.Point{pt})
|
2015-05-22 20:08:43 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf(err.Error())
|
|
|
|
}
|
|
|
|
}
|
2015-06-02 20:57:39 +00:00
|
|
|
|
2015-06-10 16:27:11 +00:00
|
|
|
func TestShardWriteAddNewField(t *testing.T) {
|
2015-11-04 21:06:06 +00:00
|
|
|
t.Skip("pending tsm1 iterator impl")
|
|
|
|
|
2015-06-10 16:27:11 +00:00
|
|
|
tmpDir, _ := ioutil.TempDir("", "shard_test")
|
|
|
|
defer os.RemoveAll(tmpDir)
|
|
|
|
tmpShard := path.Join(tmpDir, "shard")
|
2015-08-21 15:22:04 +00:00
|
|
|
tmpWal := path.Join(tmpDir, "wal")
|
2015-06-10 16:27:11 +00:00
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
index := tsdb.NewDatabaseIndex()
|
2015-08-18 20:59:54 +00:00
|
|
|
opts := tsdb.NewEngineOptions()
|
|
|
|
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
|
|
|
|
|
2015-08-21 15:22:04 +00:00
|
|
|
sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
|
2015-06-10 16:27:11 +00:00
|
|
|
if err := sh.Open(); err != nil {
|
|
|
|
t.Fatalf("error openeing shard: %s", err.Error())
|
|
|
|
}
|
|
|
|
defer sh.Close()
|
|
|
|
|
2015-10-27 16:21:54 +00:00
|
|
|
pt := models.MustNewPoint(
|
2015-06-10 16:27:11 +00:00
|
|
|
"cpu",
|
|
|
|
map[string]string{"host": "server"},
|
|
|
|
map[string]interface{}{"value": 1.0},
|
|
|
|
time.Unix(1, 2),
|
|
|
|
)
|
|
|
|
|
2015-09-16 20:33:08 +00:00
|
|
|
err := sh.WritePoints([]models.Point{pt})
|
2015-06-10 16:27:11 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf(err.Error())
|
|
|
|
}
|
|
|
|
|
2015-10-27 16:21:54 +00:00
|
|
|
pt = models.MustNewPoint(
|
2015-06-10 16:27:11 +00:00
|
|
|
"cpu",
|
|
|
|
map[string]string{"host": "server"},
|
|
|
|
map[string]interface{}{"value": 1.0, "value2": 2.0},
|
|
|
|
time.Unix(1, 2),
|
|
|
|
)
|
|
|
|
|
2015-09-16 20:33:08 +00:00
|
|
|
err = sh.WritePoints([]models.Point{pt})
|
2015-06-10 16:27:11 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf(err.Error())
|
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
if index.SeriesN() != 1 {
|
2015-06-10 16:27:11 +00:00
|
|
|
t.Fatalf("series wasn't in index")
|
|
|
|
}
|
2015-07-20 19:59:46 +00:00
|
|
|
seriesTags := index.Series(string(pt.Key())).Tags
|
2015-06-10 16:27:11 +00:00
|
|
|
if len(seriesTags) != len(pt.Tags()) || pt.Tags()["host"] != seriesTags["host"] {
|
2015-07-20 19:59:46 +00:00
|
|
|
t.Fatalf("tags weren't properly saved to series index: %v, %v", pt.Tags(), seriesTags)
|
2015-06-10 16:27:11 +00:00
|
|
|
}
|
2015-07-20 19:59:46 +00:00
|
|
|
if !reflect.DeepEqual(index.Measurement("cpu").TagKeys(), []string{"host"}) {
|
2015-06-10 16:27:11 +00:00
|
|
|
t.Fatalf("tag key wasn't saved to measurement index")
|
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
if len(index.Measurement("cpu").FieldNames()) != 2 {
|
2015-06-10 16:27:11 +00:00
|
|
|
t.Fatalf("field names wasn't saved to measurement index")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-04 21:06:06 +00:00
|
|
|
// Ensure a shard can create iterators for its underlying data.
|
|
|
|
func TestShard_CreateIterator(t *testing.T) {
|
|
|
|
sh := MustOpenShard()
|
2015-06-18 15:07:51 +00:00
|
|
|
defer sh.Close()
|
|
|
|
|
2015-11-04 21:06:06 +00:00
|
|
|
sh.MustWritePointsString(`
|
|
|
|
cpu,host=serverA,region=uswest value=100 0
|
|
|
|
cpu,host=serverA,region=uswest value=50,val2=5 10
|
|
|
|
cpu,host=serverB,region=uswest value=25 0
|
|
|
|
`)
|
|
|
|
|
|
|
|
// Create iterator.
|
|
|
|
itr, err := sh.CreateIterator(influxql.IteratorOptions{
|
|
|
|
Expr: influxql.MustParseExpr(`value`),
|
|
|
|
Aux: []string{"val2"},
|
|
|
|
Dimensions: []string{"host"},
|
|
|
|
Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}},
|
|
|
|
Ascending: true,
|
|
|
|
StartTime: influxql.MinTime,
|
|
|
|
EndTime: influxql.MaxTime,
|
2015-07-22 14:53:20 +00:00
|
|
|
})
|
2015-11-04 21:06:06 +00:00
|
|
|
if err != nil {
|
2015-06-18 15:07:51 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2015-11-04 21:06:06 +00:00
|
|
|
defer itr.Close()
|
|
|
|
fitr := itr.(influxql.FloatIterator)
|
|
|
|
|
|
|
|
// Read values from iterator.
|
|
|
|
if p := fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{
|
|
|
|
Name: "cpu",
|
|
|
|
Tags: influxql.NewTags(map[string]string{"host": "serverA"}),
|
|
|
|
Time: time.Unix(0, 0).UnixNano(),
|
|
|
|
Value: 100,
|
|
|
|
Aux: []interface{}{nil},
|
|
|
|
}) {
|
|
|
|
t.Fatalf("unexpected point(0): %s", spew.Sdump(p))
|
2015-06-18 15:07:51 +00:00
|
|
|
}
|
|
|
|
|
2015-11-04 21:06:06 +00:00
|
|
|
if p := fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{
|
|
|
|
Name: "cpu",
|
|
|
|
Tags: influxql.NewTags(map[string]string{"host": "serverA"}),
|
|
|
|
Time: time.Unix(10, 0).UnixNano(),
|
|
|
|
Value: 50,
|
|
|
|
Aux: []interface{}{float64(5)},
|
|
|
|
}) {
|
|
|
|
t.Fatalf("unexpected point(1): %s", spew.Sdump(p))
|
|
|
|
}
|
2015-06-18 15:07:51 +00:00
|
|
|
|
2015-11-04 21:06:06 +00:00
|
|
|
if p := fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{
|
|
|
|
Name: "cpu",
|
|
|
|
Tags: influxql.NewTags(map[string]string{"host": "serverB"}),
|
|
|
|
Time: time.Unix(0, 0).UnixNano(),
|
|
|
|
Value: 25,
|
|
|
|
Aux: []interface{}{nil},
|
|
|
|
}) {
|
|
|
|
t.Fatalf("unexpected point(1): %s", spew.Sdump(p))
|
2015-06-18 15:07:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-02 20:57:39 +00:00
|
|
|
// The following benchmarks open a brand new shard each iteration, so every
// write creates previously unseen series. The suffix is the approximate
// number of unique series (= mCnt * tvCnt^tkCnt; see benchmarkWritePoints).
func BenchmarkWritePoints_NewSeries_1K(b *testing.B) { benchmarkWritePoints(b, 38, 3, 3, 1) }
func BenchmarkWritePoints_NewSeries_100K(b *testing.B) { benchmarkWritePoints(b, 32, 5, 5, 1) }
func BenchmarkWritePoints_NewSeries_250K(b *testing.B) { benchmarkWritePoints(b, 80, 5, 5, 1) }
func BenchmarkWritePoints_NewSeries_500K(b *testing.B) { benchmarkWritePoints(b, 160, 5, 5, 1) }
func BenchmarkWritePoints_NewSeries_1M(b *testing.B) { benchmarkWritePoints(b, 320, 5, 5, 1) }
|
|
|
|
|
|
|
|
// The following benchmarks reuse one shard seeded before the timed loop, so
// every write targets series that already exist in the index. The suffix is
// the approximate number of unique series.
func BenchmarkWritePoints_ExistingSeries_1K(b *testing.B) {
	benchmarkWritePointsExistingSeries(b, 38, 3, 3, 1)
}
func BenchmarkWritePoints_ExistingSeries_100K(b *testing.B) {
	benchmarkWritePointsExistingSeries(b, 32, 5, 5, 1)
}
func BenchmarkWritePoints_ExistingSeries_250K(b *testing.B) {
	benchmarkWritePointsExistingSeries(b, 80, 5, 5, 1)
}
func BenchmarkWritePoints_ExistingSeries_500K(b *testing.B) {
	benchmarkWritePointsExistingSeries(b, 160, 5, 5, 1)
}
func BenchmarkWritePoints_ExistingSeries_1M(b *testing.B) {
	benchmarkWritePointsExistingSeries(b, 320, 5, 5, 1)
}
|
|
|
|
|
2015-06-03 14:09:50 +00:00
|
|
|
// benchmarkWritePoints benchmarks writing new series to a shard.
|
2015-06-28 06:54:34 +00:00
|
|
|
// mCnt - measurement count
|
2015-06-03 14:09:50 +00:00
|
|
|
// tkCnt - tag key count
|
|
|
|
// tvCnt - tag value count (values per tag)
|
|
|
|
// pntCnt - points per series. # of series = mCnt * (tvCnt ^ tkCnt)
|
2015-06-02 20:57:39 +00:00
|
|
|
func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
|
|
|
|
// Generate test series (measurements + unique tag sets).
|
|
|
|
series := genTestSeries(mCnt, tkCnt, tvCnt)
|
2015-06-02 21:17:31 +00:00
|
|
|
// Create index for the shard to use.
|
2015-07-20 19:59:46 +00:00
|
|
|
index := tsdb.NewDatabaseIndex()
|
2015-06-02 20:57:39 +00:00
|
|
|
// Generate point data to write to the shard.
|
2015-09-16 20:33:08 +00:00
|
|
|
points := []models.Point{}
|
2015-06-02 20:57:39 +00:00
|
|
|
for _, s := range series {
|
|
|
|
for val := 0.0; val < float64(pntCnt); val++ {
|
2015-10-27 16:21:54 +00:00
|
|
|
p := models.MustNewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now())
|
2015-06-02 20:57:39 +00:00
|
|
|
points = append(points, p)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Stop & reset timers and mem-stats before the main benchmark loop.
|
|
|
|
b.StopTimer()
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
// Run the benchmark loop.
|
|
|
|
for n := 0; n < b.N; n++ {
|
2015-06-03 14:09:50 +00:00
|
|
|
tmpDir, _ := ioutil.TempDir("", "shard_test")
|
2015-06-02 20:57:39 +00:00
|
|
|
tmpShard := path.Join(tmpDir, "shard")
|
2015-08-21 15:22:04 +00:00
|
|
|
tmpWal := path.Join(tmpDir, "wal")
|
|
|
|
shard := tsdb.NewShard(1, index, tmpShard, tmpWal, tsdb.NewEngineOptions())
|
2015-06-02 20:57:39 +00:00
|
|
|
shard.Open()
|
|
|
|
|
|
|
|
b.StartTimer()
|
|
|
|
// Call the function being benchmarked.
|
|
|
|
chunkedWrite(shard, points)
|
|
|
|
|
|
|
|
b.StopTimer()
|
|
|
|
shard.Close()
|
|
|
|
os.RemoveAll(tmpDir)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-03 14:09:50 +00:00
|
|
|
// benchmarkWritePointsExistingSeries benchmarks writing to existing series in a shard.
|
2015-06-28 06:54:34 +00:00
|
|
|
// mCnt - measurement count
|
2015-06-03 14:09:50 +00:00
|
|
|
// tkCnt - tag key count
|
|
|
|
// tvCnt - tag value count (values per tag)
|
|
|
|
// pntCnt - points per series. # of series = mCnt * (tvCnt ^ tkCnt)
|
2015-06-02 20:57:39 +00:00
|
|
|
func benchmarkWritePointsExistingSeries(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
|
|
|
|
// Generate test series (measurements + unique tag sets).
|
|
|
|
series := genTestSeries(mCnt, tkCnt, tvCnt)
|
2015-06-02 21:17:31 +00:00
|
|
|
// Create index for the shard to use.
|
2015-07-20 19:59:46 +00:00
|
|
|
index := tsdb.NewDatabaseIndex()
|
2015-06-02 20:57:39 +00:00
|
|
|
// Generate point data to write to the shard.
|
2015-09-16 20:33:08 +00:00
|
|
|
points := []models.Point{}
|
2015-06-02 20:57:39 +00:00
|
|
|
for _, s := range series {
|
|
|
|
for val := 0.0; val < float64(pntCnt); val++ {
|
2015-10-27 16:21:54 +00:00
|
|
|
p := models.MustNewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now())
|
2015-06-02 20:57:39 +00:00
|
|
|
points = append(points, p)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
tmpDir, _ := ioutil.TempDir("", "")
|
|
|
|
defer os.RemoveAll(tmpDir)
|
|
|
|
tmpShard := path.Join(tmpDir, "shard")
|
2015-08-21 15:22:04 +00:00
|
|
|
tmpWal := path.Join(tmpDir, "wal")
|
|
|
|
shard := tsdb.NewShard(1, index, tmpShard, tmpWal, tsdb.NewEngineOptions())
|
2015-06-02 20:57:39 +00:00
|
|
|
shard.Open()
|
|
|
|
defer shard.Close()
|
|
|
|
chunkedWrite(shard, points)
|
|
|
|
|
|
|
|
// Reset timers and mem-stats before the main benchmark loop.
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
// Run the benchmark loop.
|
|
|
|
for n := 0; n < b.N; n++ {
|
|
|
|
b.StopTimer()
|
|
|
|
for _, p := range points {
|
|
|
|
p.SetTime(p.Time().Add(time.Second))
|
|
|
|
}
|
|
|
|
|
|
|
|
b.StartTimer()
|
|
|
|
// Call the function being benchmarked.
|
|
|
|
chunkedWrite(shard, points)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-16 20:33:08 +00:00
|
|
|
func chunkedWrite(shard *tsdb.Shard, points []models.Point) {
|
2015-06-02 20:57:39 +00:00
|
|
|
nPts := len(points)
|
|
|
|
chunkSz := 10000
|
|
|
|
start := 0
|
|
|
|
end := chunkSz
|
|
|
|
|
|
|
|
for {
|
|
|
|
if end > nPts {
|
|
|
|
end = nPts
|
|
|
|
}
|
|
|
|
if end-start == 0 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
shard.WritePoints(points[start:end])
|
|
|
|
start = end
|
|
|
|
end += chunkSz
|
|
|
|
}
|
|
|
|
}
|
2015-11-04 21:06:06 +00:00
|
|
|
|
|
|
|
// Shard represents a test wrapper for tsdb.Shard.
type Shard struct {
	*tsdb.Shard // embedded shard under test; methods are promoted

	path string // temporary root directory (data + WAL), removed by Close
}
|
|
|
|
|
|
|
|
// NewShard returns a new instance of Shard with temp paths.
|
|
|
|
func NewShard() *Shard {
|
|
|
|
// Create temporary path for data and WAL.
|
|
|
|
path, err := ioutil.TempDir("", "influxdb-tsdb-")
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build engine options.
|
|
|
|
opt := tsdb.NewEngineOptions()
|
|
|
|
opt.Config.WALDir = filepath.Join(path, "wal")
|
|
|
|
|
|
|
|
return &Shard{
|
|
|
|
Shard: tsdb.NewShard(0,
|
|
|
|
tsdb.NewDatabaseIndex(),
|
|
|
|
filepath.Join(path, "data"),
|
|
|
|
filepath.Join(path, "wal"),
|
|
|
|
opt,
|
|
|
|
),
|
|
|
|
path: path,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// MustOpenShard returns a new open shard. Panic on error.
|
|
|
|
func MustOpenShard() *Shard {
|
|
|
|
sh := NewShard()
|
|
|
|
if err := sh.Open(); err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
return sh
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close closes the shard and removes all underlying data.
|
|
|
|
func (sh *Shard) Close() error {
|
|
|
|
defer os.RemoveAll(sh.path)
|
|
|
|
return sh.Shard.Close()
|
|
|
|
}
|
|
|
|
|
|
|
|
// MustWritePointsString parses the line protocol (with second precision) and
|
|
|
|
// inserts the resulting points into the shard. Panic on error.
|
|
|
|
func (sh *Shard) MustWritePointsString(s string) {
|
|
|
|
a, err := models.ParsePointsWithPrecision([]byte(strings.TrimSpace(s)), time.Time{}, "s")
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := sh.WritePoints(a); err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
}
|