refactor: move the tsm1 WAL into the storage/wal package

Because the WAL relies on the tsm1.Value type, we move that type into its own
tsm1/value package and set up aliases in tsm1 that forward to it. This also
required adding some accessor methods and changing consumers so they no longer
touch the unexported fields. I imagine this step will be useful one day when we
make the write path more efficient with respect to consuming points.
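
As a rough sketch of the forwarding pattern (this mirrors the aliases the commit adds in the tsm1 package, shown in the diff below; the snippet is abbreviated, not the full set):

package tsm1

import "github.com/influxdata/influxdb/tsdb/tsm1/value"

// Type aliases keep tsm1.Value and friends usable by existing callers while
// the concrete implementations now live in the tsm1/value package.
type (
	Value      = value.Value
	FloatValue = value.FloatValue
)

// Constructors forward too, so callers can keep writing tsm1.NewValue.
func NewValue(t int64, v interface{}) Value { return value.NewValue(t, v) }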

This commit additionally fixes some issues with code generation. The
iterator.tmpldata file and the generation setup for the array_cursor_* files
were accidentally removed along with the iterators, leaving those generated
files stale. Restore them and regenerate.
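
For context, template-driven generation like this is typically wired up through go:generate; a hypothetical sketch follows (the tool invocation and file names are assumptions for illustration, not taken from this commit):

// Hypothetical directives: the .tmpldata JSON feeds the .tmpl template that
// produces the checked-in *.gen.go files.
//go:generate tmpl -data=@array_cursor.gen.go.tmpldata array_cursor.gen.go.tmpl
//go:generate tmpl -data=@iterator.tmpldata iterator.gen.go.tmpl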

No change in functionality.
pull/11364/head
Jeff Wendling 2019-01-16 13:37:12 -07:00
parent c71c9ee094
commit c9bb55b889
23 changed files with 823 additions and 652 deletions
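
For readers skimming the diff below, a minimal consumer-side sketch of the relocated WAL API, assembled from the storage engine and WAL test changes in this commit (error handling trimmed; paths are illustrative):

package main

import (
	"github.com/influxdata/influxdb/storage/wal"
	"github.com/influxdata/influxdb/tsdb/tsm1/value"
)

func main() {
	w := wal.NewWAL("/tmp/wal") // previously tsm1.NewWAL
	if err := w.Open(); err != nil {
		panic(err)
	}
	defer w.Close()

	// Values now come from the tsm1/value package instead of tsm1 itself.
	if _, err := w.WriteMulti(map[string][]value.Value{
		"cpu,host=A#!~#value": {value.NewValue(1, 1.1)},
	}); err != nil {
		panic(err)
	}
}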

View File

@ -17,6 +17,7 @@ import (
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/storage"
"github.com/influxdata/influxdb/storage/wal"
"github.com/influxdata/influxdb/toml"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/tsi1"
@ -422,7 +423,7 @@ func collectWALFiles(path string) ([]string, error) {
var paths []string
for _, fi := range fis {
if filepath.Ext(fi.Name()) != "."+tsm1.WALFileExtension {
if filepath.Ext(fi.Name()) != "."+wal.WALFileExtension {
continue
}
paths = append(paths, filepath.Join(path, fi.Name()))

View File

@ -12,6 +12,7 @@ import (
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/storage/wal"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/tsi1"
"github.com/influxdata/influxdb/tsdb/tsm1"
@ -38,7 +39,7 @@ type Engine struct {
index *tsi1.Index
sfile *tsdb.SeriesFile
engine *tsm1.Engine
wal *tsm1.WAL
wal *wal.WAL
retentionEnforcer *retentionEnforcer
defaultMetricLabels prometheus.Labels
@ -119,17 +120,17 @@ func NewEngine(path string, c Config, options ...Option) *Engine {
tsi1.WithPath(c.GetIndexPath(path)))
// Initialize WAL
var wal tsm1.Log = new(tsm1.NopWAL)
var w tsm1.Log = new(tsm1.NopWAL)
if c.WAL.Enabled {
e.wal = tsm1.NewWAL(c.GetWALPath(path))
e.wal = wal.NewWAL(c.GetWALPath(path))
e.wal.WithFsyncDelay(time.Duration(c.WAL.FsyncDelay))
e.wal.EnableTraceLogging(c.TraceLoggingEnabled)
wal = e.wal
w = e.wal
}
// Initialise Engine
e.engine = tsm1.NewEngine(c.GetEnginePath(path), e.index, c.Engine,
tsm1.WithWAL(wal),
tsm1.WithWAL(w),
tsm1.WithTraceLogging(c.TraceLoggingEnabled))
// Apply options.

View File

@ -106,7 +106,7 @@ func (c *{{.name}}MultiShardArrayCursor) reset(cur cursors.{{.Name}}ArrayCursor,
}
func (c *{{.name}}MultiShardArrayCursor) Err() error { return c.err }
func (c *{{.name}}MultiShardArrayCursor) Err() error { return c.err }
func (c *{{.name}}MultiShardArrayCursor) Stats() cursors.CursorStats {
return c.{{.Name}}ArrayCursor.Stats()
@ -200,17 +200,17 @@ func (c {{$type}}) Next() {{$arrayType}} {
var acc {{.Type}}
for {
for _, v := range a.Values {
acc += v
}
for _, v := range a.Values {
acc += v
}
a = c.{{.Name}}ArrayCursor.Next()
if len(a.Timestamps) == 0 {
if len(a.Timestamps) == 0 {
c.ts[0] = ts
c.vs[0] = acc
c.res.Timestamps = c.ts[:]
c.res.Values = c.vs[:]
return c.res
}
}
}
}
@ -230,11 +230,11 @@ func (c *integer{{.Name}}CountArrayCursor) Next() *cursors.IntegerArray {
return &cursors.IntegerArray{}
}
ts := a.Timestamps[0]
var acc int64
for {
acc += int64(len(a.Timestamps))
a = c.{{.Name}}ArrayCursor.Next()
ts := a.Timestamps[0]
var acc int64
for {
acc += int64(len(a.Timestamps))
a = c.{{.Name}}ArrayCursor.Next()
if len(a.Timestamps) == 0 {
res := cursors.NewIntegerArrayLen(1)
res.Timestamps[0] = ts

View File

@ -0,0 +1,28 @@
package wal
import (
"fmt"
"io/ioutil"
"os"
"testing"
)
func MustTempDir() string {
dir, err := ioutil.TempDir("", "tsm1-test")
if err != nil {
panic(fmt.Sprintf("failed to create temp dir: %v", err))
}
return dir
}
func MustTempFile(dir string) *os.File {
f, err := ioutil.TempFile(dir, "tsm1test")
if err != nil {
panic(fmt.Sprintf("failed to create temp file: %v", err))
}
return f
}
func fatal(t *testing.T, msg string, err error) {
t.Fatalf("unexpected error %v: %v", msg, err)
}

storage/wal/metrics.go (new file, 69 lines added)
View File

@ -0,0 +1,69 @@
package wal
import (
"sort"
"github.com/prometheus/client_golang/prometheus"
)
// namespace is the leading part of all published metrics for the Storage service.
const namespace = "storage"
const walSubsystem = "wal" // sub-system associated with metrics for the WAL.
// walMetrics are a set of metrics concerned with tracking data about compactions.
type walMetrics struct {
OldSegmentBytes *prometheus.GaugeVec
CurrentSegmentBytes *prometheus.GaugeVec
Segments *prometheus.GaugeVec
Writes *prometheus.CounterVec
}
// newWALMetrics initialises the prometheus metrics for tracking the WAL.
func newWALMetrics(labels prometheus.Labels) *walMetrics {
var names []string
for k := range labels {
names = append(names, k)
}
sort.Strings(names)
writeNames := append(append([]string(nil), names...), "status")
sort.Strings(writeNames)
return &walMetrics{
OldSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "old_segment_bytes",
Help: "Number of bytes old WAL segments using on disk.",
}, names),
CurrentSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "current_segment_bytes",
Help: "Number of bytes TSM files using on disk.",
}, names),
Segments: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "segments_total",
Help: "Number of WAL segment files on disk.",
}, names),
Writes: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "writes_total",
Help: "Number of writes to the WAL.",
}, writeNames),
}
}
// PrometheusCollectors satisfies the prom.PrometheusCollector interface.
func (m *walMetrics) PrometheusCollectors() []prometheus.Collector {
return []prometheus.Collector{
m.OldSegmentBytes,
m.CurrentSegmentBytes,
m.Segments,
m.Writes,
}
}

View File

@ -0,0 +1,75 @@
package wal
import (
"testing"
"github.com/influxdata/influxdb/kit/prom/promtest"
"github.com/prometheus/client_golang/prometheus"
)
func TestMetrics_WAL(t *testing.T) {
// metrics to be shared by multiple file stores.
metrics := newWALMetrics(prometheus.Labels{"engine_id": "", "node_id": ""})
t1 := newWALTracker(metrics, prometheus.Labels{"engine_id": "0", "node_id": "0"})
t2 := newWALTracker(metrics, prometheus.Labels{"engine_id": "1", "node_id": "0"})
reg := prometheus.NewRegistry()
reg.MustRegister(metrics.PrometheusCollectors()...)
base := namespace + "_" + walSubsystem + "_"
// All the metric names
gauges := []string{
base + "old_segment_bytes",
base + "current_segment_bytes",
base + "segments_total",
}
counters := []string{
base + "writes_total",
}
// Generate some measurements.
for i, tracker := range []*walTracker{t1, t2} {
tracker.SetOldSegmentSize(uint64(i + len(gauges[0])))
tracker.SetCurrentSegmentSize(uint64(i + len(gauges[1])))
tracker.SetSegments(uint64(i + len(gauges[2])))
labels := tracker.Labels()
labels["status"] = "ok"
tracker.metrics.Writes.With(labels).Add(float64(i + len(counters[0])))
}
// Test that all the correct metrics are present.
mfs, err := reg.Gather()
if err != nil {
t.Fatal(err)
}
// The label variants for the two caches.
labelVariants := []prometheus.Labels{
prometheus.Labels{"engine_id": "0", "node_id": "0"},
prometheus.Labels{"engine_id": "1", "node_id": "0"},
}
for i, labels := range labelVariants {
for _, name := range gauges {
exp := float64(i + len(name))
metric := promtest.MustFindMetric(t, mfs, name, labels)
if got := metric.GetGauge().GetValue(); got != exp {
t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp)
}
}
for _, name := range counters {
exp := float64(i + len(name))
labels["status"] = "ok"
metric := promtest.MustFindMetric(t, mfs, name, labels)
if got := metric.GetCounter().GetValue(); got != exp {
t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp)
}
}
}
}

View File

@ -1,4 +1,4 @@
package tsm1
package wal
import "sync"

View File

@ -1,4 +1,4 @@
package tsm1
package wal
import (
"bufio"
@ -17,6 +17,8 @@ import (
"sync/atomic"
"time"
"github.com/influxdata/influxdb/tsdb/tsm1/value"
"github.com/golang/snappy"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/pkg/pool"
@ -24,23 +26,6 @@ import (
"go.uber.org/zap"
)
// Log describes an interface for a durable disk-based log.
type Log interface {
Open() error
Close() error
Path() string
LastWriteTime() time.Time
DiskSizeBytes() int64
WriteMulti(values map[string][]Value) (int, error)
DeleteRange(keys [][]byte, min, max int64) (int, error)
CloseSegment() error
ClosedSegments() ([]string, error)
Remove(files []string) error
}
const (
// DefaultSegmentSize of 10MB is the size at which segment files will be rolled over.
DefaultSegmentSize = 10 * 1024 * 1024
@ -182,7 +167,7 @@ func (l *WAL) Open() error {
return err
}
segments, err := segmentFileNames(l.path)
segments, err := SegmentFileNames(l.path)
if err != nil {
return err
}
@ -300,7 +285,7 @@ func (l *WAL) sync() {
// WriteMulti writes the given values to the WAL. It returns the WAL segment ID to
// which the points were written. If an error is returned the segment ID should
// be ignored.
func (l *WAL) WriteMulti(values map[string][]Value) (int, error) {
func (l *WAL) WriteMulti(values map[string][]value.Value) (int, error) {
entry := &WriteWALEntry{
Values: values,
}
@ -329,7 +314,7 @@ func (l *WAL) ClosedSegments() ([]string, error) {
currentFile = l.currentSegmentWriter.path()
}
files, err := segmentFileNames(l.path)
files, err := SegmentFileNames(l.path)
if err != nil {
return nil, err
}
@ -357,7 +342,7 @@ func (l *WAL) Remove(files []string) error {
}
// Refresh the on-disk size stats
segments, err := segmentFileNames(l.path)
segments, err := SegmentFileNames(l.path)
if err != nil {
return err
}
@ -538,8 +523,8 @@ func (l *WAL) Close() error {
return nil
}
// segmentFileNames will return all files that are WAL segment files in sorted order by ascending ID.
func segmentFileNames(dir string) ([]string, error) {
// SegmentFileNames will return all files that are WAL segment files in sorted order by ascending ID.
func SegmentFileNames(dir string) ([]string, error) {
names, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("%s*.%s", WALFilePrefix, WALFileExtension)))
if err != nil {
return nil, err
@ -666,7 +651,7 @@ type WALEntry interface {
// WriteWALEntry represents a write of points.
type WriteWALEntry struct {
Values map[string][]Value
Values map[string][]value.Value
sz int
}
@ -687,17 +672,17 @@ func (w *WriteWALEntry) MarshalSize() int {
encLen += 8 * len(v) // timestamps (8)
switch v[0].(type) {
case FloatValue, IntegerValue, UnsignedValue:
case value.FloatValue, value.IntegerValue, value.UnsignedValue:
encLen += 8 * len(v)
case BooleanValue:
case value.BooleanValue:
encLen += 1 * len(v)
case StringValue:
case value.StringValue:
for _, vv := range v {
str, ok := vv.(StringValue)
str, ok := vv.(value.StringValue)
if !ok {
return 0
}
encLen += 4 + len(str.value)
encLen += 4 + len(str.RawValue())
}
default:
return 0
@ -746,15 +731,15 @@ func (w *WriteWALEntry) Encode(dst []byte) ([]byte, error) {
for k, v := range w.Values {
switch v[0].(type) {
case FloatValue:
case value.FloatValue:
curType = float64EntryType
case IntegerValue:
case value.IntegerValue:
curType = integerEntryType
case UnsignedValue:
case value.UnsignedValue:
curType = unsignedEntryType
case BooleanValue:
case value.BooleanValue:
curType = booleanEntryType
case StringValue:
case value.StringValue:
curType = stringEntryType
default:
return nil, fmt.Errorf("unsupported value type: %T", v[0])
@ -774,41 +759,41 @@ func (w *WriteWALEntry) Encode(dst []byte) ([]byte, error) {
n += 8
switch vv := vv.(type) {
case FloatValue:
case value.FloatValue:
if curType != float64EntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
binary.BigEndian.PutUint64(dst[n:n+8], math.Float64bits(vv.value))
binary.BigEndian.PutUint64(dst[n:n+8], math.Float64bits(vv.RawValue()))
n += 8
case IntegerValue:
case value.IntegerValue:
if curType != integerEntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.value))
binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.RawValue()))
n += 8
case UnsignedValue:
case value.UnsignedValue:
if curType != unsignedEntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.value))
binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.RawValue()))
n += 8
case BooleanValue:
case value.BooleanValue:
if curType != booleanEntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
if vv.value {
if vv.RawValue() {
dst[n] = 1
} else {
dst[n] = 0
}
n++
case StringValue:
case value.StringValue:
if curType != stringEntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
binary.BigEndian.PutUint32(dst[n:n+4], uint32(len(vv.value)))
binary.BigEndian.PutUint32(dst[n:n+4], uint32(len(vv.RawValue())))
n += 4
n += copy(dst[n:], vv.value)
n += copy(dst[n:], vv.RawValue())
default:
return nil, fmt.Errorf("unsupported value found in %T slice: %T", v[0].Value(), vv)
}
@ -863,13 +848,13 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
return ErrWALCorrupt
}
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
un := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
v := math.Float64frombits((binary.BigEndian.Uint64(b[i : i+8])))
i += 8
values = append(values, NewFloatValue(un, v))
values = append(values, value.NewFloatValue(un, v))
}
w.Values[k] = values
case integerEntryType:
@ -877,13 +862,13 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
return ErrWALCorrupt
}
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
un := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
v := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
values = append(values, NewIntegerValue(un, v))
values = append(values, value.NewIntegerValue(un, v))
}
w.Values[k] = values
@ -892,13 +877,13 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
return ErrWALCorrupt
}
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
un := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
v := binary.BigEndian.Uint64(b[i : i+8])
i += 8
values = append(values, NewUnsignedValue(un, v))
values = append(values, value.NewUnsignedValue(un, v))
}
w.Values[k] = values
@ -907,7 +892,7 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
return ErrWALCorrupt
}
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
un := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
@ -915,15 +900,15 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
v := b[i]
i += 1
if v == 1 {
values = append(values, NewBooleanValue(un, true))
values = append(values, value.NewBooleanValue(un, true))
} else {
values = append(values, NewBooleanValue(un, false))
values = append(values, value.NewBooleanValue(un, false))
}
}
w.Values[k] = values
case stringEntryType:
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
if i+12 > len(b) {
return ErrWALCorrupt
@ -945,7 +930,7 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
v := string(b[i : i+length])
i += length
values = append(values, NewStringValue(un, v))
values = append(values, value.NewStringValue(un, v))
}
w.Values[k] = values
@ -1250,7 +1235,7 @@ func (r *WALSegmentReader) Next() bool {
switch WalEntryType(entryType) {
case WriteWALEntryType:
r.entry = &WriteWALEntry{
Values: make(map[string][]Value),
Values: make(map[string][]value.Value),
}
case DeleteWALEntryType:
r.entry = &DeleteWALEntry{}
@ -1310,20 +1295,3 @@ func idFromFileName(name string) (int, error) {
return int(id), err
}
// NopWAL implements the Log interface and provides a no-op WAL implementation.
type NopWAL struct{}
func (w NopWAL) Open() error { return nil }
func (w NopWAL) Close() error { return nil }
func (w NopWAL) Path() string { return "" }
func (w NopWAL) LastWriteTime() time.Time { return time.Time{} }
func (w NopWAL) DiskSizeBytes() int64 { return 0 }
func (w NopWAL) WriteMulti(values map[string][]Value) (int, error) { return 0, nil }
func (w NopWAL) DeleteRange(keys [][]byte, min, max int64) (int, error) { return 0, nil }
func (w NopWAL) CloseSegment() error { return nil }
func (w NopWAL) ClosedSegments() ([]string, error) { return nil, nil }
func (w NopWAL) Remove(files []string) error { return nil }

View File

@ -1,4 +1,4 @@
package tsm1_test
package wal
import (
"fmt"
@ -9,30 +9,30 @@ import (
"github.com/golang/snappy"
"github.com/influxdata/influxdb/pkg/slices"
"github.com/influxdata/influxdb/tsdb/tsm1"
"github.com/influxdata/influxdb/tsdb/tsm1/value"
)
func TestWALWriter_WriteMulti_Single(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
p1 := tsm1.NewValue(1, 1.1)
p2 := tsm1.NewValue(1, int64(1))
p3 := tsm1.NewValue(1, true)
p4 := tsm1.NewValue(1, "string")
p5 := tsm1.NewValue(1, ^uint64(0))
p1 := value.NewValue(1, 1.1)
p2 := value.NewValue(1, int64(1))
p3 := value.NewValue(1, true)
p4 := value.NewValue(1, "string")
p5 := value.NewValue(1, ^uint64(0))
values := map[string][]tsm1.Value{
"cpu,host=A#!~#float": []tsm1.Value{p1},
"cpu,host=A#!~#int": []tsm1.Value{p2},
"cpu,host=A#!~#bool": []tsm1.Value{p3},
"cpu,host=A#!~#string": []tsm1.Value{p4},
"cpu,host=A#!~#unsigned": []tsm1.Value{p5},
values := map[string][]value.Value{
"cpu,host=A#!~#float": []value.Value{p1},
"cpu,host=A#!~#int": []value.Value{p2},
"cpu,host=A#!~#bool": []value.Value{p3},
"cpu,host=A#!~#string": []value.Value{p4},
"cpu,host=A#!~#unsigned": []value.Value{p5},
}
entry := &tsm1.WriteWALEntry{
entry := &WriteWALEntry{
Values: values,
}
@ -48,7 +48,7 @@ func TestWALWriter_WriteMulti_Single(t *testing.T) {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
r := NewWALSegmentReader(f)
if !r.Next() {
t.Fatalf("expected next, got false")
@ -59,7 +59,7 @@ func TestWALWriter_WriteMulti_Single(t *testing.T) {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
e, ok := we.(*WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
@ -81,19 +81,19 @@ func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
var points []tsm1.Value
var points []value.Value
for i := 0; i < 100000; i++ {
points = append(points, tsm1.NewValue(int64(i), int64(1)))
points = append(points, value.NewValue(int64(i), int64(1)))
}
values := map[string][]tsm1.Value{
values := map[string][]value.Value{
"cpu,host=A,server=01,foo=bar,tag=really-long#!~#float": points,
"mem,host=A,server=01,foo=bar,tag=really-long#!~#float": points,
}
entry := &tsm1.WriteWALEntry{
entry := &WriteWALEntry{
Values: values,
}
@ -109,7 +109,7 @@ func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
r := NewWALSegmentReader(f)
if !r.Next() {
t.Fatalf("expected next, got false")
@ -120,7 +120,7 @@ func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
e, ok := we.(*WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
@ -141,22 +141,22 @@ func TestWALWriter_WriteMulti_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
p1 := tsm1.NewValue(1, int64(1))
p2 := tsm1.NewValue(1, int64(2))
p1 := value.NewValue(1, int64(1))
p2 := value.NewValue(1, int64(2))
exp := []struct {
key string
values []tsm1.Value
values []value.Value
}{
{"cpu,host=A#!~#value", []tsm1.Value{p1}},
{"cpu,host=B#!~#value", []tsm1.Value{p2}},
{"cpu,host=A#!~#value", []value.Value{p1}},
{"cpu,host=B#!~#value", []value.Value{p2}},
}
for _, v := range exp {
entry := &tsm1.WriteWALEntry{
Values: map[string][]tsm1.Value{v.key: v.values},
entry := &WriteWALEntry{
Values: map[string][]value.Value{v.key: v.values},
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
@ -172,7 +172,7 @@ func TestWALWriter_WriteMulti_Multiple(t *testing.T) {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
r := NewWALSegmentReader(f)
for _, ep := range exp {
if !r.Next() {
@ -184,7 +184,7 @@ func TestWALWriter_WriteMulti_Multiple(t *testing.T) {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
e, ok := we.(*WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
@ -215,9 +215,9 @@ func TestWALWriter_WriteDelete_Single(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
entry := &tsm1.DeleteWALEntry{
entry := &DeleteWALEntry{
Keys: [][]byte{[]byte("cpu")},
}
@ -233,7 +233,7 @@ func TestWALWriter_WriteDelete_Single(t *testing.T) {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
r := NewWALSegmentReader(f)
if !r.Next() {
t.Fatalf("expected next, got false")
@ -244,7 +244,7 @@ func TestWALWriter_WriteDelete_Single(t *testing.T) {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.DeleteWALEntry)
e, ok := we.(*DeleteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
@ -262,14 +262,14 @@ func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
p1 := tsm1.NewValue(1, true)
values := map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{p1},
p1 := value.NewValue(1, true)
values := map[string][]value.Value{
"cpu,host=A#!~#value": []value.Value{p1},
}
writeEntry := &tsm1.WriteWALEntry{
writeEntry := &WriteWALEntry{
Values: values,
}
@ -282,7 +282,7 @@ func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
}
// Write the delete entry
deleteEntry := &tsm1.DeleteWALEntry{
deleteEntry := &DeleteWALEntry{
Keys: [][]byte{[]byte("cpu,host=A#!~value")},
}
@ -299,7 +299,7 @@ func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
r := NewWALSegmentReader(f)
// Read the write points first
if !r.Next() {
@ -311,7 +311,7 @@ func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
e, ok := we.(*WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
@ -338,7 +338,7 @@ func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
fatal(t, "read entry", err)
}
de, ok := we.(*tsm1.DeleteWALEntry)
de, ok := we.(*DeleteWALEntry)
if !ok {
t.Fatalf("expected DeleteWALEntry: got %#v", e)
}
@ -356,17 +356,17 @@ func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
p1 := tsm1.NewValue(1, 1.0)
p2 := tsm1.NewValue(2, 2.0)
p3 := tsm1.NewValue(3, 3.0)
p1 := value.NewValue(1, 1.0)
p2 := value.NewValue(2, 2.0)
p3 := value.NewValue(3, 3.0)
values := map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{p1, p2, p3},
values := map[string][]value.Value{
"cpu,host=A#!~#value": []value.Value{p1, p2, p3},
}
writeEntry := &tsm1.WriteWALEntry{
writeEntry := &WriteWALEntry{
Values: values,
}
@ -379,7 +379,7 @@ func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
}
// Write the delete entry
deleteEntry := &tsm1.DeleteRangeWALEntry{
deleteEntry := &DeleteRangeWALEntry{
Keys: [][]byte{[]byte("cpu,host=A#!~value")},
Min: 2,
Max: 3,
@ -398,7 +398,7 @@ func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
r := NewWALSegmentReader(f)
// Read the write points first
if !r.Next() {
@ -410,7 +410,7 @@ func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
e, ok := we.(*WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
@ -437,7 +437,7 @@ func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
fatal(t, "read entry", err)
}
de, ok := we.(*tsm1.DeleteRangeWALEntry)
de, ok := we.(*DeleteRangeWALEntry)
if !ok {
t.Fatalf("expected DeleteWALEntry: got %#v", e)
}
@ -464,7 +464,7 @@ func TestWAL_ClosedSegments(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
w := tsm1.NewWAL(dir)
w := NewWAL(dir)
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
}
@ -478,9 +478,9 @@ func TestWAL_ClosedSegments(t *testing.T) {
t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp)
}
if _, err := w.WriteMulti(map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{
tsm1.NewValue(1, 1.1),
if _, err := w.WriteMulti(map[string][]value.Value{
"cpu,host=A#!~#value": []value.Value{
value.NewValue(1, 1.1),
},
}); err != nil {
t.Fatalf("error writing points: %v", err)
@ -491,7 +491,7 @@ func TestWAL_ClosedSegments(t *testing.T) {
}
// Re-open the WAL
w = tsm1.NewWAL(dir)
w = NewWAL(dir)
defer w.Close()
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
@ -510,7 +510,7 @@ func TestWAL_Delete(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
w := tsm1.NewWAL(dir)
w := NewWAL(dir)
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
}
@ -533,7 +533,7 @@ func TestWAL_Delete(t *testing.T) {
}
// Re-open the WAL
w = tsm1.NewWAL(dir)
w = NewWAL(dir)
defer w.Close()
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
@ -552,15 +552,15 @@ func TestWALWriter_Corrupt(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
corruption := []byte{1, 4, 0, 0, 0}
p1 := tsm1.NewValue(1, 1.1)
values := map[string][]tsm1.Value{
"cpu,host=A#!~#float": []tsm1.Value{p1},
p1 := value.NewValue(1, 1.1)
values := map[string][]value.Value{
"cpu,host=A#!~#float": []value.Value{p1},
}
entry := &tsm1.WriteWALEntry{
entry := &WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
@ -580,7 +580,7 @@ func TestWALWriter_Corrupt(t *testing.T) {
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
r := NewWALSegmentReader(f)
// Try to decode two entries.
@ -611,15 +611,15 @@ func TestWALSegmentReader_Corrupt(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
p4 := tsm1.NewValue(1, "string")
p4 := value.NewValue(1, "string")
values := map[string][]tsm1.Value{
"cpu,host=A#!~#string": []tsm1.Value{p4, p4},
values := map[string][]value.Value{
"cpu,host=A#!~#string": []value.Value{p4, p4},
}
entry := &tsm1.WriteWALEntry{
entry := &WriteWALEntry{
Values: values,
}
@ -642,7 +642,7 @@ func TestWALSegmentReader_Corrupt(t *testing.T) {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
r := NewWALSegmentReader(f)
defer r.Close()
// Try to decode two entries.
@ -652,21 +652,21 @@ func TestWALSegmentReader_Corrupt(t *testing.T) {
}
func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) {
p1 := tsm1.NewValue(1, 1.1)
p2 := tsm1.NewValue(1, int64(1))
p3 := tsm1.NewValue(1, true)
p4 := tsm1.NewValue(1, "string")
p5 := tsm1.NewValue(1, uint64(1))
p1 := value.NewValue(1, 1.1)
p2 := value.NewValue(1, int64(1))
p3 := value.NewValue(1, true)
p4 := value.NewValue(1, "string")
p5 := value.NewValue(1, uint64(1))
values := map[string][]tsm1.Value{
"cpu,host=A#!~#float": []tsm1.Value{p1, p1},
"cpu,host=A#!~#int": []tsm1.Value{p2, p2},
"cpu,host=A#!~#bool": []tsm1.Value{p3, p3},
"cpu,host=A#!~#string": []tsm1.Value{p4, p4},
"cpu,host=A#!~#unsigned": []tsm1.Value{p5, p5},
values := map[string][]value.Value{
"cpu,host=A#!~#float": []value.Value{p1, p1},
"cpu,host=A#!~#int": []value.Value{p2, p2},
"cpu,host=A#!~#bool": []value.Value{p3, p3},
"cpu,host=A#!~#string": []value.Value{p4, p4},
"cpu,host=A#!~#unsigned": []value.Value{p5, p5},
}
w := &tsm1.WriteWALEntry{
w := &WriteWALEntry{
Values: values,
}
@ -681,7 +681,7 @@ func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) {
truncated := make([]byte, i)
copy(truncated, b[:i])
err := w.UnmarshalBinary(truncated)
if err != nil && err != tsm1.ErrWALCorrupt {
if err != nil && err != ErrWALCorrupt {
t.Fatalf("unexpected error: %v", err)
}
}
@ -715,13 +715,13 @@ func TestDeleteWALEntry_UnmarshalBinary(t *testing.T) {
}
for i, example := range examples {
w := &tsm1.DeleteWALEntry{Keys: slices.StringsToBytes(example.In...)}
w := &DeleteWALEntry{Keys: slices.StringsToBytes(example.In...)}
b, err := w.MarshalBinary()
if err != nil {
t.Fatalf("[example %d] unexpected error, got %v", i, err)
}
out := &tsm1.DeleteWALEntry{}
out := &DeleteWALEntry{}
if err := out.UnmarshalBinary(b); err != nil {
t.Fatalf("[example %d] %v", i, err)
}
@ -733,7 +733,7 @@ func TestDeleteWALEntry_UnmarshalBinary(t *testing.T) {
}
func TestWriteWALSegment_UnmarshalBinary_DeleteWALCorrupt(t *testing.T) {
w := &tsm1.DeleteWALEntry{
w := &DeleteWALEntry{
Keys: [][]byte{[]byte("foo"), []byte("bar")},
}
@ -748,14 +748,14 @@ func TestWriteWALSegment_UnmarshalBinary_DeleteWALCorrupt(t *testing.T) {
truncated := make([]byte, i)
copy(truncated, b[:i])
err := w.UnmarshalBinary(truncated)
if err != nil && err != tsm1.ErrWALCorrupt {
if err != nil && err != ErrWALCorrupt {
t.Fatalf("unexpected error: %v", err)
}
}
}
func TestWriteWALSegment_UnmarshalBinary_DeleteRangeWALCorrupt(t *testing.T) {
w := &tsm1.DeleteRangeWALEntry{
w := &DeleteRangeWALEntry{
Keys: [][]byte{[]byte("foo"), []byte("bar")},
Min: 1,
Max: 2,
@ -772,26 +772,26 @@ func TestWriteWALSegment_UnmarshalBinary_DeleteRangeWALCorrupt(t *testing.T) {
truncated := make([]byte, i)
copy(truncated, b[:i])
err := w.UnmarshalBinary(truncated)
if err != nil && err != tsm1.ErrWALCorrupt {
if err != nil && err != ErrWALCorrupt {
t.Fatalf("unexpected error: %v", err)
}
}
}
func BenchmarkWALSegmentWriter(b *testing.B) {
points := map[string][]tsm1.Value{}
points := map[string][]value.Value{}
for i := 0; i < 5000; i++ {
k := "cpu,host=A#!~#value"
points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1))
points[k] = append(points[k], value.NewValue(int64(i), 1.1))
}
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
write := &tsm1.WriteWALEntry{
write := &WriteWALEntry{
Values: points,
}
@ -804,19 +804,19 @@ func BenchmarkWALSegmentWriter(b *testing.B) {
}
func BenchmarkWALSegmentReader(b *testing.B) {
points := map[string][]tsm1.Value{}
points := map[string][]value.Value{}
for i := 0; i < 5000; i++ {
k := "cpu,host=A#!~#value"
points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1))
points[k] = append(points[k], value.NewValue(int64(i), 1.1))
}
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
w := NewWALSegmentWriter(f)
write := &tsm1.WriteWALEntry{
write := &WriteWALEntry{
Values: points,
}
@ -826,7 +826,7 @@ func BenchmarkWALSegmentReader(b *testing.B) {
}
}
r := tsm1.NewWALSegmentReader(f)
r := NewWALSegmentReader(f)
b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -852,7 +852,7 @@ func MustReadFileSize(f *os.File) int64 {
return stat.Size()
}
func mustMarshalEntry(entry tsm1.WALEntry) (tsm1.WalEntryType, []byte) {
func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
bytes := make([]byte, 1024<<2)
b, err := entry.Encode(bytes)

View File

@ -83,12 +83,12 @@ func (c *floatArrayAscendingCursor) Next() *tsdb.FloatArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -126,7 +126,7 @@ func (c *floatArrayAscendingCursor) Next() *tsdb.FloatArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
pos++
c.cache.pos++
}
@ -247,12 +247,12 @@ func (c *floatArrayDescendingCursor) Next() *tsdb.FloatArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -285,7 +285,7 @@ func (c *floatArrayDescendingCursor) Next() *tsdb.FloatArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
pos++
c.cache.pos--
}
@ -391,12 +391,12 @@ func (c *integerArrayAscendingCursor) Next() *tsdb.IntegerArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -434,7 +434,7 @@ func (c *integerArrayAscendingCursor) Next() *tsdb.IntegerArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
pos++
c.cache.pos++
}
@ -555,12 +555,12 @@ func (c *integerArrayDescendingCursor) Next() *tsdb.IntegerArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -593,7 +593,7 @@ func (c *integerArrayDescendingCursor) Next() *tsdb.IntegerArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
pos++
c.cache.pos--
}
@ -699,12 +699,12 @@ func (c *unsignedArrayAscendingCursor) Next() *tsdb.UnsignedArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -742,7 +742,7 @@ func (c *unsignedArrayAscendingCursor) Next() *tsdb.UnsignedArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
pos++
c.cache.pos++
}
@ -863,12 +863,12 @@ func (c *unsignedArrayDescendingCursor) Next() *tsdb.UnsignedArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -901,7 +901,7 @@ func (c *unsignedArrayDescendingCursor) Next() *tsdb.UnsignedArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
pos++
c.cache.pos--
}
@ -1007,12 +1007,12 @@ func (c *stringArrayAscendingCursor) Next() *tsdb.StringArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -1050,7 +1050,7 @@ func (c *stringArrayAscendingCursor) Next() *tsdb.StringArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
pos++
c.cache.pos++
}
@ -1173,12 +1173,12 @@ func (c *stringArrayDescendingCursor) Next() *tsdb.StringArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -1211,7 +1211,7 @@ func (c *stringArrayDescendingCursor) Next() *tsdb.StringArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
pos++
c.cache.pos--
}
@ -1319,12 +1319,12 @@ func (c *booleanArrayAscendingCursor) Next() *tsdb.BooleanArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -1362,7 +1362,7 @@ func (c *booleanArrayAscendingCursor) Next() *tsdb.BooleanArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
pos++
c.cache.pos++
}
@ -1483,12 +1483,12 @@ func (c *booleanArrayDescendingCursor) Next() *tsdb.BooleanArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -1521,7 +1521,7 @@ func (c *booleanArrayDescendingCursor) Next() *tsdb.BooleanArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
pos++
c.cache.pos--
}

View File

@ -54,7 +54,7 @@ c.end = end
})
}
func (c *{{$type}}) Err() error { return nil }
func (c *{{$type}}) Err() error { return nil }
// close closes the cursor and any dependent cursors.
func (c *{{$type}}) Close() {
@ -82,12 +82,12 @@ func (c *{{$type}}) Next() {{$arrayType}} {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -125,7 +125,7 @@ func (c *{{$type}}) Next() {{$arrayType}} {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
pos++
c.cache.pos++
}
@ -228,7 +228,7 @@ func (c *{{$type}}) reset(seek, end int64, cacheValues Values, tsmKeyCursor *Key
}
}
func (c *{{$type}}) Err() error { return nil }
func (c *{{$type}}) Err() error { return nil }
func (c *{{$type}}) Close() {
if c.tsm.keyCursor != nil {
@ -254,12 +254,12 @@ func (c *{{$type}}) Next() {{$arrayType}} {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -292,7 +292,7 @@ func (c *{{$type}}) Next() {{$arrayType}} {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
pos++
c.cache.pos--
}

View File

@ -0,0 +1,42 @@
[
{
"Name":"Float",
"name":"float",
"Type":"float64",
"ValueType":"FloatValue",
"Nil":"0",
"Size":"8"
},
{
"Name":"Integer",
"name":"integer",
"Type":"int64",
"ValueType":"IntegerValue",
"Nil":"0",
"Size":"8"
},
{
"Name":"Unsigned",
"name":"unsigned",
"Type":"uint64",
"ValueType":"UnsignedValue",
"Nil":"0",
"Size":"8"
},
{
"Name":"String",
"name":"string",
"Type":"string",
"ValueType":"StringValue",
"Nil":"\"\"",
"Size":"0"
},
{
"Name":"Boolean",
"name":"boolean",
"Type":"bool",
"ValueType":"BooleanValue",
"Nil":"false",
"Size":"1"
}
]

View File

@ -9,6 +9,7 @@ import (
"time"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/storage/wal"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxql"
"github.com/prometheus/client_golang/prometheus"
@ -646,7 +647,7 @@ func NewCacheLoader(files []string) *CacheLoader {
// continues with the next segment file.
func (cl *CacheLoader) Load(cache *Cache) error {
var r *WALSegmentReader
var r *wal.WALSegmentReader
for _, fn := range cl.files {
if err := func() error {
f, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0666)
@ -668,7 +669,7 @@ func (cl *CacheLoader) Load(cache *Cache) error {
}
if r == nil {
r = NewWALSegmentReader(f)
r = wal.NewWALSegmentReader(f)
defer r.Close()
} else {
r.Reset(f)
@ -686,13 +687,13 @@ func (cl *CacheLoader) Load(cache *Cache) error {
}
switch t := entry.(type) {
case *WriteWALEntry:
case *wal.WriteWALEntry:
if err := cache.WriteMulti(t.Values); err != nil {
return err
}
case *DeleteRangeWALEntry:
case *wal.DeleteRangeWALEntry:
cache.DeleteRange(t.Keys, t.Min, t.Max)
case *DeleteWALEntry:
case *wal.DeleteWALEntry:
cache.Delete(t.Keys)
}
}

View File

@ -15,6 +15,8 @@ import (
"sync/atomic"
"testing"
"github.com/influxdata/influxdb/storage/wal"
"github.com/golang/snappy"
)
@ -604,7 +606,7 @@ func TestCacheLoader_LoadSingle(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
f := mustTempFile(dir)
w := NewWALSegmentWriter(f)
w := wal.NewWALSegmentWriter(f)
p1 := NewValue(1, 1.1)
p2 := NewValue(1, int64(1))
@ -616,7 +618,7 @@ func TestCacheLoader_LoadSingle(t *testing.T) {
"baz": {p3},
}
entry := &WriteWALEntry{
entry := &wal.WriteWALEntry{
Values: values,
}
@ -676,7 +678,7 @@ func TestCacheLoader_LoadDouble(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
f1, f2 := mustTempFile(dir), mustTempFile(dir)
w1, w2 := NewWALSegmentWriter(f1), NewWALSegmentWriter(f2)
w1, w2 := wal.NewWALSegmentWriter(f1), wal.NewWALSegmentWriter(f2)
p1 := NewValue(1, 1.1)
p2 := NewValue(1, int64(1))
@ -685,8 +687,8 @@ func TestCacheLoader_LoadDouble(t *testing.T) {
// Write first and second segment.
segmentWrite := func(w *WALSegmentWriter, values map[string][]Value) {
entry := &WriteWALEntry{
segmentWrite := func(w *wal.WALSegmentWriter, values map[string][]Value) {
entry := &wal.WriteWALEntry{
Values: values,
}
if err := w1.Write(mustMarshalEntry(entry)); err != nil {
@ -741,7 +743,7 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
f := mustTempFile(dir)
w := NewWALSegmentWriter(f)
w := wal.NewWALSegmentWriter(f)
p1 := NewValue(1, 1.0)
p2 := NewValue(2, 2.0)
@ -751,7 +753,7 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
"foo": {p1, p2, p3},
}
entry := &WriteWALEntry{
entry := &wal.WriteWALEntry{
Values: values,
}
@ -763,7 +765,7 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
t.Fatalf("flush error: %v", err)
}
dentry := &DeleteRangeWALEntry{
dentry := &wal.DeleteRangeWALEntry{
Keys: [][]byte{[]byte("foo")},
Min: 2,
Max: 3,
@ -857,7 +859,7 @@ func mustTempFile(dir string) *os.File {
return f
}
func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
func mustMarshalEntry(entry wal.WALEntry) (wal.WalEntryType, []byte) {
bytes := make([]byte, 1024<<2)
b, err := entry.Encode(bytes)

View File

@ -212,8 +212,8 @@ type FloatValues []FloatValue
func NewFloatArrayFromValues(v FloatValues) *tsdb.FloatArray {
a := tsdb.NewFloatArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -442,8 +442,8 @@ func encodeFloatValuesBlock(buf []byte, values []FloatValue) ([]byte, error) {
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(v.value)
tsenc.Write(v.UnixNano())
venc.Write(v.RawValue())
}
venc.Flush()
@ -482,8 +482,8 @@ type IntegerValues []IntegerValue
func NewIntegerArrayFromValues(v IntegerValues) *tsdb.IntegerArray {
a := tsdb.NewIntegerArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -712,8 +712,8 @@ func encodeIntegerValuesBlock(buf []byte, values []IntegerValue) ([]byte, error)
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(v.value)
tsenc.Write(v.UnixNano())
venc.Write(v.RawValue())
}
venc.Flush()
@ -752,8 +752,8 @@ type UnsignedValues []UnsignedValue
func NewUnsignedArrayFromValues(v UnsignedValues) *tsdb.UnsignedArray {
a := tsdb.NewUnsignedArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -982,8 +982,8 @@ func encodeUnsignedValuesBlock(buf []byte, values []UnsignedValue) ([]byte, erro
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(int64(v.value))
tsenc.Write(v.UnixNano())
venc.Write(int64(v.RawValue()))
}
venc.Flush()
@ -1022,8 +1022,8 @@ type StringValues []StringValue
func NewStringArrayFromValues(v StringValues) *tsdb.StringArray {
a := tsdb.NewStringArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -1252,8 +1252,8 @@ func encodeStringValuesBlock(buf []byte, values []StringValue) ([]byte, error) {
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(v.value)
tsenc.Write(v.UnixNano())
venc.Write(v.RawValue())
}
venc.Flush()
@ -1292,8 +1292,8 @@ type BooleanValues []BooleanValue
func NewBooleanArrayFromValues(v BooleanValues) *tsdb.BooleanArray {
a := tsdb.NewBooleanArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -1522,8 +1522,8 @@ func encodeBooleanValuesBlock(buf []byte, values []BooleanValue) ([]byte, error)
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(v.value)
tsenc.Write(v.UnixNano())
venc.Write(v.RawValue())
}
venc.Flush()

View File

@ -15,8 +15,8 @@ type {{.Name}}Values []{{.Name}}Value
func New{{.Name}}ArrayFromValues(v {{.Name}}Values) *tsdb.{{.Name}}Array {
a := tsdb.New{{.Name}}ArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -247,8 +247,8 @@ func encode{{ .Name }}ValuesBlock(buf []byte, values []{{.Name}}Value) ([]byte,
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write({{if .CastType}}{{.CastType}}(v.value){{else}}v.value{{end}})
tsenc.Write(v.UnixNano())
venc.Write({{if .CastType}}{{.CastType}}(v.RawValue()){{else}}v.RawValue(){{end}})
}
venc.Flush()

View File

@ -14,7 +14,7 @@ func makeIntegerValues(count int, min, max int64) IntegerValues {
inc := (max - min) / int64(count)
for i := 0; i < count; i++ {
vals[i].unixnano = ts
vals[i] = NewRawIntegerValue(ts, 0)
ts += inc
}
@ -24,7 +24,7 @@ func makeIntegerValues(count int, min, max int64) IntegerValues {
func makeIntegerValuesFromSlice(t []int64) IntegerValues {
iv := make(IntegerValues, len(t))
for i, v := range t {
iv[i].unixnano = v
iv[i] = NewRawIntegerValue(v, 0)
}
return iv
}
@ -91,7 +91,7 @@ func TestIntegerValues_Exclude(t *testing.T) {
vals = vals.Exclude(tc.min, tc.max)
var got []int64
for _, v := range vals {
got = append(got, v.unixnano)
got = append(got, v.UnixNano())
}
opt := cmp.AllowUnexported(IntegerValue{})
if !cmp.Equal(tc.exp, got, opt) {
@ -122,7 +122,7 @@ func TestIntegerValues_Include(t *testing.T) {
vals = vals.Include(tc.min, tc.max)
var got []int64
for _, v := range vals {
got = append(got, v.unixnano)
got = append(got, v.UnixNano())
}
opt := cmp.AllowUnexported(IntegerValue{})
if !cmp.Equal(tc.exp, got, opt) {

View File

@ -4,10 +4,9 @@ import (
"encoding/binary"
"fmt"
"runtime"
"time"
"github.com/influxdata/influxdb/pkg/pool"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/tsm1/value"
"github.com/influxdata/influxql"
)
@ -32,6 +31,55 @@ const (
encodedBlockHeaderSize = 1
)
//
// These aliases and functions forward to the value package so that other packages can depend
// on them without pulling in tsm1. Specifically, this is due to moving the WAL code out of
// this package and into the storage package. When we have a better value implementation across
// the board, we should be able to remove these.
//
type (
Value = value.Value
IntegerValue = value.IntegerValue
UnsignedValue = value.UnsignedValue
FloatValue = value.FloatValue
BooleanValue = value.BooleanValue
StringValue = value.StringValue
)
func NewValue(t int64, v interface{}) Value { return value.NewValue(t, v) }
// NewRawIntegerValue returns a new integer value.
func NewRawIntegerValue(t int64, v int64) IntegerValue { return value.NewRawIntegerValue(t, v) }
// NewRawUnsignedValue returns a new unsigned integer value.
func NewRawUnsignedValue(t int64, v uint64) UnsignedValue { return value.NewRawUnsignedValue(t, v) }
// NewRawFloatValue returns a new float value.
func NewRawFloatValue(t int64, v float64) FloatValue { return value.NewRawFloatValue(t, v) }
// NewRawBooleanValue returns a new boolean value.
func NewRawBooleanValue(t int64, v bool) BooleanValue { return value.NewRawBooleanValue(t, v) }
// NewRawStringValue returns a new string value.
func NewRawStringValue(t int64, v string) StringValue { return value.NewRawStringValue(t, v) }
// NewIntegerValue returns a new integer value.
func NewIntegerValue(t int64, v int64) Value { return value.NewIntegerValue(t, v) }
// NewUnsignedValue returns a new unsigned integer value.
func NewUnsignedValue(t int64, v uint64) Value { return value.NewUnsignedValue(t, v) }
// NewFloatValue returns a new float value.
func NewFloatValue(t int64, v float64) Value { return value.NewFloatValue(t, v) }
// NewBooleanValue returns a new boolean value.
func NewBooleanValue(t int64, v bool) Value { return value.NewBooleanValue(t, v) }
// NewStringValue returns a new string value.
func NewStringValue(t int64, v string) Value { return value.NewStringValue(t, v) }
func init() {
// Prime the pools with one encoder/decoder for each available CPU.
vals := make([]interface{}, 0, runtime.NumCPU())
@ -93,89 +141,6 @@ var (
})
)
// Value represents a TSM-encoded value.
type Value interface {
// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.
UnixNano() int64
// Value returns the underlying value.
Value() interface{}
// Size returns the number of bytes necessary to represent the value and its timestamp.
Size() int
// String returns the string representation of the value and its timestamp.
String() string
// internalOnly is unexported to ensure implementations of Value
// can only originate in this package.
internalOnly()
}
// NewValue returns a new Value with the underlying type dependent on value.
func NewValue(t int64, value interface{}) Value {
switch v := value.(type) {
case int64:
return IntegerValue{unixnano: t, value: v}
case uint64:
return UnsignedValue{unixnano: t, value: v}
case float64:
return FloatValue{unixnano: t, value: v}
case bool:
return BooleanValue{unixnano: t, value: v}
case string:
return StringValue{unixnano: t, value: v}
}
return EmptyValue{}
}
// NewIntegerValue returns a new integer value.
func NewIntegerValue(t int64, v int64) Value {
return IntegerValue{unixnano: t, value: v}
}
// NewUnsignedValue returns a new unsigned integer value.
func NewUnsignedValue(t int64, v uint64) Value {
return UnsignedValue{unixnano: t, value: v}
}
// NewFloatValue returns a new float value.
func NewFloatValue(t int64, v float64) Value {
return FloatValue{unixnano: t, value: v}
}
// NewBooleanValue returns a new boolean value.
func NewBooleanValue(t int64, v bool) Value {
return BooleanValue{unixnano: t, value: v}
}
// NewStringValue returns a new string value.
func NewStringValue(t int64, v string) Value {
return StringValue{unixnano: t, value: v}
}
// EmptyValue is used when there is no appropriate other value.
type EmptyValue struct{}
// UnixNano returns tsdb.EOF.
func (e EmptyValue) UnixNano() int64 { return tsdb.EOF }
// Value returns nil.
func (e EmptyValue) Value() interface{} { return nil }
// Size returns 0.
func (e EmptyValue) Size() int { return 0 }
// String returns the empty string.
func (e EmptyValue) String() string { return "" }
func (EmptyValue) internalOnly() {}
func (StringValue) internalOnly() {}
func (IntegerValue) internalOnly() {}
func (UnsignedValue) internalOnly() {}
func (BooleanValue) internalOnly() {}
func (FloatValue) internalOnly() {}
// Encode converts the values to a byte slice. If there are no values,
// this function panics.
func (a Values) Encode(buf []byte) ([]byte, error) {
@ -318,32 +283,6 @@ func DecodeBlock(block []byte, vals []Value) ([]Value, error) {
}
}
// FloatValue represents a float64 value.
type FloatValue struct {
unixnano int64
value float64
}
// UnixNano returns the timestamp of the value.
func (v FloatValue) UnixNano() int64 {
return v.unixnano
}
// Value returns the underlying float64 value.
func (v FloatValue) Value() interface{} {
return v.value
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v FloatValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v FloatValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.value)
}
func encodeFloatBlock(buf []byte, values []Value) ([]byte, error) {
if len(values) == 0 {
return nil, nil
@ -373,8 +312,8 @@ func encodeFloatBlockUsing(buf []byte, values []Value, tsenc TimeEncoder, venc *
for _, v := range values {
vv := v.(FloatValue)
tsenc.Write(vv.unixnano)
venc.Write(vv.value)
tsenc.Write(vv.UnixNano())
venc.Write(vv.RawValue())
}
venc.Flush()
@ -432,7 +371,7 @@ func DecodeFloatBlock(block []byte, a *[]FloatValue) ([]FloatValue, error) {
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = FloatValue{unixnano: tdec.Read(), value: vdec.Values()}
a[j] = NewRawFloatValue(tdec.Read(), vdec.Values())
j++
}
i = j
@ -453,32 +392,6 @@ func DecodeFloatBlock(block []byte, a *[]FloatValue) ([]FloatValue, error) {
return (*a)[:i], err
}
// BooleanValue represents a boolean value.
type BooleanValue struct {
unixnano int64
value bool
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v BooleanValue) Size() int {
return 9
}
// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.
func (v BooleanValue) UnixNano() int64 {
return v.unixnano
}
// Value returns the underlying boolean value.
func (v BooleanValue) Value() interface{} {
return v.value
}
// String returns the string representation of the value and its timestamp.
func (v BooleanValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func encodeBooleanBlock(buf []byte, values []Value) ([]byte, error) {
if len(values) == 0 {
return nil, nil
@ -505,8 +418,8 @@ func encodeBooleanBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc
for _, v := range values {
vv := v.(BooleanValue)
tenc.Write(vv.unixnano)
venc.Write(vv.value)
tenc.Write(vv.UnixNano())
venc.Write(vv.RawValue())
}
// Encoded timestamp values
@ -560,7 +473,7 @@ func DecodeBooleanBlock(block []byte, a *[]BooleanValue) ([]BooleanValue, error)
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = BooleanValue{unixnano: tdec.Read(), value: vdec.Read()}
a[j] = NewRawBooleanValue(tdec.Read(), vdec.Read())
j++
}
i = j
@ -580,32 +493,6 @@ func DecodeBooleanBlock(block []byte, a *[]BooleanValue) ([]BooleanValue, error)
return (*a)[:i], err
}
// IntegerValue represents an int64 value.
type IntegerValue struct {
unixnano int64
value int64
}
// Value returns the underlying int64 value.
func (v IntegerValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v IntegerValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v IntegerValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v IntegerValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func encodeIntegerBlock(buf []byte, values []Value) ([]byte, error) {
tenc := getTimeEncoder(len(values))
venc := getIntegerEncoder(len(values))
@ -624,8 +511,8 @@ func encodeIntegerBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc
for _, v := range values {
vv := v.(IntegerValue)
tenc.Write(vv.unixnano)
venc.Write(vv.value)
tenc.Write(vv.UnixNano())
venc.Write(vv.RawValue())
}
// Encoded timestamp values
@ -679,7 +566,7 @@ func DecodeIntegerBlock(block []byte, a *[]IntegerValue) ([]IntegerValue, error)
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = IntegerValue{unixnano: tdec.Read(), value: vdec.Read()}
a[j] = NewRawIntegerValue(tdec.Read(), vdec.Read())
j++
}
i = j
@ -699,32 +586,6 @@ func DecodeIntegerBlock(block []byte, a *[]IntegerValue) ([]IntegerValue, error)
return (*a)[:i], err
}
// UnsignedValue represents an int64 value.
type UnsignedValue struct {
unixnano int64
value uint64
}
// Value returns the underlying int64 value.
func (v UnsignedValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v UnsignedValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v UnsignedValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v UnsignedValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func encodeUnsignedBlock(buf []byte, values []Value) ([]byte, error) {
tenc := getTimeEncoder(len(values))
venc := getUnsignedEncoder(len(values))
@ -743,8 +604,8 @@ func encodeUnsignedBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc
for _, v := range values {
vv := v.(UnsignedValue)
tenc.Write(vv.unixnano)
venc.Write(int64(vv.value))
tenc.Write(vv.UnixNano())
venc.Write(int64(vv.RawValue()))
}
// Encoded timestamp values
@ -798,7 +659,7 @@ func DecodeUnsignedBlock(block []byte, a *[]UnsignedValue) ([]UnsignedValue, err
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = UnsignedValue{unixnano: tdec.Read(), value: uint64(vdec.Read())}
a[j] = NewRawUnsignedValue(tdec.Read(), uint64(vdec.Read()))
j++
}
i = j
@ -818,35 +679,9 @@ func DecodeUnsignedBlock(block []byte, a *[]UnsignedValue) ([]UnsignedValue, err
return (*a)[:i], err
}
// StringValue represents a string value.
type StringValue struct {
unixnano int64
value string
}
// Value returns the underlying string value.
func (v StringValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v StringValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v StringValue) Size() int {
return 8 + len(v.value)
}
// String returns the string representation of the value and its timestamp.
func (v StringValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func encodeStringBlock(buf []byte, values []Value) ([]byte, error) {
tenc := getTimeEncoder(len(values))
venc := getStringEncoder(len(values) * len(values[0].(StringValue).value))
venc := getStringEncoder(len(values) * len(values[0].(StringValue).RawValue()))
b, err := encodeStringBlockUsing(buf, values, tenc, venc)
@ -862,8 +697,8 @@ func encodeStringBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc S
for _, v := range values {
vv := v.(StringValue)
tenc.Write(vv.unixnano)
venc.Write(vv.value)
tenc.Write(vv.UnixNano())
venc.Write(vv.RawValue())
}
// Encoded timestamp values
@ -920,7 +755,7 @@ func DecodeStringBlock(block []byte, a *[]StringValue) ([]StringValue, error) {
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = StringValue{unixnano: tdec.Read(), value: vdec.Read()}
a[j] = NewRawStringValue(tdec.Read(), vdec.Read())
j++
}
i = j
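A minimal, hedged round-trip sketch using only the exported helpers touched by this change (timestamps are arbitrary; Encode panics on an empty slice, so the input always carries at least one value):
package main
import (
	"fmt"
	"github.com/influxdata/influxdb/tsdb/tsm1"
)
func main() {
	vals := tsm1.Values{
		tsm1.NewFloatValue(1, 1.0),
		tsm1.NewFloatValue(2, 2.5),
	}
	block, err := vals.Encode(nil)
	if err != nil {
		panic(err)
	}
	// Decode back into typed FloatValues; the decoder now fills the slice via NewRawFloatValue.
	var buf []tsm1.FloatValue
	decoded, err := tsm1.DecodeFloatBlock(block, &buf)
	if err != nil {
		panic(err)
	}
	for _, v := range decoded {
		fmt.Println(v.UnixNano(), v.RawValue())
	}
}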

View File

@ -24,6 +24,7 @@ import (
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/pkg/metrics"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/storage/wal"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/tsi1"
"github.com/influxdata/influxql"
@ -31,6 +32,7 @@ import (
"go.uber.org/zap"
)
//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@array_cursor.gen.go.tmpldata array_cursor.gen.go.tmpl array_cursor_iterator.gen.go.tmpl
//go:generate env GO111MODULE=on go run github.com/influxdata/influxdb/tools/tmpl -i -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store.gen.go
//go:generate env GO111MODULE=on go run github.com/influxdata/influxdb/tools/tmpl -i -d isArray=y -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store_array.gen.go
//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@encoding.gen.go.tmpldata encoding.gen.go.tmpl
@ -78,15 +80,15 @@ const (
type EngineOption func(i *Engine)
// WithWAL sets the WAL for the Engine
var WithWAL = func(wal Log) EngineOption {
var WithWAL = func(w Log) EngineOption {
// be defensive: it's very easy to pass in a nil WAL here
// which will panic. Set any nil WALs to the NopWAL.
if pwal, _ := wal.(*WAL); pwal == nil {
wal = NopWAL{}
if pwal, _ := w.(*wal.WAL); pwal == nil {
w = NopWAL{}
}
return func(e *Engine) {
e.WAL = wal
e.WAL = w
}
}
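A minimal sketch of the typed-nil case this guard covers: a nil *wal.WAL stored in the Log interface compares non-nil as an interface value, so only unwrapping the concrete type catches it (variable names are illustrative):
package main
import (
	"fmt"
	"github.com/influxdata/influxdb/storage/wal"
	"github.com/influxdata/influxdb/tsdb/tsm1"
)
func main() {
	var pw *wal.WAL     // typed nil, e.g. when the WAL is disabled
	var w tsm1.Log = pw // interface value holding a nil pointer
	fmt.Println(w == nil) // false: the interface itself is non-nil
	// Same check WithWAL performs: unwrap the concrete type and test it.
	if inner, _ := w.(*wal.WAL); inner == nil {
		w = tsm1.NopWAL{}
	}
	fmt.Printf("%T\n", w) // tsm1.NopWAL
}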
@ -512,10 +514,6 @@ func (e *Engine) initTrackers() {
e.FileStore.tracker = newFileTracker(bms.fileMetrics, e.defaultMetricLabels)
e.Cache.tracker = newCacheTracker(bms.cacheMetrics, e.defaultMetricLabels)
// Set default metrics on WAL if enabled.
if wal, ok := e.WAL.(*WAL); ok {
wal.tracker = newWALTracker(bms.walMetrics, e.defaultMetricLabels)
}
e.scheduler.setCompactionTracker(e.compactionTracker)
}
@ -575,10 +573,7 @@ func (e *Engine) WithLogger(log *zap.Logger) {
e.traceLogger = e.logger
}
if wal, ok := e.WAL.(*WAL); ok {
wal.WithLogger(e.logger)
}
e.WAL.WithLogger(e.logger)
e.FileStore.WithLogger(e.logger)
}
@ -1571,7 +1566,7 @@ func (e *Engine) fullCompactionStrategy(group CompactionGroup, optimize bool) *c
// reloadCache reads the WAL segment files and loads them into the cache.
func (e *Engine) reloadCache() error {
now := time.Now()
files, err := segmentFileNames(e.WAL.Path())
files, err := wal.SegmentFileNames(e.WAL.Path())
if err != nil {
return err
}

View File

@ -25,7 +25,6 @@ func PrometheusCollectors() []prometheus.Collector {
collectors = append(collectors, bms.compactionMetrics.PrometheusCollectors()...)
collectors = append(collectors, bms.fileMetrics.PrometheusCollectors()...)
collectors = append(collectors, bms.cacheMetrics.PrometheusCollectors()...)
collectors = append(collectors, bms.walMetrics.PrometheusCollectors()...)
}
return collectors
}
@ -36,7 +35,6 @@ const namespace = "storage"
const compactionSubsystem = "compactions" // sub-system associated with metrics for compactions.
const fileStoreSubsystem = "tsm_files" // sub-system associated with metrics for TSM files.
const cacheSubsystem = "cache" // sub-system associated with metrics for the cache.
const walSubsystem = "wal" // sub-system associated with metrics for the WAL.
// blockMetrics are a set of metrics concerned with tracking data about block storage.
type blockMetrics struct {
@ -44,7 +42,6 @@ type blockMetrics struct {
*compactionMetrics
*fileMetrics
*cacheMetrics
*walMetrics
}
// newBlockMetrics initialises the prometheus metrics for the block subsystem.
@ -54,7 +51,6 @@ func newBlockMetrics(labels prometheus.Labels) *blockMetrics {
compactionMetrics: newCompactionMetrics(labels),
fileMetrics: newFileMetrics(labels),
cacheMetrics: newCacheMetrics(labels),
walMetrics: newWALMetrics(labels),
}
}
@ -64,7 +60,6 @@ func (m *blockMetrics) PrometheusCollectors() []prometheus.Collector {
metrics = append(metrics, m.compactionMetrics.PrometheusCollectors()...)
metrics = append(metrics, m.fileMetrics.PrometheusCollectors()...)
metrics = append(metrics, m.cacheMetrics.PrometheusCollectors()...)
metrics = append(metrics, m.walMetrics.PrometheusCollectors()...)
return metrics
}
@ -249,60 +244,3 @@ func (m *cacheMetrics) PrometheusCollectors() []prometheus.Collector {
m.Writes,
}
}
// walMetrics are a set of metrics concerned with tracking data about compactions.
type walMetrics struct {
OldSegmentBytes *prometheus.GaugeVec
CurrentSegmentBytes *prometheus.GaugeVec
Segments *prometheus.GaugeVec
Writes *prometheus.CounterVec
}
// newWALMetrics initialises the prometheus metrics for tracking the WAL.
func newWALMetrics(labels prometheus.Labels) *walMetrics {
var names []string
for k := range labels {
names = append(names, k)
}
sort.Strings(names)
writeNames := append(append([]string(nil), names...), "status")
sort.Strings(writeNames)
return &walMetrics{
OldSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "old_segment_bytes",
Help: "Number of bytes old WAL segments using on disk.",
}, names),
CurrentSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "current_segment_bytes",
Help: "Number of bytes TSM files using on disk.",
}, names),
Segments: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "segments_total",
Help: "Number of WAL segment files on disk.",
}, names),
Writes: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "writes_total",
Help: "Number of writes to the WAL.",
}, writeNames),
}
}
// PrometheusCollectors satisfies the prom.PrometheusCollector interface.
func (m *walMetrics) PrometheusCollectors() []prometheus.Collector {
return []prometheus.Collector{
m.OldSegmentBytes,
m.CurrentSegmentBytes,
m.Segments,
m.Writes,
}
}

View File

@ -130,73 +130,6 @@ func TestMetrics_Cache(t *testing.T) {
}
}
func TestMetrics_WAL(t *testing.T) {
// metrics to be shared by multiple file stores.
metrics := newWALMetrics(prometheus.Labels{"engine_id": "", "node_id": ""})
t1 := newWALTracker(metrics, prometheus.Labels{"engine_id": "0", "node_id": "0"})
t2 := newWALTracker(metrics, prometheus.Labels{"engine_id": "1", "node_id": "0"})
reg := prometheus.NewRegistry()
reg.MustRegister(metrics.PrometheusCollectors()...)
base := namespace + "_" + walSubsystem + "_"
// All the metric names
gauges := []string{
base + "old_segment_bytes",
base + "current_segment_bytes",
base + "segments_total",
}
counters := []string{
base + "writes_total",
}
// Generate some measurements.
for i, tracker := range []*walTracker{t1, t2} {
tracker.SetOldSegmentSize(uint64(i + len(gauges[0])))
tracker.SetCurrentSegmentSize(uint64(i + len(gauges[1])))
tracker.SetSegments(uint64(i + len(gauges[2])))
labels := tracker.Labels()
labels["status"] = "ok"
tracker.metrics.Writes.With(labels).Add(float64(i + len(counters[0])))
}
// Test that all the correct metrics are present.
mfs, err := reg.Gather()
if err != nil {
t.Fatal(err)
}
// The label variants for the two caches.
labelVariants := []prometheus.Labels{
prometheus.Labels{"engine_id": "0", "node_id": "0"},
prometheus.Labels{"engine_id": "1", "node_id": "0"},
}
for i, labels := range labelVariants {
for _, name := range gauges {
exp := float64(i + len(name))
metric := promtest.MustFindMetric(t, mfs, name, labels)
if got := metric.GetGauge().GetValue(); got != exp {
t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp)
}
}
for _, name := range counters {
exp := float64(i + len(name))
labels["status"] = "ok"
metric := promtest.MustFindMetric(t, mfs, name, labels)
if got := metric.GetCounter().GetValue(); got != exp {
t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp)
}
}
}
}
func TestMetrics_Compactions(t *testing.T) {
// metrics to be shared by multiple file stores.
metrics := newCompactionMetrics(prometheus.Labels{"engine_id": "", "node_id": ""})

47
tsdb/tsm1/temp_log.go Normal file
View File

@ -0,0 +1,47 @@
package tsm1
import (
"time"
"go.uber.org/zap"
)
// TODO(jeff): this only exists temporarily while we move the WAL into storage
// Log describes an interface for a durable disk-based log.
type Log interface {
WithLogger(*zap.Logger)
Open() error
Close() error
Path() string
LastWriteTime() time.Time
DiskSizeBytes() int64
WriteMulti(values map[string][]Value) (int, error)
DeleteRange(keys [][]byte, min, max int64) (int, error)
CloseSegment() error
ClosedSegments() ([]string, error)
Remove(files []string) error
}
// NopWAL implements the Log interface and provides a no-op WAL implementation.
type NopWAL struct{}
func (w NopWAL) WithLogger(*zap.Logger) {}
func (w NopWAL) Open() error { return nil }
func (w NopWAL) Close() error { return nil }
func (w NopWAL) Path() string { return "" }
func (w NopWAL) LastWriteTime() time.Time { return time.Time{} }
func (w NopWAL) DiskSizeBytes() int64 { return 0 }
func (w NopWAL) WriteMulti(values map[string][]Value) (int, error) { return 0, nil }
func (w NopWAL) DeleteRange(keys [][]byte, min, max int64) (int, error) { return 0, nil }
func (w NopWAL) CloseSegment() error { return nil }
func (w NopWAL) ClosedSegments() ([]string, error) { return nil, nil }
func (w NopWAL) Remove(files []string) error { return nil }
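A brief, hedged sketch showing NopWAL standing in for a real WAL: every write is accepted and discarded, which is what the engine relies on when the WAL is disabled (the series key below is illustrative only):
package main
import (
	"fmt"
	"github.com/influxdata/influxdb/tsdb/tsm1"
)
func main() {
	var log tsm1.Log = tsm1.NopWAL{}
	n, err := log.WriteMulti(map[string][]tsm1.Value{
		"cpu": {tsm1.NewFloatValue(1, 1.5)},
	})
	fmt.Println(n, err)              // 0 <nil>: the write is dropped
	fmt.Println(log.DiskSizeBytes()) // 0: nothing is persisted
	fmt.Println(log.Path())          // empty string
}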

236
tsdb/tsm1/value/value.go Normal file
View File

@ -0,0 +1,236 @@
package value
import (
"fmt"
"time"
"github.com/influxdata/influxdb/tsdb"
)
// Value represents a TSM-encoded value.
type Value interface {
// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.
UnixNano() int64
// Value returns the underlying value.
Value() interface{}
// Size returns the number of bytes necessary to represent the value and its timestamp.
Size() int
// String returns the string representation of the value and its timestamp.
String() string
// internalOnly is unexported to ensure implementations of Value
// can only originate in this package.
internalOnly()
}
// NewValue returns a new Value with the underlying type dependent on value.
func NewValue(t int64, value interface{}) Value {
switch v := value.(type) {
case int64:
return IntegerValue{unixnano: t, value: v}
case uint64:
return UnsignedValue{unixnano: t, value: v}
case float64:
return FloatValue{unixnano: t, value: v}
case bool:
return BooleanValue{unixnano: t, value: v}
case string:
return StringValue{unixnano: t, value: v}
}
return EmptyValue{}
}
// NewRawIntegerValue returns a new integer value.
func NewRawIntegerValue(t int64, v int64) IntegerValue { return IntegerValue{unixnano: t, value: v} }
// NewRawUnsignedValue returns a new unsigned integer value.
func NewRawUnsignedValue(t int64, v uint64) UnsignedValue { return UnsignedValue{unixnano: t, value: v} }
// NewRawFloatValue returns a new float value.
func NewRawFloatValue(t int64, v float64) FloatValue { return FloatValue{unixnano: t, value: v} }
// NewRawBooleanValue returns a new boolean value.
func NewRawBooleanValue(t int64, v bool) BooleanValue { return BooleanValue{unixnano: t, value: v} }
// NewRawStringValue returns a new string value.
func NewRawStringValue(t int64, v string) StringValue { return StringValue{unixnano: t, value: v} }
// NewIntegerValue returns a new integer value.
func NewIntegerValue(t int64, v int64) Value { return NewRawIntegerValue(t, v) }
// NewUnsignedValue returns a new unsigned integer value.
func NewUnsignedValue(t int64, v uint64) Value { return NewRawUnsignedValue(t, v) }
// NewFloatValue returns a new float value.
func NewFloatValue(t int64, v float64) Value { return NewRawFloatValue(t, v) }
// NewBooleanValue returns a new boolean value.
func NewBooleanValue(t int64, v bool) Value { return NewRawBooleanValue(t, v) }
// NewStringValue returns a new string value.
func NewStringValue(t int64, v string) Value { return NewRawStringValue(t, v) }
// EmptyValue is used when there is no appropriate other value.
type EmptyValue struct{}
// UnixNano returns tsdb.EOF.
func (e EmptyValue) UnixNano() int64 { return tsdb.EOF }
// Value returns nil.
func (e EmptyValue) Value() interface{} { return nil }
// Size returns 0.
func (e EmptyValue) Size() int { return 0 }
// String returns the empty string.
func (e EmptyValue) String() string { return "" }
func (EmptyValue) internalOnly() {}
func (StringValue) internalOnly() {}
func (IntegerValue) internalOnly() {}
func (UnsignedValue) internalOnly() {}
func (BooleanValue) internalOnly() {}
func (FloatValue) internalOnly() {}
// IntegerValue represents an int64 value.
type IntegerValue struct {
unixnano int64
value int64
}
// Value returns the underlying int64 value.
func (v IntegerValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v IntegerValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v IntegerValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v IntegerValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func (v IntegerValue) RawValue() int64 { return v.value }
// UnsignedValue represents a uint64 value.
type UnsignedValue struct {
unixnano int64
value uint64
}
// Value returns the underlying uint64 value.
func (v UnsignedValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v UnsignedValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v UnsignedValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v UnsignedValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func (v UnsignedValue) RawValue() uint64 { return v.value }
// FloatValue represents a float64 value.
type FloatValue struct {
unixnano int64
value float64
}
// UnixNano returns the timestamp of the value.
func (v FloatValue) UnixNano() int64 {
return v.unixnano
}
// Value returns the underlying float64 value.
func (v FloatValue) Value() interface{} {
return v.value
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v FloatValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v FloatValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.value)
}
func (v FloatValue) RawValue() float64 { return v.value }
// BooleanValue represents a boolean value.
type BooleanValue struct {
unixnano int64
value bool
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v BooleanValue) Size() int {
return 9
}
// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.
func (v BooleanValue) UnixNano() int64 {
return v.unixnano
}
// Value returns the underlying boolean value.
func (v BooleanValue) Value() interface{} {
return v.value
}
// String returns the string representation of the value and its timestamp.
func (v BooleanValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func (v BooleanValue) RawValue() bool { return v.value }
// StringValue represents a string value.
type StringValue struct {
unixnano int64
value string
}
// Value returns the underlying string value.
func (v StringValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v StringValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v StringValue) Size() int {
return 8 + len(v.value)
}
// String returns the string representation of the value and its timestamp.
func (v StringValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func (v StringValue) RawValue() string { return v.value }
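A small, hedged sketch of the dynamic constructor in the new package: supported Go types map to their typed values, and anything else falls back to EmptyValue, whose timestamp is tsdb.EOF:
package main
import (
	"fmt"
	"github.com/influxdata/influxdb/tsdb"
	"github.com/influxdata/influxdb/tsdb/tsm1/value"
)
func main() {
	for _, raw := range []interface{}{int64(7), 3.5, "ok", true, uint64(9)} {
		v := value.NewValue(42, raw)
		fmt.Printf("%T %v %v\n", v, v.UnixNano(), v.Value())
	}
	// Unsupported types (e.g. []byte) fall back to EmptyValue.
	fmt.Println(value.NewValue(42, []byte("x")).UnixNano() == tsdb.EOF) // true
}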