// Tests for the tsm1 engine cache and its WAL-based cache loader.
package tsm1
import (
	"fmt"
	"io/ioutil"
	"math"
	"os"
	"reflect"
	"testing"

	"github.com/golang/snappy"
)
func TestCache_NewCache(t *testing.T) {
2016-02-19 17:42:20 +00:00
c := NewCache(100, "")
2015-11-11 20:06:02 +00:00
if c == nil {
t.Fatalf("failed to create new cache")
}
if c.MaxSize() != 100 {
t.Fatalf("new cache max size not correct")
}
if c.Size() != 0 {
t.Fatalf("new cache size not correct")
}
if len(c.Keys()) != 0 {
t.Fatalf("new cache keys not correct: %v", c.Keys())
}
}
func TestCache_CacheWrite(t *testing.T) {
2016-02-23 03:47:17 +00:00
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
2015-11-11 20:06:02 +00:00
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
2016-02-19 17:42:20 +00:00
c := NewCache(3*valuesSize, "")
2015-11-11 20:06:02 +00:00
if err := c.Write("foo", values); err != nil {
2015-11-11 20:06:02 +00:00
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if err := c.Write("bar", values); err != nil {
2015-11-11 20:06:02 +00:00
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if n := c.Size(); n != 2*valuesSize {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
}
if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
}
func TestCache_CacheWriteMulti(t *testing.T) {
2016-02-23 03:47:17 +00:00
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
2016-02-19 17:42:20 +00:00
c := NewCache(3*valuesSize, "")
if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
2015-11-11 20:06:02 +00:00
}
if n := c.Size(); n != 2*valuesSize {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
}
if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
}
// TestCache_Cache_DeleteRange verifies that DeleteRange removes only the
// points within the given time range for the named keys, leaving other
// keys and out-of-range points untouched.
func TestCache_Cache_DeleteRange(t *testing.T) {
	v0 := NewValue(1, 1.0)
	v1 := NewValue(2, 2.0)
	v2 := NewValue(3, 3.0)
	values := Values{v0, v1, v2}
	valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())

	c := NewCache(3*valuesSize, "")
	if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil {
		t.Fatalf("failed to write key foo to cache: %s", err.Error())
	}

	if n := c.Size(); n != 2*valuesSize {
		t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
	}
	if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
		t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
	}

	// Delete timestamps >= 2 for "bar" only; "bar" keeps its first point
	// (timestamp 1) and "foo" must be untouched.
	c.DeleteRange([]string{"bar"}, 2, math.MaxInt64)

	// Fix: the failure messages below previously said "after 2 writes"
	// even though they check post-delete state.
	if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
		t.Fatalf("cache keys incorrect after delete, exp %v, got %v", exp, keys)
	}
	if got, exp := c.Size(), valuesSize+uint64(v0.Size()); exp != got {
		t.Fatalf("cache size incorrect after delete, exp %d, got %d", exp, got)
	}
	if got, exp := len(c.Values("bar")), 1; got != exp {
		t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
	}
	if got, exp := len(c.Values("foo")), 3; got != exp {
		t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
	}
}
// TestCache_Cache_Delete verifies that Delete removes all points for the
// named keys while leaving other keys intact.
func TestCache_Cache_Delete(t *testing.T) {
	v0 := NewValue(1, 1.0)
	v1 := NewValue(2, 2.0)
	v2 := NewValue(3, 3.0)
	values := Values{v0, v1, v2}
	valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())

	c := NewCache(3*valuesSize, "")
	if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil {
		t.Fatalf("failed to write key foo to cache: %s", err.Error())
	}

	if n := c.Size(); n != 2*valuesSize {
		t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
	}
	if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
		t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
	}

	// Remove "bar" entirely; "foo" must be untouched.
	c.Delete([]string{"bar"})

	// Fix: the failure messages below previously said "after 2 writes"
	// even though they check post-delete state.
	if exp, keys := []string{"foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
		t.Fatalf("cache keys incorrect after delete, exp %v, got %v", exp, keys)
	}
	if got, exp := c.Size(), valuesSize; exp != got {
		t.Fatalf("cache size incorrect after delete, exp %d, got %d", exp, got)
	}
	if got, exp := len(c.Values("bar")), 0; got != exp {
		t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
	}
	if got, exp := len(c.Values("foo")), 3; got != exp {
		t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
	}
}
// This tests writing two batches to the same series. The first batch
// is sorted. The second batch is also sorted but contains duplicates.
func TestCache_CacheWriteMulti_Duplicates(t *testing.T) {
2016-02-23 03:47:17 +00:00
v0 := NewValue(2, 1.0)
v1 := NewValue(3, 1.0)
values0 := Values{v0, v1}
2016-02-23 03:47:17 +00:00
v3 := NewValue(4, 2.0)
v4 := NewValue(5, 3.0)
v5 := NewValue(5, 3.0)
values1 := Values{v3, v4, v5}
2016-02-22 21:49:11 +00:00
c := NewCache(0, "")
if err := c.WriteMulti(map[string][]Value{"foo": values0}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if err := c.WriteMulti(map[string][]Value{"foo": values1}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if exp, keys := []string{"foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
expAscValues := Values{v0, v1, v3, v5}
if exp, got := len(expAscValues), len(c.Values("foo")); exp != got {
t.Fatalf("value count mismatch: exp: %v, got %v", exp, got)
}
if deduped := c.Values("foo"); !reflect.DeepEqual(expAscValues, deduped) {
t.Fatalf("deduped ascending values for foo incorrect, exp: %v, got %v", expAscValues, deduped)
}
}
func TestCache_CacheValues(t *testing.T) {
2016-02-23 03:47:17 +00:00
v0 := NewValue(1, 0.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
v3 := NewValue(1, 1.0)
v4 := NewValue(4, 4.0)
2016-02-19 17:42:20 +00:00
c := NewCache(512, "")
if deduped := c.Values("no such key"); deduped != nil {
t.Fatalf("Values returned for no such key")
}
if err := c.Write("foo", Values{v0, v1, v2, v3}); err != nil {
t.Fatalf("failed to write 3 values, key foo to cache: %s", err.Error())
}
if err := c.Write("foo", Values{v4}); err != nil {
t.Fatalf("failed to write 1 value, key foo to cache: %s", err.Error())
}
2015-12-01 00:29:38 +00:00
expAscValues := Values{v3, v1, v2, v4}
if deduped := c.Values("foo"); !reflect.DeepEqual(expAscValues, deduped) {
2015-12-01 00:29:38 +00:00
t.Fatalf("deduped ascending values for foo incorrect, exp: %v, got %v", expAscValues, deduped)
}
2015-11-11 20:06:02 +00:00
}
func TestCache_CacheSnapshot(t *testing.T) {
2016-02-23 03:47:17 +00:00
v0 := NewValue(2, 0.0)
v1 := NewValue(3, 2.0)
v2 := NewValue(4, 3.0)
v3 := NewValue(5, 4.0)
v4 := NewValue(6, 5.0)
v5 := NewValue(1, 5.0)
v6 := NewValue(7, 5.0)
v7 := NewValue(2, 5.0)
2016-02-19 17:42:20 +00:00
c := NewCache(512, "")
if err := c.Write("foo", Values{v0, v1, v2, v3}); err != nil {
t.Fatalf("failed to write 3 values, key foo to cache: %s", err.Error())
}
// Grab snapshot, and ensure it's as expected.
snapshot, err := c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
expValues := Values{v0, v1, v2, v3}
if deduped := snapshot.values("foo"); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("snapshotted values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Ensure cache is still as expected.
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-snapshot values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Write a new value to the cache.
if err := c.Write("foo", Values{v4}); err != nil {
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
}
expValues = Values{v0, v1, v2, v3, v4}
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-snapshot write values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Write a new, out-of-order, value to the cache.
if err := c.Write("foo", Values{v5}); err != nil {
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
}
expValues = Values{v5, v0, v1, v2, v3, v4}
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Clear snapshot, ensuring non-snapshot data untouched.
c.ClearSnapshot(true)
expValues = Values{v5, v4}
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-clear values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
// Create another snapshot
snapshot, err = c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
if err := c.Write("foo", Values{v4, v5}); err != nil {
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
}
c.ClearSnapshot(true)
snapshot, err = c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
if err := c.Write("foo", Values{v6, v7}); err != nil {
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
}
expValues = Values{v5, v7, v4, v6}
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
t.Fatalf("post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v", expValues, deduped)
}
}
func TestCache_CacheEmptySnapshot(t *testing.T) {
2016-02-19 17:42:20 +00:00
c := NewCache(512, "")
// Grab snapshot, and ensure it's as expected.
snapshot, err := c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
if deduped := snapshot.values("foo"); !reflect.DeepEqual(Values(nil), deduped) {
t.Fatalf("snapshotted values for foo incorrect, exp: %v, got %v", nil, deduped)
}
// Ensure cache is still as expected.
if deduped := c.Values("foo"); !reflect.DeepEqual(Values(nil), deduped) {
t.Fatalf("post-snapshotted values for foo incorrect, exp: %v, got %v", Values(nil), deduped)
}
// Clear snapshot.
c.ClearSnapshot(true)
if deduped := c.Values("foo"); !reflect.DeepEqual(Values(nil), deduped) {
t.Fatalf("post-snapshot-clear values for foo incorrect, exp: %v, got %v", Values(nil), deduped)
}
}
func TestCache_CacheWriteMemoryExceeded(t *testing.T) {
2016-02-23 03:47:17 +00:00
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
2015-11-11 20:06:02 +00:00
2016-02-19 17:42:20 +00:00
c := NewCache(uint64(v1.Size()), "")
2015-11-11 20:06:02 +00:00
if err := c.Write("foo", Values{v0}); err != nil {
2015-11-11 20:06:02 +00:00
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if exp, keys := []string{"foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after writes, exp %v, got %v", exp, keys)
}
if err := c.Write("bar", Values{v1}); err != ErrCacheMemoryExceeded {
2015-11-11 20:06:02 +00:00
t.Fatalf("wrong error writing key bar to cache")
}
// Grab snapshot, write should still fail since we're still using the memory.
_, err := c.Snapshot()
if err != nil {
t.Fatalf("failed to snapshot cache: %v", err)
}
if err := c.Write("bar", Values{v1}); err != ErrCacheMemoryExceeded {
2015-11-11 20:06:02 +00:00
t.Fatalf("wrong error writing key bar to cache")
}
// Clear the snapshot and the write should now succeed.
c.ClearSnapshot(true)
if err := c.Write("bar", Values{v1}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
2015-11-21 04:45:12 +00:00
}
expAscValues := Values{v1}
if deduped := c.Values("bar"); !reflect.DeepEqual(expAscValues, deduped) {
t.Fatalf("deduped ascending values for bar incorrect, exp: %v, got %v", expAscValues, deduped)
2015-11-11 20:06:02 +00:00
}
}
// Ensure the CacheLoader can correctly load from a single segment, even if it's corrupted.
func TestCacheLoader_LoadSingle(t *testing.T) {
// Create a WAL segment.
dir := mustTempDir()
defer os.RemoveAll(dir)
f := mustTempFile(dir)
w := NewWALSegmentWriter(f)
2016-02-23 03:47:17 +00:00
p1 := NewValue(1, 1.1)
p2 := NewValue(1, int64(1))
p3 := NewValue(1, true)
values := map[string][]Value{
"foo": []Value{p1},
"bar": []Value{p2},
"baz": []Value{p3},
}
entry := &WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
t.Fatal("write points", err)
}
// Load the cache using the segment.
2016-02-19 17:42:20 +00:00
cache := NewCache(1024, "")
loader := NewCacheLoader([]string{f.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values("foo"); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
if values := cache.Values("bar"); !reflect.DeepEqual(values, Values{p2}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p2})
}
if values := cache.Values("baz"); !reflect.DeepEqual(values, Values{p3}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p3})
}
// Corrupt the WAL segment.
if _, err := f.Write([]byte{1, 4, 0, 0, 0}); err != nil {
t.Fatalf("corrupt WAL segment: %s", err.Error())
}
// Reload the cache using the segment.
2016-02-19 17:42:20 +00:00
cache = NewCache(1024, "")
loader = NewCacheLoader([]string{f.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values("foo"); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
if values := cache.Values("bar"); !reflect.DeepEqual(values, Values{p2}) {
t.Fatalf("cache key bar not as expected, got %v, exp %v", values, Values{p2})
}
if values := cache.Values("baz"); !reflect.DeepEqual(values, Values{p3}) {
t.Fatalf("cache key baz not as expected, got %v, exp %v", values, Values{p3})
}
}
// Ensure the CacheLoader can correctly load from two segments, even if one is corrupted.
func TestCacheLoader_LoadDouble(t *testing.T) {
// Create a WAL segment.
dir := mustTempDir()
defer os.RemoveAll(dir)
f1, f2 := mustTempFile(dir), mustTempFile(dir)
w1, w2 := NewWALSegmentWriter(f1), NewWALSegmentWriter(f2)
2016-02-23 03:47:17 +00:00
p1 := NewValue(1, 1.1)
p2 := NewValue(1, int64(1))
p3 := NewValue(1, true)
p4 := NewValue(1, "string")
// Write first and second segment.
segmentWrite := func(w *WALSegmentWriter, values map[string][]Value) {
entry := &WriteWALEntry{
Values: values,
}
if err := w1.Write(mustMarshalEntry(entry)); err != nil {
t.Fatal("write points", err)
}
}
values := map[string][]Value{
"foo": []Value{p1},
"bar": []Value{p2},
}
segmentWrite(w1, values)
values = map[string][]Value{
"baz": []Value{p3},
"qux": []Value{p4},
}
segmentWrite(w2, values)
// Corrupt the first WAL segment.
if _, err := f1.Write([]byte{1, 4, 0, 0, 0}); err != nil {
t.Fatalf("corrupt WAL segment: %s", err.Error())
}
// Load the cache using the segments.
2016-02-19 17:42:20 +00:00
cache := NewCache(1024, "")
loader := NewCacheLoader([]string{f1.Name(), f2.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values("foo"); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
if values := cache.Values("bar"); !reflect.DeepEqual(values, Values{p2}) {
t.Fatalf("cache key bar not as expected, got %v, exp %v", values, Values{p2})
}
if values := cache.Values("baz"); !reflect.DeepEqual(values, Values{p3}) {
t.Fatalf("cache key baz not as expected, got %v, exp %v", values, Values{p3})
}
if values := cache.Values("qux"); !reflect.DeepEqual(values, Values{p4}) {
t.Fatalf("cache key qux not as expected, got %v, exp %v", values, Values{p4})
}
}
// Ensure the CacheLoader can load deleted series
func TestCacheLoader_LoadDeleted(t *testing.T) {
	// Create a WAL segment.
	dir := mustTempDir()
	defer os.RemoveAll(dir)
	f := mustTempFile(dir)
	w := NewWALSegmentWriter(f)

	p1 := NewValue(1, 1.0)
	p2 := NewValue(2, 2.0)
	p3 := NewValue(3, 3.0)

	// Write three points for "foo", followed by a delete-range entry
	// covering timestamps 2 through 3.
	writeEntry := &WriteWALEntry{
		Values: map[string][]Value{
			"foo": []Value{p1, p2, p3},
		},
	}
	if err := w.Write(mustMarshalEntry(writeEntry)); err != nil {
		t.Fatal("write points", err)
	}

	deleteEntry := &DeleteRangeWALEntry{
		Keys: []string{"foo"},
		Min:  2,
		Max:  3,
	}
	if err := w.Write(mustMarshalEntry(deleteEntry)); err != nil {
		t.Fatal("write points", err)
	}

	// Loading the segment twice must give the same result each time:
	// only the point outside the deleted range survives.
	for i := 0; i < 2; i++ {
		cache := NewCache(1024, "")
		loader := NewCacheLoader([]string{f.Name()})
		if err := loader.Load(cache); err != nil {
			t.Fatalf("failed to load cache: %s", err.Error())
		}
		if values := cache.Values("foo"); !reflect.DeepEqual(values, Values{p1}) {
			t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
		}
	}
}
// mustTempDir creates a fresh temporary directory for a test,
// panicking if the directory cannot be created. Callers are
// responsible for removing it.
func mustTempDir() string {
	dir, err := ioutil.TempDir("", "tsm1-test")
	if err == nil {
		return dir
	}
	panic(fmt.Sprintf("failed to create temp dir: %v", err))
}
// mustTempFile opens a new temporary file inside dir, panicking if the
// file cannot be created. Callers are responsible for closing it.
func mustTempFile(dir string) *os.File {
	file, err := ioutil.TempFile(dir, "tsm1test")
	if err == nil {
		return file
	}
	panic(fmt.Sprintf("failed to create temp file: %v", err))
}
// mustMarshalEntry encodes entry and returns its WAL type together with
// the snappy-compressed encoding, panicking on encode failure.
func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
	// Renamed from "bytes" to avoid shadowing the standard library package.
	buf := make([]byte, 1024<<2)
	b, err := entry.Encode(buf)
	if err != nil {
		panic(fmt.Sprintf("error encoding: %v", err))
	}
	// Fix: previously snappy.Encode(b, b) compressed the buffer into
	// itself; overlapping dst and src is not safe. Passing nil lets
	// snappy allocate a correctly sized destination.
	return entry.Type(), snappy.Encode(nil, b)
}
2016-02-02 18:12:16 +00:00
func BenchmarkCacheFloatEntries(b *testing.B) {
for i := 0; i < b.N; i++ {
2016-02-19 17:42:20 +00:00
cache := NewCache(10000, "")
2016-02-02 18:12:16 +00:00
for j := 0; j < 10000; j++ {
2016-02-23 03:47:17 +00:00
v := NewValue(1, float64(j))
2016-02-02 18:12:16 +00:00
cache.Write("test", []Value{v})
}
}
}