Convert all keys from string to []byte in TSM engine
This switches all the interfaces that take string series key to take a []byte. This eliminates many small allocations where we convert between the two repeatedly. Eventually, this change should propagate further up the stack.pull/8645/head
parent
8009da0187
commit
778000435a
|
|
@ -110,7 +110,7 @@ func (cmd *Command) dump() error {
|
|||
var pos int
|
||||
for i := 0; i < keyCount; i++ {
|
||||
key, _ := r.KeyAt(i)
|
||||
for _, e := range r.Entries(string(key)) {
|
||||
for _, e := range r.Entries(key) {
|
||||
pos++
|
||||
split := strings.Split(string(key), "#!~#")
|
||||
|
||||
|
|
@ -146,7 +146,7 @@ func (cmd *Command) dump() error {
|
|||
// Start at the beginning and read every block
|
||||
for j := 0; j < keyCount; j++ {
|
||||
key, _ := r.KeyAt(j)
|
||||
for _, e := range r.Entries(string(key)) {
|
||||
for _, e := range r.Entries(key) {
|
||||
|
||||
f.Seek(int64(e.Offset), 0)
|
||||
f.Read(b[:4])
|
||||
|
|
|
|||
|
|
@ -278,7 +278,7 @@ func (cmd *Command) exportTSMFile(tsmFilePath string, w io.Writer) error {
|
|||
|
||||
for i := 0; i < r.KeyCount(); i++ {
|
||||
key, _ := r.KeyAt(i)
|
||||
values, err := r.ReadAll(string(key))
|
||||
values, err := r.ReadAll(key)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cmd.Stderr, "unable to read key %q in %s, skipping: %s\n", string(key), tsmFilePath, err.Error())
|
||||
continue
|
||||
|
|
|
|||
|
|
@ -323,7 +323,7 @@ func writeCorpusToTSMFile(c corpus) *os.File {
|
|||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
if err := w.Write(k, c[k]); err != nil {
|
||||
if err := w.Write([]byte(k), c[k]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -60,7 +60,7 @@ func (c *Converter) Process(iter KeyIterator) error {
|
|||
}
|
||||
keyCount = map[string]int{}
|
||||
}
|
||||
if err := w.Write(k, v); err != nil {
|
||||
if err := w.Write([]byte(k), v); err != nil {
|
||||
return err
|
||||
}
|
||||
keyCount[k]++
|
||||
|
|
|
|||
|
|
@ -168,13 +168,13 @@ const (
|
|||
|
||||
// storer is the interface that describes a cache's store.
|
||||
type storer interface {
|
||||
entry(key string) (*entry, bool) // Get an entry by its key.
|
||||
write(key string, values Values) error // Write an entry to the store.
|
||||
add(key string, entry *entry) // Add a new entry to the store.
|
||||
remove(key string) // Remove an entry from the store.
|
||||
keys(sorted bool) []string // Return an optionally sorted slice of entry keys.
|
||||
apply(f func(string, *entry) error) error // Apply f to all entries in the store in parallel.
|
||||
applySerial(f func(string, *entry) error) error // Apply f to all entries in serial.
|
||||
entry(key []byte) (*entry, bool) // Get an entry by its key.
|
||||
write(key []byte, values Values) error // Write an entry to the store.
|
||||
add(key []byte, entry *entry) // Add a new entry to the store.
|
||||
remove(key []byte) // Remove an entry from the store.
|
||||
keys(sorted bool) [][]byte // Return an optionally sorted slice of entry keys.
|
||||
apply(f func([]byte, *entry) error) error // Apply f to all entries in the store in parallel.
|
||||
applySerial(f func([]byte, *entry) error) error // Apply f to all entries in serial.
|
||||
reset() // Reset the store to an initial unused state.
|
||||
}
|
||||
|
||||
|
|
@ -255,7 +255,7 @@ func (c *Cache) Statistics(tags map[string]string) []models.Statistic {
|
|||
|
||||
// Write writes the set of values for the key to the cache. This function is goroutine-safe.
|
||||
// It returns an error if the cache will exceed its max size by adding the new values.
|
||||
func (c *Cache) Write(key string, values []Value) error {
|
||||
func (c *Cache) Write(key []byte, values []Value) error {
|
||||
addedSize := uint64(Values(values).Size())
|
||||
|
||||
// Enough room in the cache?
|
||||
|
|
@ -307,7 +307,7 @@ func (c *Cache) WriteMulti(values map[string][]Value) error {
|
|||
// We'll optimistially set size here, and then decrement it for write errors.
|
||||
c.increaseSize(addedSize)
|
||||
for k, v := range values {
|
||||
if err := store.write(k, v); err != nil {
|
||||
if err := store.write([]byte(k), v); err != nil {
|
||||
// The write failed, hold onto the error and adjust the size delta.
|
||||
werr = err
|
||||
addedSize -= uint64(Values(v).Size())
|
||||
|
|
@ -388,7 +388,7 @@ func (c *Cache) Deduplicate() {
|
|||
|
||||
// Apply a function that simply calls deduplicate on each entry in the ring.
|
||||
// apply cannot return an error in this invocation.
|
||||
_ = store.apply(func(_ string, e *entry) error { e.deduplicate(); return nil })
|
||||
_ = store.apply(func(_ []byte, e *entry) error { e.deduplicate(); return nil })
|
||||
}
|
||||
|
||||
// ClearSnapshot removes the snapshot cache from the list of flushing caches and
|
||||
|
|
@ -436,7 +436,7 @@ func (c *Cache) MaxSize() uint64 {
|
|||
}
|
||||
|
||||
// Keys returns a sorted slice of all keys under management by the cache.
|
||||
func (c *Cache) Keys() []string {
|
||||
func (c *Cache) Keys() [][]byte {
|
||||
c.mu.RLock()
|
||||
store := c.store
|
||||
c.mu.RUnlock()
|
||||
|
|
@ -445,7 +445,7 @@ func (c *Cache) Keys() []string {
|
|||
|
||||
// unsortedKeys returns a slice of all keys under management by the cache. The
|
||||
// keys are not sorted.
|
||||
func (c *Cache) unsortedKeys() []string {
|
||||
func (c *Cache) unsortedKeys() [][]byte {
|
||||
c.mu.RLock()
|
||||
store := c.store
|
||||
c.mu.RUnlock()
|
||||
|
|
@ -453,7 +453,7 @@ func (c *Cache) unsortedKeys() []string {
|
|||
}
|
||||
|
||||
// Values returns a copy of all values, deduped and sorted, for the given key.
|
||||
func (c *Cache) Values(key string) Values {
|
||||
func (c *Cache) Values(key []byte) Values {
|
||||
var snapshotEntries *entry
|
||||
|
||||
c.mu.RLock()
|
||||
|
|
@ -510,7 +510,7 @@ func (c *Cache) Values(key string) Values {
|
|||
}
|
||||
|
||||
// Delete removes all values for the given keys from the cache.
|
||||
func (c *Cache) Delete(keys []string) {
|
||||
func (c *Cache) Delete(keys [][]byte) {
|
||||
c.DeleteRange(keys, math.MinInt64, math.MaxInt64)
|
||||
}
|
||||
|
||||
|
|
@ -518,7 +518,7 @@ func (c *Cache) Delete(keys []string) {
|
|||
// with timestamps between between min and max from the cache.
|
||||
//
|
||||
// TODO(edd): Lock usage could possibly be optimised if necessary.
|
||||
func (c *Cache) DeleteRange(keys []string, min, max int64) {
|
||||
func (c *Cache) DeleteRange(keys [][]byte, min, max int64) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
|
@ -558,7 +558,7 @@ func (c *Cache) SetMaxSize(size uint64) {
|
|||
// values returns the values for the key. It assumes the data is already sorted.
|
||||
// It doesn't lock the cache but it does read-lock the entry if there is one for the key.
|
||||
// values should only be used in compact.go in the CacheKeyIterator.
|
||||
func (c *Cache) values(key string) Values {
|
||||
func (c *Cache) values(key []byte) Values {
|
||||
e, _ := c.store.entry(key)
|
||||
if e == nil {
|
||||
return nil
|
||||
|
|
@ -572,7 +572,7 @@ func (c *Cache) values(key string) Values {
|
|||
// ApplyEntryFn applies the function f to each entry in the Cache.
|
||||
// ApplyEntryFn calls f on each entry in turn, within the same goroutine.
|
||||
// It is safe for use by multiple goroutines.
|
||||
func (c *Cache) ApplyEntryFn(f func(key string, entry *entry) error) error {
|
||||
func (c *Cache) ApplyEntryFn(f func(key []byte, entry *entry) error) error {
|
||||
c.mu.RLock()
|
||||
store := c.store
|
||||
c.mu.RUnlock()
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ import (
|
|||
func TestCacheCheckConcurrentReadsAreSafe(t *testing.T) {
|
||||
values := make(tsm1.Values, 1000)
|
||||
timestamps := make([]int64, len(values))
|
||||
series := make([]string, 100)
|
||||
series := make([][]byte, 100)
|
||||
for i := range timestamps {
|
||||
timestamps[i] = int64(rand.Int63n(int64(len(values))))
|
||||
}
|
||||
|
|
@ -22,7 +22,7 @@ func TestCacheCheckConcurrentReadsAreSafe(t *testing.T) {
|
|||
}
|
||||
|
||||
for i := range series {
|
||||
series[i] = fmt.Sprintf("series%d", i)
|
||||
series[i] = []byte(fmt.Sprintf("series%d", i))
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
|
|
@ -34,17 +34,17 @@ func TestCacheCheckConcurrentReadsAreSafe(t *testing.T) {
|
|||
c.Write(s, tsm1.Values{v})
|
||||
}
|
||||
wg.Add(3)
|
||||
go func(s string) {
|
||||
go func(s []byte) {
|
||||
defer wg.Done()
|
||||
<-ch
|
||||
c.Values(s)
|
||||
}(s)
|
||||
go func(s string) {
|
||||
go func(s []byte) {
|
||||
defer wg.Done()
|
||||
<-ch
|
||||
c.Values(s)
|
||||
}(s)
|
||||
go func(s string) {
|
||||
go func(s []byte) {
|
||||
defer wg.Done()
|
||||
<-ch
|
||||
c.Values(s)
|
||||
|
|
@ -57,7 +57,7 @@ func TestCacheCheckConcurrentReadsAreSafe(t *testing.T) {
|
|||
func TestCacheRace(t *testing.T) {
|
||||
values := make(tsm1.Values, 1000)
|
||||
timestamps := make([]int64, len(values))
|
||||
series := make([]string, 100)
|
||||
series := make([][]byte, 100)
|
||||
for i := range timestamps {
|
||||
timestamps[i] = int64(rand.Int63n(int64(len(values))))
|
||||
}
|
||||
|
|
@ -67,7 +67,7 @@ func TestCacheRace(t *testing.T) {
|
|||
}
|
||||
|
||||
for i := range series {
|
||||
series[i] = fmt.Sprintf("series%d", i)
|
||||
series[i] = []byte(fmt.Sprintf("series%d", i))
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
|
|
@ -79,7 +79,7 @@ func TestCacheRace(t *testing.T) {
|
|||
c.Write(s, tsm1.Values{v})
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(s string) {
|
||||
go func(s []byte) {
|
||||
defer wg.Done()
|
||||
<-ch
|
||||
c.Values(s)
|
||||
|
|
@ -122,7 +122,7 @@ func TestCacheRace(t *testing.T) {
|
|||
func TestCacheRace2Compacters(t *testing.T) {
|
||||
values := make(tsm1.Values, 1000)
|
||||
timestamps := make([]int64, len(values))
|
||||
series := make([]string, 100)
|
||||
series := make([][]byte, 100)
|
||||
for i := range timestamps {
|
||||
timestamps[i] = int64(rand.Int63n(int64(len(values))))
|
||||
}
|
||||
|
|
@ -132,7 +132,7 @@ func TestCacheRace2Compacters(t *testing.T) {
|
|||
}
|
||||
|
||||
for i := range series {
|
||||
series[i] = fmt.Sprintf("series%d", i)
|
||||
series[i] = []byte(fmt.Sprintf("series%d", i))
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
|
|
@ -144,7 +144,7 @@ func TestCacheRace2Compacters(t *testing.T) {
|
|||
c.Write(s, tsm1.Values{v})
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(s string) {
|
||||
go func(s []byte) {
|
||||
defer wg.Done()
|
||||
<-ch
|
||||
c.Values(s)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
package tsm1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
|
@ -43,17 +44,17 @@ func TestCache_CacheWrite(t *testing.T) {
|
|||
|
||||
c := NewCache(3*valuesSize, "")
|
||||
|
||||
if err := c.Write("foo", values); err != nil {
|
||||
if err := c.Write([]byte("foo"), values); err != nil {
|
||||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
if err := c.Write("bar", values); err != nil {
|
||||
if err := c.Write([]byte("bar"), values); err != nil {
|
||||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
if n := c.Size(); n != 2*valuesSize {
|
||||
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
|
||||
}
|
||||
|
||||
if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
}
|
||||
|
|
@ -66,11 +67,11 @@ func TestCache_CacheWrite_TypeConflict(t *testing.T) {
|
|||
|
||||
c := NewCache(uint64(2*valuesSize), "")
|
||||
|
||||
if err := c.Write("foo", values[:1]); err != nil {
|
||||
if err := c.Write([]byte("foo"), values[:1]); err != nil {
|
||||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
|
||||
if err := c.Write("foo", values[1:]); err == nil {
|
||||
if err := c.Write([]byte("foo"), values[1:]); err == nil {
|
||||
t.Fatalf("expected field type conflict")
|
||||
}
|
||||
|
||||
|
|
@ -95,7 +96,7 @@ func TestCache_CacheWriteMulti(t *testing.T) {
|
|||
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
|
||||
}
|
||||
|
||||
if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
}
|
||||
|
|
@ -118,8 +119,8 @@ func TestCache_WriteMulti_Stats(t *testing.T) {
|
|||
c = NewCache(50, "")
|
||||
c.store = ms
|
||||
|
||||
ms.writef = func(key string, v Values) error {
|
||||
if key == "foo" {
|
||||
ms.writef = func(key []byte, v Values) error {
|
||||
if bytes.Equal(key, []byte("foo")) {
|
||||
return errors.New("write failed")
|
||||
}
|
||||
return nil
|
||||
|
|
@ -160,7 +161,7 @@ func TestCache_CacheWriteMulti_TypeConflict(t *testing.T) {
|
|||
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
|
||||
}
|
||||
|
||||
if exp, keys := []string{"foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
}
|
||||
|
|
@ -181,13 +182,13 @@ func TestCache_Cache_DeleteRange(t *testing.T) {
|
|||
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
|
||||
}
|
||||
|
||||
if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
|
||||
c.DeleteRange([]string{"bar"}, 2, math.MaxInt64)
|
||||
c.DeleteRange([][]byte{[]byte("bar")}, 2, math.MaxInt64)
|
||||
|
||||
if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
|
||||
|
|
@ -195,11 +196,11 @@ func TestCache_Cache_DeleteRange(t *testing.T) {
|
|||
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
|
||||
}
|
||||
|
||||
if got, exp := len(c.Values("bar")), 1; got != exp {
|
||||
if got, exp := len(c.Values([]byte("bar"))), 1; got != exp {
|
||||
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := len(c.Values("foo")), 3; got != exp {
|
||||
if got, exp := len(c.Values([]byte("foo"))), 3; got != exp {
|
||||
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -220,11 +221,11 @@ func TestCache_DeleteRange_NoValues(t *testing.T) {
|
|||
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
|
||||
}
|
||||
|
||||
if exp, keys := []string{"foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
|
||||
c.DeleteRange([]string{"foo"}, math.MinInt64, math.MaxInt64)
|
||||
c.DeleteRange([][]byte{[]byte("foo")}, math.MinInt64, math.MaxInt64)
|
||||
|
||||
if exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
|
|
@ -234,7 +235,7 @@ func TestCache_DeleteRange_NoValues(t *testing.T) {
|
|||
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
|
||||
}
|
||||
|
||||
if got, exp := len(c.Values("foo")), 0; got != exp {
|
||||
if got, exp := len(c.Values([]byte("foo"))), 0; got != exp {
|
||||
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -255,13 +256,13 @@ func TestCache_Cache_Delete(t *testing.T) {
|
|||
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
|
||||
}
|
||||
|
||||
if exp, keys := []string{"bar", "foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
|
||||
c.Delete([]string{"bar"})
|
||||
c.Delete([][]byte{[]byte("bar")})
|
||||
|
||||
if exp, keys := []string{"foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
|
||||
|
|
@ -269,11 +270,11 @@ func TestCache_Cache_Delete(t *testing.T) {
|
|||
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
|
||||
}
|
||||
|
||||
if got, exp := len(c.Values("bar")), 0; got != exp {
|
||||
if got, exp := len(c.Values([]byte("bar"))), 0; got != exp {
|
||||
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := len(c.Values("foo")), 3; got != exp {
|
||||
if got, exp := len(c.Values([]byte("foo"))), 3; got != exp {
|
||||
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -281,7 +282,7 @@ func TestCache_Cache_Delete(t *testing.T) {
|
|||
func TestCache_Cache_Delete_NonExistent(t *testing.T) {
|
||||
c := NewCache(1024, "")
|
||||
|
||||
c.Delete([]string{"bar"})
|
||||
c.Delete([][]byte{[]byte("bar")})
|
||||
|
||||
if got, exp := c.Size(), uint64(0); exp != got {
|
||||
t.Fatalf("cache size incorrect exp %d, got %d", exp, got)
|
||||
|
|
@ -310,15 +311,15 @@ func TestCache_CacheWriteMulti_Duplicates(t *testing.T) {
|
|||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
|
||||
if exp, keys := []string{"foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
|
||||
expAscValues := Values{v0, v1, v3, v5}
|
||||
if exp, got := len(expAscValues), len(c.Values("foo")); exp != got {
|
||||
if exp, got := len(expAscValues), len(c.Values([]byte("foo"))); exp != got {
|
||||
t.Fatalf("value count mismatch: exp: %v, got %v", exp, got)
|
||||
}
|
||||
if deduped := c.Values("foo"); !reflect.DeepEqual(expAscValues, deduped) {
|
||||
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expAscValues, deduped) {
|
||||
t.Fatalf("deduped ascending values for foo incorrect, exp: %v, got %v", expAscValues, deduped)
|
||||
}
|
||||
}
|
||||
|
|
@ -331,19 +332,19 @@ func TestCache_CacheValues(t *testing.T) {
|
|||
v4 := NewValue(4, 4.0)
|
||||
|
||||
c := NewCache(512, "")
|
||||
if deduped := c.Values("no such key"); deduped != nil {
|
||||
if deduped := c.Values([]byte("no such key")); deduped != nil {
|
||||
t.Fatalf("Values returned for no such key")
|
||||
}
|
||||
|
||||
if err := c.Write("foo", Values{v0, v1, v2, v3}); err != nil {
|
||||
if err := c.Write([]byte("foo"), Values{v0, v1, v2, v3}); err != nil {
|
||||
t.Fatalf("failed to write 3 values, key foo to cache: %s", err.Error())
|
||||
}
|
||||
if err := c.Write("foo", Values{v4}); err != nil {
|
||||
if err := c.Write([]byte("foo"), Values{v4}); err != nil {
|
||||
t.Fatalf("failed to write 1 value, key foo to cache: %s", err.Error())
|
||||
}
|
||||
|
||||
expAscValues := Values{v3, v1, v2, v4}
|
||||
if deduped := c.Values("foo"); !reflect.DeepEqual(expAscValues, deduped) {
|
||||
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expAscValues, deduped) {
|
||||
t.Fatalf("deduped ascending values for foo incorrect, exp: %v, got %v", expAscValues, deduped)
|
||||
}
|
||||
}
|
||||
|
|
@ -359,7 +360,7 @@ func TestCache_CacheSnapshot(t *testing.T) {
|
|||
v7 := NewValue(2, 5.0)
|
||||
|
||||
c := NewCache(512, "")
|
||||
if err := c.Write("foo", Values{v0, v1, v2, v3}); err != nil {
|
||||
if err := c.Write([]byte("foo"), Values{v0, v1, v2, v3}); err != nil {
|
||||
t.Fatalf("failed to write 3 values, key foo to cache: %s", err.Error())
|
||||
}
|
||||
|
||||
|
|
@ -370,30 +371,30 @@ func TestCache_CacheSnapshot(t *testing.T) {
|
|||
}
|
||||
|
||||
expValues := Values{v0, v1, v2, v3}
|
||||
if deduped := snapshot.values("foo"); !reflect.DeepEqual(expValues, deduped) {
|
||||
if deduped := snapshot.values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
|
||||
t.Fatalf("snapshotted values for foo incorrect, exp: %v, got %v", expValues, deduped)
|
||||
}
|
||||
|
||||
// Ensure cache is still as expected.
|
||||
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
|
||||
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
|
||||
t.Fatalf("post-snapshot values for foo incorrect, exp: %v, got %v", expValues, deduped)
|
||||
}
|
||||
|
||||
// Write a new value to the cache.
|
||||
if err := c.Write("foo", Values{v4}); err != nil {
|
||||
if err := c.Write([]byte("foo"), Values{v4}); err != nil {
|
||||
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
|
||||
}
|
||||
expValues = Values{v0, v1, v2, v3, v4}
|
||||
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
|
||||
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
|
||||
t.Fatalf("post-snapshot write values for foo incorrect, exp: %v, got %v", expValues, deduped)
|
||||
}
|
||||
|
||||
// Write a new, out-of-order, value to the cache.
|
||||
if err := c.Write("foo", Values{v5}); err != nil {
|
||||
if err := c.Write([]byte("foo"), Values{v5}); err != nil {
|
||||
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
|
||||
}
|
||||
expValues = Values{v5, v0, v1, v2, v3, v4}
|
||||
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
|
||||
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
|
||||
t.Fatalf("post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v", expValues, deduped)
|
||||
}
|
||||
|
||||
|
|
@ -401,7 +402,7 @@ func TestCache_CacheSnapshot(t *testing.T) {
|
|||
c.ClearSnapshot(true)
|
||||
|
||||
expValues = Values{v5, v4}
|
||||
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
|
||||
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
|
||||
t.Fatalf("post-clear values for foo incorrect, exp: %v, got %v", expValues, deduped)
|
||||
}
|
||||
|
||||
|
|
@ -411,7 +412,7 @@ func TestCache_CacheSnapshot(t *testing.T) {
|
|||
t.Fatalf("failed to snapshot cache: %v", err)
|
||||
}
|
||||
|
||||
if err := c.Write("foo", Values{v4, v5}); err != nil {
|
||||
if err := c.Write([]byte("foo"), Values{v4, v5}); err != nil {
|
||||
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
|
||||
}
|
||||
|
||||
|
|
@ -422,12 +423,12 @@ func TestCache_CacheSnapshot(t *testing.T) {
|
|||
t.Fatalf("failed to snapshot cache: %v", err)
|
||||
}
|
||||
|
||||
if err := c.Write("foo", Values{v6, v7}); err != nil {
|
||||
if err := c.Write([]byte("foo"), Values{v6, v7}); err != nil {
|
||||
t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error())
|
||||
}
|
||||
|
||||
expValues = Values{v5, v7, v4, v6}
|
||||
if deduped := c.Values("foo"); !reflect.DeepEqual(expValues, deduped) {
|
||||
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) {
|
||||
t.Fatalf("post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v", expValues, deduped)
|
||||
}
|
||||
}
|
||||
|
|
@ -466,18 +467,18 @@ func TestCache_CacheEmptySnapshot(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed to snapshot cache: %v", err)
|
||||
}
|
||||
if deduped := snapshot.values("foo"); !reflect.DeepEqual(Values(nil), deduped) {
|
||||
if deduped := snapshot.values([]byte("foo")); !reflect.DeepEqual(Values(nil), deduped) {
|
||||
t.Fatalf("snapshotted values for foo incorrect, exp: %v, got %v", nil, deduped)
|
||||
}
|
||||
|
||||
// Ensure cache is still as expected.
|
||||
if deduped := c.Values("foo"); !reflect.DeepEqual(Values(nil), deduped) {
|
||||
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(Values(nil), deduped) {
|
||||
t.Fatalf("post-snapshotted values for foo incorrect, exp: %v, got %v", Values(nil), deduped)
|
||||
}
|
||||
|
||||
// Clear snapshot.
|
||||
c.ClearSnapshot(true)
|
||||
if deduped := c.Values("foo"); !reflect.DeepEqual(Values(nil), deduped) {
|
||||
if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(Values(nil), deduped) {
|
||||
t.Fatalf("post-snapshot-clear values for foo incorrect, exp: %v, got %v", Values(nil), deduped)
|
||||
}
|
||||
}
|
||||
|
|
@ -488,13 +489,13 @@ func TestCache_CacheWriteMemoryExceeded(t *testing.T) {
|
|||
|
||||
c := NewCache(uint64(v1.Size()), "")
|
||||
|
||||
if err := c.Write("foo", Values{v0}); err != nil {
|
||||
if err := c.Write([]byte("foo"), Values{v0}); err != nil {
|
||||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
if exp, keys := []string{"foo"}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
|
||||
t.Fatalf("cache keys incorrect after writes, exp %v, got %v", exp, keys)
|
||||
}
|
||||
if err := c.Write("bar", Values{v1}); err == nil || !strings.Contains(err.Error(), "cache-max-memory-size") {
|
||||
if err := c.Write([]byte("bar"), Values{v1}); err == nil || !strings.Contains(err.Error(), "cache-max-memory-size") {
|
||||
t.Fatalf("wrong error writing key bar to cache: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -503,17 +504,17 @@ func TestCache_CacheWriteMemoryExceeded(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed to snapshot cache: %v", err)
|
||||
}
|
||||
if err := c.Write("bar", Values{v1}); err == nil || !strings.Contains(err.Error(), "cache-max-memory-size") {
|
||||
if err := c.Write([]byte("bar"), Values{v1}); err == nil || !strings.Contains(err.Error(), "cache-max-memory-size") {
|
||||
t.Fatalf("wrong error writing key bar to cache: %v", err)
|
||||
}
|
||||
|
||||
// Clear the snapshot and the write should now succeed.
|
||||
c.ClearSnapshot(true)
|
||||
if err := c.Write("bar", Values{v1}); err != nil {
|
||||
if err := c.Write([]byte("bar"), Values{v1}); err != nil {
|
||||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
expAscValues := Values{v1}
|
||||
if deduped := c.Values("bar"); !reflect.DeepEqual(expAscValues, deduped) {
|
||||
if deduped := c.Values([]byte("bar")); !reflect.DeepEqual(expAscValues, deduped) {
|
||||
t.Fatalf("deduped ascending values for bar incorrect, exp: %v, got %v", expAscValues, deduped)
|
||||
}
|
||||
}
|
||||
|
|
@ -591,13 +592,13 @@ func TestCacheLoader_LoadSingle(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check the cache.
|
||||
if values := cache.Values("foo"); !reflect.DeepEqual(values, Values{p1}) {
|
||||
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
|
||||
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
|
||||
}
|
||||
if values := cache.Values("bar"); !reflect.DeepEqual(values, Values{p2}) {
|
||||
if values := cache.Values([]byte("bar")); !reflect.DeepEqual(values, Values{p2}) {
|
||||
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p2})
|
||||
}
|
||||
if values := cache.Values("baz"); !reflect.DeepEqual(values, Values{p3}) {
|
||||
if values := cache.Values([]byte("baz")); !reflect.DeepEqual(values, Values{p3}) {
|
||||
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p3})
|
||||
}
|
||||
|
||||
|
|
@ -614,13 +615,13 @@ func TestCacheLoader_LoadSingle(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check the cache.
|
||||
if values := cache.Values("foo"); !reflect.DeepEqual(values, Values{p1}) {
|
||||
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
|
||||
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
|
||||
}
|
||||
if values := cache.Values("bar"); !reflect.DeepEqual(values, Values{p2}) {
|
||||
if values := cache.Values([]byte("bar")); !reflect.DeepEqual(values, Values{p2}) {
|
||||
t.Fatalf("cache key bar not as expected, got %v, exp %v", values, Values{p2})
|
||||
}
|
||||
if values := cache.Values("baz"); !reflect.DeepEqual(values, Values{p3}) {
|
||||
if values := cache.Values([]byte("baz")); !reflect.DeepEqual(values, Values{p3}) {
|
||||
t.Fatalf("cache key baz not as expected, got %v, exp %v", values, Values{p3})
|
||||
}
|
||||
}
|
||||
|
|
@ -676,16 +677,16 @@ func TestCacheLoader_LoadDouble(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check the cache.
|
||||
if values := cache.Values("foo"); !reflect.DeepEqual(values, Values{p1}) {
|
||||
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
|
||||
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
|
||||
}
|
||||
if values := cache.Values("bar"); !reflect.DeepEqual(values, Values{p2}) {
|
||||
if values := cache.Values([]byte("bar")); !reflect.DeepEqual(values, Values{p2}) {
|
||||
t.Fatalf("cache key bar not as expected, got %v, exp %v", values, Values{p2})
|
||||
}
|
||||
if values := cache.Values("baz"); !reflect.DeepEqual(values, Values{p3}) {
|
||||
if values := cache.Values([]byte("baz")); !reflect.DeepEqual(values, Values{p3}) {
|
||||
t.Fatalf("cache key baz not as expected, got %v, exp %v", values, Values{p3})
|
||||
}
|
||||
if values := cache.Values("qux"); !reflect.DeepEqual(values, Values{p4}) {
|
||||
if values := cache.Values([]byte("qux")); !reflect.DeepEqual(values, Values{p4}) {
|
||||
t.Fatalf("cache key qux not as expected, got %v, exp %v", values, Values{p4})
|
||||
}
|
||||
}
|
||||
|
|
@ -719,7 +720,7 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
|
|||
}
|
||||
|
||||
dentry := &DeleteRangeWALEntry{
|
||||
Keys: []string{"foo"},
|
||||
Keys: [][]byte{[]byte("foo")},
|
||||
Min: 2,
|
||||
Max: 3,
|
||||
}
|
||||
|
|
@ -740,7 +741,7 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check the cache.
|
||||
if values := cache.Values("foo"); !reflect.DeepEqual(values, Values{p1}) {
|
||||
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
|
||||
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
|
||||
}
|
||||
|
||||
|
|
@ -752,7 +753,7 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check the cache.
|
||||
if values := cache.Values("foo"); !reflect.DeepEqual(values, Values{p1}) {
|
||||
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
|
||||
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
|
||||
}
|
||||
}
|
||||
|
|
@ -787,24 +788,24 @@ func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
|
|||
// TestStore implements the storer interface and can be used to mock out a
|
||||
// Cache's storer implementation.
|
||||
type TestStore struct {
|
||||
entryf func(key string) (*entry, bool)
|
||||
writef func(key string, values Values) error
|
||||
addf func(key string, entry *entry)
|
||||
removef func(key string)
|
||||
keysf func(sorted bool) []string
|
||||
applyf func(f func(string, *entry) error) error
|
||||
applySerialf func(f func(string, *entry) error) error
|
||||
entryf func(key []byte) (*entry, bool)
|
||||
writef func(key []byte, values Values) error
|
||||
addf func(key []byte, entry *entry)
|
||||
removef func(key []byte)
|
||||
keysf func(sorted bool) [][]byte
|
||||
applyf func(f func([]byte, *entry) error) error
|
||||
applySerialf func(f func([]byte, *entry) error) error
|
||||
resetf func()
|
||||
}
|
||||
|
||||
func NewTestStore() *TestStore { return &TestStore{} }
|
||||
func (s *TestStore) entry(key string) (*entry, bool) { return s.entryf(key) }
|
||||
func (s *TestStore) write(key string, values Values) error { return s.writef(key, values) }
|
||||
func (s *TestStore) add(key string, entry *entry) { s.addf(key, entry) }
|
||||
func (s *TestStore) remove(key string) { s.removef(key) }
|
||||
func (s *TestStore) keys(sorted bool) []string { return s.keysf(sorted) }
|
||||
func (s *TestStore) apply(f func(string, *entry) error) error { return s.applyf(f) }
|
||||
func (s *TestStore) applySerial(f func(string, *entry) error) error { return s.applySerialf(f) }
|
||||
func (s *TestStore) entry(key []byte) (*entry, bool) { return s.entryf(key) }
|
||||
func (s *TestStore) write(key []byte, values Values) error { return s.writef(key, values) }
|
||||
func (s *TestStore) add(key []byte, entry *entry) { s.addf(key, entry) }
|
||||
func (s *TestStore) remove(key []byte) { s.removef(key) }
|
||||
func (s *TestStore) keys(sorted bool) [][]byte { return s.keysf(sorted) }
|
||||
func (s *TestStore) apply(f func([]byte, *entry) error) error { return s.applyf(f) }
|
||||
func (s *TestStore) applySerial(f func([]byte, *entry) error) error { return s.applySerialf(f) }
|
||||
func (s *TestStore) reset() { s.resetf() }
|
||||
|
||||
var fvSize = uint64(NewValue(1, float64(1)).Size())
|
||||
|
|
@ -818,14 +819,14 @@ func BenchmarkCacheFloatEntries(b *testing.B) {
|
|||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := cache.Write("test", vals[i]); err != nil {
|
||||
if err := cache.Write([]byte("test"), vals[i]); err != nil {
|
||||
b.Fatal("err:", err, "i:", i, "N:", b.N)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type points struct {
|
||||
key string
|
||||
key []byte
|
||||
vals []Value
|
||||
}
|
||||
|
||||
|
|
@ -838,7 +839,7 @@ func BenchmarkCacheParallelFloatEntries(b *testing.B) {
|
|||
for j := 0; j < 10; j++ {
|
||||
v[j] = NewValue(1, float64(i+j))
|
||||
}
|
||||
vals[i] = points{key: fmt.Sprintf("cpu%v", rand.Intn(20)), vals: v}
|
||||
vals[i] = points{key: []byte(fmt.Sprintf("cpu%v", rand.Intn(20))), vals: v}
|
||||
}
|
||||
i := int32(-1)
|
||||
b.ResetTimer()
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ package tsm1
|
|||
// one-pass writing of a new TSM file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
|
|
@ -974,7 +975,7 @@ type KeyIterator interface {
|
|||
|
||||
// Read returns the key, time range, and raw data for the next block,
|
||||
// or any error that occurred.
|
||||
Read() (key string, minTime int64, maxTime int64, data []byte, err error)
|
||||
Read() (key []byte, minTime int64, maxTime int64, data []byte, err error)
|
||||
|
||||
// Close closes the iterator.
|
||||
Close() error
|
||||
|
|
@ -1010,7 +1011,7 @@ type tsmKeyIterator struct {
|
|||
|
||||
// key is the current key lowest key across all readers that has not be fully exhausted
|
||||
// of values.
|
||||
key string
|
||||
key []byte
|
||||
typ byte
|
||||
|
||||
iterators []*BlockIterator
|
||||
|
|
@ -1032,7 +1033,7 @@ type tsmKeyIterator struct {
|
|||
}
|
||||
|
||||
type block struct {
|
||||
key string
|
||||
key []byte
|
||||
minTime, maxTime int64
|
||||
typ byte
|
||||
b []byte
|
||||
|
|
@ -1070,10 +1071,11 @@ type blocks []*block
|
|||
func (a blocks) Len() int { return len(a) }
|
||||
|
||||
func (a blocks) Less(i, j int) bool {
|
||||
if a[i].key == a[j].key {
|
||||
cmp := bytes.Compare(a[i].key, a[j].key)
|
||||
if cmp == 0 {
|
||||
return a[i].minTime < a[j].minTime
|
||||
}
|
||||
return a[i].key < a[j].key
|
||||
return cmp < 0
|
||||
}
|
||||
|
||||
func (a blocks) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
|
@ -1157,7 +1159,7 @@ func (k *tsmKeyIterator) Next() bool {
|
|||
})
|
||||
|
||||
blockKey := key
|
||||
for iter.PeekNext() == blockKey {
|
||||
for bytes.Equal(iter.PeekNext(), blockKey) {
|
||||
iter.Next()
|
||||
key, minTime, maxTime, typ, _, b, err := iter.Read()
|
||||
if err != nil {
|
||||
|
|
@ -1183,14 +1185,14 @@ func (k *tsmKeyIterator) Next() bool {
|
|||
|
||||
// Each reader could have a different key that it's currently at, need to find
|
||||
// the next smallest one to keep the sort ordering.
|
||||
var minKey string
|
||||
var minKey []byte
|
||||
var minType byte
|
||||
for _, b := range k.buf {
|
||||
// block could be nil if the iterator has been exhausted for that file
|
||||
if len(b) == 0 {
|
||||
continue
|
||||
}
|
||||
if minKey == "" || b[0].key < minKey {
|
||||
if len(minKey) == 0 || bytes.Compare(b[0].key, minKey) < 0 {
|
||||
minKey = b[0].key
|
||||
minType = b[0].typ
|
||||
}
|
||||
|
|
@ -1204,7 +1206,7 @@ func (k *tsmKeyIterator) Next() bool {
|
|||
if len(b) == 0 {
|
||||
continue
|
||||
}
|
||||
if b[0].key == k.key {
|
||||
if bytes.Equal(b[0].key, k.key) {
|
||||
k.blocks = append(k.blocks, b...)
|
||||
k.buf[i] = nil
|
||||
}
|
||||
|
|
@ -1237,16 +1239,16 @@ func (k *tsmKeyIterator) merge() {
|
|||
}
|
||||
}
|
||||
|
||||
func (k *tsmKeyIterator) Read() (string, int64, int64, []byte, error) {
|
||||
func (k *tsmKeyIterator) Read() ([]byte, int64, int64, []byte, error) {
|
||||
// See if compactions were disabled while we were running.
|
||||
select {
|
||||
case <-k.interrupt:
|
||||
return "", 0, 0, nil, errCompactionAborted
|
||||
return nil, 0, 0, nil, errCompactionAborted
|
||||
default:
|
||||
}
|
||||
|
||||
if len(k.merged) == 0 {
|
||||
return "", 0, 0, nil, k.err
|
||||
return nil, 0, 0, nil, k.err
|
||||
}
|
||||
|
||||
block := k.merged[0]
|
||||
|
|
@ -1268,7 +1270,7 @@ func (k *tsmKeyIterator) Close() error {
|
|||
type cacheKeyIterator struct {
|
||||
cache *Cache
|
||||
size int
|
||||
order []string
|
||||
order [][]byte
|
||||
|
||||
i int
|
||||
blocks [][]cacheBlock
|
||||
|
|
@ -1277,7 +1279,7 @@ type cacheKeyIterator struct {
|
|||
}
|
||||
|
||||
type cacheBlock struct {
|
||||
k string
|
||||
k []byte
|
||||
minTime, maxTime int64
|
||||
b []byte
|
||||
err error
|
||||
|
|
@ -1377,11 +1379,11 @@ func (c *cacheKeyIterator) Next() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (c *cacheKeyIterator) Read() (string, int64, int64, []byte, error) {
|
||||
func (c *cacheKeyIterator) Read() ([]byte, int64, int64, []byte, error) {
|
||||
// See if snapshot compactions were disabled while we were running.
|
||||
select {
|
||||
case <-c.interrupt:
|
||||
return "", 0, 0, nil, errCompactionAborted
|
||||
return nil, 0, 0, nil, errCompactionAborted
|
||||
default:
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ func TestCompactor_Snapshot(t *testing.T) {
|
|||
|
||||
c := tsm1.NewCache(0, "")
|
||||
for k, v := range points1 {
|
||||
if err := c.Write(k, v); err != nil {
|
||||
if err := c.Write([]byte(k), v); err != nil {
|
||||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
|
@ -73,7 +73,7 @@ func TestCompactor_Snapshot(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, p := range data {
|
||||
values, err := r.ReadAll(p.key)
|
||||
values, err := r.ReadAll([]byte(p.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading: %v", err)
|
||||
}
|
||||
|
|
@ -176,7 +176,7 @@ func TestCompactor_CompactFull(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, p := range data {
|
||||
values, err := r.ReadAll(p.key)
|
||||
values, err := r.ReadAll([]byte(p.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading: %v", err)
|
||||
}
|
||||
|
|
@ -246,7 +246,7 @@ func TestCompactor_Compact_OverlappingBlocks(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, p := range data {
|
||||
values, err := r.ReadAll(p.key)
|
||||
values, err := r.ReadAll([]byte(p.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading: %v", err)
|
||||
}
|
||||
|
|
@ -325,7 +325,7 @@ func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, p := range data {
|
||||
values, err := r.ReadAll(p.key)
|
||||
values, err := r.ReadAll([]byte(p.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading: %v", err)
|
||||
}
|
||||
|
|
@ -414,7 +414,7 @@ func TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, p := range data {
|
||||
values, err := r.ReadAll(p.key)
|
||||
values, err := r.ReadAll([]byte(p.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading: %v", err)
|
||||
}
|
||||
|
|
@ -428,7 +428,7 @@ func TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
if got, exp := len(r.Entries("cpu,host=A#!~#value")), 2; got != exp {
|
||||
if got, exp := len(r.Entries([]byte("cpu,host=A#!~#value"))), 2; got != exp {
|
||||
t.Fatalf("block count mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -450,7 +450,7 @@ func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) {
|
|||
ts := tsm1.Tombstoner{
|
||||
Path: f1,
|
||||
}
|
||||
ts.AddRange([]string{"cpu,host=A#!~#value"}, math.MinInt64, math.MaxInt64)
|
||||
ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, math.MinInt64, math.MaxInt64)
|
||||
|
||||
a3 := tsm1.NewValue(3, 1.3)
|
||||
writes = map[string][]tsm1.Value{
|
||||
|
|
@ -513,7 +513,7 @@ func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, p := range data {
|
||||
values, err := r.ReadAll(p.key)
|
||||
values, err := r.ReadAll([]byte(p.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading: %v", err)
|
||||
}
|
||||
|
|
@ -527,7 +527,7 @@ func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
if got, exp := len(r.Entries("cpu,host=A#!~#value")), 1; got != exp {
|
||||
if got, exp := len(r.Entries([]byte("cpu,host=A#!~#value"))), 1; got != exp {
|
||||
t.Fatalf("block count mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -550,7 +550,7 @@ func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) {
|
|||
Path: f1,
|
||||
}
|
||||
// a1 should remain after compaction
|
||||
ts.AddRange([]string{"cpu,host=A#!~#value"}, 2, math.MaxInt64)
|
||||
ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, 2, math.MaxInt64)
|
||||
|
||||
a3 := tsm1.NewValue(3, 1.3)
|
||||
writes = map[string][]tsm1.Value{
|
||||
|
|
@ -613,7 +613,7 @@ func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, p := range data {
|
||||
values, err := r.ReadAll(p.key)
|
||||
values, err := r.ReadAll([]byte(p.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading: %v", err)
|
||||
}
|
||||
|
|
@ -627,7 +627,7 @@ func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
if got, exp := len(r.Entries("cpu,host=A#!~#value")), 2; got != exp {
|
||||
if got, exp := len(r.Entries([]byte("cpu,host=A#!~#value"))), 2; got != exp {
|
||||
t.Fatalf("block count mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -654,8 +654,8 @@ func TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) {
|
|||
Path: f1,
|
||||
}
|
||||
// a1, a3 should remain after compaction
|
||||
ts.AddRange([]string{"cpu,host=A#!~#value"}, 2, 2)
|
||||
ts.AddRange([]string{"cpu,host=A#!~#value"}, 4, 4)
|
||||
ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, 2, 2)
|
||||
ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, 4, 4)
|
||||
|
||||
a5 := tsm1.NewValue(5, 1.5)
|
||||
writes = map[string][]tsm1.Value{
|
||||
|
|
@ -718,7 +718,7 @@ func TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, p := range data {
|
||||
values, err := r.ReadAll(p.key)
|
||||
values, err := r.ReadAll([]byte(p.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading: %v", err)
|
||||
}
|
||||
|
|
@ -732,7 +732,7 @@ func TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
if got, exp := len(r.Entries("cpu,host=A#!~#value")), 2; got != exp {
|
||||
if got, exp := len(r.Entries([]byte("cpu,host=A#!~#value"))), 2; got != exp {
|
||||
t.Fatalf("block count mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -756,7 +756,7 @@ func TestCompactor_CompactFull_MaxKeys(t *testing.T) {
|
|||
for j := 0; j < 1000; j++ {
|
||||
values = append(values, tsm1.NewValue(int64(i*1000+j), int64(1)))
|
||||
}
|
||||
if err := f1.Write("cpu,host=A#!~#value", values); err != nil {
|
||||
if err := f1.Write([]byte("cpu,host=A#!~#value"), values); err != nil {
|
||||
t.Fatalf("write tsm f1: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -773,7 +773,7 @@ func TestCompactor_CompactFull_MaxKeys(t *testing.T) {
|
|||
for j := lastTimeStamp; j < lastTimeStamp+1000; j++ {
|
||||
values = append(values, tsm1.NewValue(int64(j), int64(1)))
|
||||
}
|
||||
if err := f2.Write("cpu,host=A#!~#value", values); err != nil {
|
||||
if err := f2.Write([]byte("cpu,host=A#!~#value"), values); err != nil {
|
||||
t.Fatalf("write tsm f1: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -847,7 +847,7 @@ func TestTSMKeyIterator_Single(t *testing.T) {
|
|||
t.Fatalf("unexpected error decode: %v", err)
|
||||
}
|
||||
|
||||
if got, exp := key, "cpu,host=A#!~#value"; got != exp {
|
||||
if got, exp := string(key), "cpu,host=A#!~#value"; got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -907,7 +907,7 @@ func TestTSMKeyIterator_Duplicate(t *testing.T) {
|
|||
t.Fatalf("unexpected error decode: %v", err)
|
||||
}
|
||||
|
||||
if got, exp := key, "cpu,host=A#!~#value"; got != exp {
|
||||
if got, exp := string(key), "cpu,host=A#!~#value"; got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -936,7 +936,7 @@ func TestTSMKeyIterator_MultipleKeysDeleted(t *testing.T) {
|
|||
}
|
||||
|
||||
r1 := MustTSMReader(dir, 1, points1)
|
||||
if e := r1.Delete([]string{"cpu,host=A#!~#value"}); nil != e {
|
||||
if e := r1.Delete([][]byte{[]byte("cpu,host=A#!~#value")}); nil != e {
|
||||
t.Fatal(e)
|
||||
}
|
||||
|
||||
|
|
@ -949,7 +949,7 @@ func TestTSMKeyIterator_MultipleKeysDeleted(t *testing.T) {
|
|||
}
|
||||
|
||||
r2 := MustTSMReader(dir, 2, points2)
|
||||
r2.Delete([]string{"cpu,host=A#!~#count"})
|
||||
r2.Delete([][]byte{[]byte("cpu,host=A#!~#count")})
|
||||
|
||||
iter, err := tsm1.NewTSMKeyIterator(1, false, nil, r1, r2)
|
||||
if err != nil {
|
||||
|
|
@ -975,7 +975,7 @@ func TestTSMKeyIterator_MultipleKeysDeleted(t *testing.T) {
|
|||
t.Fatalf("unexpected error decode: %v", err)
|
||||
}
|
||||
|
||||
if got, exp := key, data[0].key; got != exp {
|
||||
if got, exp := string(key), data[0].key; got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -1038,7 +1038,7 @@ func TestCacheKeyIterator_Single(t *testing.T) {
|
|||
c := tsm1.NewCache(0, "")
|
||||
|
||||
for k, v := range writes {
|
||||
if err := c.Write(k, v); err != nil {
|
||||
if err := c.Write([]byte(k), v); err != nil {
|
||||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
|
@ -1056,7 +1056,7 @@ func TestCacheKeyIterator_Single(t *testing.T) {
|
|||
t.Fatalf("unexpected error decode: %v", err)
|
||||
}
|
||||
|
||||
if got, exp := key, "cpu,host=A#!~#value"; got != exp {
|
||||
if got, exp := string(key), "cpu,host=A#!~#value"; got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -1086,7 +1086,7 @@ func TestCacheKeyIterator_Chunked(t *testing.T) {
|
|||
c := tsm1.NewCache(0, "")
|
||||
|
||||
for k, v := range writes {
|
||||
if err := c.Write(k, v); err != nil {
|
||||
if err := c.Write([]byte(k), v); err != nil {
|
||||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
|
@ -1105,7 +1105,7 @@ func TestCacheKeyIterator_Chunked(t *testing.T) {
|
|||
t.Fatalf("unexpected error decode: %v", err)
|
||||
}
|
||||
|
||||
if got, exp := key, "cpu,host=A#!~#value"; got != exp {
|
||||
if got, exp := string(key), "cpu,host=A#!~#value"; got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -1136,7 +1136,7 @@ func TestCacheKeyIterator_Abort(t *testing.T) {
|
|||
c := tsm1.NewCache(0, "")
|
||||
|
||||
for k, v := range writes {
|
||||
if err := c.Write(k, v); err != nil {
|
||||
if err := c.Write([]byte(k), v); err != nil {
|
||||
t.Fatalf("failed to write key foo to cache: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
|
@ -2397,7 +2397,7 @@ func MustWriteTSM(dir string, gen int, values map[string][]tsm1.Value) string {
|
|||
w, name := MustTSMWriter(dir, gen)
|
||||
|
||||
for k, v := range values {
|
||||
if err := w.Write(k, v); err != nil {
|
||||
if err := w.Write([]byte(k), v); err != nil {
|
||||
panic(fmt.Sprintf("write TSM value: %v", err))
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -534,13 +534,13 @@ func (e *Engine) LoadMetadataIndex(shardID uint64, index tsdb.Index) error {
|
|||
}
|
||||
|
||||
// load metadata from the Cache
|
||||
if err := e.Cache.ApplyEntryFn(func(key string, entry *entry) error {
|
||||
if err := e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error {
|
||||
fieldType, err := entry.values.InfluxQLType()
|
||||
if err != nil {
|
||||
e.logger.Info(fmt.Sprintf("error getting the data type of values for key %s: %s", key, err.Error()))
|
||||
}
|
||||
|
||||
if err := e.addToIndexFromKey([]byte(key), fieldType); err != nil {
|
||||
if err := e.addToIndexFromKey(key, fieldType); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
|
@ -938,7 +938,7 @@ func (e *Engine) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
|
|||
defer e.enableLevelCompactions(true)
|
||||
|
||||
tempKeys := seriesKeys[:]
|
||||
deleteKeys := make([]string, 0, len(seriesKeys))
|
||||
deleteKeys := make([][]byte, 0, len(seriesKeys))
|
||||
// go through the keys in the file store
|
||||
if err := e.FileStore.WalkKeys(func(k []byte, _ byte) error {
|
||||
seriesKey, _ := SeriesAndFieldFromCompositeKey(k)
|
||||
|
|
@ -951,7 +951,7 @@ func (e *Engine) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
|
|||
|
||||
// Keys match, add the full series key to delete.
|
||||
if len(tempKeys) > 0 && bytes.Equal(tempKeys[0], seriesKey) {
|
||||
deleteKeys = append(deleteKeys, string(k))
|
||||
deleteKeys = append(deleteKeys, k)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -967,7 +967,7 @@ func (e *Engine) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
|
|||
walKeys := deleteKeys[:0]
|
||||
|
||||
// ApplySerialEntryFn cannot return an error in this invocation.
|
||||
_ = e.Cache.ApplyEntryFn(func(k string, _ *entry) error {
|
||||
_ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
|
||||
seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))
|
||||
|
||||
// Cache does not walk keys in sorted order, so search the sorted
|
||||
|
|
@ -1277,14 +1277,14 @@ func (e *Engine) onFileStoreReplace(newFiles []TSMFile) {
|
|||
}
|
||||
|
||||
// load metadata from the Cache
|
||||
e.Cache.ApplyEntryFn(func(key string, entry *entry) error {
|
||||
e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error {
|
||||
fieldType, err := entry.values.InfluxQLType()
|
||||
if err != nil {
|
||||
e.logger.Error(fmt.Sprintf("refresh index (3): %v", err))
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := e.addToIndexFromKey([]byte(key), fieldType); err != nil {
|
||||
if err := e.addToIndexFromKey(key, fieldType); err != nil {
|
||||
e.logger.Error(fmt.Sprintf("refresh index (4): %v", err))
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1532,7 +1532,7 @@ func (e *Engine) cleanupTempTSMFiles() error {
|
|||
}
|
||||
|
||||
// KeyCursor returns a KeyCursor for the given key starting at time t.
|
||||
func (e *Engine) KeyCursor(key string, t int64, ascending bool) *KeyCursor {
|
||||
func (e *Engine) KeyCursor(key []byte, t int64, ascending bool) *KeyCursor {
|
||||
return e.FileStore.KeyCursor(key, t, ascending)
|
||||
}
|
||||
|
||||
|
|
@ -2025,36 +2025,41 @@ func (e *Engine) buildCursor(measurement, seriesKey string, ref *influxql.VarRef
|
|||
|
||||
// buildFloatCursor creates a cursor for a float field.
|
||||
func (e *Engine) buildFloatCursor(measurement, seriesKey, field string, opt influxql.IteratorOptions) floatCursor {
|
||||
cacheValues := e.Cache.Values(SeriesFieldKey(seriesKey, field))
|
||||
keyCursor := e.KeyCursor(SeriesFieldKey(seriesKey, field), opt.SeekTime(), opt.Ascending)
|
||||
key := SeriesFieldKeyBytes(seriesKey, field)
|
||||
cacheValues := e.Cache.Values(key)
|
||||
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
|
||||
return newFloatCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
|
||||
}
|
||||
|
||||
// buildIntegerCursor creates a cursor for an integer field.
|
||||
func (e *Engine) buildIntegerCursor(measurement, seriesKey, field string, opt influxql.IteratorOptions) integerCursor {
|
||||
cacheValues := e.Cache.Values(SeriesFieldKey(seriesKey, field))
|
||||
keyCursor := e.KeyCursor(SeriesFieldKey(seriesKey, field), opt.SeekTime(), opt.Ascending)
|
||||
key := SeriesFieldKeyBytes(seriesKey, field)
|
||||
cacheValues := e.Cache.Values(key)
|
||||
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
|
||||
return newIntegerCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
|
||||
}
|
||||
|
||||
// buildUnsignedCursor creates a cursor for an unsigned field.
|
||||
func (e *Engine) buildUnsignedCursor(measurement, seriesKey, field string, opt influxql.IteratorOptions) unsignedCursor {
|
||||
cacheValues := e.Cache.Values(SeriesFieldKey(seriesKey, field))
|
||||
keyCursor := e.KeyCursor(SeriesFieldKey(seriesKey, field), opt.SeekTime(), opt.Ascending)
|
||||
key := SeriesFieldKeyBytes(seriesKey, field)
|
||||
cacheValues := e.Cache.Values(key)
|
||||
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
|
||||
return newUnsignedCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
|
||||
}
|
||||
|
||||
// buildStringCursor creates a cursor for a string field.
|
||||
func (e *Engine) buildStringCursor(measurement, seriesKey, field string, opt influxql.IteratorOptions) stringCursor {
|
||||
cacheValues := e.Cache.Values(SeriesFieldKey(seriesKey, field))
|
||||
keyCursor := e.KeyCursor(SeriesFieldKey(seriesKey, field), opt.SeekTime(), opt.Ascending)
|
||||
key := SeriesFieldKeyBytes(seriesKey, field)
|
||||
cacheValues := e.Cache.Values(key)
|
||||
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
|
||||
return newStringCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
|
||||
}
|
||||
|
||||
// buildBooleanCursor creates a cursor for a boolean field.
|
||||
func (e *Engine) buildBooleanCursor(measurement, seriesKey, field string, opt influxql.IteratorOptions) booleanCursor {
|
||||
cacheValues := e.Cache.Values(SeriesFieldKey(seriesKey, field))
|
||||
keyCursor := e.KeyCursor(SeriesFieldKey(seriesKey, field), opt.SeekTime(), opt.Ascending)
|
||||
key := SeriesFieldKeyBytes(seriesKey, field)
|
||||
cacheValues := e.Cache.Values(key)
|
||||
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
|
||||
return newBooleanCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
|
||||
}
|
||||
|
||||
|
|
@ -2067,6 +2072,14 @@ func SeriesFieldKey(seriesKey, field string) string {
|
|||
return seriesKey + keyFieldSeparator + field
|
||||
}
|
||||
|
||||
func SeriesFieldKeyBytes(seriesKey, field string) []byte {
|
||||
b := make([]byte, len(seriesKey)+len(keyFieldSeparator)+len(field))
|
||||
i := copy(b[:], seriesKey)
|
||||
i += copy(b[i:], keyFieldSeparatorBytes)
|
||||
copy(b[i:], field)
|
||||
return b
|
||||
}
|
||||
|
||||
func tsmFieldTypeToInfluxQLDataType(typ byte) (influxql.DataType, error) {
|
||||
switch typ {
|
||||
case BlockFloat64:
|
||||
|
|
|
|||
|
|
@ -136,11 +136,11 @@ func TestEngine_DeleteWALLoadMetadata(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if exp, got := 0, len(e.Cache.Values(tsm1.SeriesFieldKey("cpu,host=A", "value"))); exp != got {
|
||||
if exp, got := 0, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=A", "value"))); exp != got {
|
||||
t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp)
|
||||
}
|
||||
|
||||
if exp, got := 1, len(e.Cache.Values(tsm1.SeriesFieldKey("cpu,host=B", "value"))); exp != got {
|
||||
if exp, got := 1, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=B", "value"))); exp != got {
|
||||
t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ type TSMFile interface {
|
|||
Path() string
|
||||
|
||||
// Read returns all the values in the block where time t resides.
|
||||
Read(key string, t int64) ([]Value, error)
|
||||
Read(key []byte, t int64) ([]Value, error)
|
||||
|
||||
// ReadAt returns all the values in the block identified by entry.
|
||||
ReadAt(entry *IndexEntry, values []Value) ([]Value, error)
|
||||
|
|
@ -36,25 +36,25 @@ type TSMFile interface {
|
|||
ReadBooleanBlockAt(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error)
|
||||
|
||||
// Entries returns the index entries for all blocks for the given key.
|
||||
Entries(key string) []IndexEntry
|
||||
ReadEntries(key string, entries *[]IndexEntry)
|
||||
Entries(key []byte) []IndexEntry
|
||||
ReadEntries(key []byte, entries *[]IndexEntry)
|
||||
|
||||
// Returns true if the TSMFile may contain a value with the specified
|
||||
// key and time.
|
||||
ContainsValue(key string, t int64) bool
|
||||
ContainsValue(key []byte, t int64) bool
|
||||
|
||||
// Contains returns true if the file contains any values for the given
|
||||
// key.
|
||||
Contains(key string) bool
|
||||
Contains(key []byte) bool
|
||||
|
||||
// TimeRange returns the min and max time across all keys in the file.
|
||||
TimeRange() (int64, int64)
|
||||
|
||||
// TombstoneRange returns ranges of time that are deleted for the given key.
|
||||
TombstoneRange(key string) []TimeRange
|
||||
TombstoneRange(key []byte) []TimeRange
|
||||
|
||||
// KeyRange returns the min and max keys in the file.
|
||||
KeyRange() (string, string)
|
||||
KeyRange() ([]byte, []byte)
|
||||
|
||||
// KeyCount returns the number of distinct keys in the file.
|
||||
KeyCount() int
|
||||
|
|
@ -65,13 +65,13 @@ type TSMFile interface {
|
|||
// Type returns the block type of the values stored for the key. Returns one of
|
||||
// BlockFloat64, BlockInt64, BlockBoolean, BlockString. If key does not exist,
|
||||
// an error is returned.
|
||||
Type(key string) (byte, error)
|
||||
Type(key []byte) (byte, error)
|
||||
|
||||
// Delete removes the keys from the set of keys available in this file.
|
||||
Delete(keys []string) error
|
||||
Delete(keys [][]byte) error
|
||||
|
||||
// DeleteRange removes the values for keys between timestamps min and max.
|
||||
DeleteRange(keys []string, min, max int64) error
|
||||
DeleteRange(keys [][]byte, min, max int64) error
|
||||
|
||||
// HasTombstones returns true if file contains values that have been deleted.
|
||||
HasTombstones() bool
|
||||
|
|
@ -149,7 +149,7 @@ type FileStat struct {
|
|||
Size uint32
|
||||
LastModified int64
|
||||
MinTime, MaxTime int64
|
||||
MinKey, MaxKey string
|
||||
MinKey, MaxKey []byte
|
||||
}
|
||||
|
||||
// OverlapsTimeRange returns true if the time range of the file intersect min and max.
|
||||
|
|
@ -158,13 +158,13 @@ func (f FileStat) OverlapsTimeRange(min, max int64) bool {
|
|||
}
|
||||
|
||||
// OverlapsKeyRange returns true if the min and max keys of the file overlap the arguments min and max.
|
||||
func (f FileStat) OverlapsKeyRange(min, max string) bool {
|
||||
return min != "" && max != "" && f.MinKey <= max && f.MaxKey >= min
|
||||
func (f FileStat) OverlapsKeyRange(min, max []byte) bool {
|
||||
return len(min) != 0 && len(max) != 0 && bytes.Compare(f.MinKey, max) <= 0 && bytes.Compare(f.MaxKey, min) >= 0
|
||||
}
|
||||
|
||||
// ContainsKey returns true if the min and max keys of the file overlap the arguments min and max.
|
||||
func (f FileStat) ContainsKey(key string) bool {
|
||||
return f.MinKey >= key || key <= f.MaxKey
|
||||
func (f FileStat) ContainsKey(key []byte) bool {
|
||||
return bytes.Compare(f.MinKey, key) >= 0 || bytes.Compare(key, f.MaxKey) <= 0
|
||||
}
|
||||
|
||||
// NewFileStore returns a new instance of FileStore based on the given directory.
|
||||
|
|
@ -302,7 +302,7 @@ func (f *FileStore) Keys() map[string]byte {
|
|||
}
|
||||
|
||||
// Type returns the type of values store at the block for key.
|
||||
func (f *FileStore) Type(key string) (byte, error) {
|
||||
func (f *FileStore) Type(key []byte) (byte, error) {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
|
||||
|
|
@ -315,12 +315,12 @@ func (f *FileStore) Type(key string) (byte, error) {
|
|||
}
|
||||
|
||||
// Delete removes the keys from the set of keys available in this file.
|
||||
func (f *FileStore) Delete(keys []string) error {
|
||||
func (f *FileStore) Delete(keys [][]byte) error {
|
||||
return f.DeleteRange(keys, math.MinInt64, math.MaxInt64)
|
||||
}
|
||||
|
||||
// DeleteRange removes the values for keys between timestamps min and max.
|
||||
func (f *FileStore) DeleteRange(keys []string, min, max int64) error {
|
||||
func (f *FileStore) DeleteRange(keys [][]byte, min, max int64) error {
|
||||
if err := f.walkFiles(func(tsm TSMFile) error {
|
||||
return tsm.DeleteRange(keys, min, max)
|
||||
}); err != nil {
|
||||
|
|
@ -452,7 +452,7 @@ func (f *FileStore) DiskSizeBytes() int64 {
|
|||
|
||||
// Read returns the slice of values for the given key and the given timestamp,
|
||||
// if any file matches those constraints.
|
||||
func (f *FileStore) Read(key string, t int64) ([]Value, error) {
|
||||
func (f *FileStore) Read(key []byte, t int64) ([]Value, error) {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
|
||||
|
|
@ -476,7 +476,7 @@ func (f *FileStore) Read(key string, t int64) ([]Value, error) {
|
|||
}
|
||||
|
||||
// KeyCursor returns a KeyCursor for key and t across the files in the FileStore.
|
||||
func (f *FileStore) KeyCursor(key string, t int64, ascending bool) *KeyCursor {
|
||||
func (f *FileStore) KeyCursor(key []byte, t int64, ascending bool) *KeyCursor {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
return newKeyCursor(f, key, t, ascending)
|
||||
|
|
@ -723,7 +723,7 @@ func (f *FileStore) walkFiles(fn func(f TSMFile) error) error {
|
|||
// locations returns the files and index blocks for a key and time. ascending indicates
|
||||
// whether the key will be scan in ascending time order or descenging time order.
|
||||
// This function assumes the read-lock has been taken.
|
||||
func (f *FileStore) locations(key string, t int64, ascending bool) []*location {
|
||||
func (f *FileStore) locations(key []byte, t int64, ascending bool) []*location {
|
||||
var entries []IndexEntry
|
||||
locations := make([]*location, 0, len(f.files))
|
||||
for _, fd := range f.files {
|
||||
|
|
@ -858,7 +858,7 @@ func ParseTSMFileName(name string) (int, int, error) {
|
|||
|
||||
// KeyCursor allows iteration through keys in a set of files within a FileStore.
|
||||
type KeyCursor struct {
|
||||
key string
|
||||
key []byte
|
||||
fs *FileStore
|
||||
|
||||
// seeks is all the file locations that we need to return during iteration.
|
||||
|
|
@ -929,7 +929,7 @@ func (a ascLocations) Less(i, j int) bool {
|
|||
|
||||
// newKeyCursor returns a new instance of KeyCursor.
|
||||
// This function assumes the read-lock has been taken.
|
||||
func newKeyCursor(fs *FileStore, key string, t int64, ascending bool) *KeyCursor {
|
||||
func newKeyCursor(fs *FileStore, key []byte, t int64, ascending bool) *KeyCursor {
|
||||
c := &KeyCursor{
|
||||
key: key,
|
||||
fs: fs,
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ func TestFileStore_Read(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
// Search for an entry that exists in the second file
|
||||
values, err := fs.Read("cpu", 1)
|
||||
values, err := fs.Read([]byte("cpu"), 1)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
}
|
||||
|
|
@ -71,7 +71,7 @@ func TestFileStore_SeekToAsc_FromStart(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
// Search for an entry that exists in the second file
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
|
|
@ -111,7 +111,7 @@ func TestFileStore_SeekToAsc_Duplicate(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
// Search for an entry that exists in the second file
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
|
|
@ -184,7 +184,7 @@ func TestFileStore_SeekToAsc_BeforeStart(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -226,7 +226,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapFloat(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -293,7 +293,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapInteger(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.IntegerValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadIntegerBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -359,7 +359,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapUnsigned(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.UnsignedValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadUnsignedBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -425,7 +425,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapBoolean(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.BooleanValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadBooleanBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -491,7 +491,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapString(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.StringValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadStringBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -556,7 +556,7 @@ func TestFileStore_SeekToAsc_OverlapMinFloat(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
// Search for an entry that exists in the second file
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
|
|
@ -636,7 +636,7 @@ func TestFileStore_SeekToAsc_OverlapMinInteger(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.IntegerValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
// Search for an entry that exists in the second file
|
||||
values, err := c.ReadIntegerBlock(&buf)
|
||||
if err != nil {
|
||||
|
|
@ -715,7 +715,7 @@ func TestFileStore_SeekToAsc_OverlapMinUnsigned(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.UnsignedValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
// Search for an entry that exists in the second file
|
||||
values, err := c.ReadUnsignedBlock(&buf)
|
||||
if err != nil {
|
||||
|
|
@ -794,7 +794,7 @@ func TestFileStore_SeekToAsc_OverlapMinBoolean(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.BooleanValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
// Search for an entry that exists in the second file
|
||||
values, err := c.ReadBooleanBlock(&buf)
|
||||
if err != nil {
|
||||
|
|
@ -873,7 +873,7 @@ func TestFileStore_SeekToAsc_OverlapMinString(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.StringValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
// Search for an entry that exists in the second file
|
||||
values, err := c.ReadStringBlock(&buf)
|
||||
if err != nil {
|
||||
|
|
@ -951,7 +951,7 @@ func TestFileStore_SeekToAsc_Middle(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 3, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 3, true)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1007,7 +1007,7 @@ func TestFileStore_SeekToAsc_End(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 2, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 2, true)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1046,7 +1046,7 @@ func TestFileStore_SeekToDesc_FromStart(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, false)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1085,7 +1085,7 @@ func TestFileStore_SeekToDesc_Duplicate(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 2, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 2, false)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1144,7 +1144,7 @@ func TestFileStore_SeekToDesc_OverlapMaxFloat(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 5, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 5, false)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1209,7 +1209,7 @@ func TestFileStore_SeekToDesc_OverlapMaxInteger(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.IntegerValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 5, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 5, false)
|
||||
values, err := c.ReadIntegerBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1271,7 +1271,7 @@ func TestFileStore_SeekToDesc_OverlapMaxUnsigned(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.UnsignedValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 5, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 5, false)
|
||||
values, err := c.ReadUnsignedBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1334,7 +1334,7 @@ func TestFileStore_SeekToDesc_OverlapMaxBoolean(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.BooleanValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 5, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 5, false)
|
||||
values, err := c.ReadBooleanBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1397,7 +1397,7 @@ func TestFileStore_SeekToDesc_OverlapMaxString(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.StringValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 5, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 5, false)
|
||||
values, err := c.ReadStringBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1458,7 +1458,7 @@ func TestFileStore_SeekToDesc_AfterEnd(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 4, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 4, false)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1497,7 +1497,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapFloat(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 10, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 10, false)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1594,7 +1594,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapInteger(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.IntegerValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 11, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 11, false)
|
||||
values, err := c.ReadIntegerBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1671,7 +1671,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapUnsigned(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.UnsignedValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 11, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 11, false)
|
||||
values, err := c.ReadUnsignedBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1748,7 +1748,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapBoolean(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.BooleanValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 11, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 11, false)
|
||||
values, err := c.ReadBooleanBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1845,7 +1845,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapString(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.StringValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 11, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 11, false)
|
||||
values, err := c.ReadStringBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -1945,7 +1945,7 @@ func TestFileStore_SeekToDesc_Middle(t *testing.T) {
|
|||
|
||||
// Search for an entry that exists in the second file
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 3, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 3, false)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -2018,7 +2018,7 @@ func TestFileStore_SeekToDesc_End(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 2, false)
|
||||
c := fs.KeyCursor([]byte("cpu"), 2, false)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -2055,12 +2055,12 @@ func TestKeyCursor_TombstoneRange(t *testing.T) {
|
|||
|
||||
fs.Replace(nil, files)
|
||||
|
||||
if err := fs.DeleteRange([]string{"cpu"}, 1, 1); err != nil {
|
||||
if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil {
|
||||
t.Fatalf("unexpected error delete range: %v", err)
|
||||
}
|
||||
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
expValues := []int{0, 2}
|
||||
for _, v := range expValues {
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
|
|
@ -2100,12 +2100,12 @@ func TestKeyCursor_TombstoneRange_PartialFloat(t *testing.T) {
|
|||
|
||||
fs.Replace(nil, files)
|
||||
|
||||
if err := fs.DeleteRange([]string{"cpu"}, 1, 1); err != nil {
|
||||
if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil {
|
||||
t.Fatalf("unexpected error delete range: %v", err)
|
||||
}
|
||||
|
||||
buf := make([]tsm1.FloatValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadFloatBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -2144,12 +2144,12 @@ func TestKeyCursor_TombstoneRange_PartialInteger(t *testing.T) {
|
|||
|
||||
fs.Replace(nil, files)
|
||||
|
||||
if err := fs.DeleteRange([]string{"cpu"}, 1, 1); err != nil {
|
||||
if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil {
|
||||
t.Fatalf("unexpected error delete range: %v", err)
|
||||
}
|
||||
|
||||
buf := make([]tsm1.IntegerValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadIntegerBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -2188,12 +2188,12 @@ func TestKeyCursor_TombstoneRange_PartialUnsigned(t *testing.T) {
|
|||
|
||||
fs.Replace(nil, files)
|
||||
|
||||
if err := fs.DeleteRange([]string{"cpu"}, 1, 1); err != nil {
|
||||
if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil {
|
||||
t.Fatalf("unexpected error delete range: %v", err)
|
||||
}
|
||||
|
||||
buf := make([]tsm1.UnsignedValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadUnsignedBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -2232,12 +2232,12 @@ func TestKeyCursor_TombstoneRange_PartialString(t *testing.T) {
|
|||
|
||||
fs.Replace(nil, files)
|
||||
|
||||
if err := fs.DeleteRange([]string{"cpu"}, 1, 1); err != nil {
|
||||
if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil {
|
||||
t.Fatalf("unexpected error delete range: %v", err)
|
||||
}
|
||||
|
||||
buf := make([]tsm1.StringValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadStringBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -2276,12 +2276,12 @@ func TestKeyCursor_TombstoneRange_PartialBoolean(t *testing.T) {
|
|||
|
||||
fs.Replace(nil, files)
|
||||
|
||||
if err := fs.DeleteRange([]string{"cpu"}, 1, 1); err != nil {
|
||||
if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil {
|
||||
t.Fatalf("unexpected error delete range: %v", err)
|
||||
}
|
||||
|
||||
buf := make([]tsm1.BooleanValue, 1000)
|
||||
c := fs.KeyCursor("cpu", 0, true)
|
||||
c := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
values, err := c.ReadBooleanBlock(&buf)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading values: %v", err)
|
||||
|
|
@ -2403,7 +2403,7 @@ func TestFileStore_Replace(t *testing.T) {
|
|||
}
|
||||
|
||||
// Should record references to the two existing TSM files
|
||||
cur := fs.KeyCursor("cpu", 0, true)
|
||||
cur := fs.KeyCursor([]byte("cpu"), 0, true)
|
||||
|
||||
// Should move the existing files out of the way, but allow query to complete
|
||||
if err := fs.Replace(files[:2], []string{replacement}); err != nil {
|
||||
|
|
@ -2489,7 +2489,7 @@ func TestFileStore_Open_Deleted(t *testing.T) {
|
|||
t.Fatalf("file count mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if err := fs.Delete([]string{"cpu,host=server2!~#!value"}); err != nil {
|
||||
if err := fs.Delete([][]byte{[]byte("cpu,host=server2!~#!value")}); err != nil {
|
||||
fatal(t, "deleting", err)
|
||||
}
|
||||
|
||||
|
|
@ -2528,7 +2528,7 @@ func TestFileStore_Delete(t *testing.T) {
|
|||
t.Fatalf("key length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if err := fs.Delete([]string{"cpu,host=server2!~#!value"}); err != nil {
|
||||
if err := fs.Delete([][]byte{[]byte("cpu,host=server2!~#!value")}); err != nil {
|
||||
fatal(t, "deleting", err)
|
||||
}
|
||||
|
||||
|
|
@ -2633,7 +2633,7 @@ func TestFileStore_CreateSnapshot(t *testing.T) {
|
|||
fs.Replace(nil, files)
|
||||
|
||||
// Create a tombstone
|
||||
if err := fs.DeleteRange([]string{"cpu"}, 1, 1); err != nil {
|
||||
if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil {
|
||||
t.Fatalf("unexpected error delete range: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -2678,7 +2678,7 @@ func newFileDir(dir string, values ...keyValues) ([]string, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if err := w.Write(v.key, v.values); err != nil {
|
||||
if err := w.Write([]byte(v.key), v.values); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
@ -2712,7 +2712,7 @@ func newFiles(dir string, values ...keyValues) ([]string, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if err := w.Write(v.key, v.values); err != nil {
|
||||
if err := w.Write([]byte(v.key), v.values); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -10,6 +10,8 @@ import (
|
|||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/influxdata/influxdb/pkg/bytesutil"
|
||||
)
|
||||
|
||||
// ErrFileInUse is returned when attempting to remove or close a TSM file that is still being used.
|
||||
|
|
@ -42,31 +44,31 @@ type TSMReader struct {
|
|||
// blocks, their locations, sizes, min and max times.
|
||||
type TSMIndex interface {
|
||||
// Delete removes the given keys from the index.
|
||||
Delete(keys []string)
|
||||
Delete(keys [][]byte)
|
||||
|
||||
// DeleteRange removes the given keys with data between minTime and maxTime from the index.
|
||||
DeleteRange(keys []string, minTime, maxTime int64)
|
||||
DeleteRange(keys [][]byte, minTime, maxTime int64)
|
||||
|
||||
// Contains return true if the given key exists in the index.
|
||||
Contains(key string) bool
|
||||
Contains(key []byte) bool
|
||||
|
||||
// ContainsValue returns true if key and time might exist in this file. This function could
|
||||
// return true even though the actual point does not exists. For example, the key may
|
||||
// exist in this file, but not have a point exactly at time t.
|
||||
ContainsValue(key string, timestamp int64) bool
|
||||
ContainsValue(key []byte, timestamp int64) bool
|
||||
|
||||
// Entries returns all index entries for a key.
|
||||
Entries(key string) []IndexEntry
|
||||
Entries(key []byte) []IndexEntry
|
||||
|
||||
// ReadEntries reads the index entries for key into entries.
|
||||
ReadEntries(key string, entries *[]IndexEntry)
|
||||
ReadEntries(key []byte, entries *[]IndexEntry)
|
||||
|
||||
// Entry returns the index entry for the specified key and timestamp. If no entry
|
||||
// matches the key and timestamp, nil is returned.
|
||||
Entry(key string, timestamp int64) *IndexEntry
|
||||
Entry(key []byte, timestamp int64) *IndexEntry
|
||||
|
||||
// Key returns the key in the index at the given position.
|
||||
Key(index int) (string, byte, []IndexEntry)
|
||||
Key(index int) ([]byte, byte, []IndexEntry)
|
||||
|
||||
// KeyAt returns the key in the index at the given position.
|
||||
KeyAt(index int) ([]byte, byte)
|
||||
|
|
@ -78,7 +80,7 @@ type TSMIndex interface {
|
|||
OverlapsTimeRange(min, max int64) bool
|
||||
|
||||
// OverlapsKeyRange returns true if the min and max keys of the file overlap the arguments min and max.
|
||||
OverlapsKeyRange(min, max string) bool
|
||||
OverlapsKeyRange(min, max []byte) bool
|
||||
|
||||
// Size returns the size of the current index in bytes.
|
||||
Size() uint32
|
||||
|
|
@ -87,15 +89,15 @@ type TSMIndex interface {
|
|||
TimeRange() (int64, int64)
|
||||
|
||||
// TombstoneRange returns ranges of time that are deleted for the given key.
|
||||
TombstoneRange(key string) []TimeRange
|
||||
TombstoneRange(key []byte) []TimeRange
|
||||
|
||||
// KeyRange returns the min and max keys in the file.
|
||||
KeyRange() (string, string)
|
||||
KeyRange() ([]byte, []byte)
|
||||
|
||||
// Type returns the block type of the values stored for the key. Returns one of
|
||||
// BlockFloat64, BlockInt64, BlockBool, BlockString. If key does not exist,
|
||||
// an error is returned.
|
||||
Type(key string) (byte, error)
|
||||
Type(key []byte) (byte, error)
|
||||
|
||||
// UnmarshalBinary populates an index from an encoded byte slice
|
||||
// representation of an index.
|
||||
|
|
@ -113,21 +115,21 @@ type BlockIterator struct {
|
|||
// n is the total number of keys
|
||||
n int
|
||||
|
||||
key string
|
||||
key []byte
|
||||
entries []IndexEntry
|
||||
err error
|
||||
typ byte
|
||||
}
|
||||
|
||||
// PeekNext returns the next key to be iterated or an empty string.
|
||||
func (b *BlockIterator) PeekNext() string {
|
||||
func (b *BlockIterator) PeekNext() []byte {
|
||||
if len(b.entries) > 1 {
|
||||
return b.key
|
||||
} else if b.n-b.i > 1 {
|
||||
key, _ := b.r.KeyAt(b.i + 1)
|
||||
return string(key)
|
||||
return key
|
||||
}
|
||||
return ""
|
||||
return nil
|
||||
}
|
||||
|
||||
// Next returns true if there are more blocks to iterate through.
|
||||
|
|
@ -156,13 +158,13 @@ func (b *BlockIterator) Next() bool {
|
|||
}
|
||||
|
||||
// Read reads information about the next block to be iterated.
|
||||
func (b *BlockIterator) Read() (key string, minTime int64, maxTime int64, typ byte, checksum uint32, buf []byte, err error) {
|
||||
func (b *BlockIterator) Read() (key []byte, minTime int64, maxTime int64, typ byte, checksum uint32, buf []byte, err error) {
|
||||
if b.err != nil {
|
||||
return "", 0, 0, 0, 0, nil, b.err
|
||||
return nil, 0, 0, 0, 0, nil, b.err
|
||||
}
|
||||
checksum, buf, err = b.r.ReadBytes(&b.entries[0], nil)
|
||||
if err != nil {
|
||||
return "", 0, 0, 0, 0, nil, err
|
||||
return nil, 0, 0, 0, 0, nil, err
|
||||
}
|
||||
return b.key, b.entries[0].MinTime, b.entries[0].MaxTime, b.typ, checksum, buf, err
|
||||
}
|
||||
|
|
@ -171,8 +173,8 @@ func (b *BlockIterator) Read() (key string, minTime int64, maxTime int64, typ by
|
|||
// TSM file.
|
||||
type blockAccessor interface {
|
||||
init() (*indirectIndex, error)
|
||||
read(key string, timestamp int64) ([]Value, error)
|
||||
readAll(key string) ([]Value, error)
|
||||
read(key []byte, timestamp int64) ([]Value, error)
|
||||
readAll(key []byte) ([]Value, error)
|
||||
readBlock(entry *IndexEntry, values []Value) ([]Value, error)
|
||||
readFloatBlock(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error)
|
||||
readIntegerBlock(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error)
|
||||
|
|
@ -216,7 +218,7 @@ func NewTSMReader(f *os.File) (*TSMReader, error) {
|
|||
|
||||
func (t *TSMReader) applyTombstones() error {
|
||||
var cur, prev Tombstone
|
||||
batch := make([]string, 0, 4096)
|
||||
batch := make([][]byte, 0, 4096)
|
||||
|
||||
if err := t.tombstoner.Walk(func(ts Tombstone) error {
|
||||
cur = ts
|
||||
|
|
@ -253,7 +255,7 @@ func (t *TSMReader) Path() string {
|
|||
}
|
||||
|
||||
// Key returns the key and the underlying entry at the numeric index.
|
||||
func (t *TSMReader) Key(index int) (string, byte, []IndexEntry) {
|
||||
func (t *TSMReader) Key(index int) ([]byte, byte, []IndexEntry) {
|
||||
return t.index.Key(index)
|
||||
}
|
||||
|
||||
|
|
@ -311,7 +313,7 @@ func (t *TSMReader) ReadBooleanBlockAt(entry *IndexEntry, vals *[]BooleanValue)
|
|||
}
|
||||
|
||||
// Read returns the values corresponding to the block at the given key and timestamp.
|
||||
func (t *TSMReader) Read(key string, timestamp int64) ([]Value, error) {
|
||||
func (t *TSMReader) Read(key []byte, timestamp int64) ([]Value, error) {
|
||||
t.mu.RLock()
|
||||
v, err := t.accessor.read(key, timestamp)
|
||||
t.mu.RUnlock()
|
||||
|
|
@ -319,7 +321,7 @@ func (t *TSMReader) Read(key string, timestamp int64) ([]Value, error) {
|
|||
}
|
||||
|
||||
// ReadAll returns all values for a key in all blocks.
|
||||
func (t *TSMReader) ReadAll(key string) ([]Value, error) {
|
||||
func (t *TSMReader) ReadAll(key []byte) ([]Value, error) {
|
||||
t.mu.RLock()
|
||||
v, err := t.accessor.readAll(key)
|
||||
t.mu.RUnlock()
|
||||
|
|
@ -334,7 +336,7 @@ func (t *TSMReader) ReadBytes(e *IndexEntry, b []byte) (uint32, []byte, error) {
|
|||
}
|
||||
|
||||
// Type returns the type of values stored at the given key.
|
||||
func (t *TSMReader) Type(key string) (byte, error) {
|
||||
func (t *TSMReader) Type(key []byte) (byte, error) {
|
||||
return t.index.Type(key)
|
||||
}
|
||||
|
||||
|
|
@ -407,20 +409,20 @@ func (t *TSMReader) remove() error {
|
|||
}
|
||||
|
||||
// Contains returns whether the given key is present in the index.
|
||||
func (t *TSMReader) Contains(key string) bool {
|
||||
func (t *TSMReader) Contains(key []byte) bool {
|
||||
return t.index.Contains(key)
|
||||
}
|
||||
|
||||
// ContainsValue returns true if key and time might exists in this file. This function could
|
||||
// return true even though the actual point does not exist. For example, the key may
|
||||
// exist in this file, but not have a point exactly at time t.
|
||||
func (t *TSMReader) ContainsValue(key string, ts int64) bool {
|
||||
func (t *TSMReader) ContainsValue(key []byte, ts int64) bool {
|
||||
return t.index.ContainsValue(key, ts)
|
||||
}
|
||||
|
||||
// DeleteRange removes the given points for keys between minTime and maxTime. The series
|
||||
// keys passed in must be sorted.
|
||||
func (t *TSMReader) DeleteRange(keys []string, minTime, maxTime int64) error {
|
||||
func (t *TSMReader) DeleteRange(keys [][]byte, minTime, maxTime int64) error {
|
||||
if len(keys) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -445,7 +447,7 @@ func (t *TSMReader) DeleteRange(keys []string, minTime, maxTime int64) error {
|
|||
}
|
||||
|
||||
// Delete deletes blocks indicated by keys.
|
||||
func (t *TSMReader) Delete(keys []string) error {
|
||||
func (t *TSMReader) Delete(keys [][]byte) error {
|
||||
if err := t.tombstoner.Add(keys); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -460,7 +462,7 @@ func (t *TSMReader) TimeRange() (int64, int64) {
|
|||
}
|
||||
|
||||
// KeyRange returns the min and max key across all keys in the file.
|
||||
func (t *TSMReader) KeyRange() (string, string) {
|
||||
func (t *TSMReader) KeyRange() ([]byte, []byte) {
|
||||
return t.index.KeyRange()
|
||||
}
|
||||
|
||||
|
|
@ -470,12 +472,12 @@ func (t *TSMReader) KeyCount() int {
|
|||
}
|
||||
|
||||
// Entries returns all index entries for key.
|
||||
func (t *TSMReader) Entries(key string) []IndexEntry {
|
||||
func (t *TSMReader) Entries(key []byte) []IndexEntry {
|
||||
return t.index.Entries(key)
|
||||
}
|
||||
|
||||
// ReadEntries reads the index entries for key into entries.
|
||||
func (t *TSMReader) ReadEntries(key string, entries *[]IndexEntry) {
|
||||
func (t *TSMReader) ReadEntries(key []byte, entries *[]IndexEntry) {
|
||||
t.index.ReadEntries(key, entries)
|
||||
}
|
||||
|
||||
|
|
@ -522,7 +524,7 @@ func (t *TSMReader) TombstoneFiles() []FileStat {
|
|||
}
|
||||
|
||||
// TombstoneRange returns ranges of time that are deleted for the given key.
|
||||
func (t *TSMReader) TombstoneRange(key string) []TimeRange {
|
||||
func (t *TSMReader) TombstoneRange(key []byte) []TimeRange {
|
||||
t.mu.RLock()
|
||||
tr := t.index.TombstoneRange(key)
|
||||
t.mu.RUnlock()
|
||||
|
|
@ -600,7 +602,7 @@ type indirectIndex struct {
|
|||
|
||||
// minKey, maxKey are the minium and maximum (lexicographically sorted) contained in the
|
||||
// file
|
||||
minKey, maxKey string
|
||||
minKey, maxKey []byte
|
||||
|
||||
// minTime, maxTime are the minimum and maximum times contained in the file across all
|
||||
// series.
|
||||
|
|
@ -664,13 +666,11 @@ func (d *indirectIndex) search(key []byte) int {
|
|||
}
|
||||
|
||||
// Entries returns all index entries for a key.
|
||||
func (d *indirectIndex) Entries(key string) []IndexEntry {
|
||||
func (d *indirectIndex) Entries(key []byte) []IndexEntry {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
kb := []byte(key)
|
||||
|
||||
ofs := d.search(kb)
|
||||
ofs := d.search(key)
|
||||
if ofs < len(d.b) {
|
||||
n, k, err := readKey(d.b[ofs:])
|
||||
if err != nil {
|
||||
|
|
@ -680,7 +680,7 @@ func (d *indirectIndex) Entries(key string) []IndexEntry {
|
|||
// The search may have returned an i == 0 which could indicated that the value
|
||||
// searched should be inserted at position 0. Make sure the key in the index
|
||||
// matches the search value.
|
||||
if !bytes.Equal(kb, k) {
|
||||
if !bytes.Equal(key, k) {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -698,13 +698,13 @@ func (d *indirectIndex) Entries(key string) []IndexEntry {
|
|||
}
|
||||
|
||||
// ReadEntries returns all index entries for a key.
|
||||
func (d *indirectIndex) ReadEntries(key string, entries *[]IndexEntry) {
|
||||
func (d *indirectIndex) ReadEntries(key []byte, entries *[]IndexEntry) {
|
||||
*entries = d.Entries(key)
|
||||
}
|
||||
|
||||
// Entry returns the index entry for the specified key and timestamp. If no entry
|
||||
// matches the key an timestamp, nil is returned.
|
||||
func (d *indirectIndex) Entry(key string, timestamp int64) *IndexEntry {
|
||||
func (d *indirectIndex) Entry(key []byte, timestamp int64) *IndexEntry {
|
||||
entries := d.Entries(key)
|
||||
for _, entry := range entries {
|
||||
if entry.Contains(timestamp) {
|
||||
|
|
@ -715,25 +715,25 @@ func (d *indirectIndex) Entry(key string, timestamp int64) *IndexEntry {
|
|||
}
|
||||
|
||||
// Key returns the key in the index at the given position.
|
||||
func (d *indirectIndex) Key(idx int) (string, byte, []IndexEntry) {
|
||||
func (d *indirectIndex) Key(idx int) ([]byte, byte, []IndexEntry) {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
if idx < 0 || idx >= len(d.offsets) {
|
||||
return "", 0, nil
|
||||
return nil, 0, nil
|
||||
}
|
||||
n, key, err := readKey(d.b[d.offsets[idx]:])
|
||||
if err != nil {
|
||||
return "", 0, nil
|
||||
return nil, 0, nil
|
||||
}
|
||||
|
||||
typ := d.b[int(d.offsets[idx])+n]
|
||||
|
||||
var entries indexEntries
|
||||
if _, err := readEntries(d.b[int(d.offsets[idx])+n:], &entries); err != nil {
|
||||
return "", 0, nil
|
||||
return nil, 0, nil
|
||||
}
|
||||
return string(key), typ, entries.entries
|
||||
return key, typ, entries.entries
|
||||
}
|
||||
|
||||
// KeyAt returns the key in the index at the given position.
|
||||
|
|
@ -759,13 +759,13 @@ func (d *indirectIndex) KeyCount() int {
|
|||
}
|
||||
|
||||
// Delete removes the given keys from the index.
|
||||
func (d *indirectIndex) Delete(keys []string) {
|
||||
func (d *indirectIndex) Delete(keys [][]byte) {
|
||||
if len(keys) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if !sort.StringsAreSorted(keys) {
|
||||
sort.Strings(keys)
|
||||
if !bytesutil.IsSorted(keys) {
|
||||
bytesutil.Sort(keys)
|
||||
}
|
||||
|
||||
d.mu.Lock()
|
||||
|
|
@ -777,11 +777,11 @@ func (d *indirectIndex) Delete(keys []string) {
|
|||
for _, offset := range d.offsets {
|
||||
_, indexKey, _ := readKey(d.b[offset:])
|
||||
|
||||
for len(keys) > 0 && keys[0] < string(indexKey) {
|
||||
for len(keys) > 0 && bytes.Compare(keys[0], indexKey) < 0 {
|
||||
keys = keys[1:]
|
||||
}
|
||||
|
||||
if len(keys) > 0 && keys[0] == string(indexKey) {
|
||||
if len(keys) > 0 && bytes.Equal(keys[0], indexKey) {
|
||||
keys = keys[1:]
|
||||
continue
|
||||
}
|
||||
|
|
@ -792,7 +792,7 @@ func (d *indirectIndex) Delete(keys []string) {
|
|||
}
|
||||
|
||||
// DeleteRange removes the given keys with data between minTime and maxTime from the index.
|
||||
func (d *indirectIndex) DeleteRange(keys []string, minTime, maxTime int64) {
|
||||
func (d *indirectIndex) DeleteRange(keys [][]byte, minTime, maxTime int64) {
|
||||
// No keys, nothing to do
|
||||
if len(keys) == 0 {
|
||||
return
|
||||
|
|
@ -832,7 +832,7 @@ func (d *indirectIndex) DeleteRange(keys []string, minTime, maxTime int64) {
|
|||
continue
|
||||
}
|
||||
|
||||
tombstones[k] = append(tombstones[k], TimeRange{minTime, maxTime})
|
||||
tombstones[string(k)] = append(tombstones[string(k)], TimeRange{minTime, maxTime})
|
||||
}
|
||||
|
||||
if len(tombstones) == 0 {
|
||||
|
|
@ -847,27 +847,27 @@ func (d *indirectIndex) DeleteRange(keys []string, minTime, maxTime int64) {
|
|||
}
|
||||
|
||||
// TombstoneRange returns ranges of time that are deleted for the given key.
|
||||
func (d *indirectIndex) TombstoneRange(key string) []TimeRange {
|
||||
func (d *indirectIndex) TombstoneRange(key []byte) []TimeRange {
|
||||
d.mu.RLock()
|
||||
r := d.tombstones[key]
|
||||
r := d.tombstones[string(key)]
|
||||
d.mu.RUnlock()
|
||||
return r
|
||||
}
|
||||
|
||||
// Contains return true if the given key exists in the index.
|
||||
func (d *indirectIndex) Contains(key string) bool {
|
||||
func (d *indirectIndex) Contains(key []byte) bool {
|
||||
return len(d.Entries(key)) > 0
|
||||
}
|
||||
|
||||
// ContainsValue returns true if key and time might exist in this file.
|
||||
func (d *indirectIndex) ContainsValue(key string, timestamp int64) bool {
|
||||
func (d *indirectIndex) ContainsValue(key []byte, timestamp int64) bool {
|
||||
entry := d.Entry(key, timestamp)
|
||||
if entry == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
d.mu.RLock()
|
||||
tombstones := d.tombstones[key]
|
||||
tombstones := d.tombstones[string(key)]
|
||||
d.mu.RUnlock()
|
||||
|
||||
for _, t := range tombstones {
|
||||
|
|
@ -879,12 +879,11 @@ func (d *indirectIndex) ContainsValue(key string, timestamp int64) bool {
|
|||
}
|
||||
|
||||
// Type returns the block type of the values stored for the key.
|
||||
func (d *indirectIndex) Type(key string) (byte, error) {
|
||||
func (d *indirectIndex) Type(key []byte) (byte, error) {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
kb := []byte(key)
|
||||
ofs := d.search(kb)
|
||||
ofs := d.search(key)
|
||||
if ofs < len(d.b) {
|
||||
n, _, err := readKey(d.b[ofs:])
|
||||
if err != nil {
|
||||
|
|
@ -894,7 +893,7 @@ func (d *indirectIndex) Type(key string) (byte, error) {
|
|||
ofs += n
|
||||
return d.b[ofs], nil
|
||||
}
|
||||
return 0, fmt.Errorf("key does not exist: %v", key)
|
||||
return 0, fmt.Errorf("key does not exist: %s", key)
|
||||
}
|
||||
|
||||
// OverlapsTimeRange returns true if the time range of the file intersect min and max.
|
||||
|
|
@ -903,12 +902,12 @@ func (d *indirectIndex) OverlapsTimeRange(min, max int64) bool {
|
|||
}
|
||||
|
||||
// OverlapsKeyRange returns true if the min and max keys of the file overlap the arguments min and max.
|
||||
func (d *indirectIndex) OverlapsKeyRange(min, max string) bool {
|
||||
return d.minKey <= max && d.maxKey >= min
|
||||
func (d *indirectIndex) OverlapsKeyRange(min, max []byte) bool {
|
||||
return bytes.Compare(d.minKey, max) <= 0 && bytes.Compare(d.maxKey, min) >= 0
|
||||
}
|
||||
|
||||
// KeyRange returns the min and max keys in the index.
|
||||
func (d *indirectIndex) KeyRange() (string, string) {
|
||||
func (d *indirectIndex) KeyRange() ([]byte, []byte) {
|
||||
return d.minKey, d.maxKey
|
||||
}
|
||||
|
||||
|
|
@ -992,14 +991,14 @@ func (d *indirectIndex) UnmarshalBinary(b []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.minKey = string(key)
|
||||
d.minKey = key
|
||||
|
||||
lastOfs := d.offsets[len(d.offsets)-1]
|
||||
_, key, err = readKey(b[lastOfs:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.maxKey = string(key)
|
||||
d.maxKey = key
|
||||
|
||||
d.minTime = minTime
|
||||
d.maxTime = maxTime
|
||||
|
|
@ -1105,7 +1104,7 @@ func (m *mmapAccessor) rename(path string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *mmapAccessor) read(key string, timestamp int64) ([]Value, error) {
|
||||
func (m *mmapAccessor) read(key []byte, timestamp int64) ([]Value, error) {
|
||||
entry := m.index.Entry(key, timestamp)
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
|
|
@ -1234,7 +1233,7 @@ func (m *mmapAccessor) readBytes(entry *IndexEntry, b []byte) (uint32, []byte, e
|
|||
}
|
||||
|
||||
// readAll returns all values for a key in all blocks.
|
||||
func (m *mmapAccessor) readAll(key string) ([]Value, error) {
|
||||
func (m *mmapAccessor) readAll(key []byte) ([]Value, error) {
|
||||
blocks := m.index.Entries(key)
|
||||
if len(blocks) == 0 {
|
||||
return nil, nil
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ func TestTSMReader_Type(t *testing.T) {
|
|||
}
|
||||
|
||||
values := []tsm1.Value{tsm1.NewValue(0, int64(1))}
|
||||
if err := w.Write("cpu", values); err != nil {
|
||||
if err := w.Write([]byte("cpu"), values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
|
||||
}
|
||||
|
|
@ -43,7 +43,7 @@ func TestTSMReader_Type(t *testing.T) {
|
|||
t.Fatalf("unexpected error created reader: %v", err)
|
||||
}
|
||||
|
||||
typ, err := r.Type("cpu")
|
||||
typ, err := r.Type([]byte("cpu"))
|
||||
if err != nil {
|
||||
fatal(t, "reading type", err)
|
||||
}
|
||||
|
|
@ -86,7 +86,7 @@ func TestTSMReader_MMAP_ReadAll(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -112,7 +112,7 @@ func TestTSMReader_MMAP_ReadAll(t *testing.T) {
|
|||
|
||||
var count int
|
||||
for _, d := range data {
|
||||
readValues, err := r.ReadAll(d.key)
|
||||
readValues, err := r.ReadAll([]byte(d.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -166,7 +166,7 @@ func TestTSMReader_MMAP_Read(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -192,7 +192,7 @@ func TestTSMReader_MMAP_Read(t *testing.T) {
|
|||
|
||||
var count int
|
||||
for _, d := range data {
|
||||
readValues, err := r.Read(d.key, d.values[0].UnixNano())
|
||||
readValues, err := r.Read([]byte(d.key), d.values[0].UnixNano())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -247,7 +247,7 @@ func TestTSMReader_MMAP_Keys(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -273,7 +273,7 @@ func TestTSMReader_MMAP_Keys(t *testing.T) {
|
|||
|
||||
var count int
|
||||
for _, d := range data {
|
||||
readValues, err := r.Read(d.key, d.values[0].UnixNano())
|
||||
readValues, err := r.Read([]byte(d.key), d.values[0].UnixNano())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -307,11 +307,11 @@ func TestTSMReader_MMAP_Tombstone(t *testing.T) {
|
|||
}
|
||||
|
||||
values := []tsm1.Value{tsm1.NewValue(0, 1.0)}
|
||||
if err := w.Write("cpu", values); err != nil {
|
||||
if err := w.Write([]byte("cpu"), values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Write("mem", values); err != nil {
|
||||
if err := w.Write([]byte("mem"), values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -333,7 +333,7 @@ func TestTSMReader_MMAP_Tombstone(t *testing.T) {
|
|||
t.Fatalf("unexpected error created reader: %v", err)
|
||||
}
|
||||
|
||||
if err := r.Delete([]string{"mem"}); err != nil {
|
||||
if err := r.Delete([][]byte{[]byte("mem")}); err != nil {
|
||||
t.Fatalf("unexpected error deleting: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -364,7 +364,7 @@ func TestTSMReader_MMAP_TombstoneRange(t *testing.T) {
|
|||
tsm1.NewValue(2, 2.0),
|
||||
tsm1.NewValue(3, 3.0),
|
||||
}
|
||||
if err := w.Write("cpu", expValues); err != nil {
|
||||
if err := w.Write([]byte("cpu"), expValues); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -386,20 +386,20 @@ func TestTSMReader_MMAP_TombstoneRange(t *testing.T) {
|
|||
t.Fatalf("unexpected error created reader: %v", err)
|
||||
}
|
||||
|
||||
if err := r.DeleteRange([]string{"cpu"}, 2, math.MaxInt64); err != nil {
|
||||
if err := r.DeleteRange([][]byte{[]byte("cpu")}, 2, math.MaxInt64); err != nil {
|
||||
t.Fatalf("unexpected error deleting: %v", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
if got, exp := r.ContainsValue("cpu", 1), true; got != exp {
|
||||
if got, exp := r.ContainsValue([]byte("cpu"), 1), true; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := r.ContainsValue("cpu", 3), false; got != exp {
|
||||
if got, exp := r.ContainsValue([]byte("cpu"), 3), false; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
values, err := r.ReadAll("cpu")
|
||||
values, err := r.ReadAll([]byte("cpu"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading all: %v", err)
|
||||
}
|
||||
|
|
@ -429,7 +429,7 @@ func TestTSMReader_MMAP_TombstoneOutsideTimeRange(t *testing.T) {
|
|||
tsm1.NewValue(2, 2.0),
|
||||
tsm1.NewValue(3, 3.0),
|
||||
}
|
||||
if err := w.Write("cpu", expValues); err != nil {
|
||||
if err := w.Write([]byte("cpu"), expValues); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -451,20 +451,20 @@ func TestTSMReader_MMAP_TombstoneOutsideTimeRange(t *testing.T) {
|
|||
t.Fatalf("unexpected error created reader: %v", err)
|
||||
}
|
||||
|
||||
if err := r.DeleteRange([]string{"cpu"}, 0, 0); err != nil {
|
||||
if err := r.DeleteRange([][]byte{[]byte("cpu")}, 0, 0); err != nil {
|
||||
t.Fatalf("unexpected error deleting: %v", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
if got, exp := r.ContainsValue("cpu", 1), true; got != exp {
|
||||
if got, exp := r.ContainsValue([]byte("cpu"), 1), true; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := r.ContainsValue("cpu", 2), true; got != exp {
|
||||
if got, exp := r.ContainsValue([]byte("cpu"), 2), true; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := r.ContainsValue("cpu", 3), true; got != exp {
|
||||
if got, exp := r.ContainsValue([]byte("cpu"), 3), true; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -493,7 +493,7 @@ func TestTSMReader_MMAP_TombstoneOutsideKeyRange(t *testing.T) {
|
|||
tsm1.NewValue(2, 2.0),
|
||||
tsm1.NewValue(3, 3.0),
|
||||
}
|
||||
if err := w.Write("cpu", expValues); err != nil {
|
||||
if err := w.Write([]byte("cpu"), expValues); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -515,20 +515,20 @@ func TestTSMReader_MMAP_TombstoneOutsideKeyRange(t *testing.T) {
|
|||
t.Fatalf("unexpected error created reader: %v", err)
|
||||
}
|
||||
|
||||
if err := r.DeleteRange([]string{"mem"}, 0, 3); err != nil {
|
||||
if err := r.DeleteRange([][]byte{[]byte("mem")}, 0, 3); err != nil {
|
||||
t.Fatalf("unexpected error deleting: %v", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
if got, exp := r.ContainsValue("cpu", 1), true; got != exp {
|
||||
if got, exp := r.ContainsValue([]byte("cpu"), 1), true; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := r.ContainsValue("cpu", 2), true; got != exp {
|
||||
if got, exp := r.ContainsValue([]byte("cpu"), 2), true; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := r.ContainsValue("cpu", 3), true; got != exp {
|
||||
if got, exp := r.ContainsValue([]byte("cpu"), 3), true; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -558,11 +558,11 @@ func TestTSMReader_MMAP_TombstoneOverlapKeyRange(t *testing.T) {
|
|||
tsm1.NewValue(2, 2.0),
|
||||
tsm1.NewValue(3, 3.0),
|
||||
}
|
||||
if err := w.Write("cpu,app=foo,host=server-0#!~#value", expValues); err != nil {
|
||||
if err := w.Write([]byte("cpu,app=foo,host=server-0#!~#value"), expValues); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Write("cpu,app=foo,host=server-73379#!~#value", expValues); err != nil {
|
||||
if err := w.Write([]byte("cpu,app=foo,host=server-73379#!~#value"), expValues); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -584,20 +584,20 @@ func TestTSMReader_MMAP_TombstoneOverlapKeyRange(t *testing.T) {
|
|||
t.Fatalf("unexpected error created reader: %v", err)
|
||||
}
|
||||
|
||||
if err := r.DeleteRange([]string{
|
||||
"cpu,app=foo,host=server-0#!~#value",
|
||||
"cpu,app=foo,host=server-73379#!~#value",
|
||||
"cpu,app=foo,host=server-99999#!~#value"},
|
||||
if err := r.DeleteRange([][]byte{
|
||||
[]byte("cpu,app=foo,host=server-0#!~#value"),
|
||||
[]byte("cpu,app=foo,host=server-73379#!~#value"),
|
||||
[]byte("cpu,app=foo,host=server-99999#!~#value")},
|
||||
math.MinInt64, math.MaxInt64); err != nil {
|
||||
t.Fatalf("unexpected error deleting: %v", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
if got, exp := r.Contains("cpu,app=foo,host=server-0#!~#value"), false; got != exp {
|
||||
if got, exp := r.Contains([]byte("cpu,app=foo,host=server-0#!~#value")), false; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := r.Contains("cpu,app=foo,host=server-73379#!~#value"), false; got != exp {
|
||||
if got, exp := r.Contains([]byte("cpu,app=foo,host=server-73379#!~#value")), false; got != exp {
|
||||
t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -626,7 +626,7 @@ func TestTSMReader_MMAP_TombstoneFullRange(t *testing.T) {
|
|||
tsm1.NewValue(2, 2.0),
|
||||
tsm1.NewValue(3, 3.0),
|
||||
}
|
||||
if err := w.Write("cpu", expValues); err != nil {
|
||||
if err := w.Write([]byte("cpu"), expValues); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -648,12 +648,12 @@ func TestTSMReader_MMAP_TombstoneFullRange(t *testing.T) {
|
|||
t.Fatalf("unexpected error created reader: %v", err)
|
||||
}
|
||||
|
||||
if err := r.DeleteRange([]string{"cpu"}, math.MinInt64, math.MaxInt64); err != nil {
|
||||
if err := r.DeleteRange([][]byte{[]byte("cpu")}, math.MinInt64, math.MaxInt64); err != nil {
|
||||
t.Fatalf("unexpected error deleting: %v", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
values, err := r.ReadAll("cpu")
|
||||
values, err := r.ReadAll([]byte("cpu"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading all: %v", err)
|
||||
}
|
||||
|
|
@ -681,7 +681,7 @@ func TestTSMReader_MMAP_TombstoneMultipleRanges(t *testing.T) {
|
|||
tsm1.NewValue(4, 4.0),
|
||||
tsm1.NewValue(5, 5.0),
|
||||
}
|
||||
if err := w.Write("cpu", expValues); err != nil {
|
||||
if err := w.Write([]byte("cpu"), expValues); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -704,15 +704,15 @@ func TestTSMReader_MMAP_TombstoneMultipleRanges(t *testing.T) {
|
|||
}
|
||||
defer r.Close()
|
||||
|
||||
if err := r.DeleteRange([]string{"cpu"}, 2, 2); err != nil {
|
||||
if err := r.DeleteRange([][]byte{[]byte("cpu")}, 2, 2); err != nil {
|
||||
t.Fatalf("unexpected error deleting: %v", err)
|
||||
}
|
||||
|
||||
if err := r.DeleteRange([]string{"cpu"}, 4, 4); err != nil {
|
||||
if err := r.DeleteRange([][]byte{[]byte("cpu")}, 4, 4); err != nil {
|
||||
t.Fatalf("unexpected error deleting: %v", err)
|
||||
}
|
||||
|
||||
values, err := r.ReadAll("cpu")
|
||||
values, err := r.ReadAll([]byte("cpu"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading all: %v", err)
|
||||
}
|
||||
|
|
@ -738,7 +738,7 @@ func TestTSMReader_MMAP_TombstoneOutsideRange(t *testing.T) {
|
|||
tsm1.NewValue(2, 2.0),
|
||||
tsm1.NewValue(3, 3.0),
|
||||
}
|
||||
if err := w.Write("cpu", cpuValues); err != nil {
|
||||
if err := w.Write([]byte("cpu"), cpuValues); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -747,7 +747,7 @@ func TestTSMReader_MMAP_TombstoneOutsideRange(t *testing.T) {
|
|||
tsm1.NewValue(2, 2.0),
|
||||
tsm1.NewValue(30, 3.0),
|
||||
}
|
||||
if err := w.Write("mem", memValues); err != nil {
|
||||
if err := w.Write([]byte("mem"), memValues); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -769,7 +769,7 @@ func TestTSMReader_MMAP_TombstoneOutsideRange(t *testing.T) {
|
|||
t.Fatalf("unexpected error created reader: %v", err)
|
||||
}
|
||||
|
||||
if err := r.DeleteRange([]string{"cpu", "mem"}, 5, math.MaxInt64); err != nil {
|
||||
if err := r.DeleteRange([][]byte{[]byte("cpu"), []byte("mem")}, 5, math.MaxInt64); err != nil {
|
||||
t.Fatalf("unexpected error deleting: %v", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
|
@ -778,11 +778,11 @@ func TestTSMReader_MMAP_TombstoneOutsideRange(t *testing.T) {
|
|||
t.Fatalf("key count mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := len(r.TombstoneRange("cpu")), 0; got != exp {
|
||||
if got, exp := len(r.TombstoneRange([]byte("cpu"))), 0; got != exp {
|
||||
t.Fatalf("tombstone range mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
values, err := r.ReadAll("cpu")
|
||||
values, err := r.ReadAll([]byte("cpu"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading all: %v", err)
|
||||
}
|
||||
|
|
@ -791,11 +791,11 @@ func TestTSMReader_MMAP_TombstoneOutsideRange(t *testing.T) {
|
|||
t.Fatalf("values length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := len(r.TombstoneRange("mem")), 1; got != exp {
|
||||
if got, exp := len(r.TombstoneRange([]byte("mem"))), 1; got != exp {
|
||||
t.Fatalf("tombstone range mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
values, err = r.ReadAll("mem")
|
||||
values, err = r.ReadAll([]byte("mem"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading all: %v", err)
|
||||
}
|
||||
|
|
@ -818,12 +818,12 @@ func TestTSMReader_MMAP_Stats(t *testing.T) {
|
|||
}
|
||||
|
||||
values1 := []tsm1.Value{tsm1.NewValue(0, 1.0)}
|
||||
if err := w.Write("cpu", values1); err != nil {
|
||||
if err := w.Write([]byte("cpu"), values1); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
values2 := []tsm1.Value{tsm1.NewValue(1, 1.0)}
|
||||
if err := w.Write("mem", values2); err != nil {
|
||||
if err := w.Write([]byte("mem"), values2); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -847,11 +847,11 @@ func TestTSMReader_MMAP_Stats(t *testing.T) {
|
|||
defer r.Close()
|
||||
|
||||
stats := r.Stats()
|
||||
if got, exp := stats.MinKey, "cpu"; got != exp {
|
||||
if got, exp := string(stats.MinKey), "cpu"; got != exp {
|
||||
t.Fatalf("min key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := stats.MaxKey, "mem"; got != exp {
|
||||
if got, exp := string(stats.MaxKey), "mem"; got != exp {
|
||||
t.Fatalf("max key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -886,9 +886,9 @@ func TestTSMReader_VerifiesFileType(t *testing.T) {
|
|||
|
||||
func TestIndirectIndex_Entries(t *testing.T) {
|
||||
index := tsm1.NewIndexWriter()
|
||||
index.Add("cpu", tsm1.BlockFloat64, 0, 1, 10, 100)
|
||||
index.Add("cpu", tsm1.BlockFloat64, 2, 3, 20, 200)
|
||||
index.Add("mem", tsm1.BlockFloat64, 0, 1, 10, 100)
|
||||
index.Add([]byte("cpu"), tsm1.BlockFloat64, 0, 1, 10, 100)
|
||||
index.Add([]byte("cpu"), tsm1.BlockFloat64, 2, 3, 20, 200)
|
||||
index.Add([]byte("mem"), tsm1.BlockFloat64, 0, 1, 10, 100)
|
||||
|
||||
b, err := index.MarshalBinary()
|
||||
if err != nil {
|
||||
|
|
@ -900,8 +900,8 @@ func TestIndirectIndex_Entries(t *testing.T) {
|
|||
t.Fatalf("unexpected error unmarshaling index: %v", err)
|
||||
}
|
||||
|
||||
exp := index.Entries("cpu")
|
||||
entries := indirect.Entries("cpu")
|
||||
exp := index.Entries([]byte("cpu"))
|
||||
entries := indirect.Entries([]byte("cpu"))
|
||||
|
||||
if got, exp := len(entries), len(exp); got != exp {
|
||||
t.Fatalf("entries length mismatch: got %v, exp %v", got, exp)
|
||||
|
|
@ -928,8 +928,8 @@ func TestIndirectIndex_Entries(t *testing.T) {
|
|||
|
||||
func TestIndirectIndex_Entries_NonExistent(t *testing.T) {
|
||||
index := tsm1.NewIndexWriter()
|
||||
index.Add("cpu", tsm1.BlockFloat64, 0, 1, 10, 100)
|
||||
index.Add("cpu", tsm1.BlockFloat64, 2, 3, 20, 200)
|
||||
index.Add([]byte("cpu"), tsm1.BlockFloat64, 0, 1, 10, 100)
|
||||
index.Add([]byte("cpu"), tsm1.BlockFloat64, 2, 3, 20, 200)
|
||||
|
||||
b, err := index.MarshalBinary()
|
||||
if err != nil {
|
||||
|
|
@ -943,8 +943,8 @@ func TestIndirectIndex_Entries_NonExistent(t *testing.T) {
|
|||
|
||||
// mem has not been added to the index so we should get no entries back
|
||||
// for both
|
||||
exp := index.Entries("mem")
|
||||
entries := indirect.Entries("mem")
|
||||
exp := index.Entries([]byte("mem"))
|
||||
entries := indirect.Entries([]byte("mem"))
|
||||
|
||||
if got, exp := len(entries), len(exp); got != exp && exp != 0 {
|
||||
t.Fatalf("entries length mismatch: got %v, exp %v", got, exp)
|
||||
|
|
@ -954,7 +954,7 @@ func TestIndirectIndex_Entries_NonExistent(t *testing.T) {
|
|||
func TestIndirectIndex_MaxBlocks(t *testing.T) {
|
||||
index := tsm1.NewIndexWriter()
|
||||
for i := 0; i < 1<<16; i++ {
|
||||
index.Add("cpu", tsm1.BlockFloat64, 0, 1, 10, 20)
|
||||
index.Add([]byte("cpu"), tsm1.BlockFloat64, 0, 1, 10, 20)
|
||||
}
|
||||
|
||||
if _, err := index.MarshalBinary(); err == nil {
|
||||
|
|
@ -966,7 +966,7 @@ func TestIndirectIndex_MaxBlocks(t *testing.T) {
|
|||
|
||||
func TestIndirectIndex_Type(t *testing.T) {
|
||||
index := tsm1.NewIndexWriter()
|
||||
index.Add("cpu", tsm1.BlockInteger, 0, 1, 10, 20)
|
||||
index.Add([]byte("cpu"), tsm1.BlockInteger, 0, 1, 10, 20)
|
||||
|
||||
b, err := index.MarshalBinary()
|
||||
if err != nil {
|
||||
|
|
@ -978,7 +978,7 @@ func TestIndirectIndex_Type(t *testing.T) {
|
|||
fatal(t, "unmarshal binary", err)
|
||||
}
|
||||
|
||||
typ, err := ind.Type("cpu")
|
||||
typ, err := ind.Type([]byte("cpu"))
|
||||
if err != nil {
|
||||
fatal(t, "reading type", err)
|
||||
}
|
||||
|
|
@ -990,9 +990,9 @@ func TestIndirectIndex_Type(t *testing.T) {
|
|||
|
||||
func TestIndirectIndex_Keys(t *testing.T) {
|
||||
index := tsm1.NewIndexWriter()
|
||||
index.Add("cpu", tsm1.BlockFloat64, 0, 1, 10, 20)
|
||||
index.Add("mem", tsm1.BlockFloat64, 0, 1, 10, 20)
|
||||
index.Add("cpu", tsm1.BlockFloat64, 1, 2, 20, 30)
|
||||
index.Add([]byte("cpu"), tsm1.BlockFloat64, 0, 1, 10, 20)
|
||||
index.Add([]byte("mem"), tsm1.BlockFloat64, 0, 1, 10, 20)
|
||||
index.Add([]byte("cpu"), tsm1.BlockFloat64, 1, 2, 20, 30)
|
||||
|
||||
keys := index.Keys()
|
||||
|
||||
|
|
@ -1002,11 +1002,11 @@ func TestIndirectIndex_Keys(t *testing.T) {
|
|||
}
|
||||
|
||||
// Keys should be sorted
|
||||
if got, exp := keys[0], "cpu"; got != exp {
|
||||
if got, exp := string(keys[0]), "cpu"; got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := keys[1], "mem"; got != exp {
|
||||
if got, exp := string(keys[1]), "mem"; got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -1022,7 +1022,7 @@ func TestBlockIterator_Single(t *testing.T) {
|
|||
}
|
||||
|
||||
values := []tsm1.Value{tsm1.NewValue(0, int64(1))}
|
||||
if err := w.Write("cpu", values); err != nil {
|
||||
if err := w.Write([]byte("cpu"), values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
|
||||
}
|
||||
|
|
@ -1052,7 +1052,7 @@ func TestBlockIterator_Single(t *testing.T) {
|
|||
t.Fatalf("unexpected error creating iterator: %v", err)
|
||||
}
|
||||
|
||||
if got, exp := key, "cpu"; got != exp {
|
||||
if got, exp := string(key), "cpu"; got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -1091,12 +1091,12 @@ func TestBlockIterator_MultipleBlocks(t *testing.T) {
|
|||
}
|
||||
|
||||
values1 := []tsm1.Value{tsm1.NewValue(0, int64(1))}
|
||||
if err := w.Write("cpu", values1); err != nil {
|
||||
if err := w.Write([]byte("cpu"), values1); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
values2 := []tsm1.Value{tsm1.NewValue(1, int64(2))}
|
||||
if err := w.Write("cpu", values2); err != nil {
|
||||
if err := w.Write([]byte("cpu"), values2); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -1129,7 +1129,7 @@ func TestBlockIterator_MultipleBlocks(t *testing.T) {
|
|||
t.Fatalf("unexpected error creating iterator: %v", err)
|
||||
}
|
||||
|
||||
if got, exp := key, "cpu"; got != exp {
|
||||
if got, exp := string(key), "cpu"; got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -1177,7 +1177,7 @@ func TestBlockIterator_Sorted(t *testing.T) {
|
|||
}
|
||||
|
||||
for k, v := range values {
|
||||
if err := w.Write(k, v); err != nil {
|
||||
if err := w.Write([]byte(k), v); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
|
||||
}
|
||||
|
|
@ -1207,11 +1207,11 @@ func TestBlockIterator_Sorted(t *testing.T) {
|
|||
for iter.Next() {
|
||||
key, _, _, _, _, buf, err := iter.Read()
|
||||
|
||||
if key < lastKey {
|
||||
if string(key) < lastKey {
|
||||
t.Fatalf("keys not sorted: got %v, last %v", key, lastKey)
|
||||
}
|
||||
|
||||
lastKey = key
|
||||
lastKey = string(key)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error creating iterator: %v", err)
|
||||
|
|
@ -1241,7 +1241,7 @@ func TestIndirectIndex_UnmarshalBinary_BlockCountOverflow(t *testing.T) {
|
|||
}
|
||||
|
||||
for i := 0; i < 3280; i++ {
|
||||
w.Write("cpu", []tsm1.Value{tsm1.NewValue(int64(i), float64(i))})
|
||||
w.Write([]byte("cpu"), []tsm1.Value{tsm1.NewValue(int64(i), float64(i))})
|
||||
}
|
||||
|
||||
if err := w.WriteIndex(); err != nil {
|
||||
|
|
@ -1275,7 +1275,7 @@ func TestCompacted_NotFull(t *testing.T) {
|
|||
}
|
||||
|
||||
values := []tsm1.Value{tsm1.NewValue(0, 1.0)}
|
||||
if err := w.Write("cpu", values); err != nil {
|
||||
if err := w.Write([]byte("cpu"), values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
|
||||
}
|
||||
|
|
@ -1345,7 +1345,7 @@ func TestTSMReader_File_ReadAll(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -1371,7 +1371,7 @@ func TestTSMReader_File_ReadAll(t *testing.T) {
|
|||
|
||||
var count int
|
||||
for _, d := range data {
|
||||
readValues, err := r.ReadAll(d.key)
|
||||
readValues, err := r.ReadAll([]byte(d.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error reading: %v", err)
|
||||
}
|
||||
|
|
@ -1494,7 +1494,7 @@ func TestTSMReader_File_Read(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -1520,7 +1520,7 @@ func TestTSMReader_File_Read(t *testing.T) {
|
|||
|
||||
var count int
|
||||
for _, d := range data {
|
||||
readValues, err := r.Read(d.key, d.values[0].UnixNano())
|
||||
readValues, err := r.Read([]byte(d.key), d.values[0].UnixNano())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -1574,7 +1574,7 @@ func TestTSMReader_References(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -1610,7 +1610,7 @@ func TestTSMReader_References(t *testing.T) {
|
|||
|
||||
var count int
|
||||
for _, d := range data {
|
||||
readValues, err := r.Read(d.key, d.values[0].UnixNano())
|
||||
readValues, err := r.Read([]byte(d.key), d.values[0].UnixNano())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -1644,7 +1644,7 @@ func TestTSMReader_References(t *testing.T) {
|
|||
func BenchmarkIndirectIndex_UnmarshalBinary(b *testing.B) {
|
||||
index := tsm1.NewIndexWriter()
|
||||
for i := 0; i < 100000; i++ {
|
||||
index.Add(fmt.Sprintf("cpu-%d", i), tsm1.BlockFloat64, int64(i*2), int64(i*2+1), 10, 100)
|
||||
index.Add([]byte(fmt.Sprintf("cpu-%d", i)), tsm1.BlockFloat64, int64(i*2), int64(i*2+1), 10, 100)
|
||||
}
|
||||
|
||||
bytes, err := index.MarshalBinary()
|
||||
|
|
|
|||
|
|
@ -2,11 +2,11 @@ package tsm1
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/cespare/xxhash"
|
||||
"github.com/influxdata/influxdb/pkg/bytesutil"
|
||||
)
|
||||
|
||||
// partitions is the number of partitions we used in the ring's continuum. It
|
||||
|
|
@ -91,32 +91,32 @@ func (r *ring) reset() {
|
|||
|
||||
// getPartition retrieves the hash ring partition associated with the provided
|
||||
// key.
|
||||
func (r *ring) getPartition(key string) *partition {
|
||||
return r.continuum[int(xxhash.Sum64String(key)%partitions)]
|
||||
func (r *ring) getPartition(key []byte) *partition {
|
||||
return r.continuum[int(xxhash.Sum64(key)%partitions)]
|
||||
}
|
||||
|
||||
// entry returns the entry for the given key.
|
||||
// entry is safe for use by multiple goroutines.
|
||||
func (r *ring) entry(key string) (*entry, bool) {
|
||||
func (r *ring) entry(key []byte) (*entry, bool) {
|
||||
return r.getPartition(key).entry(key)
|
||||
}
|
||||
|
||||
// write writes values to the entry in the ring's partition associated with key.
|
||||
// If no entry exists for the key then one will be created.
|
||||
// write is safe for use by multiple goroutines.
|
||||
func (r *ring) write(key string, values Values) error {
|
||||
func (r *ring) write(key []byte, values Values) error {
|
||||
return r.getPartition(key).write(key, values)
|
||||
}
|
||||
|
||||
// add adds an entry to the ring.
|
||||
func (r *ring) add(key string, entry *entry) {
|
||||
func (r *ring) add(key []byte, entry *entry) {
|
||||
r.getPartition(key).add(key, entry)
|
||||
atomic.AddInt64(&r.keysHint, 1)
|
||||
}
|
||||
|
||||
// remove deletes the entry for the given key.
|
||||
// remove is safe for use by multiple goroutines.
|
||||
func (r *ring) remove(key string) {
|
||||
func (r *ring) remove(key []byte) {
|
||||
r.getPartition(key).remove(key)
|
||||
if r.keysHint > 0 {
|
||||
atomic.AddInt64(&r.keysHint, -1)
|
||||
|
|
@ -125,14 +125,14 @@ func (r *ring) remove(key string) {
|
|||
|
||||
// keys returns all the keys from all partitions in the hash ring. The returned
|
||||
// keys will be in order if sorted is true.
|
||||
func (r *ring) keys(sorted bool) []string {
|
||||
keys := make([]string, 0, atomic.LoadInt64(&r.keysHint))
|
||||
func (r *ring) keys(sorted bool) [][]byte {
|
||||
keys := make([][]byte, 0, atomic.LoadInt64(&r.keysHint))
|
||||
for _, p := range r.partitions {
|
||||
keys = append(keys, p.keys()...)
|
||||
}
|
||||
|
||||
if sorted {
|
||||
sort.Strings(keys)
|
||||
bytesutil.Sort(keys)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
|
@ -142,7 +142,7 @@ func (r *ring) keys(sorted bool) []string {
|
|||
// will be called with each key and the corresponding entry. The first error
|
||||
// encountered will be returned, if any. apply is safe for use by multiple
|
||||
// goroutines.
|
||||
func (r *ring) apply(f func(string, *entry) error) error {
|
||||
func (r *ring) apply(f func([]byte, *entry) error) error {
|
||||
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
|
|
@ -157,7 +157,7 @@ func (r *ring) apply(f func(string, *entry) error) error {
|
|||
|
||||
p.mu.RLock()
|
||||
for k, e := range p.store {
|
||||
if err := f(k, e); err != nil {
|
||||
if err := f([]byte(k), e); err != nil {
|
||||
res <- err
|
||||
p.mu.RUnlock()
|
||||
return
|
||||
|
|
@ -184,11 +184,11 @@ func (r *ring) apply(f func(string, *entry) error) error {
|
|||
// applySerial is similar to apply, but invokes f on each partition in the same
|
||||
// goroutine.
|
||||
// apply is safe for use by multiple goroutines.
|
||||
func (r *ring) applySerial(f func(string, *entry) error) error {
|
||||
func (r *ring) applySerial(f func([]byte, *entry) error) error {
|
||||
for _, p := range r.partitions {
|
||||
p.mu.RLock()
|
||||
for k, e := range p.store {
|
||||
if err := f(k, e); err != nil {
|
||||
if err := f([]byte(k), e); err != nil {
|
||||
p.mu.RUnlock()
|
||||
return err
|
||||
}
|
||||
|
|
@ -212,9 +212,9 @@ type partition struct {
|
|||
|
||||
// entry returns the partition's entry for the provided key.
|
||||
// It's safe for use by multiple goroutines.
|
||||
func (p *partition) entry(key string) (*entry, bool) {
|
||||
func (p *partition) entry(key []byte) (*entry, bool) {
|
||||
p.mu.RLock()
|
||||
e, ok := p.store[key]
|
||||
e, ok := p.store[string(key)]
|
||||
p.mu.RUnlock()
|
||||
return e, ok
|
||||
}
|
||||
|
|
@ -222,9 +222,9 @@ func (p *partition) entry(key string) (*entry, bool) {
|
|||
// write writes the values to the entry in the partition, creating the entry
|
||||
// if it does not exist.
|
||||
// write is safe for use by multiple goroutines.
|
||||
func (p *partition) write(key string, values Values) error {
|
||||
func (p *partition) write(key []byte, values Values) error {
|
||||
p.mu.RLock()
|
||||
e, ok := p.store[key]
|
||||
e, ok := p.store[string(key)]
|
||||
p.mu.RUnlock()
|
||||
if ok {
|
||||
// Hot path.
|
||||
|
|
@ -235,42 +235,42 @@ func (p *partition) write(key string, values Values) error {
|
|||
defer p.mu.Unlock()
|
||||
|
||||
// Check again.
|
||||
if e, ok = p.store[key]; ok {
|
||||
if e, ok = p.store[string(key)]; ok {
|
||||
return e.add(values)
|
||||
}
|
||||
|
||||
// Create a new entry using a preallocated size if we have a hint available.
|
||||
hint, _ := p.entrySizeHints[xxhash.Sum64String(key)]
|
||||
hint, _ := p.entrySizeHints[xxhash.Sum64(key)]
|
||||
e, err := newEntryValues(values, hint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.store[key] = e
|
||||
p.store[string(key)] = e
|
||||
return nil
|
||||
}
|
||||
|
||||
// add adds a new entry for key to the partition.
|
||||
func (p *partition) add(key string, entry *entry) {
|
||||
func (p *partition) add(key []byte, entry *entry) {
|
||||
p.mu.Lock()
|
||||
p.store[key] = entry
|
||||
p.store[string(key)] = entry
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// remove deletes the entry associated with the provided key.
|
||||
// remove is safe for use by multiple goroutines.
|
||||
func (p *partition) remove(key string) {
|
||||
func (p *partition) remove(key []byte) {
|
||||
p.mu.Lock()
|
||||
delete(p.store, key)
|
||||
delete(p.store, string(key))
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// keys returns an unsorted slice of the keys in the partition.
|
||||
func (p *partition) keys() []string {
|
||||
func (p *partition) keys() [][]byte {
|
||||
p.mu.RLock()
|
||||
keys := make([]string, 0, len(p.store))
|
||||
keys := make([][]byte, 0, len(p.store))
|
||||
for k := range p.store {
|
||||
keys = append(keys, k)
|
||||
keys = append(keys, []byte(k))
|
||||
}
|
||||
p.mu.RUnlock()
|
||||
return keys
|
||||
|
|
|
|||
|
|
@ -43,12 +43,12 @@ func TestRing_newRing(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
var strSliceRes []string
|
||||
var strSliceRes [][]byte
|
||||
|
||||
func benchmarkRingkeys(b *testing.B, r *ring, keys int) {
|
||||
// Add some keys
|
||||
for i := 0; i < keys; i++ {
|
||||
r.add(fmt.Sprintf("cpu,host=server-%d value=1", i), nil)
|
||||
r.add([]byte(fmt.Sprintf("cpu,host=server-%d value=1", i)), nil)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
|
|
@ -64,11 +64,11 @@ func BenchmarkRing_keys_10000(b *testing.B) { benchmarkRingkeys(b, MustNewRing(
|
|||
func BenchmarkRing_keys_100000(b *testing.B) { benchmarkRingkeys(b, MustNewRing(256), 100000) }
|
||||
|
||||
func benchmarkRingGetPartition(b *testing.B, r *ring, keys int) {
|
||||
vals := make([]string, keys)
|
||||
vals := make([][]byte, keys)
|
||||
|
||||
// Add some keys
|
||||
for i := 0; i < keys; i++ {
|
||||
vals[i] = fmt.Sprintf("cpu,host=server-%d field1=value1,field2=value2,field4=value4,field5=value5,field6=value6,field7=value7,field8=value1,field9=value2,field10=value4,field11=value5,field12=value6,field13=value7", i)
|
||||
vals[i] = []byte(fmt.Sprintf("cpu,host=server-%d field1=value1,field2=value2,field4=value4,field5=value5,field6=value6,field7=value7,field8=value1,field9=value2,field10=value4,field11=value5,field12=value6,field13=value7", i))
|
||||
r.add(vals[i], nil)
|
||||
}
|
||||
|
||||
|
|
@ -94,7 +94,7 @@ func benchmarkRingWrite(b *testing.B, r *ring, n int) {
|
|||
go func() {
|
||||
defer wg.Done()
|
||||
for j := 0; j < n; j++ {
|
||||
if err := r.write(fmt.Sprintf("cpu,host=server-%d value=1", j), Values{}); err != nil {
|
||||
if err := r.write([]byte(fmt.Sprintf("cpu,host=server-%d value=1", j)), Values{}); err != nil {
|
||||
errC <- err
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ type Tombstoner struct {
|
|||
// Tombstone represents an individual deletion.
|
||||
type Tombstone struct {
|
||||
// Key is the tombstoned series key.
|
||||
Key string
|
||||
Key []byte
|
||||
|
||||
// Min and Max are the min and max unix nanosecond time ranges of Key that are deleted. If
|
||||
// the full range is deleted, both values are -1.
|
||||
|
|
@ -45,12 +45,12 @@ type Tombstone struct {
|
|||
}
|
||||
|
||||
// Add adds the all keys, across all timestamps, to the tombstone.
|
||||
func (t *Tombstoner) Add(keys []string) error {
|
||||
func (t *Tombstoner) Add(keys [][]byte) error {
|
||||
return t.AddRange(keys, math.MinInt64, math.MaxInt64)
|
||||
}
|
||||
|
||||
// AddRange adds all keys to the tombstone specifying only the data between min and max to be removed.
|
||||
func (t *Tombstoner) AddRange(keys []string, min, max int64) error {
|
||||
func (t *Tombstoner) AddRange(keys [][]byte, min, max int64) error {
|
||||
if len(keys) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -259,7 +259,7 @@ func (t *Tombstoner) readTombstoneV1(f *os.File, fn func(t Tombstone) error) err
|
|||
continue
|
||||
}
|
||||
if err := fn(Tombstone{
|
||||
Key: line,
|
||||
Key: []byte(line),
|
||||
Min: math.MinInt64,
|
||||
Max: math.MaxInt64,
|
||||
}); err != nil {
|
||||
|
|
@ -287,7 +287,7 @@ func (t *Tombstoner) readTombstoneV2(f *os.File, fn func(t Tombstone) error) err
|
|||
|
||||
var (
|
||||
min, max int64
|
||||
key string
|
||||
key []byte
|
||||
)
|
||||
b := make([]byte, 4096)
|
||||
for {
|
||||
|
|
@ -308,7 +308,7 @@ func (t *Tombstoner) readTombstoneV2(f *os.File, fn func(t Tombstone) error) err
|
|||
if _, err := f.Read(b[:keyLen]); err != nil {
|
||||
return err
|
||||
}
|
||||
key = string(b[:keyLen])
|
||||
key = b[:keyLen]
|
||||
n += int64(keyLen)
|
||||
|
||||
if _, err := f.Read(b[:8]); err != nil {
|
||||
|
|
@ -345,7 +345,7 @@ func (t *Tombstoner) readTombstoneV3(f *os.File, fn func(t Tombstone) error) err
|
|||
|
||||
var (
|
||||
min, max int64
|
||||
key string
|
||||
key []byte
|
||||
)
|
||||
|
||||
gr, err := gzip.NewReader(bufio.NewReader(f))
|
||||
|
|
@ -370,7 +370,10 @@ func (t *Tombstoner) readTombstoneV3(f *os.File, fn func(t Tombstone) error) err
|
|||
if _, err := io.ReadFull(gr, b[:keyLen]); err != nil {
|
||||
return err
|
||||
}
|
||||
key = string(b[:keyLen])
|
||||
|
||||
// Copy the key since b is re-used
|
||||
key = make([]byte, keyLen)
|
||||
copy(key, b[:keyLen])
|
||||
|
||||
if _, err := io.ReadFull(gr, b[:8]); err != nil {
|
||||
return err
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ func TestTombstoner_Add(t *testing.T) {
|
|||
t.Fatalf("stat length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
ts.Add([]string{"foo"})
|
||||
ts.Add([][]byte{[]byte("foo")})
|
||||
|
||||
entries, err = ts.ReadAll()
|
||||
if err != nil {
|
||||
|
|
@ -57,7 +57,7 @@ func TestTombstoner_Add(t *testing.T) {
|
|||
t.Fatalf("length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := entries[0].Key, "foo"; got != exp {
|
||||
if got, exp := string(entries[0].Key), "foo"; got != exp {
|
||||
t.Fatalf("value mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -72,7 +72,7 @@ func TestTombstoner_Add(t *testing.T) {
|
|||
t.Fatalf("length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := entries[0].Key, "foo"; got != exp {
|
||||
if got, exp := string(entries[0].Key), "foo"; got != exp {
|
||||
t.Fatalf("value mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -93,7 +93,7 @@ func TestTombstoner_Add_Empty(t *testing.T) {
|
|||
t.Fatalf("length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
ts.Add([]string{})
|
||||
ts.Add([][]byte{})
|
||||
|
||||
// Use a new Tombstoner to verify values are persisted
|
||||
ts = &tsm1.Tombstoner{Path: f.Name()}
|
||||
|
|
@ -120,7 +120,7 @@ func TestTombstoner_Delete(t *testing.T) {
|
|||
f := MustTempFile(dir)
|
||||
ts := &tsm1.Tombstoner{Path: f.Name()}
|
||||
|
||||
ts.Add([]string{"foo"})
|
||||
ts.Add([][]byte{[]byte("foo")})
|
||||
|
||||
// Use a new Tombstoner to verify values are persisted
|
||||
ts = &tsm1.Tombstoner{Path: f.Name()}
|
||||
|
|
@ -133,8 +133,8 @@ func TestTombstoner_Delete(t *testing.T) {
|
|||
t.Fatalf("length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := entries[0].Key, "foo"; got != exp {
|
||||
t.Fatalf("value mismatch: got %v, exp %v", got, exp)
|
||||
if got, exp := string(entries[0].Key), "foo"; got != exp {
|
||||
t.Fatalf("value mismatch: got %s, exp %s", got, exp)
|
||||
}
|
||||
|
||||
if err := ts.Delete(); err != nil {
|
||||
|
|
@ -187,7 +187,7 @@ func TestTombstoner_ReadV1(t *testing.T) {
|
|||
t.Fatalf("length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := entries[0].Key, "foo"; got != exp {
|
||||
if got, exp := string(entries[0].Key), "foo"; got != exp {
|
||||
t.Fatalf("value mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -202,7 +202,7 @@ func TestTombstoner_ReadV1(t *testing.T) {
|
|||
t.Fatalf("length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := entries[0].Key, "foo"; got != exp {
|
||||
if got, exp := string(entries[0].Key), "foo"; got != exp {
|
||||
t.Fatalf("value mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ package tsm1
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
|
|
@ -479,7 +480,7 @@ func (l *WAL) CloseSegment() error {
|
|||
}
|
||||
|
||||
// Delete deletes the given keys, returning the segment ID for the operation.
|
||||
func (l *WAL) Delete(keys []string) (int, error) {
|
||||
func (l *WAL) Delete(keys [][]byte) (int, error) {
|
||||
if len(keys) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
|
@ -496,7 +497,7 @@ func (l *WAL) Delete(keys []string) (int, error) {
|
|||
|
||||
// DeleteRange deletes the given keys within the given time range,
|
||||
// returning the segment ID for the operation.
|
||||
func (l *WAL) DeleteRange(keys []string, min, max int64) (int, error) {
|
||||
func (l *WAL) DeleteRange(keys [][]byte, min, max int64) (int, error) {
|
||||
if len(keys) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
|
@ -877,7 +878,7 @@ func (w *WriteWALEntry) Type() WalEntryType {
|
|||
|
||||
// DeleteWALEntry represents the deletion of multiple series.
|
||||
type DeleteWALEntry struct {
|
||||
Keys []string
|
||||
Keys [][]byte
|
||||
sz int
|
||||
}
|
||||
|
||||
|
|
@ -889,7 +890,7 @@ func (w *DeleteWALEntry) MarshalBinary() ([]byte, error) {
|
|||
|
||||
// UnmarshalBinary deserializes the byte slice into w.
|
||||
func (w *DeleteWALEntry) UnmarshalBinary(b []byte) error {
|
||||
w.Keys = strings.Split(string(b), "\n")
|
||||
w.Keys = bytes.Split(b, []byte("\n"))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -934,7 +935,7 @@ func (w *DeleteWALEntry) Type() WalEntryType {
|
|||
|
||||
// DeleteRangeWALEntry represents the deletion of multiple series.
|
||||
type DeleteRangeWALEntry struct {
|
||||
Keys []string
|
||||
Keys [][]byte
|
||||
Min, Max int64
|
||||
sz int
|
||||
}
|
||||
|
|
@ -965,7 +966,7 @@ func (w *DeleteRangeWALEntry) UnmarshalBinary(b []byte) error {
|
|||
if i+sz > len(b) {
|
||||
return ErrWALCorrupt
|
||||
}
|
||||
w.Keys = append(w.Keys, string(b[i:i+sz]))
|
||||
w.Keys = append(w.Keys, b[i:i+sz])
|
||||
i += sz
|
||||
}
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -216,7 +216,7 @@ func TestWALWriter_WriteDelete_Single(t *testing.T) {
|
|||
w := tsm1.NewWALSegmentWriter(f)
|
||||
|
||||
entry := &tsm1.DeleteWALEntry{
|
||||
Keys: []string{"cpu"},
|
||||
Keys: [][]byte{[]byte("cpu")},
|
||||
}
|
||||
|
||||
if err := w.Write(mustMarshalEntry(entry)); err != nil {
|
||||
|
|
@ -251,7 +251,7 @@ func TestWALWriter_WriteDelete_Single(t *testing.T) {
|
|||
t.Fatalf("key length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := e.Keys[0], entry.Keys[0]; got != exp {
|
||||
if got, exp := string(e.Keys[0]), string(entry.Keys[0]); got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -281,7 +281,7 @@ func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
|
|||
|
||||
// Write the delete entry
|
||||
deleteEntry := &tsm1.DeleteWALEntry{
|
||||
Keys: []string{"cpu,host=A#!~value"},
|
||||
Keys: [][]byte{[]byte("cpu,host=A#!~value")},
|
||||
}
|
||||
|
||||
if err := w.Write(mustMarshalEntry(deleteEntry)); err != nil {
|
||||
|
|
@ -345,7 +345,7 @@ func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
|
|||
t.Fatalf("key length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := de.Keys[0], deleteEntry.Keys[0]; got != exp {
|
||||
if got, exp := string(de.Keys[0]), string(deleteEntry.Keys[0]); got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
}
|
||||
|
|
@ -378,7 +378,7 @@ func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
|
|||
|
||||
// Write the delete entry
|
||||
deleteEntry := &tsm1.DeleteRangeWALEntry{
|
||||
Keys: []string{"cpu,host=A#!~value"},
|
||||
Keys: [][]byte{[]byte("cpu,host=A#!~value")},
|
||||
Min: 2,
|
||||
Max: 3,
|
||||
}
|
||||
|
|
@ -444,7 +444,7 @@ func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
|
|||
t.Fatalf("key length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if got, exp := de.Keys[0], deleteEntry.Keys[0]; got != exp {
|
||||
if got, exp := string(de.Keys[0]), string(deleteEntry.Keys[0]); got != exp {
|
||||
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
|
|
@ -522,7 +522,7 @@ func TestWAL_Delete(t *testing.T) {
|
|||
t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp)
|
||||
}
|
||||
|
||||
if _, err := w.Delete([]string{"cpu"}); err != nil {
|
||||
if _, err := w.Delete([][]byte{[]byte("cpu")}); err != nil {
|
||||
t.Fatalf("error writing points: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -641,7 +641,7 @@ func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) {
|
|||
|
||||
func TestWriteWALSegment_UnmarshalBinary_DeleteWALCorrupt(t *testing.T) {
|
||||
w := &tsm1.DeleteWALEntry{
|
||||
Keys: []string{"foo", "bar"},
|
||||
Keys: [][]byte{[]byte("foo"), []byte("bar")},
|
||||
}
|
||||
|
||||
b, err := w.MarshalBinary()
|
||||
|
|
@ -663,7 +663,7 @@ func TestWriteWALSegment_UnmarshalBinary_DeleteWALCorrupt(t *testing.T) {
|
|||
|
||||
func TestWriteWALSegment_UnmarshalBinary_DeleteRangeWALCorrupt(t *testing.T) {
|
||||
w := &tsm1.DeleteRangeWALEntry{
|
||||
Keys: []string{"foo", "bar"},
|
||||
Keys: [][]byte{[]byte("foo"), []byte("bar")},
|
||||
Min: 1,
|
||||
Max: 2,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -73,6 +73,8 @@ import (
|
|||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/pkg/bytesutil"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -122,14 +124,14 @@ type TSMWriter interface {
|
|||
// ensuring a fixed number of values are encoded in each block as well as
|
||||
// ensuring the Values are sorted. The first and last timestamp values are
|
||||
// used as the minimum and maximum values for the index entry.
|
||||
Write(key string, values Values) error
|
||||
Write(key []byte, values Values) error
|
||||
|
||||
// WriteBlock writes a new block for key containing the bytes in block. WriteBlock appends
|
||||
// blocks in the order that the WriteBlock function is called. The caller is
|
||||
// responsible for ensuring keys and blocks are sorted appropriately, and that the
|
||||
// block and index information is correct for the block. The minTime and maxTime
|
||||
// timestamp values are used as the minimum and maximum values for the index entry.
|
||||
WriteBlock(key string, minTime, maxTime int64, block []byte) error
|
||||
WriteBlock(key []byte, minTime, maxTime int64, block []byte) error
|
||||
|
||||
// WriteIndex finishes the TSM write streams and writes the index.
|
||||
WriteIndex() error
|
||||
|
|
@ -147,13 +149,13 @@ type TSMWriter interface {
|
|||
// IndexWriter writes a TSMIndex.
|
||||
type IndexWriter interface {
|
||||
// Add records a new block entry for a key in the index.
|
||||
Add(key string, blockType byte, minTime, maxTime int64, offset int64, size uint32)
|
||||
Add(key []byte, blockType byte, minTime, maxTime int64, offset int64, size uint32)
|
||||
|
||||
// Entries returns all index entries for a key.
|
||||
Entries(key string) []IndexEntry
|
||||
Entries(key []byte) []IndexEntry
|
||||
|
||||
// Keys returns the unique set of keys in the index.
|
||||
Keys() []string
|
||||
Keys() [][]byte
|
||||
|
||||
// KeyCount returns the count of unique keys in the index.
|
||||
KeyCount() int
|
||||
|
|
@ -243,16 +245,16 @@ type directIndex struct {
|
|||
blocks map[string]*indexEntries
|
||||
}
|
||||
|
||||
func (d *directIndex) Add(key string, blockType byte, minTime, maxTime int64, offset int64, size uint32) {
|
||||
func (d *directIndex) Add(key []byte, blockType byte, minTime, maxTime int64, offset int64, size uint32) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
entries := d.blocks[key]
|
||||
entries := d.blocks[string(key)]
|
||||
if entries == nil {
|
||||
entries = &indexEntries{
|
||||
Type: blockType,
|
||||
}
|
||||
d.blocks[key] = entries
|
||||
d.blocks[string(key)] = entries
|
||||
// size of the key stored in the index
|
||||
d.size += uint32(2 + len(key))
|
||||
|
||||
|
|
@ -270,22 +272,22 @@ func (d *directIndex) Add(key string, blockType byte, minTime, maxTime int64, of
|
|||
d.size += indexEntrySize
|
||||
}
|
||||
|
||||
func (d *directIndex) entries(key string) []IndexEntry {
|
||||
entries := d.blocks[key]
|
||||
func (d *directIndex) entries(key []byte) []IndexEntry {
|
||||
entries := d.blocks[string(key)]
|
||||
if entries == nil {
|
||||
return nil
|
||||
}
|
||||
return entries.entries
|
||||
}
|
||||
|
||||
func (d *directIndex) Entries(key string) []IndexEntry {
|
||||
func (d *directIndex) Entries(key []byte) []IndexEntry {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
return d.entries(key)
|
||||
}
|
||||
|
||||
func (d *directIndex) Entry(key string, t int64) *IndexEntry {
|
||||
func (d *directIndex) Entry(key []byte, t int64) *IndexEntry {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
|
|
@ -298,15 +300,15 @@ func (d *directIndex) Entry(key string, t int64) *IndexEntry {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *directIndex) Keys() []string {
|
||||
func (d *directIndex) Keys() [][]byte {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
var keys []string
|
||||
keys := make([][]byte, 0, len(d.blocks))
|
||||
for k := range d.blocks {
|
||||
keys = append(keys, k)
|
||||
keys = append(keys, []byte(k))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
bytesutil.Sort(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
|
|
@ -454,7 +456,7 @@ func (t *tsmWriter) writeHeader() error {
|
|||
}
|
||||
|
||||
// Write writes a new block containing key and values.
|
||||
func (t *tsmWriter) Write(key string, values Values) error {
|
||||
func (t *tsmWriter) Write(key []byte, values Values) error {
|
||||
if len(key) > maxKeyLength {
|
||||
return ErrMaxKeyLengthExceeded
|
||||
}
|
||||
|
|
@ -506,7 +508,7 @@ func (t *tsmWriter) Write(key string, values Values) error {
|
|||
// WriteBlock writes block for the given key and time range to the TSM file. If the write
|
||||
// exceeds max entries for a given key, ErrMaxBlocksExceeded is returned. This indicates
|
||||
// that the index is now full for this key and no future writes to this key will succeed.
|
||||
func (t *tsmWriter) WriteBlock(key string, minTime, maxTime int64, block []byte) error {
|
||||
func (t *tsmWriter) WriteBlock(key []byte, minTime, maxTime int64, block []byte) error {
|
||||
if len(key) > maxKeyLength {
|
||||
return ErrMaxKeyLengthExceeded
|
||||
}
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ func TestTSMWriter_Write_NoValues(t *testing.T) {
|
|||
t.Fatalf("unexpected error created writer: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Write("foo", []tsm1.Value{}); err != nil {
|
||||
if err := w.Write([]byte("foo"), []tsm1.Value{}); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -58,7 +58,7 @@ func TestTSMWriter_Write_Single(t *testing.T) {
|
|||
}
|
||||
|
||||
values := []tsm1.Value{tsm1.NewValue(0, 1.0)}
|
||||
if err := w.Write("cpu", values); err != nil {
|
||||
if err := w.Write([]byte("cpu"), values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
|
||||
}
|
||||
|
|
@ -97,7 +97,7 @@ func TestTSMWriter_Write_Single(t *testing.T) {
|
|||
}
|
||||
defer r.Close()
|
||||
|
||||
readValues, err := r.ReadAll("cpu")
|
||||
readValues, err := r.ReadAll([]byte("cpu"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -132,7 +132,7 @@ func TestTSMWriter_Write_Multiple(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -157,7 +157,7 @@ func TestTSMWriter_Write_Multiple(t *testing.T) {
|
|||
defer r.Close()
|
||||
|
||||
for _, d := range data {
|
||||
readValues, err := r.ReadAll(d.key)
|
||||
readValues, err := r.ReadAll([]byte(d.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -199,7 +199,7 @@ func TestTSMWriter_Write_MultipleKeyValues(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -224,7 +224,7 @@ func TestTSMWriter_Write_MultipleKeyValues(t *testing.T) {
|
|||
defer r.Close()
|
||||
|
||||
for _, d := range data {
|
||||
readValues, err := r.ReadAll(d.key)
|
||||
readValues, err := r.ReadAll([]byte(d.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -267,7 +267,7 @@ func TestTSMWriter_Write_ReverseKeys(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -292,7 +292,7 @@ func TestTSMWriter_Write_ReverseKeys(t *testing.T) {
|
|||
defer r.Close()
|
||||
|
||||
for _, d := range data {
|
||||
readValues, err := r.ReadAll(d.key)
|
||||
readValues, err := r.ReadAll([]byte(d.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -335,7 +335,7 @@ func TestTSMWriter_Write_SameKey(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -361,7 +361,7 @@ func TestTSMWriter_Write_SameKey(t *testing.T) {
|
|||
|
||||
values := append(data[0].values, data[1].values...)
|
||||
|
||||
readValues, err := r.ReadAll("cpu")
|
||||
readValues, err := r.ReadAll([]byte("cpu"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -404,7 +404,7 @@ func TestTSMWriter_Read_Multiple(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -430,7 +430,7 @@ func TestTSMWriter_Read_Multiple(t *testing.T) {
|
|||
|
||||
for _, values := range data {
|
||||
// Try the first timestamp
|
||||
readValues, err := r.Read("cpu", values.values[0].UnixNano())
|
||||
readValues, err := r.Read([]byte("cpu"), values.values[0].UnixNano())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -446,7 +446,7 @@ func TestTSMWriter_Read_Multiple(t *testing.T) {
|
|||
}
|
||||
|
||||
// Try the last timestamp too
|
||||
readValues, err = r.Read("cpu", values.values[1].UnixNano())
|
||||
readValues, err = r.Read([]byte("cpu"), values.values[1].UnixNano())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -473,7 +473,7 @@ func TestTSMWriter_WriteBlock_Empty(t *testing.T) {
|
|||
t.Fatalf("unexpected error creating writer: %v", err)
|
||||
}
|
||||
|
||||
if err := w.WriteBlock("cpu", 0, 0, nil); err != nil {
|
||||
if err := w.WriteBlock([]byte("cpu"), 0, 0, nil); err != nil {
|
||||
t.Fatalf("unexpected error writing block: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -516,7 +516,7 @@ func TestTSMWriter_WriteBlock_Multiple(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, d := range data {
|
||||
if err := w.Write(d.key, d.values); err != nil {
|
||||
if err := w.Write([]byte(d.key), d.values); err != nil {
|
||||
t.Fatalf("unexpected error writing: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -569,7 +569,7 @@ func TestTSMWriter_WriteBlock_Multiple(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("unexpected error reading block: %v", err)
|
||||
}
|
||||
if err := w.WriteBlock(key, minTime, maxTime, b); err != nil {
|
||||
if err := w.WriteBlock([]byte(key), minTime, maxTime, b); err != nil {
|
||||
t.Fatalf("unexpected error writing block: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -595,7 +595,7 @@ func TestTSMWriter_WriteBlock_Multiple(t *testing.T) {
|
|||
defer r.Close()
|
||||
|
||||
for _, d := range data {
|
||||
readValues, err := r.ReadAll(d.key)
|
||||
readValues, err := r.ReadAll([]byte(d.key))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error readin: %v", err)
|
||||
}
|
||||
|
|
@ -627,7 +627,7 @@ func TestTSMWriter_WriteBlock_MaxKey(t *testing.T) {
|
|||
key += "a"
|
||||
}
|
||||
|
||||
if err := w.WriteBlock(key, 0, 0, nil); err != tsm1.ErrMaxKeyLengthExceeded {
|
||||
if err := w.WriteBlock([]byte(key), 0, 0, nil); err != tsm1.ErrMaxKeyLengthExceeded {
|
||||
t.Fatalf("expected max key length error writing key: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -647,7 +647,7 @@ func TestTSMWriter_Write_MaxKey(t *testing.T) {
|
|||
for i := 0; i < 100000; i++ {
|
||||
key += "a"
|
||||
}
|
||||
if err := w.Write(key, []tsm1.Value{tsm1.NewValue(0, 1.0)}); err != tsm1.ErrMaxKeyLengthExceeded {
|
||||
if err := w.Write([]byte(key), []tsm1.Value{tsm1.NewValue(0, 1.0)}); err != tsm1.ErrMaxKeyLengthExceeded {
|
||||
t.Fatalf("expected max key length error writing key: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in New Issue