refactor(tsm1): delete unused Write method on cache (#20890)
parent 2931100576
commit 7169df3b51
@@ -279,38 +279,6 @@ func (c *Cache) Free() {
 	c.mu.Unlock()
 }
 
-// Write writes the set of values for the key to the cache. This function is goroutine-safe.
-// It returns an error if the cache will exceed its max size by adding the new values.
-func (c *Cache) Write(key []byte, values []Value) error {
-	c.init()
-	addedSize := uint64(Values(values).Size())
-
-	// Enough room in the cache?
-	limit := c.maxSize
-	n := c.Size() + addedSize
-
-	if limit > 0 && n > limit {
-		atomic.AddInt64(&c.stats.WriteErr, 1)
-		return ErrCacheMemorySizeLimitExceeded(n, limit)
-	}
-
-	newKey, err := c.store.write(key, values)
-	if err != nil {
-		atomic.AddInt64(&c.stats.WriteErr, 1)
-		return err
-	}
-
-	if newKey {
-		addedSize += uint64(len(key))
-	}
-	// Update the cache size and the memory size stat.
-	c.increaseSize(addedSize)
-	c.updateMemSize(int64(addedSize))
-	atomic.AddInt64(&c.stats.WriteOK, 1)
-
-	return nil
-}
-
 // WriteMulti writes the map of keys and associated values to the cache. This
 // function is goroutine-safe. It returns an error if the cache will exceeded
 // its max size by adding the new values. The write attempts to write as many
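With the hunk above applied, the only remaining write path for a single series key is WriteMulti with a one-entry map, which, per its doc comment, also rejects writes that would push the cache past its max size. A minimal caller-side sketch, assuming it sits in the tsm1 package; writeKey is a hypothetical name and not part of this change:

// Sketch only: a single-key write expressed through WriteMulti.
func writeKey(c *Cache, key []byte, values []Value) error {
	return c.WriteMulti(map[string][]Value{string(key): values})
}

The test-only helper added in the next hunk has exactly this shape.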
@@ -18,6 +18,11 @@ import (
 	"github.com/golang/snappy"
 )
 
+// Convenience method for testing.
+func (c *Cache) Write(key []byte, values []Value) error {
+	return c.WriteMulti(map[string][]Value{string(key): values})
+}
+
 func TestCache_NewCache(t *testing.T) {
 	c := NewCache(100)
 	if c == nil {
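A usage sketch for the helper, in the shape of the single-key tests that the next hunk deletes; TestCache_WriteHelper is a hypothetical name, assumed to live in the tsm1 test file with its existing imports:

func TestCache_WriteHelper(t *testing.T) {
	v0 := NewValue(1, 1.0)
	c := NewCache(uint64(4 * v0.Size()))

	// The helper forwards to WriteMulti, so this exercises the same code path.
	if err := c.Write([]byte("foo"), []Value{v0}); err != nil {
		t.Fatalf("failed to write key foo to cache: %s", err.Error())
	}
}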
@@ -35,51 +40,6 @@ func TestCache_NewCache(t *testing.T) {
 	}
 }
 
-func TestCache_CacheWrite(t *testing.T) {
-	v0 := NewValue(1, 1.0)
-	v1 := NewValue(2, 2.0)
-	v2 := NewValue(3, 3.0)
-	values := Values{v0, v1, v2}
-	valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
-
-	c := NewCache(3 * valuesSize)
-
-	if err := c.Write([]byte("foo"), values); err != nil {
-		t.Fatalf("failed to write key foo to cache: %s", err.Error())
-	}
-	if err := c.Write([]byte("bar"), values); err != nil {
-		t.Fatalf("failed to write key foo to cache: %s", err.Error())
-	}
-	if n := c.Size(); n != 2*valuesSize+6 {
-		t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
-	}
-
-	if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
-		t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
-	}
-}
-
-func TestCache_CacheWrite_TypeConflict(t *testing.T) {
-	v0 := NewValue(1, 1.0)
-	v1 := NewValue(2, int(64))
-	values := Values{v0, v1}
-	valuesSize := v0.Size() + v1.Size()
-
-	c := NewCache(uint64(2 * valuesSize))
-
-	if err := c.Write([]byte("foo"), values[:1]); err != nil {
-		t.Fatalf("failed to write key foo to cache: %s", err.Error())
-	}
-
-	if err := c.Write([]byte("foo"), values[1:]); err == nil {
-		t.Fatalf("expected field type conflict")
-	}
-
-	if exp, got := uint64(v0.Size())+3, c.Size(); exp != got {
-		t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
-	}
-}
-
 func TestCache_CacheWriteMulti(t *testing.T) {
 	v0 := NewValue(1, 1.0)
 	v1 := NewValue(2, 2.0)
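For reference, the size checks in the deleted tests rely on the cache charging the key bytes as well as the value bytes for each newly created key (addedSize += uint64(len(key)) in the removed Write), which is where the extra 6 in 2*valuesSize+6 comes from: len("foo") + len("bar"). A minimal sketch of the same expectation written against WriteMulti; the test name is hypothetical and this is not part of the change:

func TestCache_WriteMultiSize(t *testing.T) {
	v0, v1, v2 := NewValue(1, 1.0), NewValue(2, 2.0), NewValue(3, 3.0)
	values := Values{v0, v1, v2}
	valuesSize := uint64(values.Size())

	c := NewCache(3 * valuesSize)
	if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil {
		t.Fatalf("failed to write keys to cache: %s", err.Error())
	}

	// 2*valuesSize for the values, plus len("foo")+len("bar") = 6 for the new keys.
	if n := c.Size(); n != 2*valuesSize+6 {
		t.Fatalf("cache size incorrect after write, exp %d, got %d", 2*valuesSize+6, n)
	}
}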