Update conversion tool to work with current versions

After adding type-switches to the tsm1 packages, the custom
implementation found in the conversion tool broke. This change uses
tsm1.NewValue() instead of a custom implementation.

This change also ensures that the tsm1.Value interface can only be
implemented internally, to allow for the optimized type-switch-based
encoding.
pull/6159/head
Joe LeGasse 2016-03-30 12:48:23 -04:00
parent acc8f3e211
commit f10c300765
5 changed files with 20 additions and 67 deletions

View File

@ -30,10 +30,9 @@ type Reader struct {
cursors []*cursor
currCursor int
keyBuf string
tsmValues []tsm1.Value
values []tsdb.Value
valuePos int
keyBuf string
values []tsm1.Value
valuePos int
fields map[string]*tsdb.MeasurementFields
codecs map[string]*tsdb.FieldCodec
@ -54,12 +53,7 @@ func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader {
chunkSize = DefaultChunkSize
}
// known-sized slice of a known type, in a contiguous chunk
r.values = make([]tsdb.Value, chunkSize)
r.tsmValues = make([]tsm1.Value, len(r.values))
for i := range r.values {
r.tsmValues[i] = &r.values[i]
}
r.values = make([]tsm1.Value, chunkSize)
return r
}
@ -173,8 +167,7 @@ OUTER:
}
}
r.values[r.valuePos].T = k
r.values[r.valuePos].Val = v
r.values[r.valuePos] = tsm1.NewValue(k, v)
r.valuePos++
if r.valuePos >= len(r.values) {
@ -188,7 +181,7 @@ OUTER:
// emitted completely for every field, in every series, before the next field is processed.
// Data from Read() adheres to the requirements for writing to tsm1 shards
func (r *Reader) Read() (string, []tsm1.Value, error) {
return r.keyBuf, r.tsmValues[:r.valuePos], nil
return r.keyBuf, r.values[:r.valuePos], nil
}
// Close closes the reader.

View File

@ -28,10 +28,9 @@ type Reader struct {
cursors []*cursor
currCursor int
keyBuf string
tsmValues []tsm1.Value
values []tsdb.Value
valuePos int
keyBuf string
values []tsm1.Value
valuePos int
fields map[string]*tsdb.MeasurementFields
codecs map[string]*tsdb.FieldCodec
@ -52,12 +51,7 @@ func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader {
chunkSize = DefaultChunkSize
}
// known-sized slice of a known type, in a contiguous chunk
r.values = make([]tsdb.Value, chunkSize)
r.tsmValues = make([]tsm1.Value, len(r.values))
for i := range r.values {
r.tsmValues[i] = &r.values[i]
}
r.values = make([]tsm1.Value, chunkSize)
return r
}
@ -185,8 +179,7 @@ OUTER:
}
}
r.values[r.valuePos].T = k
r.values[r.valuePos].Val = v
r.values[r.valuePos] = tsm1.NewValue(k, v)
r.valuePos++
if r.valuePos >= len(r.values) {
@ -200,7 +193,7 @@ OUTER:
// emitted completely for every field, in every series, before the next field is processed.
// Data from Read() adheres to the requirements for writing to tsm1 shards
func (r *Reader) Read() (string, []tsm1.Value, error) {
return r.keyBuf, r.tsmValues[:r.valuePos], nil
return r.keyBuf, r.values[:r.valuePos], nil
}
// Close closes the reader.

View File

@ -7,8 +7,6 @@ import (
"math"
)
const maxStringLength = 64 * 1024
const (
fieldFloat = 1
fieldInteger = 2

View File

@ -1,39 +0,0 @@
package tsdb
import (
"fmt"
"time"
)
type Value struct {
T int64
Val interface{}
}
func (v *Value) Time() time.Time {
return time.Unix(0, v.T)
}
func (v *Value) UnixNano() int64 {
return v.T
}
func (v *Value) Value() interface{} {
return v.Val
}
func (v *Value) String() string {
return fmt.Sprintf("%v %v", v.Time(), v.Val)
}
func (v *Value) Size() int {
switch vv := v.Val.(type) {
case int64, float64:
return 16
case bool:
return 9
case string:
return 8 + len(vv)
}
return 0
}

View File

@ -33,6 +33,8 @@ type Value interface {
Value() interface{}
Size() int
String() string
internalOnly()
}
func NewValue(t int64, value interface{}) Value {
@ -57,6 +59,12 @@ func (e *EmptyValue) Value() interface{} { return nil }
func (e *EmptyValue) Size() int { return 0 }
func (e *EmptyValue) String() string { return "" }
func (_ *EmptyValue) internalOnly() {}
func (_ *StringValue) internalOnly() {}
func (_ *IntegerValue) internalOnly() {}
func (_ *BooleanValue) internalOnly() {}
func (_ *FloatValue) internalOnly() {}
// Values represented a time ascending sorted collection of Value types.
// the underlying type should be the same across all values, but the interface
// makes the code cleaner.