package tsdb

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"sort"
	"time"

	"github.com/influxdb/influxdb/influxql"
	"github.com/influxdb/influxdb/models"
)

var (
	// ErrFormatNotFound is returned when no format can be determined from a path.
	ErrFormatNotFound = errors.New("format not found")
)
// Engine represents a swappable storage engine for the shard.
type Engine interface {
	Open() error
	Close() error

	SetLogOutput(io.Writer)
	LoadMetadataIndex(shard *Shard, index *DatabaseIndex, measurementFields map[string]*MeasurementFields) error

	Begin(writable bool) (Tx, error)
	WritePoints(points []models.Point, measurementFieldsToSave map[string]*MeasurementFields, seriesToCreate []*SeriesCreate) error
	DeleteSeries(keys []string) error
	DeleteMeasurement(name string, seriesKeys []string) error
	SeriesCount() (n int, err error)

	// PerformMaintenance is called periodically by the store.
	PerformMaintenance()

	// Format returns the format of the engine.
	Format() EngineFormat

	io.WriterTo

	Backup(w io.Writer, basePath string, since time.Time) error
}
// EngineFormat identifies the on-disk format used by an engine.
type EngineFormat int

const (
	// TSM1Format identifies the tsm1 storage engine format.
	TSM1Format EngineFormat = 2
)
// NewEngineFunc creates a new engine.
type NewEngineFunc func(path string, walPath string, options EngineOptions) Engine

// newEngineFuncs is a lookup of engine constructors by name.
var newEngineFuncs = make(map[string]NewEngineFunc)

// RegisterEngine registers a storage engine initializer by name.
func RegisterEngine(name string, fn NewEngineFunc) {
	if _, ok := newEngineFuncs[name]; ok {
		panic("engine already registered: " + name)
	}
	newEngineFuncs[name] = fn
}
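// As an illustrative sketch (not part of this file): an engine implementation
// would typically register its constructor from an init function in its own
// package. The constructor name below is hypothetical.
//
//	func init() {
//		tsdb.RegisterEngine("tsm1", func(path string, walPath string, opt tsdb.EngineOptions) tsdb.Engine {
//			return NewMyEngine(path, walPath, opt) // hypothetical constructor
//		})
//	}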
// RegisteredEngines returns the names of the currently registered engines, sorted.
func RegisteredEngines() []string {
	a := make([]string, 0, len(newEngineFuncs))
	for k := range newEngineFuncs {
		a = append(a, k)
	}
	sort.Strings(a)
	return a
}
// NewEngine returns an instance of an engine based on its format.
// If the path does not exist then a new engine of the format named by
// options.EngineVersion is created.
func NewEngine(path string, walPath string, options EngineOptions) (Engine, error) {
	// Create a new engine.
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return newEngineFuncs[options.EngineVersion](path, walPath, options), nil
	}

	// If it's a dir then it's a tsm1 engine.
	var format string
	if fi, err := os.Stat(path); err != nil {
		return nil, err
	} else if !fi.Mode().IsDir() {
		return nil, errors.New("unknown engine type")
	} else {
		format = "tsm1"
	}

	// Lookup engine by format.
	fn := newEngineFuncs[format]
	if fn == nil {
		return nil, fmt.Errorf("invalid engine format: %q", format)
	}

	return fn(path, walPath, options), nil
}
// EngineOptions represents the options used to initialize the engine.
type EngineOptions struct {
	EngineVersion          string
	MaxWALSize             int
	WALFlushInterval       time.Duration
	WALPartitionFlushDelay time.Duration

	Config Config
}
// NewEngineOptions returns the default options.
func NewEngineOptions() EngineOptions {
	return EngineOptions{
		EngineVersion:          DefaultEngine,
		MaxWALSize:             DefaultMaxWALSize,
		WALFlushInterval:       DefaultWALFlushInterval,
		WALPartitionFlushDelay: DefaultWALPartitionFlushDelay,
		Config:                 NewConfig(),
	}
}
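// Illustrative sketch of creating and opening an engine with the default
// options; the shard and WAL paths shown here are hypothetical.
//
//	opt := NewEngineOptions()
//	e, err := NewEngine("/var/lib/influxdb/data/db/rp/1", "/var/lib/influxdb/wal/db/rp/1", opt)
//	if err != nil {
//		// handle error
//	}
//	if err := e.Open(); err != nil {
//		// handle error
//	}
//	defer e.Close()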
// Tx represents a transaction.
type Tx interface {
	io.WriterTo

	Size() int64
	Commit() error
	Rollback() error

	Cursor(series string, fields []string, dec *FieldCodec, ascending bool) Cursor
}
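// Illustrative sketch, assuming an opened Engine: a read-only transaction is
// begun, a cursor is requested for one series, and the transaction is rolled
// back when the caller is done. The series key, field name, and codec below
// are hypothetical.
//
//	tx, err := engine.Begin(false)
//	if err != nil {
//		// handle error
//	}
//	defer tx.Rollback()
//	cur := tx.Cursor("cpu,host=server01", []string{"value"}, codec, true)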
// newTxIterator creates an iterator for a transaction based on the iterator options.
func newTxIterator(tx Tx, sh *Shard, opt influxql.IteratorOptions, dimensions map[string]struct{}) (influxql.Iterator, error) {
	// If there's no expression then it's just an auxiliary field selection.
	if opt.Expr == nil {
		return newTxVarRefIterator(tx, sh, opt, dimensions)
	}

	// If raw data is being read then read it directly.
	// Otherwise wrap it in a call iterator.
	switch expr := opt.Expr.(type) {
	case *influxql.VarRef:
		return newTxVarRefIterator(tx, sh, opt, dimensions)
	case *influxql.Call:
		refOpt := opt
		refOpt.Expr = expr.Args[0].(*influxql.VarRef)
		input, err := newTxVarRefIterator(tx, sh, refOpt, dimensions)
		if err != nil {
			return nil, err
		}
		return influxql.NewCallIterator(input, opt), nil
	default:
		panic(fmt.Sprintf("unsupported tx iterator expr: %T", expr))
	}
}
// newTxVarRefIterator creates a tx iterator for a variable reference.
func newTxVarRefIterator(tx Tx, sh *Shard, opt influxql.IteratorOptions, dimensions map[string]struct{}) (influxql.Iterator, error) {
	ref, _ := opt.Expr.(*influxql.VarRef)

	var itrs []influxql.Iterator
	if err := func() error {
		mms := Measurements(sh.index.MeasurementsByName(influxql.Sources(opt.Sources).Names()))
		for _, mm := range mms {
			// Determine tagsets for this measurement based on dimensions and filters.
			tagSets, err := mm.TagSets(opt.Dimensions, opt.Condition)
			if err != nil {
				return err
			}

			// Apply SLIMIT/SOFFSET to the tag sets.
			tagSets = influxql.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)

			for _, t := range tagSets {
				for i, seriesKey := range t.SeriesKeys {
					// Create field list for cursor.
					fields := make([]string, 0, len(opt.Aux)+1)
					if ref != nil {
						fields = append(fields, ref.Val)
					}
					fields = append(fields, opt.Aux...)

					// Create cursor.
					cur := tx.Cursor(seriesKey, fields, sh.FieldCodec(mm.Name), opt.Ascending)
					if cur == nil {
						continue
					}

					// Create options specific to this series.
					curOpt := opt
					curOpt.Condition = t.Filters[i]

					itr := NewFloatCursorIterator(mm.Name, sh.index.TagsForSeries(seriesKey), cur, curOpt)
					itrs = append(itrs, itr)
				}
			}
		}
		return nil
	}(); err != nil {
		influxql.Iterators(itrs).Close()
		return nil, err
	}

	// Merge iterators into one.
	itr := influxql.NewMergeIterator(itrs, opt)
	switch itr := itr.(type) {
	case influxql.FloatIterator:
		return &txFloatIterator{tx: tx, itr: itr}, nil
	default:
		panic(fmt.Sprintf("unsupported tx iterator input type: %T", itr))
	}
}
// txFloatIterator represents an iterator with an attached transaction.
// It is used to track the Tx so it can be closed.
type txFloatIterator struct {
	tx  Tx
	itr influxql.FloatIterator
}

// Close closes the underlying iterator and rolls back the transaction.
func (itr *txFloatIterator) Close() error {
	defer itr.tx.Rollback()
	return itr.itr.Close()
}

// Next returns the next point.
func (itr *txFloatIterator) Next() *influxql.FloatPoint { return itr.itr.Next() }
// DedupeEntries returns slices with unique keys (the first 8 bytes).
func DedupeEntries(a [][]byte) [][]byte {
	// Convert to a map, keeping the last slice seen for each key.
	m := make(map[string][]byte)
	for _, b := range a {
		m[string(b[0:8])] = b
	}

	// Convert map back to a slice of byte slices.
	other := make([][]byte, 0, len(m))
	for _, v := range m {
		other = append(other, v)
	}

	// Sort entries.
	sort.Sort(ByteSlices(other))

	return other
}
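// Illustrative sketch: each entry's first 8 bytes act as the key, the last
// entry with a given key wins, and the result comes back sorted.
//
//	a := [][]byte{
//		append([]byte("key00001"), 0x01),
//		append([]byte("key00001"), 0x02), // same 8-byte key; replaces the first entry
//		append([]byte("key00000"), 0x03),
//	}
//	out := DedupeEntries(a) // "key00000"+0x03, then "key00001"+0x02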
// ByteSlices implements sort.Interface for a slice of byte slices,
// ordering them lexicographically.
type ByteSlices [][]byte

func (a ByteSlices) Len() int           { return len(a) }
func (a ByteSlices) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }