package tsdb

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/gogo/protobuf/proto"
	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/pkg/bytesutil"
	"github.com/influxdata/influxdb/pkg/estimator"
	"github.com/influxdata/influxdb/pkg/file"
	"github.com/influxdata/influxdb/pkg/limiter"
	"github.com/influxdata/influxdb/query"
	internal "github.com/influxdata/influxdb/tsdb/internal"
	"github.com/influxdata/influxql"
	"go.uber.org/zap"
)

const (
	statWriteReq           = "writeReq"
	statWriteReqOK         = "writeReqOk"
	statWriteReqErr        = "writeReqErr"
	statSeriesCreate       = "seriesCreate"
	statFieldsCreate       = "fieldsCreate"
	statWritePointsErr     = "writePointsErr"
	statWritePointsDropped = "writePointsDropped"
	statWritePointsOK      = "writePointsOk"
	statWriteBytes         = "writeBytes"
	statDiskBytes          = "diskBytes"
)

var (
	// ErrFieldOverflow is returned when too many fields are created on a measurement.
	ErrFieldOverflow = errors.New("field overflow")

	// ErrFieldTypeConflict is returned when a new field already exists with a different type.
	ErrFieldTypeConflict = errors.New("field type conflict")

	// ErrFieldNotFound is returned when a field cannot be found.
	ErrFieldNotFound = errors.New("field not found")

	// ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID
	// there is no mapping for.
	ErrFieldUnmappedID = errors.New("field ID not mapped")

	// ErrEngineClosed is returned when a caller attempts to indirectly
	// access the shard's underlying engine.
	ErrEngineClosed = errors.New("engine is closed")

	// ErrShardDisabled is returned when the shard is not available for
	// queries or writes.
	ErrShardDisabled = errors.New("shard is disabled")

	// ErrUnknownFieldsFormat is returned when the fields index file is not identifiable by
	// the file's magic number.
	ErrUnknownFieldsFormat = errors.New("unknown field index format")

	// ErrUnknownFieldType is returned when the type of a field cannot be determined.
	ErrUnknownFieldType = errors.New("unknown field type")

	// ErrShardNotIdle is returned when an operation requiring the shard to be idle/cold is
	// attempted on a hot shard.
	ErrShardNotIdle = errors.New("shard not idle")

	// fieldsIndexMagicNumber is the file magic number for the fields index file.
	fieldsIndexMagicNumber = []byte{0, 6, 1, 3}
)

var (
	// Static objects to prevent small allocs.
	timeBytes = []byte("time")
)

// A ShardError implements the error interface, and contains extra
// context about the shard that generated the error.
type ShardError struct {
	id  uint64
	Err error
}

// NewShardError returns a new ShardError.
func NewShardError(id uint64, err error) error {
	if err == nil {
		return nil
	}
	return ShardError{id: id, Err: err}
}

// Error returns the string representation of the error, to satisfy the error interface.
func (e ShardError) Error() string {
	return fmt.Sprintf("[shard %d] %s", e.id, e.Err)
}

// PartialWriteError indicates a write request could only write a portion of the
// requested values.
type PartialWriteError struct {
	Reason  string
	Dropped int

	// A sorted slice of series keys that were dropped.
	DroppedKeys [][]byte
}

func (e PartialWriteError) Error() string {
	return fmt.Sprintf("partial write: %s dropped=%d", e.Reason, e.Dropped)
}

// Shard represents a self-contained time series database. An inverted index of
// the measurement and tag data is kept along with the raw time series data.
// Data can be split across many shards. The query engine in TSDB is responsible
// for combining the output of many shards into a single query result.
type Shard struct {
	path    string
	walPath string
	id      uint64

	database        string
	retentionPolicy string

	sfile   *SeriesFile
	options EngineOptions

	mu      sync.RWMutex
	_engine Engine
	index   Index
	enabled bool

	// expvar-based stats.
	stats       *ShardStatistics
	defaultTags models.StatisticTags

	baseLogger *zap.Logger
	logger     *zap.Logger

	EnableOnOpen bool

	// CompactionDisabled specifies the shard should not schedule compactions.
	// This option is intended for offline tooling.
	CompactionDisabled bool
}

// NewShard returns a new initialized Shard. walPath doesn't apply to the b1 type index.
func NewShard(id uint64, path string, walPath string, sfile *SeriesFile, opt EngineOptions) *Shard {
	db, rp := decodeStorePath(path)
	logger := zap.NewNop()
	if opt.FieldValidator == nil {
		opt.FieldValidator = defaultFieldValidator{}
	}

	s := &Shard{
		id:      id,
		path:    path,
		walPath: walPath,
		sfile:   sfile,
		options: opt,

		stats: &ShardStatistics{},
		defaultTags: models.StatisticTags{
			"path":            path,
			"walPath":         walPath,
			"id":              fmt.Sprintf("%d", id),
			"database":        db,
			"retentionPolicy": rp,
			"engine":          opt.EngineVersion,
		},

		database:        db,
		retentionPolicy: rp,

		logger:       logger,
		baseLogger:   logger,
		EnableOnOpen: true,
	}
	return s
}
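
// A minimal construction sketch (hedged; the paths are illustrative and the
// series file and engine options are assumed to be set up by the caller):
//
//	sfile := NewSeriesFile("/var/lib/influxdb/data/db0/_series")
//	opt := NewEngineOptions()
//	sh := NewShard(1, "/var/lib/influxdb/data/db0/autogen/1",
//		"/var/lib/influxdb/wal/db0/autogen/1", sfile, opt)
//	if err := sh.Open(); err != nil {
//		// handle error
//	}
//	defer sh.Close()
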
// WithLogger sets the logger on the shard. It must be called before Open.
func (s *Shard) WithLogger(log *zap.Logger) {
	s.baseLogger = log
	engine, err := s.Engine()
	if err == nil {
		engine.WithLogger(s.baseLogger)
		s.index.WithLogger(s.baseLogger)
	}
	s.logger = s.baseLogger.With(zap.String("service", "shard"))
}

// SetEnabled enables the shard for queries and writes. When disabled, all
// writes and queries return an error and compactions are stopped for the shard.
func (s *Shard) SetEnabled(enabled bool) {
	s.mu.Lock()
	// Prevent writes and queries
	s.enabled = enabled
	if s._engine != nil && !s.CompactionDisabled {
		// Disable background compactions and snapshotting
		s._engine.SetEnabled(enabled)
	}
	s.mu.Unlock()
}

// ScheduleFullCompaction forces a full compaction to be scheduled on the shard.
func (s *Shard) ScheduleFullCompaction() error {
	engine, err := s.Engine()
	if err != nil {
		return err
	}
	return engine.ScheduleFullCompaction()
}

// ID returns the shard's ID.
func (s *Shard) ID() uint64 {
	return s.id
}

// Database returns the database of the shard.
func (s *Shard) Database() string {
	return s.database
}

// RetentionPolicy returns the retention policy of the shard.
func (s *Shard) RetentionPolicy() string {
	return s.retentionPolicy
}

// ShardStatistics maintains statistics for a shard.
type ShardStatistics struct {
	WriteReq           int64
	WriteReqOK         int64
	WriteReqErr        int64
	FieldsCreated      int64
	WritePointsErr     int64
	WritePointsDropped int64
	WritePointsOK      int64
	BytesWritten       int64
	DiskBytes          int64
}

// Statistics returns statistics for periodic monitoring.
func (s *Shard) Statistics(tags map[string]string) []models.Statistic {
	engine, err := s.Engine()
	if err != nil {
		return nil
	}

	// Refresh our disk size stat
	if _, err := s.DiskSize(); err != nil {
		return nil
	}
	seriesN := engine.SeriesN()

	tags = s.defaultTags.Merge(tags)
	statistics := []models.Statistic{{
		Name: "shard",
		Tags: tags,
		Values: map[string]interface{}{
			statWriteReq:           atomic.LoadInt64(&s.stats.WriteReq),
			statWriteReqOK:         atomic.LoadInt64(&s.stats.WriteReqOK),
			statWriteReqErr:        atomic.LoadInt64(&s.stats.WriteReqErr),
			statSeriesCreate:       seriesN,
			statFieldsCreate:       atomic.LoadInt64(&s.stats.FieldsCreated),
			statWritePointsErr:     atomic.LoadInt64(&s.stats.WritePointsErr),
			statWritePointsDropped: atomic.LoadInt64(&s.stats.WritePointsDropped),
			statWritePointsOK:      atomic.LoadInt64(&s.stats.WritePointsOK),
			statWriteBytes:         atomic.LoadInt64(&s.stats.BytesWritten),
			statDiskBytes:          atomic.LoadInt64(&s.stats.DiskBytes),
		},
	}}

	// Add the index and engine statistics.
	statistics = append(statistics, engine.Statistics(tags)...)
	return statistics
}
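
// A sketch of consuming the statistics for monitoring (illustrative only;
// passing nil tags uses the shard's default tags):
//
//	for _, stat := range sh.Statistics(nil) {
//		fmt.Printf("%s %v: writeReq=%v\n", stat.Name, stat.Tags, stat.Values[statWriteReq])
//	}
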
// Path returns the path set on the shard when it was created.
func (s *Shard) Path() string { return s.path }

// Open initializes and opens the shard's store.
func (s *Shard) Open() error {
	if err := func() error {
		s.mu.Lock()
		defer s.mu.Unlock()

		// Return if the shard is already open
		if s._engine != nil {
			return nil
		}

		seriesIDSet := NewSeriesIDSet()

		// Initialize underlying index.
		ipath := filepath.Join(s.path, "index")
		idx, err := NewIndex(s.id, s.database, ipath, seriesIDSet, s.sfile, s.options)
		if err != nil {
			return err
		}

		idx.WithLogger(s.baseLogger)

		// Open index.
		if err := idx.Open(); err != nil {
			return err
		}
		s.index = idx

		// Initialize underlying engine.
		e, err := NewEngine(s.id, idx, s.path, s.walPath, s.sfile, s.options)
		if err != nil {
			return err
		}

		// Set log output on the engine.
		e.WithLogger(s.baseLogger)

		// Disable compactions while loading the index
		e.SetEnabled(false)

		// Open engine.
		if err := e.Open(); err != nil {
			return err
		}

		// Load metadata index for the inmem index only.
		if err := e.LoadMetadataIndex(s.id, s.index); err != nil {
			return err
		}
		s._engine = e

		return nil
	}(); err != nil {
		s.close()
		return NewShardError(s.id, err)
	}

	if s.EnableOnOpen {
		// enable writes, queries and compactions
		s.SetEnabled(true)
	}

	return nil
}
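
// Offline tooling can open a shard without enabling writes, queries, or
// background compactions (a sketch using the EnableOnOpen and
// CompactionDisabled fields defined above):
//
//	sh := NewShard(id, path, walPath, sfile, opt)
//	sh.EnableOnOpen = false
//	sh.CompactionDisabled = true
//	if err := sh.Open(); err != nil {
//		// handle error
//	}
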
// Close shuts down the shard's store.
func (s *Shard) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.close()
}

// close closes the shard and removes references to the shard from associated
// indexes.
func (s *Shard) close() error {
	if s._engine == nil {
		return nil
	}

	err := s._engine.Close()
	if err == nil {
		s._engine = nil
	}

	if e := s.index.Close(); e == nil {
		s.index = nil
	}
	return err
}

// IndexType returns the index version being used for this shard.
//
// IndexType returns the empty string if it is called before the shard is opened,
// since it is only at that point that the underlying index type is known.
func (s *Shard) IndexType() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s._engine == nil || s.index == nil { // Shard not open yet.
		return ""
	}
	return s.index.Type()
}

// ready determines if the Shard is ready for queries or writes.
// It returns nil if ready, otherwise ErrEngineClosed or ErrShardDisabled.
func (s *Shard) ready() error {
	var err error
	if s._engine == nil {
		err = ErrEngineClosed
	} else if !s.enabled {
		err = ErrShardDisabled
	}
	return err
}

// LastModified returns the time when this shard was last modified.
func (s *Shard) LastModified() time.Time {
	engine, err := s.Engine()
	if err != nil {
		return time.Time{}
	}
	return engine.LastModified()
}

// Index returns a reference to the underlying index. It returns an error if
// the shard is closed or disabled.
func (s *Shard) Index() (Index, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if err := s.ready(); err != nil {
		return nil, err
	}
	return s.index, nil
}

func (s *Shard) seriesFile() (*SeriesFile, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if err := s.ready(); err != nil {
		return nil, err
	}
	return s.sfile, nil
}

// IsIdle returns true if the shard is not receiving writes and is fully compacted.
func (s *Shard) IsIdle() bool {
	engine, err := s.Engine()
	if err != nil {
		return true
	}
	return engine.IsIdle()
}

func (s *Shard) Free() error {
	engine, err := s.Engine()
	if err != nil {
		return err
	}

	// Disable compactions to stop background goroutines
	s.SetCompactionsEnabled(false)

	return engine.Free()
}

// SetCompactionsEnabled enables or disables shard background compactions.
func (s *Shard) SetCompactionsEnabled(enabled bool) {
	engine, err := s.Engine()
	if err != nil {
		return
	}
	engine.SetCompactionsEnabled(enabled)
}

// DiskSize returns the size on disk of this shard.
func (s *Shard) DiskSize() (int64, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	// We don't use engineNoLock() because we still want to report the shard's
	// disk size even if the shard has been disabled.
	if s._engine == nil {
		return 0, ErrEngineClosed
	}
	size := s._engine.DiskSize()
	atomic.StoreInt64(&s.stats.DiskBytes, size)
	return size, nil
}

// FieldCreate holds information for a field to create on a measurement.
type FieldCreate struct {
	Measurement []byte
	Field       *Field
}

// WritePoints will write the raw data points and any new metadata to the index in the shard.
func (s *Shard) WritePoints(points []models.Point) error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	engine, err := s.engineNoLock()
	if err != nil {
		return err
	}

	var writeError error
	atomic.AddInt64(&s.stats.WriteReq, 1)

	points, fieldsToCreate, err := s.validateSeriesAndFields(points)
	if err != nil {
		if _, ok := err.(PartialWriteError); !ok {
			return err
		}
		// There was a partial write (points dropped); hold onto the error to
		// return to the caller, but continue writing the remaining points.
		writeError = err
	}
	atomic.AddInt64(&s.stats.FieldsCreated, int64(len(fieldsToCreate)))

	// add any new fields and keep track of what needs to be saved
	if err := s.createFieldsAndMeasurements(fieldsToCreate); err != nil {
		return err
	}

	// Write to the engine.
	if err := engine.WritePoints(points); err != nil {
		atomic.AddInt64(&s.stats.WritePointsErr, int64(len(points)))
		atomic.AddInt64(&s.stats.WriteReqErr, 1)
		return fmt.Errorf("engine: %s", err)
	}
	atomic.AddInt64(&s.stats.WritePointsOK, int64(len(points)))
	atomic.AddInt64(&s.stats.WriteReqOK, 1)

	return writeError
}
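
// Callers that want to tolerate partial writes can type-assert the returned
// error (a sketch; points is a []models.Point prepared by the caller):
//
//	if err := sh.WritePoints(points); err != nil {
//		if pErr, ok := err.(PartialWriteError); ok {
//			// Some points were dropped; the rest were written.
//			log.Printf("dropped %d points: %s", pErr.Dropped, pErr.Reason)
//		} else {
//			return err // total failure
//		}
//	}
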
// validateSeriesAndFields checks which series and fields are new and whose metadata should be saved and indexed.
func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point, []*FieldCreate, error) {
	var (
		fieldsToCreate []*FieldCreate
		err            error
		dropped        int
		reason         string // only first error reason is set unless returned from CreateSeriesListIfNotExists
	)

	// Create all series against the index in bulk.
	keys := make([][]byte, len(points))
	names := make([][]byte, len(points))
	tagsSlice := make([]models.Tags, len(points))

	var j int
	for i, p := range points {
		tags := p.Tags()

		// Drop any series with a "time" tag; these are illegal.
		if v := tags.Get(timeBytes); v != nil {
			dropped++
			if reason == "" {
				reason = fmt.Sprintf(
					"invalid tag key: input tag \"%s\" on measurement \"%s\" is invalid",
					"time", string(p.Name()))
			}
			continue
		}

		keys[j] = p.Key()
		names[j] = p.Name()
		tagsSlice[j] = tags
		points[j] = points[i]
		j++
	}
	points, keys, names, tagsSlice = points[:j], keys[:j], names[:j], tagsSlice[:j]

	engine, err := s.engineNoLock()
	if err != nil {
		return nil, nil, err
	}

	// Add new series. Check for partial writes.
	var droppedKeys [][]byte
	if err := engine.CreateSeriesListIfNotExists(keys, names, tagsSlice); err != nil {
		switch err := err.(type) {
		// TODO(jmw): why is this a *PartialWriteError when everything else is not a pointer?
		// Maybe we can just change it to be consistent if we change it also in all
		// the places that construct it.
		case *PartialWriteError:
			reason = err.Reason
			dropped += err.Dropped
			droppedKeys = err.DroppedKeys
			atomic.AddInt64(&s.stats.WritePointsDropped, int64(err.Dropped))
		default:
			return nil, nil, err
		}
	}

	// Create a MeasurementFields cache.
	mfCache := make(map[string]*MeasurementFields, 16)
	j = 0
	for i, p := range points {
		// Skip any points with only invalid fields.
		iter := p.FieldIterator()
		validField := false
		for iter.Next() {
			if bytes.Equal(iter.FieldKey(), timeBytes) {
				continue
			}
			validField = true
			break
		}
		if !validField {
			if reason == "" {
				reason = fmt.Sprintf(
					"invalid field name: input field \"%s\" on measurement \"%s\" is invalid",
					"time", string(p.Name()))
			}
			dropped++
			continue
		}

		// Skip any points whose keys have been dropped. Dropped has already been incremented for them.
		if len(droppedKeys) > 0 && bytesutil.Contains(droppedKeys, keys[i]) {
			continue
		}

		// Grab the MeasurementFields, checking the local cache to avoid lock contention.
		name := p.Name()
		mf := mfCache[string(name)]
		if mf == nil {
			mf = engine.MeasurementFields(name).Clone()
			mfCache[string(name)] = mf
		}

		// Check with the field validator.
		if err := s.options.FieldValidator.Validate(mf, p); err != nil {
			switch err := err.(type) {
			case PartialWriteError:
				if reason == "" {
					reason = err.Reason
				}
				dropped += err.Dropped
				atomic.AddInt64(&s.stats.WritePointsDropped, int64(err.Dropped))
			default:
				return nil, nil, err
			}
			continue
		}

		points[j] = points[i]
		j++

		// Create any fields that are missing.
		iter.Reset()
		for iter.Next() {
			fieldKey := iter.FieldKey()

			// Skip fields named "time". They are illegal.
			if bytes.Equal(fieldKey, timeBytes) {
				continue
			}

			if mf.FieldBytes(fieldKey) != nil {
				continue
			}

			dataType := dataTypeFromModelsFieldType(iter.Type())
			if dataType == influxql.Unknown {
				continue
			}

			fieldsToCreate = append(fieldsToCreate, &FieldCreate{
				Measurement: name,
				Field: &Field{
					Name: string(fieldKey),
					Type: dataType,
				},
			})
		}
	}

	if dropped > 0 {
		err = PartialWriteError{Reason: reason, Dropped: dropped}
	}

	return points[:j], fieldsToCreate, err
}

func (s *Shard) createFieldsAndMeasurements(fieldsToCreate []*FieldCreate) error {
	if len(fieldsToCreate) == 0 {
		return nil
	}

	engine, err := s.engineNoLock()
	if err != nil {
		return err
	}

	// add fields
	for _, f := range fieldsToCreate {
		mf := engine.MeasurementFields(f.Measurement)
		if err := mf.CreateFieldIfNotExists([]byte(f.Field.Name), f.Field.Type); err != nil {
			return err
		}

		s.index.SetFieldName(f.Measurement, f.Field.Name)
	}

	if len(fieldsToCreate) > 0 {
		return engine.MeasurementFieldSet().Save()
	}

	return nil
}

// DeleteSeriesRange deletes all values for series keys between min and max (inclusive).
func (s *Shard) DeleteSeriesRange(itr SeriesIterator, min, max int64) error {
	engine, err := s.Engine()
	if err != nil {
		return err
	}
	return engine.DeleteSeriesRange(itr, min, max)
}

// DeleteSeriesRangeWithPredicate deletes all values for series keys between min and max (inclusive)
// for which predicate() returns true. If predicate() is nil, then all values in range are deleted.
func (s *Shard) DeleteSeriesRangeWithPredicate(itr SeriesIterator, predicate func(name []byte, tags models.Tags) (int64, int64, bool)) error {
	engine, err := s.Engine()
	if err != nil {
		return err
	}
	return engine.DeleteSeriesRangeWithPredicate(itr, predicate)
}
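
// A predicate sketch that deletes only series from one measurement across
// all time (the measurement name "cpu" is illustrative):
//
//	err := sh.DeleteSeriesRangeWithPredicate(itr, func(name []byte, tags models.Tags) (int64, int64, bool) {
//		if string(name) != "cpu" {
//			return 0, 0, false // keep series from other measurements
//		}
//		return models.MinNanoTime, models.MaxNanoTime, true
//	})
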

// DeleteMeasurement deletes a measurement and all underlying series.
func (s *Shard) DeleteMeasurement(name []byte) error {
	engine, err := s.Engine()
	if err != nil {
		return err
	}
	return engine.DeleteMeasurement(name)
}

// SeriesN returns the unique number of series in the shard.
func (s *Shard) SeriesN() int64 {
	engine, err := s.Engine()
	if err != nil {
		return 0
	}
	return engine.SeriesN()
}

// SeriesSketches returns the series sketches for the shard.
func (s *Shard) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, nil, err
	}
	return engine.SeriesSketches()
}

// MeasurementsSketches returns the measurement sketches for the shard.
func (s *Shard) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, nil, err
	}
	return engine.MeasurementsSketches()
}

// MeasurementNamesByRegex returns names of measurements matching the regular expression.
func (s *Shard) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, err
	}
	return engine.MeasurementNamesByRegex(re)
}

// MeasurementTagKeysByExpr returns all the tag keys for the provided expression.
func (s *Shard) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, err
	}
	return engine.MeasurementTagKeysByExpr(name, expr)
}

// MeasurementTagKeyValuesByExpr returns all the tag key values for the
// provided expression.
func (s *Shard) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, key []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {
	index, err := s.Index()
	if err != nil {
		return nil, err
	}
	indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}
	return indexSet.MeasurementTagKeyValuesByExpr(auth, name, key, expr, keysSorted)
}

// MeasurementFields returns fields for a measurement.
// TODO(edd): This method is currently only being called from tests; do we
// really need it?
func (s *Shard) MeasurementFields(name []byte) *MeasurementFields {
	engine, err := s.Engine()
	if err != nil {
		return nil
	}
	return engine.MeasurementFields(name)
}

// MeasurementExists returns true if the shard contains name.
// TODO(edd): This method is currently only being called from tests; do we
// really need it?
func (s *Shard) MeasurementExists(name []byte) (bool, error) {
	engine, err := s.Engine()
	if err != nil {
		return false, err
	}
	return engine.MeasurementExists(name)
}

// WriteTo writes the shard's data to w.
func (s *Shard) WriteTo(w io.Writer) (int64, error) {
	engine, err := s.Engine()
	if err != nil {
		return 0, err
	}
	n, err := engine.WriteTo(w)
	atomic.AddInt64(&s.stats.BytesWritten, int64(n))
	return n, err
}

// CreateIterator returns an iterator for the data in the shard.
func (s *Shard) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, err
	}
	switch m.SystemIterator {
	case "_fieldKeys":
		return NewFieldKeysIterator(s, opt)
	case "_series":
		// TODO(benbjohnson): Move up to the Shards.CreateIterator().
		index, err := s.Index()
		if err != nil {
			return nil, err
		}
		indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}

		itr, err := NewSeriesPointIterator(indexSet, opt)
		if err != nil {
			return nil, err
		}

		return query.NewInterruptIterator(itr, opt.InterruptCh), nil
	case "_tagKeys":
		return NewTagKeysIterator(s, opt)
	}
	return engine.CreateIterator(ctx, m.Name, opt)
}
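
// A consumption sketch for a float-typed iterator (hedged; the measurement
// and iterator options are assumed to be prepared by the query engine):
//
//	itr, err := sh.CreateIterator(ctx, m, opt)
//	if err != nil {
//		return err
//	}
//	defer itr.Close()
//	if fitr, ok := itr.(query.FloatIterator); ok {
//		for p, err := fitr.Next(); err == nil && p != nil; p, err = fitr.Next() {
//			fmt.Println(p.Time, p.Value)
//		}
//	}
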

func (s *Shard) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (SeriesCursor, error) {
	index, err := s.Index()
	if err != nil {
		return nil, err
	}
	return newSeriesCursor(req, IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}, cond)
}

func (s *Shard) CreateCursorIterator(ctx context.Context) (CursorIterator, error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, err
	}
	return engine.CreateCursorIterator(ctx)
}

// FieldDimensions returns unique sets of fields and dimensions across a list of sources.
func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, nil, err
	}

	fields = make(map[string]influxql.DataType)
	dimensions = make(map[string]struct{})

	index, err := s.Index()
	if err != nil {
		return nil, nil, err
	}
	for _, name := range measurements {
		// Handle system sources.
		if strings.HasPrefix(name, "_") {
			var keys []string
			switch name {
			case "_fieldKeys":
				keys = []string{"fieldKey", "fieldType"}
			case "_series":
				keys = []string{"key"}
			case "_tagKeys":
				keys = []string{"tagKey"}
			}

			if len(keys) > 0 {
				for _, k := range keys {
					if fields[k].LessThan(influxql.String) {
						fields[k] = influxql.String
					}
				}
				continue
			}
			// Unknown system source so default to looking for a measurement.
		}

		// Retrieve measurement.
		if exists, err := engine.MeasurementExists([]byte(name)); err != nil {
			return nil, nil, err
		} else if !exists {
			continue
		}

		// Append fields and dimensions.
		mf := engine.MeasurementFields([]byte(name))
		if mf != nil {
			for k, typ := range mf.FieldSet() {
				if fields[k].LessThan(typ) {
					fields[k] = typ
				}
			}
		}

		indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}
		if err := indexSet.ForEachMeasurementTagKey([]byte(name), func(key []byte) error {
			dimensions[string(key)] = struct{}{}
			return nil
		}); err != nil {
			return nil, nil, err
		}
	}

	return fields, dimensions, nil
}

// mapType returns the data type for the field within the measurement.
func (s *Shard) mapType(measurement, field string) (influxql.DataType, error) {
	engine, err := s.engineNoLock()
	if err != nil {
		return 0, err
	}

	switch field {
	case "_name", "_tagKey", "_tagValue", "_seriesKey":
		return influxql.String, nil
	}

	// Process system measurements.
	switch measurement {
	case "_fieldKeys":
		if field == "fieldKey" || field == "fieldType" {
			return influxql.String, nil
		}
		return influxql.Unknown, nil
	case "_series":
		if field == "key" {
			return influxql.String, nil
		}
		return influxql.Unknown, nil
	case "_tagKeys":
		if field == "tagKey" {
			return influxql.String, nil
		}
		return influxql.Unknown, nil
	}
	// Unknown system source so default to looking for a measurement.

	if exists, _ := engine.MeasurementExists([]byte(measurement)); !exists {
		return influxql.Unknown, nil
	}

	mf := engine.MeasurementFields([]byte(measurement))
	if mf != nil {
		f := mf.Field(field)
		if f != nil {
			return f.Type, nil
		}
	}

	if exists, _ := engine.HasTagKey([]byte(measurement), []byte(field)); exists {
		return influxql.Tag, nil
	}

	return influxql.Unknown, nil
}

// expandSources expands regex sources and removes duplicates.
// NOTE: sources must be normalized (db and rp set) before calling this function.
func (s *Shard) expandSources(sources influxql.Sources) (influxql.Sources, error) {
	engine, err := s.engineNoLock()
	if err != nil {
		return nil, err
	}

	// Use a map as a set to prevent duplicates.
	set := map[string]influxql.Source{}

	// Iterate all sources, expanding regexes when they're found.
	for _, source := range sources {
		switch src := source.(type) {
		case *influxql.Measurement:
			// Add non-regex measurements directly to the set.
			if src.Regex == nil {
				set[src.String()] = src
				continue
			}

			// Loop over matching measurements.
			names, err := engine.MeasurementNamesByRegex(src.Regex.Val)
			if err != nil {
				return nil, err
			}

			for _, name := range names {
				other := &influxql.Measurement{
					Database:        src.Database,
					RetentionPolicy: src.RetentionPolicy,
					Name:            string(name),
				}
				set[other.String()] = other
			}

		default:
			return nil, fmt.Errorf("expandSources: unsupported source type: %T", source)
		}
	}

	// Convert set to sorted slice.
	names := make([]string, 0, len(set))
	for name := range set {
		names = append(names, name)
	}
	sort.Strings(names)

	// Convert set to a list of Sources.
	expanded := make(influxql.Sources, 0, len(set))
	for _, name := range names {
		expanded = append(expanded, set[name])
	}

	return expanded, nil
}

// Backup backs up the shard by creating a tar archive of all TSM files that
// have been modified since the provided time. See Engine.Backup for more details.
func (s *Shard) Backup(w io.Writer, basePath string, since time.Time) error {
	engine, err := s.Engine()
	if err != nil {
		return err
	}
	return engine.Backup(w, basePath, since)
}

func (s *Shard) Export(w io.Writer, basePath string, start time.Time, end time.Time) error {
	engine, err := s.Engine()
	if err != nil {
		return err
	}
	return engine.Export(w, basePath, start, end)
}

// Restore restores data to the underlying engine for the shard.
// The shard is reopened after restore.
func (s *Shard) Restore(r io.Reader, basePath string) error {
	if err := func() error {
		s.mu.Lock()
		defer s.mu.Unlock()

		// Special case - we can still restore to a disabled shard, so we should
		// only check if the engine is closed and not care if the shard is
		// disabled.
		if s._engine == nil {
			return ErrEngineClosed
		}

		// Restore to engine.
		return s._engine.Restore(r, basePath)
	}(); err != nil {
		return err
	}

	// Close shard.
	if err := s.Close(); err != nil {
		return err
	}

	// Reopen engine.
	return s.Open()
}
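
// A backup/restore round-trip sketch (illustrative; file handling, the
// basePath semantics, and most error paths are elided):
//
//	var buf bytes.Buffer
//	if err := sh.Backup(&buf, "", time.Time{}); err != nil {
//		return err
//	}
//	// ... later, on another open shard:
//	if err := other.Restore(&buf, other.Path()); err != nil {
//		return err
//	}
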

// Import imports data to the underlying engine for the shard. r should
// be a reader from a backup created by Backup.
func (s *Shard) Import(r io.Reader, basePath string) error {
	// Special case - we can still import to a disabled shard, so we should
	// only check if the engine is closed and not care if the shard is
	// disabled.
	s.mu.Lock()
	defer s.mu.Unlock()
	if s._engine == nil {
		return ErrEngineClosed
	}

	// Import to engine.
	return s._engine.Import(r, basePath)
}

// CreateSnapshot will return a path to a temp directory
// containing hard links to the underlying shard files.
func (s *Shard) CreateSnapshot() (string, error) {
	engine, err := s.Engine()
	if err != nil {
		return "", err
	}
	return engine.CreateSnapshot()
}

// ForEachMeasurementName iterates over each measurement in the shard.
func (s *Shard) ForEachMeasurementName(fn func(name []byte) error) error {
	engine, err := s.Engine()
	if err != nil {
		return err
	}
	return engine.ForEachMeasurementName(fn)
}

func (s *Shard) TagKeyCardinality(name, key []byte) int {
	engine, err := s.Engine()
	if err != nil {
		return 0
	}
	return engine.TagKeyCardinality(name, key)
}

// Digest returns a digest of the shard.
func (s *Shard) Digest() (io.ReadCloser, int64, error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, 0, err
	}

	// Make sure the shard is idle/cold. (No use creating a digest of a
	// hot shard that is rapidly changing.)
	if !engine.IsIdle() {
		return nil, 0, ErrShardNotIdle
	}

	return engine.Digest()
}
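
// A usage sketch: only idle/cold shards can be digested, so callers should
// be prepared to retry (illustrative error handling):
//
//	r, sz, err := sh.Digest()
//	if err == ErrShardNotIdle {
//		// Shard is still hot; try again once writes and compactions settle.
//		return nil
//	} else if err != nil {
//		return err
//	}
//	defer r.Close()
//	_ = sz // digest length in bytes
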

// Engine safely (under an RLock) returns a reference to the shard's Engine, or
// an error if the Engine is closed or the shard is currently disabled.
//
// The shard's Engine should always be accessed via a call to Engine(), rather
// than directly referencing Shard._engine.
//
// If a caller needs an Engine reference but is already under a lock, then they
// should use engineNoLock().
func (s *Shard) Engine() (Engine, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.engineNoLock()
}

// engineNoLock is similar to calling Engine(), but the caller must guarantee
// that they already hold an appropriate lock.
func (s *Shard) engineNoLock() (Engine, error) {
	if err := s.ready(); err != nil {
		return nil, err
	}
	return s._engine, nil
}

type ShardGroup interface {
	MeasurementsByRegex(re *regexp.Regexp) []string
	FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
	MapType(measurement, field string) influxql.DataType
	CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error)
	IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error)
	ExpandSources(sources influxql.Sources) (influxql.Sources, error)
}

// Shards represents a sortable list of shards.
type Shards []*Shard

// Len implements sort.Interface.
func (a Shards) Len() int { return len(a) }

// Less implements sort.Interface.
func (a Shards) Less(i, j int) bool { return a[i].id < a[j].id }

// Swap implements sort.Interface.
func (a Shards) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// MeasurementsByRegex returns the unique set of measurements matching the
// provided regex, for all the shards.
func (a Shards) MeasurementsByRegex(re *regexp.Regexp) []string {
	var m map[string]struct{}
	for _, sh := range a {
		names, err := sh.MeasurementNamesByRegex(re)
		if err != nil {
			continue // Skip this shard's results; previous behaviour.
		}

		if m == nil {
			m = make(map[string]struct{}, len(names))
		}

		for _, name := range names {
			m[string(name)] = struct{}{}
		}
	}

	if len(m) == 0 {
		return nil
	}

	names := make([]string, 0, len(m))
	for key := range m {
		names = append(names, key)
	}
	sort.Strings(names)
	return names
}
|
|
|
|
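
// exampleMeasurementsByRegex is an illustrative sketch, not part of the
// original source; the pattern is an assumption for the example.
func exampleMeasurementsByRegex(shards Shards) []string {
    re := regexp.MustCompile(`^cpu`)
    return shards.MeasurementsByRegex(re) // unique, sorted across all shards
}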

// FieldDimensions returns the unified fields and dimensions across all
// shards; when shards disagree on a field's type, the widest type wins.
func (a Shards) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
    fields = make(map[string]influxql.DataType)
    dimensions = make(map[string]struct{})

    for _, sh := range a {
        f, d, err := sh.FieldDimensions(measurements)
        if err != nil {
            return nil, nil, err
        }
        for k, typ := range f {
            if fields[k].LessThan(typ) {
                fields[k] = typ
            }
        }
        for k := range d {
            dimensions[k] = struct{}{}
        }
    }
    return
}

// MapType returns the widest data type observed for the field across all
// shards.
func (a Shards) MapType(measurement, field string) influxql.DataType {
    var typ influxql.DataType
    for _, sh := range a {
        sh.mu.RLock()
        if t, err := sh.mapType(measurement, field); err == nil && typ.LessThan(t) {
            typ = t
        }
        sh.mu.RUnlock()
    }
    return typ
}
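
// exampleMapType is an illustrative sketch, not part of the original source:
// shards that disagree on a field's type (e.g. integer vs. float) resolve to
// the higher-precedence influxql.DataType. The measurement and field names
// are assumptions for the example.
func exampleMapType(shards Shards) influxql.DataType {
    return shards.MapType("cpu", "value")
}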

// CallType returns the type that a call to the given function with the given
// argument types would return.
func (a Shards) CallType(name string, args []influxql.DataType) (influxql.DataType, error) {
    typmap := query.CallTypeMapper{}
    return typmap.CallType(name, args)
}

// CreateIterator returns a single merged iterator for the measurement across
// all shards in the group.
func (a Shards) CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
    switch measurement.SystemIterator {
    case "_series":
        return a.createSeriesIterator(ctx, opt)
    }

    itrs := make([]query.Iterator, 0, len(a))
    for _, sh := range a {
        itr, err := sh.CreateIterator(ctx, measurement, opt)
        if err != nil {
            query.Iterators(itrs).Close()
            return nil, err
        } else if itr == nil {
            continue
        }
        itrs = append(itrs, itr)

        select {
        case <-opt.InterruptCh:
            query.Iterators(itrs).Close()
            return nil, query.ErrQueryInterrupted
        default:
        }

        // Enforce series limit at creation time.
        if opt.MaxSeriesN > 0 {
            stats := itr.Stats()
            if stats.SeriesN > opt.MaxSeriesN {
                query.Iterators(itrs).Close()
                return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", stats.SeriesN, opt.MaxSeriesN)
            }
        }
    }
    return query.Iterators(itrs).Merge(opt)
}
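
// exampleCreateIterator is an illustrative sketch, not part of the original
// source: it builds a merged iterator across shards with a series cap and a
// cancellation channel. The option values are assumptions for the example.
func exampleCreateIterator(ctx context.Context, shards Shards, m *influxql.Measurement) (query.Iterator, error) {
    closing := make(chan struct{})
    opt := query.IteratorOptions{
        MaxSeriesN:  1000,    // abort creation if a shard iterator exceeds this
        InterruptCh: closing, // close this channel to cancel between shards
    }
    return shards.CreateIterator(ctx, m, opt)
}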

// createSeriesIterator returns an iterator over the series in the shards'
// indexes.
func (a Shards) createSeriesIterator(ctx context.Context, opt query.IteratorOptions) (_ query.Iterator, err error) {
    var (
        idxs  = make([]Index, 0, len(a))
        sfile *SeriesFile
    )
    for _, sh := range a {
        var idx Index
        if idx, err = sh.Index(); err == nil {
            idxs = append(idxs, idx)
        }
        if sfile == nil {
            sfile, _ = sh.seriesFile()
        }
    }

    if sfile == nil {
        return nil, nil
    }

    return NewSeriesPointIterator(IndexSet{Indexes: idxs, SeriesFile: sfile}, opt)
}

// IteratorCost returns the combined cost of creating an iterator for the
// measurement across all shards, estimated concurrently.
func (a Shards) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) {
    var costs query.IteratorCost
    var costerr error
    var mu sync.RWMutex

    setErr := func(err error) {
        mu.Lock()
        defer mu.Unlock()
        if costerr == nil {
            costerr = err
        }
    }

    limit := limiter.NewFixed(runtime.GOMAXPROCS(0))
    var wg sync.WaitGroup
    for _, sh := range a {
        // Stop launching work once an error has been recorded. Checking
        // before Take/Add ensures wg.Wait cannot block on work that is
        // never started.
        mu.RLock()
        if costerr != nil {
            mu.RUnlock()
            break
        }
        mu.RUnlock()

        limit.Take()
        wg.Add(1)

        go func(sh *Shard) {
            defer limit.Release()
            defer wg.Done()

            engine, err := sh.Engine()
            if err != nil {
                setErr(err)
                return
            }

            cost, err := engine.IteratorCost(measurement, opt)
            if err != nil {
                setErr(err)
                return
            }

            mu.Lock()
            costs = costs.Combine(cost)
            mu.Unlock()
        }(sh)
    }
    wg.Wait()
    return costs, costerr
}
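
// exampleIteratorCost is an illustrative sketch, not part of the original
// source: the aggregate cost can be inspected before running an expensive
// query. The measurement name is an assumption for the example.
func exampleIteratorCost(shards Shards, opt query.IteratorOptions) (query.IteratorCost, error) {
    return shards.IteratorCost("cpu", opt) // fans out per shard, bounded by GOMAXPROCS
}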

// CreateSeriesCursor returns a cursor over the series in the shards that
// match cond.
func (a Shards) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (_ SeriesCursor, err error) {
    var (
        idxs  []Index
        sfile *SeriesFile
    )
    for _, sh := range a {
        var idx Index
        if idx, err = sh.Index(); err == nil {
            idxs = append(idxs, idx)
        }
        if sfile == nil {
            sfile, _ = sh.seriesFile()
        }
    }

    if sfile == nil {
        return nil, errors.New("CreateSeriesCursor: no series file")
    }

    return newSeriesCursor(req, IndexSet{Indexes: idxs, SeriesFile: sfile}, cond)
}

// ExpandSources expands the given sources across all shards and returns a
// deduplicated, sorted list of sources.
func (a Shards) ExpandSources(sources influxql.Sources) (influxql.Sources, error) {
    // Use a map as a set to prevent duplicates.
    set := map[string]influxql.Source{}

    // Iterate through every shard and expand the sources.
    for _, sh := range a {
        sh.mu.RLock()
        expanded, err := sh.expandSources(sources)
        sh.mu.RUnlock()
        if err != nil {
            return nil, err
        }

        for _, src := range expanded {
            switch src := src.(type) {
            case *influxql.Measurement:
                set[src.String()] = src
            default:
                return nil, fmt.Errorf("Store.ExpandSources: unsupported source type: %T", src)
            }
        }
    }

    // Convert set to sorted slice.
    names := make([]string, 0, len(set))
    for name := range set {
        names = append(names, name)
    }
    sort.Strings(names)

    // Convert set to a list of Sources.
    sorted := make([]influxql.Source, 0, len(set))
    for _, name := range names {
        sorted = append(sorted, set[name])
    }
    return sorted, nil
}

// MeasurementFields holds the fields of a measurement and their codec.
type MeasurementFields struct {
    mu sync.RWMutex

    fields map[string]*Field
}

// NewMeasurementFields returns an initialised *MeasurementFields value.
func NewMeasurementFields() *MeasurementFields {
    return &MeasurementFields{fields: make(map[string]*Field)}
}

// FieldKeys returns a sorted list of the measurement's field names.
func (m *MeasurementFields) FieldKeys() []string {
    m.mu.RLock()
    defer m.mu.RUnlock()

    a := make([]string, 0, len(m.fields))
    for key := range m.fields {
        a = append(a, key)
    }
    sort.Strings(a)
    return a
}

// bytes estimates the memory footprint of this MeasurementFields, in bytes.
func (m *MeasurementFields) bytes() int {
    var b int
    m.mu.RLock()
    b += 24 // mu RWMutex is 24 bytes
    b += int(unsafe.Sizeof(m.fields))
    for k, v := range m.fields {
        b += int(unsafe.Sizeof(k)) + len(k)
        b += int(unsafe.Sizeof(v)+unsafe.Sizeof(*v)) + len(v.Name)
    }
    m.mu.RUnlock()
    return b
}

// CreateFieldIfNotExists creates a new field with an autoincrementing ID.
// Returns an error if 255 fields have already been created on the measurement
// or the field already exists with a different type.
func (m *MeasurementFields) CreateFieldIfNotExists(name []byte, typ influxql.DataType) error {
    m.mu.RLock()

    // Ignore if the field already exists.
    if f := m.fields[string(name)]; f != nil {
        if f.Type != typ {
            m.mu.RUnlock()
            return ErrFieldTypeConflict
        }
        m.mu.RUnlock()
        return nil
    }
    m.mu.RUnlock()

    m.mu.Lock()
    defer m.mu.Unlock()

    // Re-check field and type under write lock.
    if f := m.fields[string(name)]; f != nil {
        if f.Type != typ {
            return ErrFieldTypeConflict
        }
        return nil
    }

    // Create and append a new field.
    f := &Field{
        ID:   uint8(len(m.fields) + 1),
        Name: string(name),
        Type: typ,
    }
    m.fields[string(name)] = f

    return nil
}
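
// exampleCreateField is an illustrative sketch, not part of the original
// source: the field and measurement names are assumptions for the example.
func exampleCreateField(mf *MeasurementFields) error {
    // ErrFieldTypeConflict is returned when "value" already exists on the
    // measurement with a different type; nil means created or already present.
    return mf.CreateFieldIfNotExists([]byte("value"), influxql.Float)
}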

// FieldN returns the number of fields in the measurement.
func (m *MeasurementFields) FieldN() int {
    m.mu.RLock()
    n := len(m.fields)
    m.mu.RUnlock()
    return n
}

// Field returns the field for name, or nil if there is no field for name.
func (m *MeasurementFields) Field(name string) *Field {
    m.mu.RLock()
    f := m.fields[name]
    m.mu.RUnlock()
    return f
}

// HasField reports whether the measurement has a field with the given name.
// It is safe to call on a nil receiver.
func (m *MeasurementFields) HasField(name string) bool {
    if m == nil {
        return false
    }
    m.mu.RLock()
    f := m.fields[name]
    m.mu.RUnlock()
    return f != nil
}

// FieldBytes returns the field for name, or nil if there is no field for name.
// FieldBytes should be preferred to Field when the caller has a []byte, because
// it avoids a string allocation, which can't be avoided if the caller converts
// the []byte to a string and calls Field.
func (m *MeasurementFields) FieldBytes(name []byte) *Field {
    m.mu.RLock()
    f := m.fields[string(name)]
    m.mu.RUnlock()
    return f
}

// FieldSet returns the set of fields and their types for the measurement.
func (m *MeasurementFields) FieldSet() map[string]influxql.DataType {
    m.mu.RLock()
    defer m.mu.RUnlock()

    fields := make(map[string]influxql.DataType)
    for name, f := range m.fields {
        fields[name] = f.Type
    }
    return fields
}

// ForEachField applies fn to each field in the measurement, stopping early
// if fn returns false.
func (m *MeasurementFields) ForEachField(fn func(name string, typ influxql.DataType) bool) {
    m.mu.RLock()
    defer m.mu.RUnlock()
    for name, f := range m.fields {
        if !fn(name, f.Type) {
            return
        }
    }
}

// Clone returns a copy of the MeasurementFields.
func (m *MeasurementFields) Clone() *MeasurementFields {
    m.mu.RLock()
    defer m.mu.RUnlock()
    fields := make(map[string]*Field, len(m.fields))
    for key, field := range m.fields {
        fields[key] = field
    }
    return &MeasurementFields{
        fields: fields,
    }
}

// MeasurementFieldSet represents a collection of fields by measurement.
// It is safe for concurrent use.
type MeasurementFieldSet struct {
    mu     sync.RWMutex
    fields map[string]*MeasurementFields

    // path is the location to persist field sets
    path string
}

// NewMeasurementFieldSet returns a new instance of MeasurementFieldSet.
func NewMeasurementFieldSet(path string) (*MeasurementFieldSet, error) {
    fs := &MeasurementFieldSet{
        fields: make(map[string]*MeasurementFields),
        path:   path,
    }

    // If there is a load error, return the error and an empty set so
    // it can be rebuilt manually.
    return fs, fs.load()
}

// Bytes estimates the memory footprint of this MeasurementFieldSet, in bytes.
func (fs *MeasurementFieldSet) Bytes() int {
    var b int
    fs.mu.RLock()
    b += 24 // mu RWMutex is 24 bytes
    for k, v := range fs.fields {
        b += int(unsafe.Sizeof(k)) + len(k)
        b += int(unsafe.Sizeof(v)) + v.bytes()
    }
    b += int(unsafe.Sizeof(fs.fields))
    b += int(unsafe.Sizeof(fs.path)) + len(fs.path)
    fs.mu.RUnlock()
    return b
}

// Fields returns fields for a measurement by name.
func (fs *MeasurementFieldSet) Fields(name []byte) *MeasurementFields {
    fs.mu.RLock()
    mf := fs.fields[string(name)]
    fs.mu.RUnlock()
    return mf
}

// FieldsByString returns fields for a measurement by name.
func (fs *MeasurementFieldSet) FieldsByString(name string) *MeasurementFields {
    fs.mu.RLock()
    mf := fs.fields[name]
    fs.mu.RUnlock()
    return mf
}

// CreateFieldsIfNotExists returns the fields for a measurement by name,
// creating them if they do not already exist.
func (fs *MeasurementFieldSet) CreateFieldsIfNotExists(name []byte) *MeasurementFields {
    fs.mu.RLock()
    mf := fs.fields[string(name)]
    fs.mu.RUnlock()

    if mf != nil {
        return mf
    }

    fs.mu.Lock()
    mf = fs.fields[string(name)]
    if mf == nil {
        mf = NewMeasurementFields()
        fs.fields[string(name)] = mf
    }
    fs.mu.Unlock()
    return mf
}

// Delete removes a field set for a measurement.
func (fs *MeasurementFieldSet) Delete(name string) {
    fs.mu.Lock()
    delete(fs.fields, name)
    fs.mu.Unlock()
}

// DeleteWithLock executes fn and, if it succeeds, removes the field set for
// the measurement, all under the write lock.
func (fs *MeasurementFieldSet) DeleteWithLock(name string, fn func() error) error {
    fs.mu.Lock()
    defer fs.mu.Unlock()

    if err := fn(); err != nil {
        return err
    }

    delete(fs.fields, name)
    return nil
}

// IsEmpty returns true if the field set contains no measurements.
func (fs *MeasurementFieldSet) IsEmpty() bool {
    fs.mu.RLock()
    defer fs.mu.RUnlock()
    return len(fs.fields) == 0
}

// Save persists the field set to disk under the write lock.
func (fs *MeasurementFieldSet) Save() error {
    fs.mu.Lock()
    defer fs.mu.Unlock()

    return fs.saveNoLock()
}

func (fs *MeasurementFieldSet) saveNoLock() error {
    // No fields left; remove the fields index file.
    if len(fs.fields) == 0 {
        return os.RemoveAll(fs.path)
    }

    // Write the new index to a temp file and rename when it's sync'd.
    path := fs.path + ".tmp"
    fd, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_EXCL|os.O_SYNC, 0666)
    if err != nil {
        return err
    }
    defer os.RemoveAll(path)

    if _, err := fd.Write(fieldsIndexMagicNumber); err != nil {
        return err
    }

    pb := internal.MeasurementFieldSet{
        Measurements: make([]*internal.MeasurementFields, 0, len(fs.fields)),
    }
    for name, mf := range fs.fields {
        fs := &internal.MeasurementFields{
            Name:   name,
            Fields: make([]*internal.Field, 0, mf.FieldN()),
        }

        mf.ForEachField(func(field string, typ influxql.DataType) bool {
            fs.Fields = append(fs.Fields, &internal.Field{Name: field, Type: int32(typ)})
            return true
        })

        pb.Measurements = append(pb.Measurements, fs)
    }

    b, err := proto.Marshal(&pb)
    if err != nil {
        return err
    }

    if _, err := fd.Write(b); err != nil {
        return err
    }

    if err = fd.Sync(); err != nil {
        return err
    }

    // Close the file handle before renaming to support Windows.
    if err = fd.Close(); err != nil {
        return err
    }

    if err := file.RenameFile(path, fs.path); err != nil {
        return err
    }

    return file.SyncDir(filepath.Dir(fs.path))
}

func (fs *MeasurementFieldSet) load() error {
    fs.mu.Lock()
    defer fs.mu.Unlock()

    fd, err := os.Open(fs.path)
    if os.IsNotExist(err) {
        return nil
    } else if err != nil {
        return err
    }
    defer fd.Close()

    var magic [4]byte
    if _, err := fd.Read(magic[:]); err != nil {
        return err
    }

    if !bytes.Equal(magic[:], fieldsIndexMagicNumber) {
        return ErrUnknownFieldsFormat
    }

    var pb internal.MeasurementFieldSet
    b, err := ioutil.ReadAll(fd)
    if err != nil {
        return err
    }

    if err := proto.Unmarshal(b, &pb); err != nil {
        return err
    }

    fs.fields = make(map[string]*MeasurementFields, len(pb.GetMeasurements()))
    for _, measurement := range pb.GetMeasurements() {
        set := &MeasurementFields{
            fields: make(map[string]*Field, len(measurement.GetFields())),
        }
        for _, field := range measurement.GetFields() {
            set.fields[field.GetName()] = &Field{Name: field.GetName(), Type: influxql.DataType(field.GetType())}
        }
        fs.fields[measurement.GetName()] = set
    }
    return nil
}
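
// exampleFieldSetRoundTrip is an illustrative sketch, not part of the
// original source: it shows the persistence cycle of the fields index. The
// path, measurement, and field names are assumptions for the example.
func exampleFieldSetRoundTrip() error {
    fs, err := NewMeasurementFieldSet("/tmp/fields.idx") // load() runs here
    if err != nil {
        return err
    }
    mf := fs.CreateFieldsIfNotExists([]byte("cpu"))
    if err := mf.CreateFieldIfNotExists([]byte("value"), influxql.Float); err != nil {
        return err
    }
    // Save writes the magic header plus the protobuf payload to a temp file
    // and renames it into place.
    return fs.Save()
}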

// Field represents a series field.
type Field struct {
    ID   uint8             `json:"id,omitempty"`
    Name string            `json:"name,omitempty"`
    Type influxql.DataType `json:"type,omitempty"`
}

// NewFieldKeysIterator returns an iterator that can be iterated over to
// retrieve field keys.
func NewFieldKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) {
    itr := &fieldKeysIterator{shard: sh}

    index, err := sh.Index()
    if err != nil {
        return nil, err
    }

    // Retrieve measurements from shard. Filter if condition specified.
    //
    // FGA is currently not supported when retrieving field keys.
    indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
    names, err := indexSet.MeasurementNamesByExpr(query.OpenAuthorizer, opt.Condition)
    if err != nil {
        return nil, err
    }
    itr.names = names

    return itr, nil
}

// fieldKeysIterator iterates over measurements and gets field keys from each measurement.
type fieldKeysIterator struct {
    shard *Shard
    names [][]byte // remaining measurement names
    buf   struct {
        name   []byte  // current measurement name
        fields []Field // current measurement's fields
    }
}

// Stats returns stats about the points processed.
func (itr *fieldKeysIterator) Stats() query.IteratorStats { return query.IteratorStats{} }

// Close closes the iterator.
func (itr *fieldKeysIterator) Close() error { return nil }

// Next emits the next field key name.
func (itr *fieldKeysIterator) Next() (*query.FloatPoint, error) {
    for {
        // If there are no more keys then move to the next measurement.
        if len(itr.buf.fields) == 0 {
            if len(itr.names) == 0 {
                return nil, nil
            }

            itr.buf.name = itr.names[0]
            mf := itr.shard.MeasurementFields(itr.buf.name)
            if mf != nil {
                fset := mf.FieldSet()
                if len(fset) == 0 {
                    itr.names = itr.names[1:]
                    continue
                }

                keys := make([]string, 0, len(fset))
                for k := range fset {
                    keys = append(keys, k)
                }
                sort.Strings(keys)

                itr.buf.fields = make([]Field, len(keys))
                for i, name := range keys {
                    itr.buf.fields[i] = Field{Name: name, Type: fset[name]}
                }
            }
            itr.names = itr.names[1:]
            continue
        }

        // Return next key.
        field := itr.buf.fields[0]
        p := &query.FloatPoint{
            Name: string(itr.buf.name),
            Aux:  []interface{}{field.Name, field.Type.String()},
        }
        itr.buf.fields = itr.buf.fields[1:]

        return p, nil
    }
}

// NewTagKeysIterator returns an iterator that emits the tag keys for each
// measurement matching the iterator options.
func NewTagKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) {
    fn := func(name []byte) ([][]byte, error) {
        index, err := sh.Index()
        if err != nil {
            return nil, err
        }

        indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
        var keys [][]byte
        if err := indexSet.ForEachMeasurementTagKey(name, func(key []byte) error {
            keys = append(keys, key)
            return nil
        }); err != nil {
            return nil, err
        }
        return keys, nil
    }
    return newMeasurementKeysIterator(sh, fn, opt)
}

// measurementKeyFunc is the function called by measurementKeysIterator.
type measurementKeyFunc func(name []byte) ([][]byte, error)

// newMeasurementKeysIterator returns an iterator that emits, for each
// measurement matching opt.Condition, the keys produced by fn.
func newMeasurementKeysIterator(sh *Shard, fn measurementKeyFunc, opt query.IteratorOptions) (*measurementKeysIterator, error) {
    index, err := sh.Index()
    if err != nil {
        return nil, err
    }

    indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
    itr := &measurementKeysIterator{fn: fn}
    names, err := indexSet.MeasurementNamesByExpr(opt.Authorizer, opt.Condition)
    if err != nil {
        return nil, err
    }
    itr.names = names

    return itr, nil
}

// measurementKeysIterator iterates over measurements and gets keys from each measurement.
type measurementKeysIterator struct {
    names [][]byte // remaining measurement names
    buf   struct {
        name []byte   // current measurement name
        keys [][]byte // current measurement's keys
    }
    fn measurementKeyFunc
}

// Stats returns stats about the points processed.
func (itr *measurementKeysIterator) Stats() query.IteratorStats { return query.IteratorStats{} }

// Close closes the iterator.
func (itr *measurementKeysIterator) Close() error { return nil }

// Next emits the next key for the current measurement.
func (itr *measurementKeysIterator) Next() (*query.FloatPoint, error) {
    for {
        // If there are no more keys then move to the next measurement.
        if len(itr.buf.keys) == 0 {
            if len(itr.names) == 0 {
                return nil, nil
            }

            itr.buf.name, itr.names = itr.names[0], itr.names[1:]

            keys, err := itr.fn(itr.buf.name)
            if err != nil {
                return nil, err
            }
            itr.buf.keys = keys
            continue
        }

        // Return next key.
        p := &query.FloatPoint{
            Name: string(itr.buf.name),
            Aux:  []interface{}{string(itr.buf.keys[0])},
        }
        itr.buf.keys = itr.buf.keys[1:]

        return p, nil
    }
}

// LimitError represents an error caused by a configurable limit.
type LimitError struct {
    Reason string
}

func (e *LimitError) Error() string { return e.Reason }
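
// exampleLimitError is an illustrative sketch, not part of the original
// source: limit violations are surfaced as *LimitError so callers can tell
// them apart from I/O failures. The reason text is an assumption.
func exampleLimitError() error {
    return &LimitError{Reason: "max-values-per-tag limit exceeded"}
}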