2020-04-22 20:19:20 +00:00
|
|
|
package tsdb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"regexp"
|
|
|
|
"runtime"
|
|
|
|
"sort"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
|
|
|
"time"
|
|
|
|
"unsafe"
|
|
|
|
|
|
|
|
"github.com/gogo/protobuf/proto"
|
|
|
|
"github.com/influxdata/influxdb/v2/influxql/query"
|
2020-07-28 22:59:11 +00:00
|
|
|
"github.com/influxdata/influxdb/v2/models"
|
2020-04-22 20:19:20 +00:00
|
|
|
"github.com/influxdata/influxdb/v2/pkg/bytesutil"
|
|
|
|
"github.com/influxdata/influxdb/v2/pkg/estimator"
|
|
|
|
"github.com/influxdata/influxdb/v2/pkg/file"
|
|
|
|
"github.com/influxdata/influxdb/v2/pkg/limiter"
|
|
|
|
"github.com/influxdata/influxdb/v2/pkg/slices"
|
2020-08-26 17:46:47 +00:00
|
|
|
internal "github.com/influxdata/influxdb/v2/tsdb/internal"
|
2020-04-22 20:19:20 +00:00
|
|
|
"github.com/influxdata/influxql"
|
|
|
|
"go.uber.org/zap"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// Keys used in the shard-level expvar statistics map (see Statistics).
	statWriteReq           = "writeReq"
	statWriteReqOK         = "writeReqOk"
	statWriteReqErr        = "writeReqErr"
	statSeriesCreate       = "seriesCreate"
	statFieldsCreate       = "fieldsCreate"
	statWritePointsErr     = "writePointsErr"
	statWritePointsDropped = "writePointsDropped"
	statWritePointsOK      = "writePointsOk"
	statWriteBytes         = "writeBytes"
	statDiskBytes          = "diskBytes"

	// measurementKey is the reserved key ("_name") that refers to a series'
	// measurement name (see mapType, which maps it to influxql.String).
	measurementKey = "_name"
)
|
|
|
|
|
|
|
|
// Sentinel errors returned by shard and field-index operations.
var (
	// ErrFieldOverflow is returned when too many fields are created on a measurement.
	ErrFieldOverflow = errors.New("field overflow")

	// ErrFieldTypeConflict is returned when a new field already exists with a different type.
	ErrFieldTypeConflict = errors.New("field type conflict")

	// ErrFieldNotFound is returned when a field cannot be found.
	ErrFieldNotFound = errors.New("field not found")

	// ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID
	// there is no mapping for.
	ErrFieldUnmappedID = errors.New("field ID not mapped")

	// ErrEngineClosed is returned when a caller attempts indirectly to
	// access the shard's underlying engine.
	ErrEngineClosed = errors.New("engine is closed")

	// ErrShardDisabled is returned when a the shard is not available for
	// queries or writes.
	ErrShardDisabled = errors.New("shard is disabled")

	// ErrUnknownFieldsFormat is returned when the fields index file is not identifiable by
	// the file's magic number.
	ErrUnknownFieldsFormat = errors.New("unknown field index format")

	// ErrUnknownFieldType is returned when the type of a field cannot be determined.
	ErrUnknownFieldType = errors.New("unknown field type")

	// ErrShardNotIdle is returned when an operation requiring the shard to be idle/cold is
	// attempted on a hot shard.
	ErrShardNotIdle = errors.New("shard not idle")

	// fieldsIndexMagicNumber is the file magic number for the fields index file.
	fieldsIndexMagicNumber = []byte{0, 6, 1, 3}
)
|
|
|
|
|
|
|
|
var (
	// Static objects to prevent small allocs.
	// timeBytes is the reserved "time" identifier, kept as a []byte so hot
	// paths can compare tag and field keys without allocating.
	timeBytes = []byte("time")
)
|
|
|
|
|
|
|
|
// A ShardError implements the error interface, and contains extra
// context about the shard that generated the error.
type ShardError struct {
	id  uint64 // ID of the shard that produced the error
	Err error  // the underlying cause
}
|
|
|
|
|
|
|
|
// NewShardError returns a new ShardError.
|
|
|
|
func NewShardError(id uint64, err error) error {
|
|
|
|
if err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return ShardError{id: id, Err: err}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Error returns the string representation of the error, to satisfy the error interface.
|
|
|
|
func (e ShardError) Error() string {
|
|
|
|
return fmt.Sprintf("[shard %d] %s", e.id, e.Err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// PartialWriteError indicates a write request could only write a portion of the
// requested values.
type PartialWriteError struct {
	Reason  string // human-readable explanation for the first dropped point
	Dropped int    // total number of points dropped from the request

	// A sorted slice of series keys that were dropped.
	DroppedKeys [][]byte
}
|
|
|
|
|
|
|
|
func (e PartialWriteError) Error() string {
|
|
|
|
return fmt.Sprintf("partial write: %s dropped=%d", e.Reason, e.Dropped)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Shard represents a self-contained time series database. An inverted index of
// the measurement and tag data is kept along with the raw time series data.
// Data can be split across many shards. The query engine in TSDB is responsible
// for combining the output of many shards into a single query result.
type Shard struct {
	path    string // on-disk location of the shard's data
	walPath string // on-disk location of the shard's WAL
	id      uint64 // unique shard identifier

	database        string // database name decoded from path
	retentionPolicy string // retention policy name decoded from path

	sfile   *SeriesFile
	options EngineOptions

	mu      sync.RWMutex
	_engine Engine // guarded by mu; nil until Open succeeds
	index   Index  // guarded by mu; nil until Open succeeds
	enabled bool   // guarded by mu; gates writes/queries (see ready)

	// expvar-based stats.
	stats       *ShardStatistics
	defaultTags models.StatisticTags

	baseLogger *zap.Logger
	logger     *zap.Logger

	// EnableOnOpen enables the shard (writes, queries, compactions)
	// automatically at the end of a successful Open.
	EnableOnOpen bool

	// CompactionDisabled specifies the shard should not schedule compactions.
	// This option is intended for offline tooling.
	CompactionDisabled bool
}
|
|
|
|
|
|
|
|
// NewShard returns a new initialized Shard. walPath doesn't apply to the b1 type index.
// The returned shard is not yet open; callers must call Open before use.
func NewShard(id uint64, path string, walPath string, sfile *SeriesFile, opt EngineOptions) *Shard {
	// Derive database and retention-policy names from the shard path.
	db, rp := decodeStorePath(path)
	logger := zap.NewNop()

	s := &Shard{
		id:      id,
		path:    path,
		walPath: walPath,
		sfile:   sfile,
		options: opt,

		// Tags attached to every statistic this shard reports.
		stats: &ShardStatistics{},
		defaultTags: models.StatisticTags{
			"path":            path,
			"walPath":         walPath,
			"id":              fmt.Sprintf("%d", id),
			"database":        db,
			"retentionPolicy": rp,
			"engine":          opt.EngineVersion,
		},

		database:        db,
		retentionPolicy: rp,

		logger:       logger,
		baseLogger:   logger,
		EnableOnOpen: true,
	}
	return s
}
|
|
|
|
|
|
|
|
// WithLogger sets the logger on the shard. It must be called before Open.
func (s *Shard) WithLogger(log *zap.Logger) {
	s.baseLogger = log
	// If the shard is already open, propagate the logger to the engine and
	// index as well; otherwise they pick it up during Open.
	engine, err := s.Engine()
	if err == nil {
		engine.WithLogger(s.baseLogger)
		s.index.WithLogger(s.baseLogger)
	}
	s.logger = s.baseLogger.With(zap.String("service", "shard"))
}
|
|
|
|
|
|
|
|
// SetEnabled enables the shard for queries and write. When disabled, all
|
|
|
|
// writes and queries return an error and compactions are stopped for the shard.
|
|
|
|
func (s *Shard) SetEnabled(enabled bool) {
|
|
|
|
s.mu.Lock()
|
|
|
|
// Prevent writes and queries
|
|
|
|
s.enabled = enabled
|
|
|
|
if s._engine != nil && !s.CompactionDisabled {
|
|
|
|
// Disable background compactions and snapshotting
|
|
|
|
s._engine.SetEnabled(enabled)
|
|
|
|
}
|
|
|
|
s.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
|
|
|
// ScheduleFullCompaction forces a full compaction to be schedule on the shard.
|
|
|
|
func (s *Shard) ScheduleFullCompaction() error {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return engine.ScheduleFullCompaction()
|
|
|
|
}
|
|
|
|
|
|
|
|
// ID returns the shards ID.
|
|
|
|
func (s *Shard) ID() uint64 {
|
|
|
|
return s.id
|
|
|
|
}
|
|
|
|
|
|
|
|
// Database returns the database of the shard.
|
|
|
|
func (s *Shard) Database() string {
|
|
|
|
return s.database
|
|
|
|
}
|
|
|
|
|
|
|
|
// RetentionPolicy returns the retention policy of the shard.
|
|
|
|
func (s *Shard) RetentionPolicy() string {
|
|
|
|
return s.retentionPolicy
|
|
|
|
}
|
|
|
|
|
|
|
|
// ShardStatistics maintains statistics for a shard.
// All fields are updated with sync/atomic operations (see Statistics,
// WritePoints, DiskSize), so they must only be read/written atomically.
type ShardStatistics struct {
	WriteReq           int64 // total write requests
	WriteReqOK         int64 // write requests that fully succeeded
	WriteReqErr        int64 // write requests that failed at the engine
	FieldsCreated      int64 // new fields created by writes
	WritePointsErr     int64 // points rejected by the engine
	WritePointsDropped int64 // points dropped during validation
	WritePointsOK      int64 // points successfully written
	BytesWritten       int64 // bytes streamed out via WriteTo
	DiskBytes          int64 // last observed on-disk size
}
|
|
|
|
|
|
|
|
// Statistics returns statistics for periodic monitoring.
// It returns nil (rather than an error) when the shard is closed or
// disabled, so a disabled shard simply reports nothing.
func (s *Shard) Statistics(tags map[string]string) []models.Statistic {
	engine, err := s.Engine()
	if err != nil {
		return nil
	}

	// Refresh our disk size stat; the result itself is read atomically below.
	if _, err := s.DiskSize(); err != nil {
		return nil
	}
	seriesN := engine.SeriesN()

	tags = s.defaultTags.Merge(tags)

	// Set the index type on the tags.  N.B this needs to be checked since it's
	// only set when the shard is opened.
	if indexType := s.IndexType(); indexType != "" {
		tags["indexType"] = indexType
	}

	statistics := []models.Statistic{{
		Name: "shard",
		Tags: tags,
		Values: map[string]interface{}{
			statWriteReq:           atomic.LoadInt64(&s.stats.WriteReq),
			statWriteReqOK:         atomic.LoadInt64(&s.stats.WriteReqOK),
			statWriteReqErr:        atomic.LoadInt64(&s.stats.WriteReqErr),
			statSeriesCreate:       seriesN,
			statFieldsCreate:       atomic.LoadInt64(&s.stats.FieldsCreated),
			statWritePointsErr:     atomic.LoadInt64(&s.stats.WritePointsErr),
			statWritePointsDropped: atomic.LoadInt64(&s.stats.WritePointsDropped),
			statWritePointsOK:      atomic.LoadInt64(&s.stats.WritePointsOK),
			statWriteBytes:         atomic.LoadInt64(&s.stats.BytesWritten),
			statDiskBytes:          atomic.LoadInt64(&s.stats.DiskBytes),
		},
	}}

	// Add the index and engine statistics.
	statistics = append(statistics, engine.Statistics(tags)...)
	return statistics
}
|
|
|
|
|
|
|
|
// Path returns the path set on the shard when it was created.
|
|
|
|
func (s *Shard) Path() string { return s.path }
|
|
|
|
|
|
|
|
// Open initializes and opens the shard's store.
// Opening is idempotent: if the shard is already open this is a no-op.
// On any failure the shard is closed again and a ShardError wrapping the
// cause is returned. When EnableOnOpen is set, the shard is enabled for
// writes, queries and compactions after a successful open.
func (s *Shard) Open(ctx context.Context) error {
	if err := func() error {
		s.mu.Lock()
		defer s.mu.Unlock()

		// Return if the shard is already open
		if s._engine != nil {
			return nil
		}

		seriesIDSet := NewSeriesIDSet()

		// Initialize underlying index.
		ipath := filepath.Join(s.path, "index")
		idx, err := NewIndex(s.id, s.database, ipath, seriesIDSet, s.sfile, s.options)
		if err != nil {
			return err
		}

		idx.WithLogger(s.baseLogger)

		// Check if the index needs to be rebuilt before Open() initializes
		// its file system layout. A missing index directory means the index
		// has to be rebuilt from the engine data after the engine opens.
		var shouldReindex bool
		if _, err := os.Stat(ipath); os.IsNotExist(err) {
			shouldReindex = true
		}

		// Open index.
		if err := idx.Open(); err != nil {
			return err
		}
		s.index = idx

		// Initialize underlying engine.
		e, err := NewEngine(s.id, idx, s.path, s.walPath, s.sfile, s.options)
		if err != nil {
			return err
		}

		// Set log output on the engine.
		e.WithLogger(s.baseLogger)

		// Disable compactions while loading the index
		e.SetEnabled(false)

		// Open engine.
		if err := e.Open(ctx); err != nil {
			return err
		}

		// Rebuild the index from engine data if it was missing above.
		if shouldReindex {
			if err := e.Reindex(); err != nil {
				return err
			}
		}

		if err := e.LoadMetadataIndex(s.id, s.index); err != nil {
			return err
		}
		s._engine = e

		return nil
	}(); err != nil {
		// Roll back any partially-opened state before reporting the error.
		s.close()
		return NewShardError(s.id, err)
	}

	if s.EnableOnOpen {
		// enable writes, queries and compactions
		s.SetEnabled(true)
	}

	return nil
}
|
|
|
|
|
|
|
|
// Close shuts down the shard's store.
// It is safe to call on a shard that was never opened.
func (s *Shard) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.close()
}
|
|
|
|
|
|
|
|
// close closes the shard's engine and index, clearing the references on
// success so the shard reads as unopened. Callers must hold s.mu.
// Only the engine's close error is returned; an index close failure is
// noted implicitly by leaving s.index non-nil.
func (s *Shard) close() error {
	if s._engine == nil {
		return nil
	}

	err := s._engine.Close()
	if err == nil {
		s._engine = nil
	}

	// NOTE(review): an error from index.Close() is swallowed here and only
	// the engine error is reported — presumably intentional; confirm.
	if e := s.index.Close(); e == nil {
		s.index = nil
	}
	return err
}
|
|
|
|
|
|
|
|
// IndexType returns the index version being used for this shard.
|
|
|
|
//
|
|
|
|
// IndexType returns the empty string if it is called before the shard is opened,
|
|
|
|
// since it is only that point that the underlying index type is known.
|
|
|
|
func (s *Shard) IndexType() string {
|
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
|
|
|
if s._engine == nil || s.index == nil { // Shard not open yet.
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
return s.index.Type()
|
|
|
|
}
|
|
|
|
|
|
|
|
// ready determines if the Shard is ready for queries or writes.
|
|
|
|
// It returns nil if ready, otherwise ErrShardClosed or ErrShardDisabled
|
|
|
|
func (s *Shard) ready() error {
|
|
|
|
var err error
|
|
|
|
if s._engine == nil {
|
|
|
|
err = ErrEngineClosed
|
|
|
|
} else if !s.enabled {
|
|
|
|
err = ErrShardDisabled
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// LastModified returns the time when this shard was last modified.
|
|
|
|
func (s *Shard) LastModified() time.Time {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return time.Time{}
|
|
|
|
}
|
|
|
|
return engine.LastModified()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Index returns a reference to the underlying index. It returns an error if
|
|
|
|
// the index is nil.
|
|
|
|
func (s *Shard) Index() (Index, error) {
|
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
|
|
|
if err := s.ready(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return s.index, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// SeriesFile returns a reference the underlying series file. If return an error
|
|
|
|
// if the series file is nil.
|
|
|
|
func (s *Shard) SeriesFile() (*SeriesFile, error) {
|
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
|
|
|
if err := s.ready(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return s.sfile, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsIdle return true if the shard is not receiving writes and is fully compacted.
|
2021-07-20 18:57:52 +00:00
|
|
|
func (s *Shard) IsIdle() (state bool, reason string) {
|
2020-04-22 20:19:20 +00:00
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
2021-07-20 18:57:52 +00:00
|
|
|
return true, ""
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
return engine.IsIdle()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Shard) Free() error {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Disable compactions to stop background goroutines
|
|
|
|
s.SetCompactionsEnabled(false)
|
|
|
|
|
|
|
|
return engine.Free()
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetCompactionsEnabled enables or disable shard background compactions.
|
|
|
|
func (s *Shard) SetCompactionsEnabled(enabled bool) {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
engine.SetCompactionsEnabled(enabled)
|
|
|
|
}
|
|
|
|
|
|
|
|
// DiskSize returns the size on disk of this shard.
|
|
|
|
func (s *Shard) DiskSize() (int64, error) {
|
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2020-11-11 18:54:21 +00:00
|
|
|
// We don't use engine() because we still want to report the shard's disk
|
2020-04-22 20:19:20 +00:00
|
|
|
// size even if the shard has been disabled.
|
|
|
|
if s._engine == nil {
|
|
|
|
return 0, ErrEngineClosed
|
|
|
|
}
|
|
|
|
size := s._engine.DiskSize()
|
|
|
|
atomic.StoreInt64(&s.stats.DiskBytes, size)
|
|
|
|
return size, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FieldCreate holds information for a field to create on a measurement.
type FieldCreate struct {
	Measurement []byte // measurement the field belongs to
	Field       *Field // name and type of the field to create
}
|
|
|
|
|
|
|
|
// WritePoints will write the raw data points and any new metadata to the index in the shard.
// Points that fail validation are dropped; in that case the remaining points
// are still written and a PartialWriteError describing the drops is returned.
func (s *Shard) WritePoints(ctx context.Context, points []models.Point) error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	engine, err := s.engineNoLock()
	if err != nil {
		return err
	}

	var writeError error
	atomic.AddInt64(&s.stats.WriteReq, 1)

	// Validate series/fields; this may shrink the points slice.
	points, fieldsToCreate, err := s.validateSeriesAndFields(points)
	if err != nil {
		if _, ok := err.(PartialWriteError); !ok {
			return err
		}
		// There was a partial write (points dropped), hold onto the error to return
		// to the caller, but continue on writing the remaining points.
		writeError = err
	}
	atomic.AddInt64(&s.stats.FieldsCreated, int64(len(fieldsToCreate)))

	// add any new fields and keep track of what needs to be saved
	if err := s.createFieldsAndMeasurements(fieldsToCreate); err != nil {
		return err
	}

	// Write to the engine.
	if err := engine.WritePoints(ctx, points); err != nil {
		atomic.AddInt64(&s.stats.WritePointsErr, int64(len(points)))
		atomic.AddInt64(&s.stats.WriteReqErr, 1)
		return fmt.Errorf("engine: %s", err)
	}
	atomic.AddInt64(&s.stats.WritePointsOK, int64(len(points)))
	atomic.AddInt64(&s.stats.WriteReqOK, 1)

	return writeError
}
|
|
|
|
|
|
|
|
// validateSeriesAndFields checks which series and fields are new and whose metadata should be saved and indexed.
// It returns the surviving points (the input slice compacted in place), the
// list of fields that must be created, and a PartialWriteError when any
// points were dropped.
func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point, []*FieldCreate, error) {
	var (
		fieldsToCreate []*FieldCreate
		err            error
		dropped        int
		reason         string // only first error reason is set unless returned from CreateSeriesListIfNotExists
	)

	// Create all series against the index in bulk.
	keys := make([][]byte, len(points))
	names := make([][]byte, len(points))
	tagsSlice := make([]models.Tags, len(points))

	// Check if keys should be unicode validated.
	validateKeys := s.options.Config.ValidateKeys

	// First pass: drop illegal points, compacting the parallel slices
	// (and `points` itself) in place via index j.
	var j int
	for i, p := range points {
		tags := p.Tags()

		// Drop any series w/ a "time" tag, these are illegal
		if v := tags.Get(timeBytes); v != nil {
			dropped++
			if reason == "" {
				reason = fmt.Sprintf(
					"invalid tag key: input tag \"%s\" on measurement \"%s\" is invalid",
					"time", string(p.Name()))
			}
			continue
		}

		// Drop any series with invalid unicode characters in the key.
		if validateKeys && !models.ValidKeyTokens(string(p.Name()), tags) {
			dropped++
			if reason == "" {
				reason = fmt.Sprintf("key contains invalid unicode: \"%s\"", string(p.Key()))
			}
			continue
		}

		keys[j] = p.Key()
		names[j] = p.Name()
		tagsSlice[j] = tags
		points[j] = points[i]
		j++
	}
	points, keys, names, tagsSlice = points[:j], keys[:j], names[:j], tagsSlice[:j]

	engine, err := s.engineNoLock()
	if err != nil {
		return nil, nil, err
	}

	// Add new series. Check for partial writes.
	var droppedKeys [][]byte
	if err := engine.CreateSeriesListIfNotExists(keys, names, tagsSlice); err != nil {
		switch err := err.(type) {
		// TODO(jmw): why is this a *PartialWriteError when everything else is not a pointer?
		// Maybe we can just change it to be consistent if we change it also in all
		// the places that construct it.
		case *PartialWriteError:
			reason = err.Reason
			dropped += err.Dropped
			droppedKeys = err.DroppedKeys
			atomic.AddInt64(&s.stats.WritePointsDropped, int64(err.Dropped))
		default:
			return nil, nil, err
		}
	}

	// Second pass: validate fields, again compacting `points` in place.
	j = 0
	for i, p := range points {
		// Skip any points with only invalid fields.
		iter := p.FieldIterator()
		validField := false
		for iter.Next() {
			if bytes.Equal(iter.FieldKey(), timeBytes) {
				continue
			}
			validField = true
			break
		}
		if !validField {
			if reason == "" {
				reason = fmt.Sprintf(
					"invalid field name: input field \"%s\" on measurement \"%s\" is invalid",
					"time", string(p.Name()))
			}
			dropped++
			continue
		}

		// Skip any points whos keys have been dropped. Dropped has already been incremented for them.
		if len(droppedKeys) > 0 && bytesutil.Contains(droppedKeys, keys[i]) {
			continue
		}

		name := p.Name()
		mf := engine.MeasurementFields(name)

		// Check with the field validator.
		if err := ValidateFields(mf, p, s.options.Config.SkipFieldSizeValidation); err != nil {
			switch err := err.(type) {
			case PartialWriteError:
				if reason == "" {
					reason = err.Reason
				}
				dropped += err.Dropped
				atomic.AddInt64(&s.stats.WritePointsDropped, int64(err.Dropped))
			default:
				return nil, nil, err
			}
			continue
		}

		points[j] = points[i]
		j++

		// Create any fields that are missing.
		iter.Reset()
		for iter.Next() {
			fieldKey := iter.FieldKey()

			// Skip fields named "time". They are illegal.
			if bytes.Equal(fieldKey, timeBytes) {
				continue
			}

			if mf.FieldBytes(fieldKey) != nil {
				continue
			}

			dataType := dataTypeFromModelsFieldType(iter.Type())
			if dataType == influxql.Unknown {
				continue
			}

			fieldsToCreate = append(fieldsToCreate, &FieldCreate{
				Measurement: name,
				Field: &Field{
					Name: string(fieldKey),
					Type: dataType,
				},
			})
		}
	}

	if dropped > 0 {
		err = PartialWriteError{Reason: reason, Dropped: dropped}
	}

	return points[:j], fieldsToCreate, err
}
|
|
|
|
|
|
|
|
func (s *Shard) createFieldsAndMeasurements(fieldsToCreate []*FieldCreate) error {
|
|
|
|
if len(fieldsToCreate) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
engine, err := s.engineNoLock()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// add fields
|
|
|
|
for _, f := range fieldsToCreate {
|
|
|
|
mf := engine.MeasurementFields(f.Measurement)
|
|
|
|
if err := mf.CreateFieldIfNotExists([]byte(f.Field.Name), f.Field.Type); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-15 18:08:28 +00:00
|
|
|
return engine.MeasurementFieldSet().Save()
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteSeriesRange deletes all values from for seriesKeys between min and max (inclusive)
|
2021-06-09 15:03:53 +00:00
|
|
|
func (s *Shard) DeleteSeriesRange(ctx context.Context, itr SeriesIterator, min, max int64) error {
|
2020-04-22 20:19:20 +00:00
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-06-09 15:03:53 +00:00
|
|
|
return engine.DeleteSeriesRange(ctx, itr, min, max)
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteSeriesRangeWithPredicate deletes all values from for seriesKeys between min and max (inclusive)
|
|
|
|
// for which predicate() returns true. If predicate() is nil, then all values in range are deleted.
|
2021-06-09 15:03:53 +00:00
|
|
|
func (s *Shard) DeleteSeriesRangeWithPredicate(
|
|
|
|
ctx context.Context,
|
|
|
|
itr SeriesIterator,
|
|
|
|
predicate func(name []byte, tags models.Tags) (int64, int64, bool),
|
|
|
|
) error {
|
2020-04-22 20:19:20 +00:00
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-06-09 15:03:53 +00:00
|
|
|
return engine.DeleteSeriesRangeWithPredicate(ctx, itr, predicate)
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteMeasurement deletes a measurement and all underlying series.
|
2021-06-09 15:03:53 +00:00
|
|
|
func (s *Shard) DeleteMeasurement(ctx context.Context, name []byte) error {
|
2020-04-22 20:19:20 +00:00
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-06-09 15:03:53 +00:00
|
|
|
return engine.DeleteMeasurement(ctx, name)
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// SeriesN returns the unique number of series in the shard.
|
|
|
|
func (s *Shard) SeriesN() int64 {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return engine.SeriesN()
|
|
|
|
}
|
|
|
|
|
|
|
|
// SeriesSketches returns the measurement sketches for the shard.
|
|
|
|
func (s *Shard) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
return engine.SeriesSketches()
|
|
|
|
}
|
|
|
|
|
|
|
|
// MeasurementsSketches returns the measurement sketches for the shard.
|
|
|
|
func (s *Shard) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
return engine.MeasurementsSketches()
|
|
|
|
}
|
|
|
|
|
|
|
|
// MeasurementNamesByRegex returns names of measurements matching the regular expression.
|
|
|
|
func (s *Shard) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return engine.MeasurementNamesByRegex(re)
|
|
|
|
}
|
|
|
|
|
2020-09-17 19:28:24 +00:00
|
|
|
// MeasurementNamesByPredicate returns fields for a measurement filtered by an expression.
|
|
|
|
func (s *Shard) MeasurementNamesByPredicate(expr influxql.Expr) ([][]byte, error) {
|
|
|
|
index, err := s.Index()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}
|
|
|
|
return indexSet.MeasurementNamesByPredicate(query.OpenAuthorizer, expr)
|
|
|
|
}
|
|
|
|
|
2020-04-22 20:19:20 +00:00
|
|
|
// MeasurementFields returns fields for a measurement.
|
|
|
|
func (s *Shard) MeasurementFields(name []byte) *MeasurementFields {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return engine.MeasurementFields(name)
|
|
|
|
}
|
|
|
|
|
|
|
|
// MeasurementExists returns true if the shard contains name.
|
|
|
|
// TODO(edd): This method is currently only being called from tests; do we
|
|
|
|
// really need it?
|
|
|
|
func (s *Shard) MeasurementExists(name []byte) (bool, error) {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
return engine.MeasurementExists(name)
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTo writes the shard's data to w.
|
|
|
|
func (s *Shard) WriteTo(w io.Writer) (int64, error) {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
n, err := engine.WriteTo(w)
|
|
|
|
atomic.AddInt64(&s.stats.BytesWritten, int64(n))
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateIterator returns an iterator for the data in the shard.
// System measurements (_fieldKeys, _series, _tagKeys) are dispatched to
// their dedicated iterators; everything else goes to the engine.
func (s *Shard) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, err
	}
	switch m.SystemIterator {
	case "_fieldKeys":
		return NewFieldKeysIterator(s, opt)
	case "_series":
		// TODO(benbjohnson): Move up to the Shards.CreateIterator().
		index, err := s.Index()
		if err != nil {
			return nil, err
		}
		indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}

		itr, err := NewSeriesPointIterator(indexSet, opt)
		if err != nil {
			return nil, err
		}

		// Wrap so the iterator stops when the query is interrupted.
		return query.NewInterruptIterator(itr, opt.InterruptCh), nil
	case "_tagKeys":
		return NewTagKeysIterator(s, opt)
	}
	return engine.CreateIterator(ctx, m.Name, opt)
}
|
|
|
|
|
|
|
|
func (s *Shard) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (SeriesCursor, error) {
|
|
|
|
index, err := s.Index()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return newSeriesCursor(req, IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}, cond)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Shard) CreateCursorIterator(ctx context.Context) (CursorIterator, error) {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return engine.CreateCursorIterator(ctx)
|
|
|
|
}
|
|
|
|
|
|
|
|
// FieldDimensions returns unique sets of fields and dimensions across a list of sources.
// Fields map each field key to its widest observed type; dimensions is the
// set of tag keys. System sources (_fieldKeys, _series, _tagKeys) contribute
// their fixed string-typed keys; unknown measurements are skipped silently.
func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
	engine, err := s.Engine()
	if err != nil {
		return nil, nil, err
	}

	fields = make(map[string]influxql.DataType)
	dimensions = make(map[string]struct{})

	index, err := s.Index()
	if err != nil {
		return nil, nil, err
	}
	for _, name := range measurements {
		// Handle system sources.
		if strings.HasPrefix(name, "_") {
			var keys []string
			switch name {
			case "_fieldKeys":
				keys = []string{"fieldKey", "fieldType"}
			case "_series":
				keys = []string{"key"}
			case "_tagKeys":
				keys = []string{"tagKey"}
			}

			if len(keys) > 0 {
				for _, k := range keys {
					// Widen the recorded type if needed.
					if fields[k].LessThan(influxql.String) {
						fields[k] = influxql.String
					}
				}
				continue
			}
			// Unknown system source so default to looking for a measurement.
		}

		// Retrieve measurement.
		if exists, err := engine.MeasurementExists([]byte(name)); err != nil {
			return nil, nil, err
		} else if !exists {
			continue
		}

		// Append fields and dimensions.
		mf := engine.MeasurementFields([]byte(name))
		if mf != nil {
			for k, typ := range mf.FieldSet() {
				if fields[k].LessThan(typ) {
					fields[k] = typ
				}
			}
		}

		indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}
		if err := indexSet.ForEachMeasurementTagKey([]byte(name), func(key []byte) error {
			dimensions[string(key)] = struct{}{}
			return nil
		}); err != nil {
			return nil, nil, err
		}
	}

	return fields, dimensions, nil
}
|
|
|
|
|
|
|
|
// mapType returns the data type for the field within the measurement.
|
|
|
|
func (s *Shard) mapType(measurement, field string) (influxql.DataType, error) {
|
|
|
|
engine, err := s.engineNoLock()
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
switch field {
|
|
|
|
case "_name", "_tagKey", "_tagValue", "_seriesKey":
|
|
|
|
return influxql.String, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Process system measurements.
|
|
|
|
switch measurement {
|
|
|
|
case "_fieldKeys":
|
|
|
|
if field == "fieldKey" || field == "fieldType" {
|
|
|
|
return influxql.String, nil
|
|
|
|
}
|
|
|
|
return influxql.Unknown, nil
|
|
|
|
case "_series":
|
|
|
|
if field == "key" {
|
|
|
|
return influxql.String, nil
|
|
|
|
}
|
|
|
|
return influxql.Unknown, nil
|
|
|
|
case "_tagKeys":
|
|
|
|
if field == "tagKey" {
|
|
|
|
return influxql.String, nil
|
|
|
|
}
|
|
|
|
return influxql.Unknown, nil
|
|
|
|
}
|
|
|
|
// Unknown system source so default to looking for a measurement.
|
|
|
|
|
|
|
|
if exists, _ := engine.MeasurementExists([]byte(measurement)); !exists {
|
|
|
|
return influxql.Unknown, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
mf := engine.MeasurementFields([]byte(measurement))
|
|
|
|
if mf != nil {
|
|
|
|
f := mf.Field(field)
|
|
|
|
if f != nil {
|
|
|
|
return f.Type, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if exists, _ := engine.HasTagKey([]byte(measurement), []byte(field)); exists {
|
|
|
|
return influxql.Tag, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return influxql.Unknown, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// expandSources expands regex sources and removes duplicates.
|
|
|
|
// NOTE: sources must be normalized (db and rp set) before calling this function.
|
|
|
|
func (s *Shard) expandSources(sources influxql.Sources) (influxql.Sources, error) {
|
|
|
|
engine, err := s.engineNoLock()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Use a map as a set to prevent duplicates.
|
|
|
|
set := map[string]influxql.Source{}
|
|
|
|
|
|
|
|
// Iterate all sources, expanding regexes when they're found.
|
|
|
|
for _, source := range sources {
|
|
|
|
switch src := source.(type) {
|
|
|
|
case *influxql.Measurement:
|
|
|
|
// Add non-regex measurements directly to the set.
|
|
|
|
if src.Regex == nil {
|
|
|
|
set[src.String()] = src
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Loop over matching measurements.
|
|
|
|
names, err := engine.MeasurementNamesByRegex(src.Regex.Val)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, name := range names {
|
|
|
|
other := &influxql.Measurement{
|
|
|
|
Database: src.Database,
|
|
|
|
RetentionPolicy: src.RetentionPolicy,
|
|
|
|
Name: string(name),
|
|
|
|
}
|
|
|
|
set[other.String()] = other
|
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf("expandSources: unsupported source type: %T", source)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Convert set to sorted slice.
|
|
|
|
names := make([]string, 0, len(set))
|
|
|
|
for name := range set {
|
|
|
|
names = append(names, name)
|
|
|
|
}
|
|
|
|
sort.Strings(names)
|
|
|
|
|
|
|
|
// Convert set to a list of Sources.
|
|
|
|
expanded := make(influxql.Sources, 0, len(set))
|
|
|
|
for _, name := range names {
|
|
|
|
expanded = append(expanded, set[name])
|
|
|
|
}
|
|
|
|
|
|
|
|
return expanded, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Backup backs up the shard by creating a tar archive of all TSM files that
|
|
|
|
// have been modified since the provided time. See Engine.Backup for more details.
|
|
|
|
func (s *Shard) Backup(w io.Writer, basePath string, since time.Time) error {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return engine.Backup(w, basePath, since)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Shard) Export(w io.Writer, basePath string, start time.Time, end time.Time) error {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return engine.Export(w, basePath, start, end)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Restore restores data to the underlying engine for the shard.
|
|
|
|
// The shard is reopened after restore.
|
2021-06-09 15:03:53 +00:00
|
|
|
func (s *Shard) Restore(ctx context.Context, r io.Reader, basePath string) error {
|
2020-04-22 20:19:20 +00:00
|
|
|
if err := func() error {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
// Special case - we can still restore to a disabled shard, so we should
|
|
|
|
// only check if the engine is closed and not care if the shard is
|
|
|
|
// disabled.
|
|
|
|
if s._engine == nil {
|
|
|
|
return ErrEngineClosed
|
|
|
|
}
|
|
|
|
|
|
|
|
// Restore to engine.
|
|
|
|
return s._engine.Restore(r, basePath)
|
|
|
|
}(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close shard.
|
|
|
|
if err := s.Close(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reopen engine.
|
2021-06-09 15:03:53 +00:00
|
|
|
return s.Open(ctx)
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Import imports data to the underlying engine for the shard. r should
|
|
|
|
// be a reader from a backup created by Backup.
|
|
|
|
func (s *Shard) Import(r io.Reader, basePath string) error {
|
|
|
|
// Special case - we can still import to a disabled shard, so we should
|
|
|
|
// only check if the engine is closed and not care if the shard is
|
|
|
|
// disabled.
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
if s._engine == nil {
|
|
|
|
return ErrEngineClosed
|
|
|
|
}
|
|
|
|
|
|
|
|
// Import to engine.
|
|
|
|
return s._engine.Import(r, basePath)
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateSnapshot will return a path to a temp directory
|
|
|
|
// containing hard links to the underlying shard files.
|
2021-01-19 03:02:26 +00:00
|
|
|
func (s *Shard) CreateSnapshot(skipCacheOk bool) (string, error) {
|
2020-04-22 20:19:20 +00:00
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2021-01-19 03:02:26 +00:00
|
|
|
return engine.CreateSnapshot(skipCacheOk)
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ForEachMeasurementName iterates over each measurement in the shard.
|
|
|
|
func (s *Shard) ForEachMeasurementName(fn func(name []byte) error) error {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return engine.ForEachMeasurementName(fn)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Shard) TagKeyCardinality(name, key []byte) int {
|
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return engine.TagKeyCardinality(name, key)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Digest returns a digest of the shard.
|
2021-07-20 18:57:52 +00:00
|
|
|
func (s *Shard) Digest() (io.ReadCloser, int64, string, error) {
|
2020-04-22 20:19:20 +00:00
|
|
|
engine, err := s.Engine()
|
|
|
|
if err != nil {
|
2021-07-20 18:57:52 +00:00
|
|
|
return nil, 0, "", err
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure the shard is idle/cold. (No use creating a digest of a
|
|
|
|
// hot shard that is rapidly changing.)
|
2021-07-20 18:57:52 +00:00
|
|
|
if isIdle, reason := engine.IsIdle(); !isIdle {
|
|
|
|
return nil, 0, reason, ErrShardNotIdle
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
2021-07-20 18:57:52 +00:00
|
|
|
readCloser, size, err := engine.Digest()
|
|
|
|
return readCloser, size, "", err
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// engine safely (under an RLock) returns a reference to the shard's Engine, or
|
|
|
|
// an error if the Engine is closed, or the shard is currently disabled.
|
|
|
|
//
|
|
|
|
// The shard's Engine should always be accessed via a call to engine(), rather
|
|
|
|
// than directly referencing Shard.engine.
|
|
|
|
//
|
|
|
|
// If a caller needs an Engine reference but is already under a lock, then they
|
|
|
|
// should use engineNoLock().
|
|
|
|
func (s *Shard) Engine() (Engine, error) {
|
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
|
|
|
return s.engineNoLock()
|
|
|
|
}
|
|
|
|
|
|
|
|
// engineNoLock is similar to calling engine(), but the caller must guarantee
|
|
|
|
// that they already hold an appropriate lock.
|
|
|
|
func (s *Shard) engineNoLock() (Engine, error) {
|
|
|
|
if err := s.ready(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return s._engine, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ShardGroup represents a collection of shards that can be queried and
// introspected as a single unit.
type ShardGroup interface {
	// MeasurementsByRegex returns the unique measurement names matching re.
	MeasurementsByRegex(re *regexp.Regexp) []string
	// FieldKeysByMeasurement returns the sorted field keys for the measurement name.
	FieldKeysByMeasurement(name []byte) []string
	// FieldDimensions returns the union of field types and tag keys across measurements.
	FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
	// MapType returns the data type of field within measurement.
	MapType(measurement, field string) influxql.DataType
	// CreateIterator builds an iterator over measurement with the given options.
	CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error)
	// IteratorCost estimates the cost of building an iterator for measurement.
	IteratorCost(ctx context.Context, measurement string, opt query.IteratorOptions) (query.IteratorCost, error)
	// ExpandSources expands regex sources and removes duplicates.
	ExpandSources(sources influxql.Sources) (influxql.Sources, error)
}
|
|
|
|
|
|
|
|
// Shards represents a sortable list of shards.
type Shards []*Shard

// Len implements sort.Interface.
func (a Shards) Len() int { return len(a) }

// Less implements sort.Interface; shards are ordered by their id.
func (a Shards) Less(i, j int) bool { return a[i].id < a[j].id }

// Swap implements sort.Interface.
func (a Shards) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
|
|
|
|
|
|
// MeasurementsByRegex returns the unique set of measurements matching the
|
|
|
|
// provided regex, for all the shards.
|
|
|
|
func (a Shards) MeasurementsByRegex(re *regexp.Regexp) []string {
|
|
|
|
var m map[string]struct{}
|
|
|
|
for _, sh := range a {
|
|
|
|
names, err := sh.MeasurementNamesByRegex(re)
|
|
|
|
if err != nil {
|
|
|
|
continue // Skip this shard's results—previous behaviour.
|
|
|
|
}
|
|
|
|
|
|
|
|
if m == nil {
|
|
|
|
m = make(map[string]struct{}, len(names))
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, name := range names {
|
|
|
|
m[string(name)] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(m) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
names := make([]string, 0, len(m))
|
|
|
|
for key := range m {
|
|
|
|
names = append(names, key)
|
|
|
|
}
|
|
|
|
sort.Strings(names)
|
|
|
|
return names
|
|
|
|
}
|
|
|
|
|
|
|
|
// FieldKeysByMeasurement returns a de-duplicated, sorted, set of field keys for
|
|
|
|
// the provided measurement name.
|
|
|
|
func (a Shards) FieldKeysByMeasurement(name []byte) []string {
|
|
|
|
if len(a) == 1 {
|
|
|
|
mf := a[0].MeasurementFields(name)
|
|
|
|
if mf == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return mf.FieldKeys()
|
|
|
|
}
|
|
|
|
|
|
|
|
all := make([][]string, 0, len(a))
|
|
|
|
for _, shard := range a {
|
|
|
|
mf := shard.MeasurementFields(name)
|
|
|
|
if mf == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
all = append(all, mf.FieldKeys())
|
|
|
|
}
|
|
|
|
return slices.MergeSortedStrings(all...)
|
|
|
|
}
|
|
|
|
|
2020-09-17 19:28:24 +00:00
|
|
|
// MeasurementNamesByPredicate returns the measurements that match the given predicate.
|
|
|
|
func (a Shards) MeasurementNamesByPredicate(expr influxql.Expr) ([][]byte, error) {
|
|
|
|
if len(a) == 1 {
|
|
|
|
return a[0].MeasurementNamesByPredicate(expr)
|
|
|
|
}
|
|
|
|
|
|
|
|
all := make([][][]byte, len(a))
|
|
|
|
for i, shard := range a {
|
|
|
|
names, err := shard.MeasurementNamesByPredicate(expr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
all[i] = names
|
|
|
|
}
|
|
|
|
return slices.MergeSortedBytes(all...), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FieldKeysByPredicate returns the field keys for series that match
|
|
|
|
// the given predicate.
|
|
|
|
func (a Shards) FieldKeysByPredicate(expr influxql.Expr) (map[string][]string, error) {
|
2021-09-13 19:00:08 +00:00
|
|
|
names, ok := measurementOptimization(expr, measurementKey)
|
|
|
|
if !ok {
|
|
|
|
var err error
|
|
|
|
if names, err = a.MeasurementNamesByPredicate(expr); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-09-17 19:28:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
all := make(map[string][]string, len(names))
|
|
|
|
for _, name := range names {
|
|
|
|
all[string(name)] = a.FieldKeysByMeasurement(name)
|
|
|
|
}
|
|
|
|
return all, nil
|
|
|
|
}
|
|
|
|
|
2021-09-13 19:00:08 +00:00
|
|
|
// consecutiveAndChildren finds all child nodes of consecutive
// influxql.BinaryExpr with AND operator nodes ("AND nodes") which are not
// themselves AND nodes. This may be the root of the tree if the root of the
// tree is not an AND node.
type consecutiveAndChildren struct {
	// children collects the non-AND nodes found at the frontier of the walk.
	children []influxql.Node
}
|
|
|
|
|
|
|
|
func (v *consecutiveAndChildren) Visit(node influxql.Node) influxql.Visitor {
|
|
|
|
switch n := node.(type) {
|
|
|
|
case *influxql.BinaryExpr:
|
|
|
|
if n.Op == influxql.AND {
|
|
|
|
return v
|
|
|
|
}
|
|
|
|
case *influxql.ParenExpr:
|
|
|
|
// Parens are essentially a no-op and can be traversed through.
|
|
|
|
return v
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this wasn't a BinaryExpr with an AND operator or a Paren, record this
|
|
|
|
// child node and stop the search for this branch.
|
|
|
|
v.children = append(v.children, node)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// orMeasurementTree determines if a tree (or subtree) represents a grouping of
// exclusively measurement names OR'd together with EQ operators for the
// measurements themselves. It collects the list of measurement names
// encountered and records the validity of the tree.
type orMeasurementTree struct {
	// measurementKey is the VarRef name that identifies a measurement
	// comparison (e.g. "_name").
	measurementKey string
	// measurementNames accumulates the names found in valid EQ terms.
	measurementNames []string
	// valid is cleared as soon as any node outside the allowed grammar is seen.
	valid bool
}
|
|
|
|
|
|
|
|
func (v *orMeasurementTree) Visit(node influxql.Node) influxql.Visitor {
|
|
|
|
// Return early if this tree has already been invalidated - no reason to
|
|
|
|
// continue evaluating at that point.
|
|
|
|
if !v.valid {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
switch n := node.(type) {
|
|
|
|
case *influxql.BinaryExpr:
|
|
|
|
// A BinaryExpr must have an operation of OR or EQ in a valid tree
|
|
|
|
if n.Op == influxql.OR {
|
|
|
|
return v
|
|
|
|
} else if n.Op == influxql.EQ {
|
|
|
|
// An EQ must be in the form of "v.measurementKey == measurementName" in a
|
|
|
|
// valid tree
|
|
|
|
if name, ok := measurementNameFromEqBinary(n, v.measurementKey); ok {
|
|
|
|
v.measurementNames = append(v.measurementNames, name)
|
|
|
|
// If a valid measurement key/value was found, there is no need to
|
|
|
|
// continue evaluating the VarRef/StringLiteral child nodes of this
|
|
|
|
// node.
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case *influxql.ParenExpr:
|
|
|
|
// Parens are essentially a no-op and can be traversed through.
|
|
|
|
return v
|
|
|
|
}
|
|
|
|
|
|
|
|
// The the type switch didn't already return, this tree is invalid.
|
|
|
|
v.valid = false
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func measurementOptimization(expr influxql.Expr, key string) ([][]byte, bool) {
|
|
|
|
// A measurement optimization is possible if the query contains a single group
|
|
|
|
// of one or more measurements (in the form of _measurement = measName,
|
|
|
|
// equality operator only) grouped together by OR operators, with the subtree
|
|
|
|
// containing the OR'd measurements accessible from root of the tree either
|
|
|
|
// directly (tree contains nothing but OR'd measurements) or by traversing AND
|
|
|
|
// binary expression nodes.
|
|
|
|
|
|
|
|
// Get a list of "candidate" measurement subtrees.
|
|
|
|
v := consecutiveAndChildren{}
|
|
|
|
influxql.Walk(&v, expr)
|
|
|
|
possibleSubtrees := v.children
|
|
|
|
|
|
|
|
// Evaluate the candidate subtrees to determine which measurement names they
|
|
|
|
// contain, and to see if they are valid for the optimization.
|
|
|
|
validSubtrees := []orMeasurementTree{}
|
|
|
|
for _, h := range possibleSubtrees {
|
|
|
|
t := orMeasurementTree{
|
|
|
|
measurementKey: key,
|
|
|
|
valid: true,
|
|
|
|
}
|
|
|
|
influxql.Walk(&t, h)
|
|
|
|
if t.valid {
|
|
|
|
validSubtrees = append(validSubtrees, t)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// There must be exactly one valid measurement subtree for this optimization
|
|
|
|
// to be applied. Note: It may also be possible to have measurements in
|
|
|
|
// multiple subtrees, as long as there are no measurements in invalid
|
|
|
|
// subtrees, by determining an intersection of the measurement names across
|
|
|
|
// all valid subtrees - this is not currently implemented.
|
|
|
|
if len(validSubtrees) != 1 {
|
|
|
|
return nil, false
|
|
|
|
}
|
|
|
|
|
|
|
|
return slices.StringsToBytes(validSubtrees[0].measurementNames...), true
|
|
|
|
}
|
|
|
|
|
|
|
|
// measurementNameFromEqBinary returns the name of a measurement from a binary
|
|
|
|
// expression if possible, and a boolean status indicating if the binary
|
|
|
|
// expression contained a measurement name. A meausurement name will only be
|
|
|
|
// returned if the operator for the binary is EQ, and the measurement key is on
|
|
|
|
// the LHS with the measurement name on the RHS.
|
|
|
|
func measurementNameFromEqBinary(be *influxql.BinaryExpr, key string) (string, bool) {
|
|
|
|
lhs, ok := be.LHS.(*influxql.VarRef)
|
|
|
|
if !ok {
|
|
|
|
return "", false
|
|
|
|
} else if lhs.Val != key {
|
|
|
|
return "", false
|
|
|
|
}
|
|
|
|
|
|
|
|
rhs, ok := be.RHS.(*influxql.StringLiteral)
|
|
|
|
if !ok {
|
|
|
|
return "", false
|
|
|
|
}
|
|
|
|
|
|
|
|
return rhs.Val, true
|
|
|
|
}
|
|
|
|
|
2020-04-22 20:19:20 +00:00
|
|
|
func (a Shards) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
|
|
|
|
fields = make(map[string]influxql.DataType)
|
|
|
|
dimensions = make(map[string]struct{})
|
|
|
|
|
|
|
|
for _, sh := range a {
|
|
|
|
f, d, err := sh.FieldDimensions(measurements)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
for k, typ := range f {
|
|
|
|
if fields[k].LessThan(typ) {
|
|
|
|
fields[k] = typ
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for k := range d {
|
|
|
|
dimensions[k] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a Shards) MapType(measurement, field string) influxql.DataType {
|
|
|
|
var typ influxql.DataType
|
|
|
|
for _, sh := range a {
|
|
|
|
sh.mu.RLock()
|
|
|
|
if t, err := sh.mapType(measurement, field); err == nil && typ.LessThan(t) {
|
|
|
|
typ = t
|
|
|
|
}
|
|
|
|
sh.mu.RUnlock()
|
|
|
|
}
|
|
|
|
return typ
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a Shards) CallType(name string, args []influxql.DataType) (influxql.DataType, error) {
|
|
|
|
typmap := query.CallTypeMapper{}
|
|
|
|
return typmap.CallType(name, args)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a Shards) CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
|
|
|
|
switch measurement.SystemIterator {
|
|
|
|
case "_series":
|
|
|
|
return a.createSeriesIterator(ctx, opt)
|
|
|
|
}
|
|
|
|
|
|
|
|
itrs := make([]query.Iterator, 0, len(a))
|
|
|
|
for _, sh := range a {
|
|
|
|
itr, err := sh.CreateIterator(ctx, measurement, opt)
|
|
|
|
if err != nil {
|
|
|
|
query.Iterators(itrs).Close()
|
|
|
|
return nil, err
|
|
|
|
} else if itr == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
itrs = append(itrs, itr)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-opt.InterruptCh:
|
|
|
|
query.Iterators(itrs).Close()
|
|
|
|
return nil, query.ErrQueryInterrupted
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
// Enforce series limit at creation time.
|
|
|
|
if opt.MaxSeriesN > 0 {
|
|
|
|
stats := itr.Stats()
|
|
|
|
if stats.SeriesN > opt.MaxSeriesN {
|
|
|
|
query.Iterators(itrs).Close()
|
|
|
|
return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", stats.SeriesN, opt.MaxSeriesN)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return query.Iterators(itrs).Merge(opt)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a Shards) createSeriesIterator(ctx context.Context, opt query.IteratorOptions) (_ query.Iterator, err error) {
|
|
|
|
var (
|
|
|
|
idxs = make([]Index, 0, len(a))
|
|
|
|
sfile *SeriesFile
|
|
|
|
)
|
|
|
|
for _, sh := range a {
|
|
|
|
var idx Index
|
|
|
|
if idx, err = sh.Index(); err == nil {
|
|
|
|
idxs = append(idxs, idx)
|
|
|
|
}
|
|
|
|
if sfile == nil {
|
|
|
|
sfile, _ = sh.SeriesFile()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if sfile == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return NewSeriesPointIterator(IndexSet{Indexes: idxs, SeriesFile: sfile}, opt)
|
|
|
|
}
|
|
|
|
|
2021-06-09 15:03:53 +00:00
|
|
|
func (a Shards) IteratorCost(ctx context.Context, measurement string, opt query.IteratorOptions) (query.IteratorCost, error) {
|
2020-04-22 20:19:20 +00:00
|
|
|
var costs query.IteratorCost
|
|
|
|
var costerr error
|
|
|
|
var mu sync.RWMutex
|
|
|
|
|
|
|
|
setErr := func(err error) {
|
|
|
|
mu.Lock()
|
|
|
|
defer mu.Unlock()
|
|
|
|
if costerr == nil {
|
|
|
|
costerr = err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
limit := limiter.NewFixed(runtime.GOMAXPROCS(0))
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for _, sh := range a {
|
2021-06-09 15:03:53 +00:00
|
|
|
costerr = limit.Take(ctx)
|
2020-04-22 20:19:20 +00:00
|
|
|
wg.Add(1)
|
|
|
|
|
|
|
|
mu.RLock()
|
|
|
|
if costerr != nil {
|
2021-06-09 15:03:53 +00:00
|
|
|
limit.Release()
|
2020-04-22 20:19:20 +00:00
|
|
|
mu.RUnlock()
|
|
|
|
break
|
|
|
|
}
|
|
|
|
mu.RUnlock()
|
|
|
|
|
|
|
|
go func(sh *Shard) {
|
|
|
|
defer limit.Release()
|
|
|
|
defer wg.Done()
|
|
|
|
|
|
|
|
engine, err := sh.Engine()
|
|
|
|
if err != nil {
|
|
|
|
setErr(err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
cost, err := engine.IteratorCost(measurement, opt)
|
|
|
|
if err != nil {
|
|
|
|
setErr(err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
mu.Lock()
|
|
|
|
costs = costs.Combine(cost)
|
|
|
|
mu.Unlock()
|
|
|
|
}(sh)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
return costs, costerr
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a Shards) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (_ SeriesCursor, err error) {
|
|
|
|
var (
|
|
|
|
idxs []Index
|
|
|
|
sfile *SeriesFile
|
|
|
|
)
|
|
|
|
for _, sh := range a {
|
|
|
|
var idx Index
|
|
|
|
if idx, err = sh.Index(); err == nil {
|
|
|
|
idxs = append(idxs, idx)
|
|
|
|
}
|
|
|
|
if sfile == nil {
|
|
|
|
sfile, _ = sh.SeriesFile()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if sfile == nil {
|
|
|
|
return nil, errors.New("CreateSeriesCursor: no series file")
|
|
|
|
}
|
|
|
|
|
|
|
|
return newSeriesCursor(req, IndexSet{Indexes: idxs, SeriesFile: sfile}, cond)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a Shards) ExpandSources(sources influxql.Sources) (influxql.Sources, error) {
|
|
|
|
// Use a map as a set to prevent duplicates.
|
|
|
|
set := map[string]influxql.Source{}
|
|
|
|
|
|
|
|
// Iterate through every shard and expand the sources.
|
|
|
|
for _, sh := range a {
|
|
|
|
sh.mu.RLock()
|
|
|
|
expanded, err := sh.expandSources(sources)
|
|
|
|
sh.mu.RUnlock()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, src := range expanded {
|
|
|
|
switch src := src.(type) {
|
|
|
|
case *influxql.Measurement:
|
|
|
|
set[src.String()] = src
|
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf("Store.ExpandSources: unsupported source type: %T", src)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Convert set to sorted slice.
|
|
|
|
names := make([]string, 0, len(set))
|
|
|
|
for name := range set {
|
|
|
|
names = append(names, name)
|
|
|
|
}
|
|
|
|
sort.Strings(names)
|
|
|
|
|
|
|
|
// Convert set to a list of Sources.
|
|
|
|
sorted := make([]influxql.Source, 0, len(set))
|
|
|
|
for _, name := range names {
|
|
|
|
sorted = append(sorted, set[name])
|
|
|
|
}
|
|
|
|
return sorted, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// MeasurementFields holds the fields of a measurement and their codec.
type MeasurementFields struct {
	// mu serializes writers in CreateFieldIfNotExists; readers never take it
	// and instead load the immutable map from the atomic value below.
	mu sync.Mutex

	// fields holds a map[string]*Field that is treated as immutable: writers
	// copy it, add the new entry, and Store the replacement (copy-on-write).
	fields atomic.Value // map[string]*Field
}
|
|
|
|
|
|
|
|
// NewMeasurementFields returns an initialised *MeasurementFields value.
|
|
|
|
func NewMeasurementFields() *MeasurementFields {
|
|
|
|
fields := make(map[string]*Field)
|
|
|
|
mf := &MeasurementFields{}
|
|
|
|
mf.fields.Store(fields)
|
|
|
|
return mf
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *MeasurementFields) FieldKeys() []string {
|
|
|
|
fields := m.fields.Load().(map[string]*Field)
|
|
|
|
a := make([]string, 0, len(fields))
|
|
|
|
for key := range fields {
|
|
|
|
a = append(a, key)
|
|
|
|
}
|
|
|
|
sort.Strings(a)
|
|
|
|
return a
|
|
|
|
}
|
|
|
|
|
|
|
|
// bytes estimates the memory footprint of this MeasurementFields, in bytes.
|
|
|
|
func (m *MeasurementFields) bytes() int {
|
|
|
|
var b int
|
|
|
|
b += 24 // mu RWMutex is 24 bytes
|
|
|
|
fields := m.fields.Load().(map[string]*Field)
|
|
|
|
b += int(unsafe.Sizeof(fields))
|
|
|
|
for k, v := range fields {
|
|
|
|
b += int(unsafe.Sizeof(k)) + len(k)
|
|
|
|
b += int(unsafe.Sizeof(v)+unsafe.Sizeof(*v)) + len(v.Name)
|
|
|
|
}
|
|
|
|
return b
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateFieldIfNotExists creates a new field with an autoincrementing ID.
|
|
|
|
// Returns an error if 255 fields have already been created on the measurement or
|
|
|
|
// the fields already exists with a different type.
|
|
|
|
func (m *MeasurementFields) CreateFieldIfNotExists(name []byte, typ influxql.DataType) error {
|
|
|
|
fields := m.fields.Load().(map[string]*Field)
|
|
|
|
|
|
|
|
// Ignore if the field already exists.
|
|
|
|
if f := fields[string(name)]; f != nil {
|
|
|
|
if f.Type != typ {
|
|
|
|
return ErrFieldTypeConflict
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
m.mu.Lock()
|
|
|
|
defer m.mu.Unlock()
|
|
|
|
|
|
|
|
fields = m.fields.Load().(map[string]*Field)
|
|
|
|
// Re-check field and type under write lock.
|
|
|
|
if f := fields[string(name)]; f != nil {
|
|
|
|
if f.Type != typ {
|
|
|
|
return ErrFieldTypeConflict
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
fieldsUpdate := make(map[string]*Field, len(fields)+1)
|
|
|
|
for k, v := range fields {
|
|
|
|
fieldsUpdate[k] = v
|
|
|
|
}
|
|
|
|
// Create and append a new field.
|
|
|
|
f := &Field{
|
|
|
|
ID: uint8(len(fields) + 1),
|
|
|
|
Name: string(name),
|
|
|
|
Type: typ,
|
|
|
|
}
|
|
|
|
fieldsUpdate[string(name)] = f
|
|
|
|
m.fields.Store(fieldsUpdate)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *MeasurementFields) FieldN() int {
|
|
|
|
n := len(m.fields.Load().(map[string]*Field))
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
// Field returns the field for name, or nil if there is no field for name.
|
|
|
|
func (m *MeasurementFields) Field(name string) *Field {
|
|
|
|
f := m.fields.Load().(map[string]*Field)[name]
|
|
|
|
return f
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *MeasurementFields) HasField(name string) bool {
|
|
|
|
if m == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
f := m.fields.Load().(map[string]*Field)[name]
|
|
|
|
return f != nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FieldBytes returns the field for name, or nil if there is no field for name.
|
|
|
|
// FieldBytes should be preferred to Field when the caller has a []byte, because
|
|
|
|
// it avoids a string allocation, which can't be avoided if the caller converts
|
|
|
|
// the []byte to a string and calls Field.
|
|
|
|
func (m *MeasurementFields) FieldBytes(name []byte) *Field {
|
|
|
|
f := m.fields.Load().(map[string]*Field)[string(name)]
|
|
|
|
return f
|
|
|
|
}
|
|
|
|
|
|
|
|
// FieldSet returns the set of fields and their types for the measurement.
|
|
|
|
func (m *MeasurementFields) FieldSet() map[string]influxql.DataType {
|
|
|
|
fields := m.fields.Load().(map[string]*Field)
|
|
|
|
fieldTypes := make(map[string]influxql.DataType)
|
|
|
|
for name, f := range fields {
|
|
|
|
fieldTypes[name] = f.Type
|
|
|
|
}
|
|
|
|
return fieldTypes
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *MeasurementFields) ForEachField(fn func(name string, typ influxql.DataType) bool) {
|
|
|
|
fields := m.fields.Load().(map[string]*Field)
|
|
|
|
for name, f := range fields {
|
|
|
|
if !fn(name, f.Type) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// MeasurementFieldSet represents a collection of fields by measurement.
// This safe for concurrent use.
type MeasurementFieldSet struct {
	// mu guards fields.
	mu     sync.RWMutex
	fields map[string]*MeasurementFields

	// path is the location to persist field sets
	path string
	// writer serializes persistence requests through a background goroutine.
	writer *MeasurementFieldSetWriter
}
|
|
|
|
|
|
|
|
// NewMeasurementFieldSet returns a new instance of MeasurementFieldSet.
|
|
|
|
func NewMeasurementFieldSet(path string) (*MeasurementFieldSet, error) {
|
2021-06-04 20:17:53 +00:00
|
|
|
const MaxCombinedWrites = 100
|
2020-04-22 20:19:20 +00:00
|
|
|
fs := &MeasurementFieldSet{
|
2021-06-04 20:17:53 +00:00
|
|
|
fields: make(map[string]*MeasurementFields),
|
|
|
|
path: path,
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
2021-06-04 20:17:53 +00:00
|
|
|
fs.SetMeasurementFieldSetWriter(MaxCombinedWrites)
|
2020-04-22 20:19:20 +00:00
|
|
|
// If there is a load error, return the error and an empty set so
|
|
|
|
// it can be rebuild manually.
|
|
|
|
return fs, fs.load()
|
|
|
|
}
|
|
|
|
|
2021-06-04 20:17:53 +00:00
|
|
|
func (fs *MeasurementFieldSet) Close() {
|
|
|
|
if fs != nil && fs.writer != nil {
|
|
|
|
fs.writer.Close()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-22 20:19:20 +00:00
|
|
|
// Bytes estimates the memory footprint of this MeasurementFieldSet, in bytes.
|
|
|
|
func (fs *MeasurementFieldSet) Bytes() int {
|
|
|
|
var b int
|
|
|
|
fs.mu.RLock()
|
|
|
|
b += 24 // mu RWMutex is 24 bytes
|
|
|
|
for k, v := range fs.fields {
|
|
|
|
b += int(unsafe.Sizeof(k)) + len(k)
|
|
|
|
b += int(unsafe.Sizeof(v)) + v.bytes()
|
|
|
|
}
|
|
|
|
b += int(unsafe.Sizeof(fs.fields))
|
|
|
|
b += int(unsafe.Sizeof(fs.path)) + len(fs.path)
|
|
|
|
fs.mu.RUnlock()
|
|
|
|
return b
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fields returns fields for a measurement by name.
|
|
|
|
func (fs *MeasurementFieldSet) Fields(name []byte) *MeasurementFields {
|
|
|
|
fs.mu.RLock()
|
|
|
|
mf := fs.fields[string(name)]
|
|
|
|
fs.mu.RUnlock()
|
|
|
|
return mf
|
|
|
|
}
|
|
|
|
|
2020-11-11 18:54:21 +00:00
|
|
|
// FieldsByString returns fields for a measurement by name.
|
2020-04-22 20:19:20 +00:00
|
|
|
func (fs *MeasurementFieldSet) FieldsByString(name string) *MeasurementFields {
|
|
|
|
fs.mu.RLock()
|
|
|
|
mf := fs.fields[name]
|
|
|
|
fs.mu.RUnlock()
|
|
|
|
return mf
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateFieldsIfNotExists returns fields for a measurement by name.
|
|
|
|
func (fs *MeasurementFieldSet) CreateFieldsIfNotExists(name []byte) *MeasurementFields {
|
|
|
|
fs.mu.RLock()
|
|
|
|
mf := fs.fields[string(name)]
|
|
|
|
fs.mu.RUnlock()
|
|
|
|
|
|
|
|
if mf != nil {
|
|
|
|
return mf
|
|
|
|
}
|
|
|
|
|
|
|
|
fs.mu.Lock()
|
|
|
|
mf = fs.fields[string(name)]
|
|
|
|
if mf == nil {
|
|
|
|
mf = NewMeasurementFields()
|
|
|
|
fs.fields[string(name)] = mf
|
|
|
|
}
|
|
|
|
fs.mu.Unlock()
|
|
|
|
return mf
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete removes a field set for a measurement.
|
|
|
|
func (fs *MeasurementFieldSet) Delete(name string) {
|
|
|
|
fs.mu.Lock()
|
|
|
|
delete(fs.fields, name)
|
|
|
|
fs.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteWithLock executes fn and removes a field set from a measurement under lock.
|
|
|
|
func (fs *MeasurementFieldSet) DeleteWithLock(name string, fn func() error) error {
|
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
|
|
|
|
|
|
|
if err := fn(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
delete(fs.fields, name)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fs *MeasurementFieldSet) IsEmpty() bool {
|
|
|
|
fs.mu.RLock()
|
|
|
|
defer fs.mu.RUnlock()
|
|
|
|
return len(fs.fields) == 0
|
|
|
|
}
|
|
|
|
|
2021-06-04 20:17:53 +00:00
|
|
|
type errorChannel chan<- error
|
2021-04-15 18:08:28 +00:00
|
|
|
|
2021-06-04 20:17:53 +00:00
|
|
|
// writeRequest asks the background writer goroutine to persist the
// measurement field set to disk.
type writeRequest struct {
	// done receives the result of the file write (nil on success) and is
	// then closed by the writer; multiple queued requests may share the
	// result of a single write (see writeToFile).
	done errorChannel
}

// MeasurementFieldSetWriter serializes saves of the measurement field set
// through a single background goroutine fed by a request queue.
type MeasurementFieldSetWriter struct {
	// wg tracks the background saveWriter goroutine so Close can wait for it.
	wg sync.WaitGroup
	// writeRequests queues save requests for the background goroutine;
	// closing it signals the goroutine to exit.
	writeRequests chan writeRequest
}

// SetMeasurementFieldSetWriter - initialize the queue for write requests
|
|
|
|
// and start the background write process
|
|
|
|
func (fs *MeasurementFieldSet) SetMeasurementFieldSetWriter(queueLength int) {
|
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
|
|
|
fs.writer = &MeasurementFieldSetWriter{writeRequests: make(chan writeRequest, queueLength)}
|
|
|
|
fs.writer.wg.Add(1)
|
|
|
|
go fs.saveWriter()
|
|
|
|
}
|
2021-04-15 18:08:28 +00:00
|
|
|
|
2021-06-04 20:17:53 +00:00
|
|
|
func (w *MeasurementFieldSetWriter) Close() {
|
|
|
|
if w != nil {
|
|
|
|
close(w.writeRequests)
|
2021-06-15 18:51:51 +00:00
|
|
|
w.wg.Wait()
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
2021-06-04 20:17:53 +00:00
|
|
|
}
|
2020-04-22 20:19:20 +00:00
|
|
|
|
2021-06-04 20:17:53 +00:00
|
|
|
func (fs *MeasurementFieldSet) Save() error {
|
|
|
|
return fs.writer.RequestSave()
|
|
|
|
}
|
2020-04-22 20:19:20 +00:00
|
|
|
|
2021-06-04 20:17:53 +00:00
|
|
|
func (w *MeasurementFieldSetWriter) RequestSave() error {
|
|
|
|
done := make(chan error)
|
|
|
|
wr := writeRequest{done: done}
|
|
|
|
w.writeRequests <- wr
|
|
|
|
return <-done
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fs *MeasurementFieldSet) saveWriter() {
|
|
|
|
defer fs.writer.wg.Done()
|
|
|
|
// Block until someone modifies the MeasurementFieldSet and
|
|
|
|
// it needs to be written to disk.
|
|
|
|
for req, ok := <-fs.writer.writeRequests; ok; req, ok = <-fs.writer.writeRequests {
|
|
|
|
fs.writeToFile(req)
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
2021-06-04 20:17:53 +00:00
|
|
|
}
|
2020-04-22 20:19:20 +00:00
|
|
|
|
2021-06-04 20:17:53 +00:00
|
|
|
// writeToFile: Write the new index to a temp file and rename when it's sync'd.
// Every request that is queued while this write is in progress is drained
// from the channel and shares the result of this single write.
func (fs *MeasurementFieldSet) writeToFile(first writeRequest) {
	var err error
	// Put the errorChannel on which we blocked into a slice to allow more invocations
	// to share the return code from the file write
	errorChannels := []errorChannel{first.done}
	defer func() {
		// Deliver the final value of err to every batched requester,
		// then close each channel.
		for _, c := range errorChannels {
			c <- err
			close(c)
		}
	}()
	// Do some blocking IO operations before marshalling the in-memory index
	// to allow other changes to it to be queued up and be captured in one
	// write operation, in case we are under heavy field creation load
	path := fs.path + ".tmp"

	// Open the temp file. O_EXCL fails rather than clobbering a temp file
	// left behind by another writer; O_SYNC makes writes hit stable storage.
	fd, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_EXCL|os.O_SYNC, 0666)
	if err != nil {
		return
	}
	// Ensure temp file is cleaned up. After a successful rename the temp
	// path no longer exists and RemoveAll is a no-op; an earlier error is
	// preserved in preference to the cleanup error.
	defer func() {
		if e := os.RemoveAll(path); err == nil {
			err = e
		}
	}()
	isEmpty, err := func() (isEmpty bool, err error) {
		// ensure temp file closed before rename (for Windows)
		defer func() {
			if e := fd.Close(); err == nil {
				err = e
			}
		}()
		if _, err = fd.Write(fieldsIndexMagicNumber); err != nil {
			return true, err
		}

		// Read all the pending new field and measurement write requests
		// that will be captured in the marshaling of the in-memory copy
		for {
			select {
			case ec := <-fs.writer.writeRequests:
				errorChannels = append(errorChannels, ec.done)
				continue
			default:
			}
			break
		}
		// Lock, copy, and marshal the in-memory index
		b, err := fs.marshalMeasurementFieldSet()
		if err != nil {
			return true, err
		}
		if b == nil {
			// No fields, file removed, all done
			return true, nil
		}
		if _, err := fd.Write(b); err != nil {
			return true, err
		}
		return false, fd.Sync()
	}()
	if err != nil || isEmpty {
		return
	}
	// Atomically swap the synced temp file into place.
	err = fs.renameFile(path)
}

// marshalMeasurementFieldSet: remove the fields.idx file if no fields
|
|
|
|
// otherwise, copy the in-memory version into a protobuf to write to
|
|
|
|
// disk
|
|
|
|
func (fs *MeasurementFieldSet) marshalMeasurementFieldSet() ([]byte, error) {
|
2021-04-15 18:08:28 +00:00
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
2021-06-04 20:17:53 +00:00
|
|
|
if len(fs.fields) == 0 {
|
|
|
|
// If no fields left, remove the fields index file
|
|
|
|
if err := os.RemoveAll(fs.path); err != nil {
|
|
|
|
return nil, err
|
|
|
|
} else {
|
|
|
|
return nil, nil
|
|
|
|
}
|
2021-04-15 18:08:28 +00:00
|
|
|
}
|
2021-06-04 20:17:53 +00:00
|
|
|
return fs.marshalMeasurementFieldSetNoLock()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fs *MeasurementFieldSet) renameFile(path string) error {
|
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
2021-04-15 18:08:28 +00:00
|
|
|
|
2020-04-22 20:19:20 +00:00
|
|
|
if err := file.RenameFile(path, fs.path); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-06-04 20:17:53 +00:00
|
|
|
if err := file.SyncDir(filepath.Dir(fs.path)); err != nil {
|
2021-04-15 18:08:28 +00:00
|
|
|
return err
|
|
|
|
}
|
2021-06-04 20:17:53 +00:00
|
|
|
|
2021-04-15 18:08:28 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fs *MeasurementFieldSet) marshalMeasurementFieldSetNoLock() (marshalled []byte, err error) {
|
|
|
|
pb := internal.MeasurementFieldSet{
|
|
|
|
Measurements: make([]*internal.MeasurementFields, 0, len(fs.fields)),
|
|
|
|
}
|
|
|
|
|
|
|
|
for name, mf := range fs.fields {
|
|
|
|
imf := &internal.MeasurementFields{
|
|
|
|
Name: []byte(name),
|
|
|
|
Fields: make([]*internal.Field, 0, mf.FieldN()),
|
|
|
|
}
|
|
|
|
|
|
|
|
mf.ForEachField(func(field string, typ influxql.DataType) bool {
|
|
|
|
imf.Fields = append(imf.Fields, &internal.Field{Name: []byte(field), Type: int32(typ)})
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
|
|
|
pb.Measurements = append(pb.Measurements, imf)
|
|
|
|
}
|
|
|
|
b, err := proto.Marshal(&pb)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
} else {
|
|
|
|
return b, nil
|
|
|
|
}
|
2020-04-22 20:19:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (fs *MeasurementFieldSet) load() error {
|
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
|
|
|
|
|
|
|
fd, err := os.Open(fs.path)
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil
|
|
|
|
} else if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer fd.Close()
|
|
|
|
|
|
|
|
var magic [4]byte
|
|
|
|
if _, err := fd.Read(magic[:]); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if !bytes.Equal(magic[:], fieldsIndexMagicNumber) {
|
|
|
|
return ErrUnknownFieldsFormat
|
|
|
|
}
|
|
|
|
|
|
|
|
var pb internal.MeasurementFieldSet
|
|
|
|
b, err := ioutil.ReadAll(fd)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := proto.Unmarshal(b, &pb); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
fs.fields = make(map[string]*MeasurementFields, len(pb.GetMeasurements()))
|
|
|
|
for _, measurement := range pb.GetMeasurements() {
|
|
|
|
fields := make(map[string]*Field, len(measurement.GetFields()))
|
|
|
|
for _, field := range measurement.GetFields() {
|
|
|
|
fields[string(field.GetName())] = &Field{Name: string(field.GetName()), Type: influxql.DataType(field.GetType())}
|
|
|
|
}
|
|
|
|
set := &MeasurementFields{}
|
|
|
|
set.fields.Store(fields)
|
|
|
|
fs.fields[string(measurement.GetName())] = set
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Field represents a series field. All of the fields must be hashable.
type Field struct {
	// ID is a small numeric identifier for the field.
	ID uint8 `json:"id,omitempty"`
	// Name is the field's key as it appears in points.
	Name string `json:"name,omitempty"`
	// Type is the influxql data type of the field's values.
	Type influxql.DataType `json:"type,omitempty"`
}

// NewFieldKeysIterator returns an iterator that can be iterated over to
|
|
|
|
// retrieve field keys.
|
|
|
|
func NewFieldKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) {
|
|
|
|
itr := &fieldKeysIterator{shard: sh}
|
|
|
|
|
|
|
|
index, err := sh.Index()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Retrieve measurements from shard. Filter if condition specified.
|
|
|
|
//
|
|
|
|
// FGA is currently not supported when retrieving field keys.
|
|
|
|
indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
|
|
|
|
names, err := indexSet.MeasurementNamesByExpr(query.OpenAuthorizer, opt.Condition)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
itr.names = names
|
|
|
|
|
|
|
|
return itr, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// fieldKeysIterator iterates over measurements and gets field keys from each measurement.
type fieldKeysIterator struct {
	shard *Shard
	names [][]byte // remaining measurement names
	// buf holds the measurement currently being emitted and its
	// not-yet-returned fields, sorted by name.
	buf struct {
		name   []byte  // current measurement name
		fields []Field // current measurement's fields
	}
}

// Stats returns stats about the points processed.
|
|
|
|
func (itr *fieldKeysIterator) Stats() query.IteratorStats { return query.IteratorStats{} }
|
|
|
|
|
|
|
|
// Close closes the iterator.
|
|
|
|
func (itr *fieldKeysIterator) Close() error { return nil }
|
|
|
|
|
|
|
|
// Next emits the next field key. Each returned point carries the
// measurement name plus Aux values of (field name, field type string);
// it returns (nil, nil) once all measurements are exhausted.
func (itr *fieldKeysIterator) Next() (*query.FloatPoint, error) {
	for {
		// If there are no more keys then move to the next measurements.
		if len(itr.buf.fields) == 0 {
			if len(itr.names) == 0 {
				return nil, nil
			}

			itr.buf.name = itr.names[0]
			mf := itr.shard.MeasurementFields(itr.buf.name)
			if mf != nil {
				fset := mf.FieldSet()
				if len(fset) == 0 {
					// Measurement has no fields; skip it.
					itr.names = itr.names[1:]
					continue
				}

				// Sort the keys so emission order is deterministic.
				keys := make([]string, 0, len(fset))
				for k := range fset {
					keys = append(keys, k)
				}
				sort.Strings(keys)

				itr.buf.fields = make([]Field, len(keys))
				for i, name := range keys {
					itr.buf.fields[i] = Field{Name: name, Type: fset[name]}
				}
			}
			// Advance past this measurement whether or not it buffered any
			// fields; buffered fields are emitted on following iterations.
			itr.names = itr.names[1:]
			continue
		}

		// Return next key.
		field := itr.buf.fields[0]
		p := &query.FloatPoint{
			Name: string(itr.buf.name),
			Aux:  []interface{}{field.Name, field.Type.String()},
		}
		itr.buf.fields = itr.buf.fields[1:]

		return p, nil
	}
}

// NewTagKeysIterator returns a new instance of TagKeysIterator.
|
|
|
|
func NewTagKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) {
|
|
|
|
fn := func(name []byte) ([][]byte, error) {
|
|
|
|
index, err := sh.Index()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
|
|
|
|
var keys [][]byte
|
|
|
|
if err := indexSet.ForEachMeasurementTagKey(name, func(key []byte) error {
|
|
|
|
keys = append(keys, key)
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return keys, nil
|
|
|
|
}
|
|
|
|
return newMeasurementKeysIterator(sh, fn, opt)
|
|
|
|
}
|
|
|
|
|
|
|
|
// measurementKeyFunc is the function called by measurementKeysIterator to
// produce the keys for a single measurement name.
type measurementKeyFunc func(name []byte) ([][]byte, error)

func newMeasurementKeysIterator(sh *Shard, fn measurementKeyFunc, opt query.IteratorOptions) (*measurementKeysIterator, error) {
|
|
|
|
index, err := sh.Index()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
|
|
|
|
itr := &measurementKeysIterator{fn: fn}
|
|
|
|
names, err := indexSet.MeasurementNamesByExpr(opt.Authorizer, opt.Condition)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
itr.names = names
|
|
|
|
|
|
|
|
return itr, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// measurementKeysIterator iterates over measurements and gets keys from each measurement.
type measurementKeysIterator struct {
	names [][]byte // remaining measurement names
	// buf holds the measurement currently being emitted and its
	// not-yet-returned keys.
	buf struct {
		name []byte   // current measurement name
		keys [][]byte // current measurement's keys
	}
	fn measurementKeyFunc // produces the keys for a given measurement name
}

// Stats returns stats about the points processed.
|
|
|
|
func (itr *measurementKeysIterator) Stats() query.IteratorStats { return query.IteratorStats{} }
|
|
|
|
|
|
|
|
// Close closes the iterator.
|
|
|
|
func (itr *measurementKeysIterator) Close() error { return nil }
|
|
|
|
|
|
|
|
// Next emits the next tag key name.
|
|
|
|
func (itr *measurementKeysIterator) Next() (*query.FloatPoint, error) {
|
|
|
|
for {
|
|
|
|
// If there are no more keys then move to the next measurements.
|
|
|
|
if len(itr.buf.keys) == 0 {
|
|
|
|
if len(itr.names) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
itr.buf.name, itr.names = itr.names[0], itr.names[1:]
|
|
|
|
|
|
|
|
keys, err := itr.fn(itr.buf.name)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
itr.buf.keys = keys
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return next key.
|
|
|
|
p := &query.FloatPoint{
|
|
|
|
Name: string(itr.buf.name),
|
|
|
|
Aux: []interface{}{string(itr.buf.keys[0])},
|
|
|
|
}
|
|
|
|
itr.buf.keys = itr.buf.keys[1:]
|
|
|
|
|
|
|
|
return p, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// LimitError represents an error caused by a configurable limit.
type LimitError struct {
	// Reason is the human-readable explanation returned by Error().
	Reason string
}

|
|
|
|
func (e *LimitError) Error() string { return e.Reason }
|