2015-05-22 20:08:43 +00:00
|
|
|
package tsdb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"regexp"
|
|
|
|
"sort"
|
|
|
|
"strings"
|
2015-05-23 22:06:07 +00:00
|
|
|
"sync"
|
2015-05-22 20:08:43 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/influxdb/influxdb/influxql"
|
2015-06-02 23:08:48 +00:00
|
|
|
"github.com/influxdb/influxdb/tsdb/internal"
|
|
|
|
|
|
|
|
"github.com/gogo/protobuf/proto"
|
2015-05-22 20:08:43 +00:00
|
|
|
)
|
|
|
|
|
2015-06-02 23:08:48 +00:00
|
|
|
//go:generate protoc --gogo_out=. internal/meta.proto
|
|
|
|
|
2015-05-22 20:08:43 +00:00
|
|
|
const (
	// maxStringLength caps string sizes handled by this package (64 KiB).
	// NOTE(review): its use is not visible in this chunk — presumably it
	// bounds measurement/tag key/value lengths; confirm at the call sites.
	maxStringLength = 64 * 1024
)
|
|
|
|
|
2015-05-24 11:39:45 +00:00
|
|
|
// DatabaseIndex is the in memory index of a collection of measurements, time series, and their tags.
// Exported functions are goroutine safe while un-exported functions assume the caller will use the appropriate locks
type DatabaseIndex struct {
	// in memory metadata index, built on load and updated when new series come in
	mu           sync.RWMutex            // guards all fields below
	measurements map[string]*Measurement // measurement name to object and index
	series       map[string]*Series      // map series key to the Series object
	names        []string                // sorted list of the measurement names
	lastID       uint64                  // last used series ID. They're in memory only for this shard
}
|
|
|
|
|
2015-05-24 11:39:45 +00:00
|
|
|
func NewDatabaseIndex() *DatabaseIndex {
|
|
|
|
return &DatabaseIndex{
|
2015-05-23 22:06:07 +00:00
|
|
|
measurements: make(map[string]*Measurement),
|
|
|
|
series: make(map[string]*Series),
|
|
|
|
names: make([]string, 0),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
// Names returns a sorted list of measurement names.
|
|
|
|
func (d *DatabaseIndex) Names() []string {
|
|
|
|
d.mu.RLock()
|
|
|
|
defer d.mu.RUnlock()
|
|
|
|
return d.names
|
|
|
|
}
|
|
|
|
|
|
|
|
// Series returns a series by key.
|
|
|
|
func (d *DatabaseIndex) Series(key string) *Series {
|
|
|
|
d.mu.RLock()
|
|
|
|
defer d.mu.RUnlock()
|
|
|
|
return d.series[key]
|
|
|
|
}
|
|
|
|
|
|
|
|
// SeriesN returns the number of series.
|
|
|
|
func (d *DatabaseIndex) SeriesN() int {
|
|
|
|
d.mu.RLock()
|
|
|
|
defer d.mu.RUnlock()
|
|
|
|
return len(d.series)
|
|
|
|
}
|
|
|
|
|
2015-06-03 15:32:50 +00:00
|
|
|
// Measurement returns the measurement object from the index by the name
|
|
|
|
func (d *DatabaseIndex) Measurement(name string) *Measurement {
|
|
|
|
d.mu.RLock()
|
|
|
|
defer d.mu.RUnlock()
|
|
|
|
return d.measurements[name]
|
|
|
|
}
|
|
|
|
|
2015-06-11 11:48:26 +00:00
|
|
|
// MeasurementSeriesCounts returns the number of measurements and series currently indexed by the database.
|
2015-06-11 04:50:20 +00:00
|
|
|
// Useful for reporting and monitoring.
|
|
|
|
func (d *DatabaseIndex) MeasurementSeriesCounts() (nMeasurements int, nSeries int) {
|
|
|
|
d.mu.RLock()
|
|
|
|
defer d.mu.RUnlock()
|
|
|
|
nMeasurements, nSeries = len(d.measurements), len(d.series)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
// CreateSeriesIndexIfNotExists adds the series for the given measurement to the index and sets its ID or returns the existing series object
|
|
|
|
func (s *DatabaseIndex) CreateSeriesIndexIfNotExists(measurementName string, series *Series) *Series {
|
2015-05-23 22:06:07 +00:00
|
|
|
// if there is a measurement for this id, it's already been added
|
|
|
|
ss := s.series[series.Key]
|
|
|
|
if ss != nil {
|
|
|
|
return ss
|
|
|
|
}
|
|
|
|
|
|
|
|
// get or create the measurement index
|
2015-07-22 14:53:20 +00:00
|
|
|
m := s.CreateMeasurementIndexIfNotExists(measurementName)
|
2015-05-23 22:06:07 +00:00
|
|
|
|
|
|
|
// set the in memory ID for query processing on this shard
|
|
|
|
series.id = s.lastID + 1
|
|
|
|
s.lastID += 1
|
|
|
|
|
|
|
|
series.measurement = m
|
|
|
|
s.series[series.Key] = series
|
|
|
|
|
2015-06-04 18:50:32 +00:00
|
|
|
m.AddSeries(series)
|
2015-05-23 22:06:07 +00:00
|
|
|
|
|
|
|
return series
|
|
|
|
}
|
|
|
|
|
2015-07-22 14:53:20 +00:00
|
|
|
// CreateMeasurementIndexIfNotExists creates or retrieves an in memory index object for the measurement
|
|
|
|
func (s *DatabaseIndex) CreateMeasurementIndexIfNotExists(name string) *Measurement {
|
2015-07-22 18:37:41 +00:00
|
|
|
name = unescapeString(name)
|
2015-05-23 22:06:07 +00:00
|
|
|
m := s.measurements[name]
|
|
|
|
if m == nil {
|
2015-05-28 22:02:12 +00:00
|
|
|
m = NewMeasurement(name, s)
|
2015-05-23 22:06:07 +00:00
|
|
|
s.measurements[name] = m
|
|
|
|
s.names = append(s.names, name)
|
|
|
|
sort.Strings(s.names)
|
|
|
|
}
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
|
|
|
// measurementsByExpr takes and expression containing only tags and returns
|
|
|
|
// a list of matching *Measurement.
|
2015-05-24 11:39:45 +00:00
|
|
|
func (db *DatabaseIndex) measurementsByExpr(expr influxql.Expr) (Measurements, error) {
|
2015-05-23 22:06:07 +00:00
|
|
|
switch e := expr.(type) {
|
|
|
|
case *influxql.BinaryExpr:
|
|
|
|
switch e.Op {
|
|
|
|
case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
|
|
|
|
tag, ok := e.LHS.(*influxql.VarRef)
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("left side of '%s' must be a tag name", e.Op.String())
|
|
|
|
}
|
|
|
|
|
|
|
|
tf := &TagFilter{
|
|
|
|
Op: e.Op,
|
|
|
|
Key: tag.Val,
|
|
|
|
}
|
|
|
|
|
|
|
|
if influxql.IsRegexOp(e.Op) {
|
|
|
|
re, ok := e.RHS.(*influxql.RegexLiteral)
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("right side of '%s' must be a regular expression", e.Op.String())
|
|
|
|
}
|
|
|
|
tf.Regex = re.Val
|
|
|
|
} else {
|
|
|
|
s, ok := e.RHS.(*influxql.StringLiteral)
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("right side of '%s' must be a tag value string", e.Op.String())
|
|
|
|
}
|
|
|
|
tf.Value = s.Val
|
|
|
|
}
|
|
|
|
|
|
|
|
return db.measurementsByTagFilters([]*TagFilter{tf}), nil
|
|
|
|
case influxql.OR, influxql.AND:
|
|
|
|
lhsIDs, err := db.measurementsByExpr(e.LHS)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
rhsIDs, err := db.measurementsByExpr(e.RHS)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if e.Op == influxql.OR {
|
|
|
|
return lhsIDs.union(rhsIDs), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return lhsIDs.intersect(rhsIDs), nil
|
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf("invalid operator")
|
|
|
|
}
|
|
|
|
case *influxql.ParenExpr:
|
|
|
|
return db.measurementsByExpr(e.Expr)
|
|
|
|
}
|
|
|
|
return nil, fmt.Errorf("%#v", expr)
|
|
|
|
}
|
|
|
|
|
2015-06-11 11:48:26 +00:00
|
|
|
// measurementsByTagFilters returns the measurements matching the filters on tag values.
//
// NOTE(review): this un-exported method reads db.measurements without taking
// db.mu — per the type's convention it assumes the caller holds at least a
// read lock; confirm at call sites.
func (db *DatabaseIndex) measurementsByTagFilters(filters []*TagFilter) Measurements {
	// If no filters, then return all measurements.
	if len(filters) == 0 {
		measurements := make(Measurements, 0, len(db.measurements))
		for _, m := range db.measurements {
			measurements = append(measurements, m)
		}
		return measurements
	}

	// Build a list of measurements matching the filters.
	var measurements Measurements
	var tagMatch bool

	// Iterate through all measurements in the database.
	for _, m := range db.measurements {
		// Iterate filters seeing if the measurement has a matching tag.
		for _, f := range filters {
			tagVals, ok := m.seriesByTagKeyValue[f.Key]
			if !ok {
				// This measurement has no series with the filter's tag key;
				// try the next filter.
				continue
			}

			tagMatch = false

			// If the operator is non-regex, only check the specified value.
			if f.Op == influxql.EQ || f.Op == influxql.NEQ {
				if _, ok := tagVals[f.Value]; ok {
					tagMatch = true
				}
			} else {
				// Else, the operator is regex and we have to check all tag
				// values against the regular expression.
				for tagVal := range tagVals {
					if f.Regex.MatchString(tagVal) {
						tagMatch = true
						break
					}
				}
			}

			isEQ := (f.Op == influxql.EQ || f.Op == influxql.EQREGEX)

			// A measurement matches when the sense of the match agrees with
			// the sense of the operator:
			//
			// tags match | operation is EQ | measurement matches
			// --------------------------------------------------
			// True       | True            | True
			// True       | False           | False
			// False      | True            | False
			// False      | False           | True

			if tagMatch == isEQ {
				measurements = append(measurements, m)
				break
			}
		}
	}

	return measurements
}
|
|
|
|
|
|
|
|
// measurementsByRegex returns the measurements that match the regex.
|
2015-05-24 11:39:45 +00:00
|
|
|
func (db *DatabaseIndex) measurementsByRegex(re *regexp.Regexp) Measurements {
|
2015-05-23 22:06:07 +00:00
|
|
|
var matches Measurements
|
|
|
|
for _, m := range db.measurements {
|
|
|
|
if re.MatchString(m.Name) {
|
|
|
|
matches = append(matches, m)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return matches
|
|
|
|
}
|
|
|
|
|
|
|
|
// Measurements returns a list of all measurements.
|
2015-05-24 11:39:45 +00:00
|
|
|
func (db *DatabaseIndex) Measurements() Measurements {
|
2015-05-23 22:06:07 +00:00
|
|
|
measurements := make(Measurements, 0, len(db.measurements))
|
|
|
|
for _, m := range db.measurements {
|
|
|
|
measurements = append(measurements, m)
|
|
|
|
}
|
|
|
|
return measurements
|
|
|
|
}
|
|
|
|
|
2015-06-03 17:26:49 +00:00
|
|
|
// DropMeasurement removes the measurement and all of its underlying series from the database index
|
2015-06-03 15:32:50 +00:00
|
|
|
func (db *DatabaseIndex) DropMeasurement(name string) {
|
|
|
|
db.mu.Lock()
|
|
|
|
defer db.mu.Unlock()
|
|
|
|
|
|
|
|
m := db.measurements[name]
|
|
|
|
if m == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
delete(db.measurements, name)
|
|
|
|
for _, s := range m.seriesByID {
|
|
|
|
delete(db.series, s.Key)
|
|
|
|
}
|
|
|
|
|
|
|
|
var names []string
|
|
|
|
for _, n := range db.names {
|
|
|
|
if n != name {
|
|
|
|
names = append(names, n)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
db.names = names
|
|
|
|
}
|
|
|
|
|
|
|
|
// DropSeries removes the series keys and their tags from the index
|
|
|
|
func (db *DatabaseIndex) DropSeries(keys []string) {
|
2015-06-02 15:20:20 +00:00
|
|
|
db.mu.Lock()
|
|
|
|
defer db.mu.Unlock()
|
|
|
|
for _, k := range keys {
|
|
|
|
series := db.series[k]
|
|
|
|
if series == nil {
|
|
|
|
continue
|
|
|
|
}
|
2015-06-03 15:32:50 +00:00
|
|
|
series.measurement.DropSeries(series.id)
|
2015-06-02 15:20:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-22 20:08:43 +00:00
|
|
|
// Measurement represents a collection of time series in a database. It also contains in memory
// structures for indexing tags. Exported functions are goroutine safe while un-exported functions
// assume the caller will use the appropriate locks
type Measurement struct {
	mu         sync.RWMutex        // guards the in-memory index fields below
	Name       string              `json:"name,omitempty"`
	fieldNames map[string]struct{} // set of field names seen for this measurement
	index      *DatabaseIndex      // back-reference to the owning database index

	// in-memory index fields
	series      map[string]*Series // sorted tagset string to the series object
	seriesByID  map[uint64]*Series // lookup table for series by their id
	measurement *Measurement       // NOTE(review): appears unused within this chunk — confirm before removing
	seriesByTagKeyValue map[string]map[string]SeriesIDs // map from tag key to value to sorted set of series ids
	seriesIDs           SeriesIDs                       // sorted list of series IDs in this measurement
}
|
|
|
|
|
|
|
|
// NewMeasurement allocates and initializes a new Measurement.
|
2015-05-28 22:02:12 +00:00
|
|
|
func NewMeasurement(name string, idx *DatabaseIndex) *Measurement {
|
2015-05-22 20:08:43 +00:00
|
|
|
return &Measurement{
|
2015-05-23 22:06:07 +00:00
|
|
|
Name: name,
|
2015-06-04 18:50:32 +00:00
|
|
|
fieldNames: make(map[string]struct{}),
|
2015-05-28 22:02:12 +00:00
|
|
|
index: idx,
|
2015-05-22 20:08:43 +00:00
|
|
|
|
|
|
|
series: make(map[string]*Series),
|
|
|
|
seriesByID: make(map[uint64]*Series),
|
2015-07-20 19:59:46 +00:00
|
|
|
seriesByTagKeyValue: make(map[string]map[string]SeriesIDs),
|
|
|
|
seriesIDs: make(SeriesIDs, 0),
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-04 20:08:12 +00:00
|
|
|
// HasField returns true if the measurement has a field by the given name
|
2015-06-04 18:50:32 +00:00
|
|
|
func (m *Measurement) HasField(name string) bool {
|
|
|
|
m.mu.RLock()
|
|
|
|
defer m.mu.RUnlock()
|
|
|
|
_, hasField := m.fieldNames[name]
|
|
|
|
return hasField
|
|
|
|
}
|
|
|
|
|
2015-06-04 20:08:12 +00:00
|
|
|
// SeriesKeys returns the keys of every series in this measurement
|
2015-06-04 18:50:32 +00:00
|
|
|
func (m *Measurement) SeriesKeys() []string {
|
2015-06-03 15:32:50 +00:00
|
|
|
m.mu.RLock()
|
|
|
|
defer m.mu.RUnlock()
|
|
|
|
var keys []string
|
|
|
|
for _, s := range m.seriesByID {
|
|
|
|
keys = append(keys, s.Key)
|
|
|
|
}
|
|
|
|
return keys
|
|
|
|
}
|
|
|
|
|
2015-07-06 12:31:52 +00:00
|
|
|
// ValidateGroupBy ensures that the GROUP BY is not a field.
|
|
|
|
func (m *Measurement) ValidateGroupBy(stmt *influxql.SelectStatement) error {
|
|
|
|
for _, d := range stmt.Dimensions {
|
|
|
|
switch e := d.Expr.(type) {
|
|
|
|
case *influxql.VarRef:
|
|
|
|
if !m.HasTagKey(e.Val) {
|
|
|
|
return fmt.Errorf("can not use field in GROUP BY clause: %s", e.Val)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-05-16 00:00:01 +00:00
|
|
|
// HasTagKey returns true if at least one series in this measurement has written a value for the passed in tag key
|
2015-05-22 20:08:43 +00:00
|
|
|
func (m *Measurement) HasTagKey(k string) bool {
|
2015-06-04 18:50:32 +00:00
|
|
|
m.mu.RLock()
|
|
|
|
defer m.mu.RUnlock()
|
|
|
|
_, hasTag := m.seriesByTagKeyValue[k]
|
|
|
|
return hasTag
|
|
|
|
}
|
|
|
|
|
|
|
|
// HasSeries returns true if there is at least 1 series under this measurement
|
|
|
|
func (m *Measurement) HasSeries() bool {
|
|
|
|
m.mu.RLock()
|
|
|
|
defer m.mu.RUnlock()
|
|
|
|
return len(m.seriesByID) > 0
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
|
|
|
|
2015-06-04 20:08:12 +00:00
|
|
|
// AddSeries will add a series to the measurementIndex. Returns false if already present
func (m *Measurement) AddSeries(s *Series) bool {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Already indexed: nothing to do.
	if _, ok := m.seriesByID[s.id]; ok {
		return false
	}
	m.seriesByID[s.id] = s
	// Index by the marshaled tagset string as well, for tagset lookups.
	tagset := string(MarshalTags(s.Tags))
	m.series[tagset] = s
	m.seriesIDs = append(m.seriesIDs, s.id)

	// the series ID should always be higher than all others because it's a new
	// series. So don't do the sort if we don't have to.
	if len(m.seriesIDs) > 1 && m.seriesIDs[len(m.seriesIDs)-1] < m.seriesIDs[len(m.seriesIDs)-2] {
		sort.Sort(m.seriesIDs)
	}

	// add this series id to the tag index on the measurement
	for k, v := range s.Tags {
		valueMap := m.seriesByTagKeyValue[k]
		if valueMap == nil {
			// First series seen with this tag key: create the value map.
			valueMap = make(map[string]SeriesIDs)
			m.seriesByTagKeyValue[k] = valueMap
		}
		ids := valueMap[v]
		ids = append(ids, s.id)

		// most of the time the series ID will be higher than all others because it's a new
		// series. So don't do the sort if we don't have to.
		if len(ids) > 1 && ids[len(ids)-1] < ids[len(ids)-2] {
			sort.Sort(ids)
		}
		valueMap[v] = ids
	}

	return true
}
|
|
|
|
|
2015-06-11 03:50:07 +00:00
|
|
|
// DropSeries will remove a series from the measurementIndex.
|
|
|
|
func (m *Measurement) DropSeries(seriesID uint64) {
|
2015-06-03 15:32:50 +00:00
|
|
|
m.mu.Lock()
|
|
|
|
defer m.mu.Unlock()
|
|
|
|
|
2015-05-22 20:08:43 +00:00
|
|
|
if _, ok := m.seriesByID[seriesID]; !ok {
|
2015-06-11 03:50:07 +00:00
|
|
|
return
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
|
|
|
s := m.seriesByID[seriesID]
|
2015-07-20 19:59:46 +00:00
|
|
|
tagset := string(MarshalTags(s.Tags))
|
2015-05-22 20:08:43 +00:00
|
|
|
|
|
|
|
delete(m.series, tagset)
|
|
|
|
delete(m.seriesByID, seriesID)
|
|
|
|
|
|
|
|
var ids []uint64
|
|
|
|
for _, id := range m.seriesIDs {
|
|
|
|
if id != seriesID {
|
|
|
|
ids = append(ids, id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m.seriesIDs = ids
|
|
|
|
|
|
|
|
// remove this series id to the tag index on the measurement
|
2015-07-20 19:59:46 +00:00
|
|
|
// s.seriesByTagKeyValue is defined as map[string]map[string]SeriesIDs
|
2015-05-22 20:08:43 +00:00
|
|
|
for k, v := range m.seriesByTagKeyValue {
|
|
|
|
values := v
|
|
|
|
for kk, vv := range values {
|
|
|
|
var ids []uint64
|
|
|
|
for _, id := range vv {
|
|
|
|
if id != seriesID {
|
|
|
|
ids = append(ids, id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Check to see if we have any ids, if not, remove the key
|
|
|
|
if len(ids) == 0 {
|
|
|
|
delete(values, kk)
|
|
|
|
} else {
|
|
|
|
values[kk] = ids
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// If we have no values, then we delete the key
|
|
|
|
if len(values) == 0 {
|
|
|
|
delete(m.seriesByTagKeyValue, k)
|
|
|
|
} else {
|
|
|
|
m.seriesByTagKeyValue[k] = values
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-11 03:50:07 +00:00
|
|
|
return
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// filters walks the where clause of a select statement and returns a map with all series ids
// matching the where clause and any filter expression that should be applied to each
func (m *Measurement) filters(stmt *influxql.SelectStatement) (map[uint64]influxql.Expr, error) {
	// With no WHERE clause (or only time constraints), every series in the
	// measurement matches and no per-series filter is needed (nil entry).
	if stmt.Condition == nil || stmt.OnlyTimeDimensions() {
		seriesIdsToExpr := make(map[uint64]influxql.Expr)
		for _, id := range m.seriesIDs {
			seriesIdsToExpr[id] = nil
		}
		return seriesIdsToExpr, nil
	}

	ids, seriesIdsToExpr, err := m.walkWhereForSeriesIds(stmt.Condition)
	if err != nil {
		return nil, err
	}
	// Ensure every id is in the map and replace literal true expressions with
	// nil so the engine doesn't waste time evaluating them.
	for _, id := range ids {
		if expr, ok := seriesIdsToExpr[id]; !ok {
			seriesIdsToExpr[id] = nil
		} else if b, ok := expr.(*influxql.BooleanLiteral); ok && b.Val {
			seriesIdsToExpr[id] = nil
		}
	}
	return seriesIdsToExpr, nil
}
|
|
|
|
|
|
|
|
// tagSets returns the unique tag sets that exist for the given tag keys. This is used to determine
|
|
|
|
// what composite series will be created by a group by. i.e. "group by region" should return:
|
|
|
|
// {"region":"uswest"}, {"region":"useast"}
|
|
|
|
// or region, service returns
|
|
|
|
// {"region": "uswest", "service": "redis"}, {"region": "uswest", "service": "mysql"}, etc...
|
|
|
|
// This will also populate the TagSet objects with the series IDs that match each tagset and any
|
|
|
|
// influx filter expression that goes with the series
|
2015-05-28 22:02:12 +00:00
|
|
|
// TODO: this shouldn't be exported. However, until tx.go and the engine get refactored into tsdb, we need it.
|
|
|
|
func (m *Measurement) TagSets(stmt *influxql.SelectStatement, dimensions []string) ([]*influxql.TagSet, error) {
|
|
|
|
m.index.mu.RLock()
|
|
|
|
defer m.index.mu.RUnlock()
|
2015-06-04 18:50:32 +00:00
|
|
|
m.mu.RLock()
|
|
|
|
defer m.mu.RUnlock()
|
2015-05-28 22:02:12 +00:00
|
|
|
|
2015-05-22 20:08:43 +00:00
|
|
|
// get the unique set of series ids and the filters that should be applied to each
|
|
|
|
filters, err := m.filters(stmt)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2015-06-20 18:18:33 +00:00
|
|
|
// For every series, get the tag values for the requested tag keys i.e. dimensions. This is the
|
|
|
|
// TagSet for that series. Series with the same TagSet are then grouped together, because for the
|
|
|
|
// purpose of GROUP BY they are part of the same composite series.
|
2015-05-22 20:08:43 +00:00
|
|
|
tagSets := make(map[string]*influxql.TagSet)
|
|
|
|
for id, filter := range filters {
|
|
|
|
s := m.seriesByID[id]
|
2015-06-20 18:18:33 +00:00
|
|
|
tags := make(map[string]string)
|
|
|
|
|
|
|
|
// Build the TagSet for this series.
|
|
|
|
for _, dim := range dimensions {
|
|
|
|
tags[dim] = s.Tags[dim]
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
|
|
|
|
2015-06-20 18:18:33 +00:00
|
|
|
// Convert the TagSet to a string, so it can be added to a map allowing TagSets to be handled
|
|
|
|
// as a set.
|
2015-07-20 19:59:46 +00:00
|
|
|
tagsAsKey := string(MarshalTags(tags))
|
2015-06-20 18:18:33 +00:00
|
|
|
tagSet, ok := tagSets[tagsAsKey]
|
2015-05-22 20:08:43 +00:00
|
|
|
if !ok {
|
2015-06-20 18:18:33 +00:00
|
|
|
// This TagSet is new, create a new entry for it.
|
|
|
|
tagSet = &influxql.TagSet{}
|
2015-05-22 20:08:43 +00:00
|
|
|
tagsForSet := make(map[string]string)
|
2015-06-20 18:18:33 +00:00
|
|
|
for k, v := range tags {
|
|
|
|
tagsForSet[k] = v
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
2015-06-20 18:18:33 +00:00
|
|
|
tagSet.Tags = tagsForSet
|
2015-07-20 19:59:46 +00:00
|
|
|
tagSet.Key = MarshalTags(tagsForSet)
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
2015-06-20 18:18:33 +00:00
|
|
|
|
|
|
|
// Associate the series and filter with the Tagset.
|
|
|
|
tagSet.AddFilter(m.seriesByID[id].Key, filter)
|
|
|
|
|
|
|
|
// Ensure it's back in the map.
|
|
|
|
tagSets[tagsAsKey] = tagSet
|
|
|
|
}
|
|
|
|
|
|
|
|
// The TagSets have been created, as a map of TagSets. Just send
|
|
|
|
// the values back as a slice, sorting for consistency.
|
|
|
|
sortedTagSetKeys := make([]string, 0, len(tagSets))
|
|
|
|
for k, _ := range tagSets {
|
|
|
|
sortedTagSetKeys = append(sortedTagSetKeys, k)
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
2015-06-20 18:18:33 +00:00
|
|
|
sort.Strings(sortedTagSetKeys)
|
2015-05-22 20:08:43 +00:00
|
|
|
|
2015-06-20 18:18:33 +00:00
|
|
|
sortedTagsSets := make([]*influxql.TagSet, 0, len(sortedTagSetKeys))
|
|
|
|
for _, k := range sortedTagSetKeys {
|
|
|
|
sortedTagsSets = append(sortedTagsSets, tagSets[k])
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
|
|
|
|
2015-06-20 18:18:33 +00:00
|
|
|
return sortedTagsSets, nil
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
|
|
|
|
2015-06-02 15:20:20 +00:00
|
|
|
// mergeSeriesFilters merges two sets of filter expressions and culls series IDs.
|
2015-07-20 19:59:46 +00:00
|
|
|
func mergeSeriesFilters(op influxql.Token, ids SeriesIDs, lfilters, rfilters map[uint64]influxql.Expr) (SeriesIDs, map[uint64]influxql.Expr) {
|
2015-06-02 15:20:20 +00:00
|
|
|
// Create a map to hold the final set of series filter expressions.
|
|
|
|
filters := make(map[uint64]influxql.Expr, 0)
|
|
|
|
// Resulting list of series IDs
|
2015-07-20 19:59:46 +00:00
|
|
|
var series SeriesIDs
|
2015-06-02 15:20:20 +00:00
|
|
|
|
|
|
|
// Combining logic:
|
|
|
|
// +==========+==========+==========+=======================+=======================+
|
|
|
|
// | operator | LHS | RHS | intermediate expr | reduced filter |
|
|
|
|
// +==========+==========+==========+=======================+=======================+
|
|
|
|
// | | <nil> | <r-expr> | true OR <r-expr> | true |
|
|
|
|
// | |----------+----------+-----------------------+-----------------------+
|
|
|
|
// | OR | <l-expr> | <nil> | <l-expr> OR true | true |
|
|
|
|
// | |----------+----------+-----------------------+-----------------------+
|
|
|
|
// | | <nil> | <nil> | true OR true | true |
|
|
|
|
// | |----------+----------+-----------------------+-----------------------+
|
|
|
|
// | | <l-expr> | <r-expr> | <l-expr> OR <r-expr> | <l-expr> OR <r-expr> |
|
|
|
|
// +----------+----------+----------+-----------------------+-----------------------+
|
|
|
|
// | | <nil> | <r-expr> | false AND <r-expr> | false* |
|
|
|
|
// | |----------+----------+-----------------------+-----------------------+
|
|
|
|
// | AND | <l-expr> | <nil> | <l-expr> AND false | false |
|
|
|
|
// | |----------+----------+-----------------------+-----------------------+
|
|
|
|
// | | <nil> | <nil> | false AND false | false |
|
|
|
|
// | |----------+----------+-----------------------+-----------------------+
|
|
|
|
// | | <l-expr> | <r-expr> | <l-expr> AND <r-expr> | <l-expr> AND <r-expr> |
|
|
|
|
// +----------+----------+----------+-----------------------+-----------------------+
|
|
|
|
// *literal false filters and series IDs should be excluded from the results
|
|
|
|
|
|
|
|
def := false
|
|
|
|
if op == influxql.OR {
|
|
|
|
def = true
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, id := range ids {
|
|
|
|
// Get LHS and RHS filter expressions for this series ID.
|
|
|
|
lfilter, rfilter := lfilters[id], rfilters[id]
|
|
|
|
|
|
|
|
// Set default filters if either LHS or RHS expressions were nil.
|
|
|
|
if lfilter == nil {
|
|
|
|
lfilter = &influxql.BooleanLiteral{Val: def}
|
|
|
|
}
|
|
|
|
if rfilter == nil {
|
|
|
|
rfilter = &influxql.BooleanLiteral{Val: def}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the intermediate filter expression for this series ID.
|
|
|
|
be := &influxql.BinaryExpr{
|
|
|
|
Op: op,
|
|
|
|
LHS: lfilter,
|
|
|
|
RHS: rfilter,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reduce the intermediate expression.
|
|
|
|
expr := influxql.Reduce(be, nil)
|
|
|
|
|
|
|
|
// If the expression reduced to false, exclude this series ID and filter.
|
|
|
|
if b, ok := expr.(*influxql.BooleanLiteral); ok && !b.Val {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store the series ID and merged filter in the final results.
|
|
|
|
filters[id] = expr
|
|
|
|
series = append(series, id)
|
|
|
|
}
|
|
|
|
return series, filters
|
|
|
|
}
|
|
|
|
|
|
|
|
// idsForExpr will return a collection of series ids and a filter expression that should
// be used to filter points from those series.
//
// NOTE(review): this calls m.HasField, which takes m.mu.RLock, while TagSets
// already holds m.mu.RLock higher up the call chain. Recursive RLock can
// deadlock if a writer queues between the two acquisitions — confirm.
func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Expr, error) {
	// The tag/field reference may be on either side of the comparison;
	// normalize so name is the VarRef and value is the literal side.
	name, ok := n.LHS.(*influxql.VarRef)
	value := n.RHS
	if !ok {
		name, ok = n.RHS.(*influxql.VarRef)
		if !ok {
			return nil, nil, fmt.Errorf("invalid expression: %s", n.String())
		}
		value = n.LHS
	}

	// For time literals, return all series IDs and "true" as the filter.
	if _, ok := value.(*influxql.TimeLiteral); ok || name.Val == "time" {
		return m.seriesIDs, &influxql.BooleanLiteral{Val: true}, nil
	}

	// For fields, return all series IDs from this measurement and return
	// the expression passed in, as the filter.
	if m.HasField(name.Val) {
		return m.seriesIDs, n, nil
	}

	// No series has ever written this tag key: nothing can match.
	tagVals, ok := m.seriesByTagKeyValue[name.Val]
	if !ok {
		return nil, nil, nil
	}

	// if we're looking for series with a specific tag value
	if str, ok := value.(*influxql.StringLiteral); ok {
		var ids SeriesIDs

		if n.Op == influxql.EQ {
			// return series that have a tag of specific value.
			ids = tagVals[str.Val]
		} else if n.Op == influxql.NEQ {
			ids = m.seriesIDs.Reject(tagVals[str.Val])
		}
		return ids, &influxql.BooleanLiteral{Val: true}, nil
	}

	// if we're looking for series with a tag value that matches a regex
	if re, ok := value.(*influxql.RegexLiteral); ok {
		var ids SeriesIDs

		// The operation is a NEQREGEX, code must start by assuming all match, even
		// series without any tags.
		if n.Op == influxql.NEQREGEX {
			ids = m.seriesIDs
		}

		for k := range tagVals {
			match := re.Val.MatchString(k)

			if match && n.Op == influxql.EQREGEX {
				ids = ids.Union(tagVals[k])
			} else if match && n.Op == influxql.NEQREGEX {
				ids = ids.Reject(tagVals[k])
			}
		}
		return ids, &influxql.BooleanLiteral{Val: true}, nil
	}

	// Value was neither a string nor a regex literal: no ids, no filter.
	return nil, nil, nil
}
|
|
|
|
|
2015-06-02 15:20:20 +00:00
|
|
|
// walkWhereForSeriesIds recursively walks the WHERE clause and returns an ordered set of series IDs and
// a map from those series IDs to filter expressions that should be used to limit points returned in
// the final query result.
func (m *Measurement) walkWhereForSeriesIds(expr influxql.Expr) (SeriesIDs, map[uint64]influxql.Expr, error) {
	switch n := expr.(type) {
	case *influxql.BinaryExpr:
		switch n.Op {
		case influxql.EQ, influxql.NEQ, influxql.LT, influxql.LTE, influxql.GT, influxql.GTE, influxql.EQREGEX, influxql.NEQREGEX:
			// Get the series IDs and filter expression for the tag or field comparison.
			ids, expr, err := m.idsForExpr(n)
			if err != nil {
				return nil, nil, err
			}

			// Every matching series gets the same leaf filter expression.
			filters := map[uint64]influxql.Expr{}
			for _, id := range ids {
				filters[id] = expr
			}

			return ids, filters, nil
		case influxql.AND, influxql.OR:
			// Get the series IDs and filter expressions for the LHS.
			lids, lfilters, err := m.walkWhereForSeriesIds(n.LHS)
			if err != nil {
				return nil, nil, err
			}

			// Get the series IDs and filter expressions for the RHS.
			rids, rfilters, err := m.walkWhereForSeriesIds(n.RHS)
			if err != nil {
				return nil, nil, err
			}

			// Combine the series IDs from the LHS and RHS.
			var ids SeriesIDs
			switch n.Op {
			case influxql.AND:
				ids = lids.Intersect(rids)
			case influxql.OR:
				ids = lids.Union(rids)
			}

			// Merge the filter expressions for the LHS and RHS.
			ids, filters := mergeSeriesFilters(n.Op, ids, lfilters, rfilters)

			return ids, filters, nil
		}

		// Binary expression with some other operator: fall back to the ids
		// only, with no per-series filters.
		ids, _, err := m.idsForExpr(n)
		return ids, nil, err
	case *influxql.ParenExpr:
		// walk down the tree
		return m.walkWhereForSeriesIds(n.Expr)
	default:
		// Non-binary, non-paren expressions contribute nothing.
		return nil, nil, nil
	}
}
|
|
|
|
|
|
|
|
// expandExpr returns a list of expressions expanded by all possible tag combinations.
|
|
|
|
func (m *Measurement) expandExpr(expr influxql.Expr) []tagSetExpr {
|
|
|
|
// Retrieve list of unique values for each tag.
|
|
|
|
valuesByTagKey := m.uniqueTagValues(expr)
|
|
|
|
|
|
|
|
// Convert keys to slices.
|
|
|
|
keys := make([]string, 0, len(valuesByTagKey))
|
|
|
|
for key := range valuesByTagKey {
|
|
|
|
keys = append(keys, key)
|
|
|
|
}
|
|
|
|
sort.Strings(keys)
|
|
|
|
|
|
|
|
// Order uniques by key.
|
|
|
|
uniques := make([][]string, len(keys))
|
|
|
|
for i, key := range keys {
|
|
|
|
uniques[i] = valuesByTagKey[key]
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reduce a condition for each combination of tag values.
|
|
|
|
return expandExprWithValues(expr, keys, []tagExpr{}, uniques, 0)
|
|
|
|
}
|
|
|
|
|
|
|
|
// expandExprWithValues recursively generates one tagSetExpr per combination of
// tag values. keys[index:] are the tag keys still to be assigned and tagExprs
// holds the assignments made so far; for each key it branches once per unique
// EQ value, plus once more for the NEQ "none of the above" case.
func expandExprWithValues(expr influxql.Expr, keys []string, tagExprs []tagExpr, uniques [][]string, index int) []tagSetExpr {
	// If we have no more keys left then execute the reduction and return.
	if index == len(keys) {
		// Create a map of tag key/values.
		m := make(map[string]*string, len(keys))
		for i, key := range keys {
			if tagExprs[i].op == influxql.EQ {
				m[key] = &tagExprs[i].values[0]
			} else {
				// NEQ assignments have no single concrete value; a nil entry
				// marks the key as present but unbound (see tagValuer.Value).
				m[key] = nil
			}
		}

		// TODO: Rewrite full expressions instead of VarRef replacement.

		// Reduce using the current tag key/value set.
		// Ignore it if reduces down to "false".
		e := influxql.Reduce(expr, &tagValuer{tags: m})
		if e, ok := e.(*influxql.BooleanLiteral); ok && e.Val == false {
			return nil
		}

		// copyTagExprs detaches the result from the shared accumulator's
		// backing array so later appends by callers cannot mutate it.
		return []tagSetExpr{{values: copyTagExprs(tagExprs), expr: e}}
	}

	// Otherwise expand for each possible equality value of the key.
	var exprs []tagSetExpr
	for _, v := range uniques[index] {
		exprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], []string{v}, influxql.EQ}), uniques, index+1)...)
	}
	exprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], uniques[index], influxql.NEQ}), uniques, index+1)...)

	return exprs
}
|
|
|
|
|
|
|
|
// seriesIDsAllOrByExpr walks an expressions for matching series IDs
|
|
|
|
// or, if no expressions is given, returns all series IDs for the measurement.
|
2015-07-20 19:59:46 +00:00
|
|
|
func (m *Measurement) seriesIDsAllOrByExpr(expr influxql.Expr) (SeriesIDs, error) {
|
2015-05-22 20:08:43 +00:00
|
|
|
// If no expression given or the measurement has no series,
|
|
|
|
// we can take just return the ids or nil accordingly.
|
|
|
|
if expr == nil {
|
|
|
|
return m.seriesIDs, nil
|
|
|
|
} else if len(m.seriesIDs) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get series IDs that match the WHERE clause.
|
2015-06-02 15:20:20 +00:00
|
|
|
ids, _, err := m.walkWhereForSeriesIds(expr)
|
2015-05-22 20:08:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return ids, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// tagValuer is used during expression expansion to evaluate all sets of tag values.
type tagValuer struct {
	// tags maps a tag key to its value; a nil value means the key is
	// present but has no single concrete value (the NEQ expansion case).
	tags map[string]*string
}

// Value returns the string value of a tag and true if it's listed in the tagset.
func (v *tagValuer) Value(name string) (interface{}, bool) {
	value, ok := v.tags[name]
	if !ok {
		// Unknown tag key.
		return nil, false
	}
	if value == nil {
		// Key is present but unbound.
		return nil, true
	}
	return *value, true
}
|
|
|
|
|
|
|
|
// tagSetExpr represents a set of tag keys/values and associated expression.
type tagSetExpr struct {
	// values is the tag key/value assignments this combination represents.
	values []tagExpr
	// expr is the expression reduced under those assignments.
	expr influxql.Expr
}
|
|
|
|
|
|
|
|
// tagExpr represents one or more values assigned to a given tag.
type tagExpr struct {
	// key is the tag key.
	key string
	// values holds a single value for EQ, or the full excluded value list for NEQ.
	values []string
	// op is the comparison operator for this assignment.
	op influxql.Token // EQ or NEQ
}
|
|
|
|
|
|
|
|
func copyTagExprs(a []tagExpr) []tagExpr {
|
|
|
|
other := make([]tagExpr, len(a))
|
|
|
|
copy(other, a)
|
|
|
|
return other
|
|
|
|
}
|
|
|
|
|
|
|
|
// uniqueTagValues returns a list of unique tag values used in an expression.
|
|
|
|
func (m *Measurement) uniqueTagValues(expr influxql.Expr) map[string][]string {
|
|
|
|
// Track unique value per tag.
|
|
|
|
tags := make(map[string]map[string]struct{})
|
|
|
|
|
|
|
|
// Find all tag values referenced in the expression.
|
|
|
|
influxql.WalkFunc(expr, func(n influxql.Node) {
|
|
|
|
switch n := n.(type) {
|
|
|
|
case *influxql.BinaryExpr:
|
|
|
|
// Ignore operators that are not equality.
|
|
|
|
if n.Op != influxql.EQ {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Extract ref and string literal.
|
|
|
|
var key, value string
|
|
|
|
switch lhs := n.LHS.(type) {
|
|
|
|
case *influxql.VarRef:
|
|
|
|
if rhs, ok := n.RHS.(*influxql.StringLiteral); ok {
|
|
|
|
key, value = lhs.Val, rhs.Val
|
|
|
|
}
|
|
|
|
case *influxql.StringLiteral:
|
|
|
|
if rhs, ok := n.RHS.(*influxql.VarRef); ok {
|
|
|
|
key, value = rhs.Val, lhs.Val
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if key == "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add value to set.
|
|
|
|
if tags[key] == nil {
|
|
|
|
tags[key] = make(map[string]struct{})
|
|
|
|
}
|
|
|
|
tags[key][value] = struct{}{}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// Convert to map of slices.
|
|
|
|
out := make(map[string][]string)
|
|
|
|
for k, values := range tags {
|
|
|
|
out[k] = make([]string, 0, len(values))
|
|
|
|
for v := range values {
|
|
|
|
out[k] = append(out[k], v)
|
|
|
|
}
|
|
|
|
sort.Strings(out[k])
|
|
|
|
}
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
|
|
|
|
// Measurements represents a list of *Measurement.
type Measurements []*Measurement

// Len implements sort.Interface.
func (a Measurements) Len() int { return len(a) }

// Less implements sort.Interface; measurements sort by name.
func (a Measurements) Less(i, j int) bool { return a[i].Name < a[j].Name }

// Swap implements sort.Interface.
func (a Measurements) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
|
|
|
|
|
|
func (a Measurements) intersect(other Measurements) Measurements {
|
|
|
|
l := a
|
|
|
|
r := other
|
|
|
|
|
|
|
|
// we want to iterate through the shortest one and stop
|
|
|
|
if len(other) < len(a) {
|
|
|
|
l = other
|
|
|
|
r = a
|
|
|
|
}
|
|
|
|
|
|
|
|
// they're in sorted order so advance the counter as needed.
|
|
|
|
// That is, don't run comparisons against lower values that we've already passed
|
|
|
|
var i, j int
|
|
|
|
|
|
|
|
result := make(Measurements, 0, len(l))
|
|
|
|
for i < len(l) && j < len(r) {
|
|
|
|
if l[i].Name == r[j].Name {
|
|
|
|
result = append(result, l[i])
|
|
|
|
i++
|
|
|
|
j++
|
|
|
|
} else if l[i].Name < r[j].Name {
|
|
|
|
i++
|
|
|
|
} else {
|
|
|
|
j++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a Measurements) union(other Measurements) Measurements {
|
|
|
|
result := make(Measurements, 0, len(a)+len(other))
|
|
|
|
var i, j int
|
|
|
|
for i < len(a) && j < len(other) {
|
|
|
|
if a[i].Name == other[j].Name {
|
|
|
|
result = append(result, a[i])
|
|
|
|
i++
|
|
|
|
j++
|
|
|
|
} else if a[i].Name < other[j].Name {
|
|
|
|
result = append(result, a[i])
|
|
|
|
i++
|
|
|
|
} else {
|
|
|
|
result = append(result, other[j])
|
|
|
|
j++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// now append the remainder
|
|
|
|
if i < len(a) {
|
|
|
|
result = append(result, a[i:]...)
|
|
|
|
} else if j < len(other) {
|
|
|
|
result = append(result, other[j:]...)
|
|
|
|
}
|
|
|
|
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
|
|
|
// Series belong to a Measurement and represent unique time series in a database
type Series struct {
	// Key is the unique series key; it appears to begin with the measurement
	// name followed by a comma (see MeasurementFromSeriesKey) — confirm.
	Key string
	// Tags holds the tag key/value pairs that identify this series.
	Tags map[string]string

	// id is the in-memory series ID assigned by the index.
	id uint64
	// measurement points back to the owning measurement.
	measurement *Measurement
}
|
|
|
|
|
2015-06-02 23:08:48 +00:00
|
|
|
// MarshalBinary encodes the object to a binary format.
|
|
|
|
func (s *Series) MarshalBinary() ([]byte, error) {
|
|
|
|
var pb internal.Series
|
|
|
|
pb.Key = &s.Key
|
|
|
|
for k, v := range s.Tags {
|
|
|
|
key := k
|
|
|
|
value := v
|
|
|
|
pb.Tags = append(pb.Tags, &internal.Tag{Key: &key, Value: &value})
|
|
|
|
}
|
|
|
|
return proto.Marshal(&pb)
|
|
|
|
}
|
|
|
|
|
2015-06-03 11:36:39 +00:00
|
|
|
// UnmarshalBinary decodes the object from a binary format.
|
2015-06-02 23:08:48 +00:00
|
|
|
func (s *Series) UnmarshalBinary(buf []byte) error {
|
|
|
|
var pb internal.Series
|
|
|
|
if err := proto.Unmarshal(buf, &pb); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
s.Key = pb.GetKey()
|
|
|
|
s.Tags = make(map[string]string)
|
|
|
|
for _, t := range pb.Tags {
|
|
|
|
s.Tags[t.GetKey()] = t.GetValue()
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-05-22 20:08:43 +00:00
|
|
|
// match returns true if all tags match the series' tags.
|
|
|
|
func (s *Series) match(tags map[string]string) bool {
|
|
|
|
for k, v := range tags {
|
|
|
|
if s.Tags[k] != v {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
// SeriesIDs is a convenience type for sorting, checking equality, and doing
// union and intersection of collections of series ids.
type SeriesIDs []uint64

// Len implements sort.Interface.
func (a SeriesIDs) Len() int { return len(a) }

// Less implements sort.Interface; ids sort in ascending numeric order.
func (a SeriesIDs) Less(i, j int) bool { return a[i] < a[j] }

// Swap implements sort.Interface.
func (a SeriesIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Equals assumes that both are sorted.
func (a SeriesIDs) Equals(other SeriesIDs) bool {
	if len(a) != len(other) {
		return false
	}
	for i := range a {
		if a[i] != other[i] {
			return false
		}
	}
	return true
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
// Intersect returns a new collection of series ids in sorted order that is the intersection of the two.
|
2015-05-22 20:08:43 +00:00
|
|
|
// The two collections must already be sorted.
|
2015-07-20 19:59:46 +00:00
|
|
|
func (a SeriesIDs) Intersect(other SeriesIDs) SeriesIDs {
|
2015-05-22 20:08:43 +00:00
|
|
|
l := a
|
|
|
|
r := other
|
|
|
|
|
|
|
|
// we want to iterate through the shortest one and stop
|
|
|
|
if len(other) < len(a) {
|
|
|
|
l = other
|
|
|
|
r = a
|
|
|
|
}
|
|
|
|
|
|
|
|
// they're in sorted order so advance the counter as needed.
|
|
|
|
// That is, don't run comparisons against lower values that we've already passed
|
|
|
|
var i, j int
|
|
|
|
|
|
|
|
ids := make([]uint64, 0, len(l))
|
|
|
|
for i < len(l) && j < len(r) {
|
|
|
|
if l[i] == r[j] {
|
|
|
|
ids = append(ids, l[i])
|
|
|
|
i++
|
|
|
|
j++
|
|
|
|
} else if l[i] < r[j] {
|
|
|
|
i++
|
|
|
|
} else {
|
|
|
|
j++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
return SeriesIDs(ids)
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
// Union returns a new collection of series ids in sorted order that is the union of the two.
|
2015-05-22 20:08:43 +00:00
|
|
|
// The two collections must already be sorted.
|
2015-07-20 19:59:46 +00:00
|
|
|
func (a SeriesIDs) Union(other SeriesIDs) SeriesIDs {
|
2015-05-22 20:08:43 +00:00
|
|
|
l := a
|
|
|
|
r := other
|
|
|
|
ids := make([]uint64, 0, len(l)+len(r))
|
|
|
|
var i, j int
|
|
|
|
for i < len(l) && j < len(r) {
|
|
|
|
if l[i] == r[j] {
|
|
|
|
ids = append(ids, l[i])
|
|
|
|
i++
|
|
|
|
j++
|
|
|
|
} else if l[i] < r[j] {
|
|
|
|
ids = append(ids, l[i])
|
|
|
|
i++
|
|
|
|
} else {
|
|
|
|
ids = append(ids, r[j])
|
|
|
|
j++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// now append the remainder
|
|
|
|
if i < len(l) {
|
|
|
|
ids = append(ids, l[i:]...)
|
|
|
|
} else if j < len(r) {
|
|
|
|
ids = append(ids, r[j:]...)
|
|
|
|
}
|
|
|
|
|
|
|
|
return ids
|
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
// Reject returns a new collection of series ids in sorted order with the passed in set removed from the original.
|
2015-05-22 20:08:43 +00:00
|
|
|
// This is useful for the NOT operator. The two collections must already be sorted.
|
2015-07-20 19:59:46 +00:00
|
|
|
func (a SeriesIDs) Reject(other SeriesIDs) SeriesIDs {
|
2015-05-22 20:08:43 +00:00
|
|
|
l := a
|
|
|
|
r := other
|
|
|
|
var i, j int
|
|
|
|
|
|
|
|
ids := make([]uint64, 0, len(l))
|
|
|
|
for i < len(l) && j < len(r) {
|
|
|
|
if l[i] == r[j] {
|
|
|
|
i++
|
|
|
|
j++
|
|
|
|
} else if l[i] < r[j] {
|
|
|
|
ids = append(ids, l[i])
|
|
|
|
i++
|
|
|
|
} else {
|
|
|
|
j++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Append the remainder
|
|
|
|
if i < len(l) {
|
|
|
|
ids = append(ids, l[i:]...)
|
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
return SeriesIDs(ids)
|
2015-05-22 20:08:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TagFilter represents a tag filter when looking up other tags or measurements.
type TagFilter struct {
	// Op is the comparison operator for the filter.
	Op influxql.Token
	// Key is the tag key being filtered on.
	Key string
	// Value is the literal tag value to compare against.
	Value string
	// Regex is the compiled pattern for regex-based comparisons.
	Regex *regexp.Regexp
}
|
|
|
|
|
|
|
|
// MarshalTags converts the tag set to bytes for use as a lookup key.
// The layout is all keys in sorted order, then all values in the same
// order, with '|' between every element: "k1|k2|v1|v2".
func MarshalTags(tags map[string]string) []byte {
	// Empty maps marshal to empty bytes.
	if len(tags) == 0 {
		return nil
	}

	// Collect and sort the keys, tallying the exact output size:
	// every key and value plus (2*n - 1) separator bytes.
	total := (len(tags) * 2) - 1
	keys := make([]string, 0, len(tags))
	for k, v := range tags {
		keys = append(keys, k)
		total += len(k) + len(v)
	}
	sort.Strings(keys)

	// Emit the key block; the '|' after the final key doubles as the
	// divider between the key block and the value block.
	out := make([]byte, 0, total)
	for _, k := range keys {
		out = append(out, k...)
		out = append(out, '|')
	}

	// Emit the value block with separators between (but not after) values.
	for i, k := range keys {
		out = append(out, tags[k]...)
		if i < len(keys)-1 {
			out = append(out, '|')
		}
	}
	return out
}
|
|
|
|
|
|
|
|
// timeBetweenInclusive returns true if t is between min and max, inclusive.
|
|
|
|
func timeBetweenInclusive(t, min, max time.Time) bool {
|
|
|
|
return (t.Equal(min) || t.After(min)) && (t.Equal(max) || t.Before(max))
|
|
|
|
}
|
|
|
|
|
2015-06-04 18:50:32 +00:00
|
|
|
// TagKeys returns a list of the measurement's tag names.
|
|
|
|
func (m *Measurement) TagKeys() []string {
|
|
|
|
m.mu.RLock()
|
|
|
|
defer m.mu.RUnlock()
|
2015-05-22 20:08:43 +00:00
|
|
|
keys := make([]string, 0, len(m.seriesByTagKeyValue))
|
|
|
|
for k := range m.seriesByTagKeyValue {
|
|
|
|
keys = append(keys, k)
|
|
|
|
}
|
|
|
|
sort.Strings(keys)
|
|
|
|
return keys
|
|
|
|
}
|
|
|
|
|
2015-07-22 14:53:20 +00:00
|
|
|
// SetFieldName adds the field name to the measurement.
|
|
|
|
func (m *Measurement) SetFieldName(name string) {
|
|
|
|
m.mu.Lock()
|
|
|
|
m.fieldNames[name] = struct{}{}
|
|
|
|
m.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2015-06-04 18:50:32 +00:00
|
|
|
// FieldNames returns a list of the measurement's field names
|
|
|
|
func (m *Measurement) FieldNames() (a []string) {
|
|
|
|
m.mu.RLock()
|
|
|
|
defer m.mu.RUnlock()
|
|
|
|
|
|
|
|
for n, _ := range m.fieldNames {
|
|
|
|
a = append(a, n)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-07-20 19:59:46 +00:00
|
|
|
func (m *Measurement) tagValuesByKeyAndSeriesID(tagKeys []string, ids SeriesIDs) map[string]stringSet {
|
2015-05-22 20:08:43 +00:00
|
|
|
// If no tag keys were passed, get all tag keys for the measurement.
|
|
|
|
if len(tagKeys) == 0 {
|
|
|
|
for k := range m.seriesByTagKeyValue {
|
|
|
|
tagKeys = append(tagKeys, k)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Mapping between tag keys to all existing tag values.
|
|
|
|
tagValues := make(map[string]stringSet, 0)
|
|
|
|
|
|
|
|
// Iterate all series to collect tag values.
|
|
|
|
for _, id := range ids {
|
|
|
|
s, ok := m.seriesByID[id]
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate the tag keys we're interested in and collect values
|
|
|
|
// from this series, if they exist.
|
|
|
|
for _, tagKey := range tagKeys {
|
|
|
|
if tagVal, ok := s.Tags[tagKey]; ok {
|
|
|
|
if _, ok = tagValues[tagKey]; !ok {
|
|
|
|
tagValues[tagKey] = newStringSet()
|
|
|
|
}
|
|
|
|
tagValues[tagKey].add(tagVal)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return tagValues
|
|
|
|
}
|
|
|
|
|
2015-07-06 16:01:41 +00:00
|
|
|
// stringSet represents a set of strings.
type stringSet map[string]struct{}

// newStringSet returns an empty stringSet.
func newStringSet() stringSet {
	return make(map[string]struct{})
}

// add adds strings to the set.
func (s stringSet) add(ss ...string) {
	for _, n := range ss {
		s[n] = struct{}{}
	}
}

// contains returns whether the set contains the given string.
func (s stringSet) contains(ss string) bool {
	_, ok := s[ss]
	return ok
}

// list returns the current elements in the set, in sorted order.
func (s stringSet) list() []string {
	l := make([]string, 0, len(s))
	for k := range s {
		l = append(l, k)
	}
	sort.Strings(l)
	return l
}

// union returns the union of this set and another.
func (s stringSet) union(o stringSet) stringSet {
	ns := newStringSet()
	for k := range s {
		ns[k] = struct{}{}
	}
	for k := range o {
		ns[k] = struct{}{}
	}
	return ns
}

// intersect returns the intersection of this set and another.
// (Doc comment previously said "union"; also removed the second loop, which
// recomputed the exact same intersection from the other side.)
func (s stringSet) intersect(o stringSet) stringSet {
	// Iterate the smaller set so membership tests run against the larger one.
	if len(o) < len(s) {
		s, o = o, s
	}

	ns := newStringSet()
	for k := range s {
		if _, ok := o[k]; ok {
			ns[k] = struct{}{}
		}
	}
	return ns
}
|
|
|
|
|
2015-07-22 14:53:20 +00:00
|
|
|
// MeasurementFromSeriesKey returns the measurement portion of a series key:
// everything before the first comma. A key with no comma is returned unchanged.
func MeasurementFromSeriesKey(key string) string {
	idx := strings.Index(key, ",")
	if idx == -1 {
		return key
	}
	// Reuse the index computed above instead of re-scanning the key
	// (the original called strings.Index a second time here).
	return key[:idx]
}
|