Merge pull request #8947 from influxdata/sgc-explain

EXPLAIN ANALYZE implementation
pull/8992/head
Stuart Carnie 2017-10-20 08:35:13 -07:00 committed by GitHub
commit 618f0d0aa7
64 changed files with 3997 additions and 438 deletions

1
Godeps
View File

@ -20,5 +20,6 @@ github.com/spaolacci/murmur3 0d12bf811670bf6a1a63828dfbd003eded177fce
github.com/tinylib/msgp ad0ff2e232ad2e37faf67087fb24bf8d04a8ce20 github.com/tinylib/msgp ad0ff2e232ad2e37faf67087fb24bf8d04a8ce20
github.com/uber-go/atomic 74ca5ec650841aee9f289dce76e928313a37cbc6 github.com/uber-go/atomic 74ca5ec650841aee9f289dce76e928313a37cbc6
github.com/uber-go/zap fbae0281ffd546fa6d1959fec6075ac5da7fb577 github.com/uber-go/zap fbae0281ffd546fa6d1959fec6075ac5da7fb577
github.com/xlab/treeprint 06dfc6fa17cdde904617990a0c2d89e3e332dbb3
golang.org/x/crypto 9477e0b78b9ac3d0b03822fd95422e2fe07627cd golang.org/x/crypto 9477e0b78b9ac3d0b03822fd95422e2fe07627cd
golang.org/x/sys 062cd7e4e68206d8bab9b18396626e855c992658 golang.org/x/sys 062cd7e4e68206d8bab9b18396626e855c992658

View File

@ -25,3 +25,4 @@
- github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt) - github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) - golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) - jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
- github.com/xlab/treeprint [MIT LICENSE](https://github.com/xlab/treeprint/blob/master/LICENSE)

View File

@ -1,6 +1,7 @@
package coordinator package coordinator
import ( import (
"context"
"io" "io"
"time" "time"
@ -160,7 +161,7 @@ func (a *LocalShardMapping) MapType(m *influxql.Measurement, field string) influ
return typ return typ
} }
func (a *LocalShardMapping) CreateIterator(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { func (a *LocalShardMapping) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
source := Source{ source := Source{
Database: m.Database, Database: m.Database,
RetentionPolicy: m.RetentionPolicy, RetentionPolicy: m.RetentionPolicy,
@ -184,7 +185,7 @@ func (a *LocalShardMapping) CreateIterator(m *influxql.Measurement, opt query.It
inputs := make([]query.Iterator, 0, len(measurements)) inputs := make([]query.Iterator, 0, len(measurements))
if err := func() error { if err := func() error {
for _, measurement := range measurements { for _, measurement := range measurements {
input, err := sg.CreateIterator(measurement, opt) input, err := sg.CreateIterator(ctx, measurement, opt)
if err != nil { if err != nil {
return err return err
} }
@ -197,7 +198,7 @@ func (a *LocalShardMapping) CreateIterator(m *influxql.Measurement, opt query.It
} }
return query.Iterators(inputs).Merge(opt) return query.Iterators(inputs).Merge(opt)
} }
return sg.CreateIterator(m.Name, opt) return sg.CreateIterator(ctx, m.Name, opt)
} }
func (a *LocalShardMapping) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { func (a *LocalShardMapping) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) {

View File

@ -1,6 +1,7 @@
package coordinator_test package coordinator_test
import ( import (
"context"
"reflect" "reflect"
"testing" "testing"
"time" "time"
@ -40,7 +41,7 @@ func TestLocalShardMapper(t *testing.T) {
} }
var sh MockShard var sh MockShard
sh.CreateIteratorFn = func(measurement string, opt query.IteratorOptions) (query.Iterator, error) { sh.CreateIteratorFn = func(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) {
if measurement != "cpu" { if measurement != "cpu" {
t.Errorf("unexpected measurement: %s", measurement) t.Errorf("unexpected measurement: %s", measurement)
} }
@ -74,7 +75,7 @@ func TestLocalShardMapper(t *testing.T) {
t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap))
} }
if _, err := ic.CreateIterator(measurement, query.IteratorOptions{}); err != nil { if _, err := ic.CreateIterator(context.Background(), measurement, query.IteratorOptions{}); err != nil {
t.Fatalf("unexpected error: %s", err) t.Fatalf("unexpected error: %s", err)
} }
@ -97,7 +98,7 @@ func TestLocalShardMapper(t *testing.T) {
t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap))
} }
if _, err := ic.CreateIterator(measurement, query.IteratorOptions{}); err != nil { if _, err := ic.CreateIterator(context.Background(), measurement, query.IteratorOptions{}); err != nil {
t.Fatalf("unexpected error: %s", err) t.Fatalf("unexpected error: %s", err)
} }
} }

View File

@ -2,6 +2,7 @@ package coordinator
import ( import (
"bytes" "bytes"
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -14,6 +15,8 @@ import (
"github.com/influxdata/influxdb/influxql" "github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/monitor" "github.com/influxdata/influxdb/monitor"
"github.com/influxdata/influxdb/pkg/tracing"
"github.com/influxdata/influxdb/pkg/tracing/fields"
"github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb"
@ -56,7 +59,7 @@ type StatementExecutor struct {
func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx query.ExecutionContext) error { func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx query.ExecutionContext) error {
// Select statements are handled separately so that they can be streamed. // Select statements are handled separately so that they can be streamed.
if stmt, ok := stmt.(*influxql.SelectStatement); ok { if stmt, ok := stmt.(*influxql.SelectStatement); ok {
return e.executeSelectStatement(stmt, &ctx) return e.executeSelectStatement(context.Background(), stmt, &ctx)
} }
var rows models.Rows var rows models.Rows
@ -136,7 +139,11 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx query.
} }
err = e.executeDropUserStatement(stmt) err = e.executeDropUserStatement(stmt)
case *influxql.ExplainStatement: case *influxql.ExplainStatement:
rows, err = e.executeExplainStatement(stmt, &ctx) if stmt.Analyze {
rows, err = e.executeExplainAnalyzeStatement(stmt, &ctx)
} else {
rows, err = e.executeExplainStatement(stmt, &ctx)
}
case *influxql.GrantStatement: case *influxql.GrantStatement:
if ctx.ReadOnly { if ctx.ReadOnly {
messages = append(messages, query.ReadOnlyWarning(stmt.String())) messages = append(messages, query.ReadOnlyWarning(stmt.String()))
@ -401,17 +408,13 @@ func (e *StatementExecutor) executeDropUserStatement(q *influxql.DropUserStateme
return e.MetaClient.DropUser(q.Name) return e.MetaClient.DropUser(q.Name)
} }
func (e *StatementExecutor) executeExplainStatement(q *influxql.ExplainStatement, ctx *query.ExecutionContext) (models.Rows, error) { func (e *StatementExecutor) executeExplainStatement(q *influxql.ExplainStatement, ectx *query.ExecutionContext) (models.Rows, error) {
if q.Analyze {
return nil, errors.New("analyze is currently unimplemented")
}
opt := query.SelectOptions{ opt := query.SelectOptions{
InterruptCh: ctx.InterruptCh, InterruptCh: ectx.InterruptCh,
NodeID: ctx.ExecutionOptions.NodeID, NodeID: ectx.ExecutionOptions.NodeID,
MaxSeriesN: e.MaxSelectSeriesN, MaxSeriesN: e.MaxSelectSeriesN,
MaxBucketsN: e.MaxSelectBucketsN, MaxBucketsN: e.MaxSelectBucketsN,
Authorizer: ctx.Authorizer, Authorizer: ectx.Authorizer,
} }
// Prepare the query for execution, but do not actually execute it. // Prepare the query for execution, but do not actually execute it.
@ -437,6 +440,74 @@ func (e *StatementExecutor) executeExplainStatement(q *influxql.ExplainStatement
return models.Rows{row}, nil return models.Rows{row}, nil
} }
func (e *StatementExecutor) executeExplainAnalyzeStatement(q *influxql.ExplainStatement, ectx *query.ExecutionContext) (models.Rows, error) {
stmt := q.Statement
t, span := tracing.NewTrace("select")
ctx := tracing.NewContextWithTrace(context.Background(), t)
ctx = tracing.NewContextWithSpan(ctx, span)
start := time.Now()
itrs, columns, err := e.createIterators(ctx, stmt, ectx)
if err != nil {
return nil, err
}
iterTime := time.Since(start)
// Generate a row emitter from the iterator set.
em := query.NewEmitter(itrs, stmt.TimeAscending(), ectx.ChunkSize)
em.Columns = columns
if stmt.Location != nil {
em.Location = stmt.Location
}
em.OmitTime = stmt.OmitTime
em.EmitName = stmt.EmitName
// Emit rows to the results channel.
var writeN int64
for {
var row *models.Row
row, _, err = em.Emit()
if err != nil {
goto CLEANUP
} else if row == nil {
// Check if the query was interrupted while emitting.
select {
case <-ectx.InterruptCh:
err = query.ErrQueryInterrupted
goto CLEANUP
default:
}
break
}
writeN += int64(len(row.Values))
}
CLEANUP:
em.Close()
if err != nil {
return nil, err
}
totalTime := time.Since(start)
span.MergeFields(
fields.Duration("total_time", totalTime),
fields.Duration("planning_time", iterTime),
fields.Duration("execution_time", totalTime-iterTime),
)
span.Finish()
row := &models.Row{
Columns: []string{"EXPLAIN ANALYZE"},
}
for _, s := range strings.Split(t.Tree().String(), "\n") {
row.Values = append(row.Values, []interface{}{s})
}
return models.Rows{row}, nil
}
func (e *StatementExecutor) executeGrantStatement(stmt *influxql.GrantStatement) error { func (e *StatementExecutor) executeGrantStatement(stmt *influxql.GrantStatement) error {
return e.MetaClient.SetPrivilege(stmt.User, stmt.On, stmt.Privilege) return e.MetaClient.SetPrivilege(stmt.User, stmt.On, stmt.Privilege)
} }
@ -469,14 +540,14 @@ func (e *StatementExecutor) executeSetPasswordUserStatement(q *influxql.SetPassw
return e.MetaClient.UpdateUser(q.Name, q.Password) return e.MetaClient.UpdateUser(q.Name, q.Password)
} }
func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *query.ExecutionContext) error { func (e *StatementExecutor) executeSelectStatement(ctx context.Context, stmt *influxql.SelectStatement, ectx *query.ExecutionContext) error {
itrs, columns, err := e.createIterators(stmt, ctx) itrs, columns, err := e.createIterators(ctx, stmt, ectx)
if err != nil { if err != nil {
return err return err
} }
// Generate a row emitter from the iterator set. // Generate a row emitter from the iterator set.
em := query.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize) em := query.NewEmitter(itrs, stmt.TimeAscending(), ectx.ChunkSize)
em.Columns = columns em.Columns = columns
if stmt.Location != nil { if stmt.Location != nil {
em.Location = stmt.Location em.Location = stmt.Location
@ -501,7 +572,7 @@ func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatemen
} else if row == nil { } else if row == nil {
// Check if the query was interrupted while emitting. // Check if the query was interrupted while emitting.
select { select {
case <-ctx.InterruptCh: case <-ectx.InterruptCh:
return query.ErrQueryInterrupted return query.ErrQueryInterrupted
default: default:
} }
@ -518,13 +589,13 @@ func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatemen
} }
result := &query.Result{ result := &query.Result{
StatementID: ctx.StatementID, StatementID: ectx.StatementID,
Series: []*models.Row{row}, Series: []*models.Row{row},
Partial: partial, Partial: partial,
} }
// Send results or exit if closing. // Send results or exit if closing.
if err := ctx.Send(result); err != nil { if err := ectx.Send(result); err != nil {
return err return err
} }
@ -538,12 +609,12 @@ func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatemen
} }
var messages []*query.Message var messages []*query.Message
if ctx.ReadOnly { if ectx.ReadOnly {
messages = append(messages, query.ReadOnlyWarning(stmt.String())) messages = append(messages, query.ReadOnlyWarning(stmt.String()))
} }
return ctx.Send(&query.Result{ return ectx.Send(&query.Result{
StatementID: ctx.StatementID, StatementID: ectx.StatementID,
Messages: messages, Messages: messages,
Series: []*models.Row{{ Series: []*models.Row{{
Name: "result", Name: "result",
@ -555,8 +626,8 @@ func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatemen
// Always emit at least one result. // Always emit at least one result.
if !emitted { if !emitted {
return ctx.Send(&query.Result{ return ectx.Send(&query.Result{
StatementID: ctx.StatementID, StatementID: ectx.StatementID,
Series: make([]*models.Row, 0), Series: make([]*models.Row, 0),
}) })
} }
@ -564,24 +635,24 @@ func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatemen
return nil return nil
} }
func (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx *query.ExecutionContext) ([]query.Iterator, []string, error) { func (e *StatementExecutor) createIterators(ctx context.Context, stmt *influxql.SelectStatement, ectx *query.ExecutionContext) ([]query.Iterator, []string, error) {
opt := query.SelectOptions{ opt := query.SelectOptions{
InterruptCh: ctx.InterruptCh, InterruptCh: ectx.InterruptCh,
NodeID: ctx.ExecutionOptions.NodeID, NodeID: ectx.ExecutionOptions.NodeID,
MaxSeriesN: e.MaxSelectSeriesN, MaxSeriesN: e.MaxSelectSeriesN,
MaxBucketsN: e.MaxSelectBucketsN, MaxBucketsN: e.MaxSelectBucketsN,
Authorizer: ctx.Authorizer, Authorizer: ectx.Authorizer,
} }
// Create a set of iterators from a selection. // Create a set of iterators from a selection.
itrs, columns, err := query.Select(stmt, e.ShardMapper, opt) itrs, columns, err := query.Select(ctx, stmt, e.ShardMapper, opt)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
if e.MaxSelectPointN > 0 { if e.MaxSelectPointN > 0 {
monitor := query.PointLimitMonitor(itrs, query.DefaultStatsInterval, e.MaxSelectPointN) monitor := query.PointLimitMonitor(itrs, query.DefaultStatsInterval, e.MaxSelectPointN)
ctx.Query.Monitor(monitor) ectx.Query.Monitor(monitor)
} }
return itrs, columns, nil return itrs, columns, nil
} }
@ -1073,8 +1144,8 @@ func (e *StatementExecutor) NormalizeStatement(stmt influxql.Statement, defaultD
case *influxql.Measurement: case *influxql.Measurement:
switch stmt.(type) { switch stmt.(type) {
case *influxql.DropSeriesStatement, *influxql.DeleteSeriesStatement: case *influxql.DropSeriesStatement, *influxql.DeleteSeriesStatement:
// DB and RP not supported by these statements so don't rewrite into invalid // DB and RP not supported by these statements so don't rewrite into invalid
// statements // statements
default: default:
err = e.normalizeMeasurement(node, defaultDatabase) err = e.normalizeMeasurement(node, defaultDatabase)
} }

View File

@ -2,6 +2,7 @@ package coordinator_test
import ( import (
"bytes" "bytes"
"context"
"errors" "errors"
"io" "io"
"os" "os"
@ -50,7 +51,7 @@ func TestQueryExecutor_ExecuteQuery_SelectStatement(t *testing.T) {
} }
var sh MockShard var sh MockShard
sh.CreateIteratorFn = func(m string, opt query.IteratorOptions) (query.Iterator, error) { sh.CreateIteratorFn = func(ctx context.Context, m string, opt query.IteratorOptions) (query.Iterator, error) {
return &FloatIterator{Points: []query.FloatPoint{ return &FloatIterator{Points: []query.FloatPoint{
{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}, {Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}},
{Name: "cpu", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}}, {Name: "cpu", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}},
@ -103,7 +104,7 @@ func TestQueryExecutor_ExecuteQuery_MaxSelectBucketsN(t *testing.T) {
} }
var sh MockShard var sh MockShard
sh.CreateIteratorFn = func(m string, opt query.IteratorOptions) (query.Iterator, error) { sh.CreateIteratorFn = func(ctx context.Context, m string, opt query.IteratorOptions) (query.Iterator, error) {
return &FloatIterator{ return &FloatIterator{
Points: []query.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}}, Points: []query.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}},
}, nil }, nil
@ -384,7 +385,7 @@ func (s *TSDBStore) TagValues(_ query.Authorizer, database string, cond influxql
type MockShard struct { type MockShard struct {
Measurements []string Measurements []string
FieldDimensionsFn func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) FieldDimensionsFn func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
CreateIteratorFn func(m string, opt query.IteratorOptions) (query.Iterator, error) CreateIteratorFn func(ctx context.Context, m string, opt query.IteratorOptions) (query.Iterator, error)
IteratorCostFn func(m string, opt query.IteratorOptions) (query.IteratorCost, error) IteratorCostFn func(m string, opt query.IteratorOptions) (query.IteratorCost, error)
ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error)
} }
@ -417,8 +418,8 @@ func (sh *MockShard) MapType(measurement, field string) influxql.DataType {
return influxql.Unknown return influxql.Unknown
} }
func (sh *MockShard) CreateIterator(measurement string, opt query.IteratorOptions) (query.Iterator, error) { func (sh *MockShard) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) {
return sh.CreateIteratorFn(measurement, opt) return sh.CreateIteratorFn(ctx, measurement, opt)
} }
func (sh *MockShard) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) { func (sh *MockShard) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) {

20
pkg/metrics/context.go Normal file
View File

@ -0,0 +1,20 @@
package metrics
import "context"
// key is an unexported context-key type so that values stored by this
// package cannot collide with context keys defined in other packages.
type key int

const (
	// groupKey is the context key under which a *Group is stored
	// (see NewContextWithGroup / GroupFromContext).
	groupKey key = iota
)
// NewContextWithGroup returns a copy of ctx carrying the given Group,
// retrievable later via GroupFromContext.
func NewContextWithGroup(ctx context.Context, g *Group) context.Context {
	return context.WithValue(ctx, groupKey, g)
}
// GroupFromContext returns the Group associated with ctx or nil if no Group has been assigned.
func GroupFromContext(ctx context.Context) *Group {
	if g, ok := ctx.Value(groupKey).(*Group); ok {
		return g
	}
	return nil
}

28
pkg/metrics/counter.go Normal file
View File

@ -0,0 +1,28 @@
package metrics
import (
"strconv"
"sync/atomic"
)
// The Counter type represents a numeric counter that is safe to use from concurrent goroutines.
type Counter struct {
	val  int64 // accessed only via sync/atomic
	desc *desc
}

// Name identifies the name of the counter.
func (c *Counter) Name() string { return c.desc.Name }

// Value atomically returns the current value of the counter.
func (c *Counter) Value() int64 { return atomic.LoadInt64(&c.val) }

// Add atomically adds d to the counter.
func (c *Counter) Add(d int64) { atomic.AddInt64(&c.val, d) }

// String returns a string representation using the name and value of the counter.
func (c *Counter) String() string {
	// Read via Value() so String is safe to call concurrently with Add;
	// a raw read of c.val would be a data race under the race detector.
	// 20 bytes fits any int64 (sign plus 19 digits), so the append below
	// never reallocates.
	var buf [20]byte
	v := strconv.AppendInt(buf[:0], c.Value(), 10)
	return c.desc.Name + ": " + string(v)
}

View File

@ -0,0 +1,14 @@
package metrics
import (
"testing"
)
// TestCounter_Add verifies that successive Add calls accumulate into the
// counter's value.
func TestCounter_Add(t *testing.T) {
	var c Counter
	for i := 0; i < 2; i++ {
		c.Add(5)
	}
	if want, have := int64(10), c.Value(); want != have {
		t.Errorf("unexpected value; exp=%d, got=%d", want, have)
	}
}

View File

@ -0,0 +1,36 @@
package metrics
// defaultRegistry backs the package-level convenience functions below.
var defaultRegistry = NewRegistry()
// MustRegisterGroup registers a new group using the specified name.
// If the group name is not unique, MustRegisterGroup will panic.
// It delegates to the package-level defaultRegistry.
//
// MustRegisterGroup is not safe to call from multiple goroutines.
func MustRegisterGroup(name string) GID {
	return defaultRegistry.MustRegisterGroup(name)
}

// MustRegisterCounter registers a new counter metric with the default registry
// using the provided descriptor.
// If the metric name is not unique, MustRegisterCounter will panic.
//
// MustRegisterCounter is not safe to call from multiple goroutines.
func MustRegisterCounter(name string, opts ...descOption) ID {
	return defaultRegistry.MustRegisterCounter(name, opts...)
}

// MustRegisterTimer registers a new timer metric with the default registry
// using the provided descriptor.
// If the metric name is not unique, MustRegisterTimer will panic.
//
// MustRegisterTimer is not safe to call from multiple goroutines.
func MustRegisterTimer(name string, opts ...descOption) ID {
	return defaultRegistry.MustRegisterTimer(name, opts...)
}

// NewGroup returns a new measurement group from the default registry.
//
// NewGroup is safe to call from multiple goroutines.
func NewGroup(gid GID) *Group {
	return defaultRegistry.NewGroup(gid)
}

View File

@ -0,0 +1,64 @@
package metrics
// groupDesc describes a registered measurement group: its public name and
// the group ID assigned to it at registration time.
type groupDesc struct {
	Name string
	id   GID
}
// metricType discriminates the kind of measurement a desc describes.
type metricType int

const (
	counterMetricType metricType = iota
	timerMetricType
)
// desc carries the metadata for a single metric: its name, its kind,
// the group it belongs to, and the ID assigned when it is registered.
type desc struct {
	Name string
	mt   metricType
	gid  GID
	id   ID
}
// descOption mutates a desc during construction (see newDesc).
type descOption func(*desc)

// WithGroup assigns the associated measurement to the group identified by gid originally
// returned from MustRegisterGroup.
func WithGroup(gid GID) descOption {
	return func(d *desc) {
		d.gid = gid
	}
}
// newDesc builds a descriptor with the given name and applies each option
// in order.
func newDesc(name string, opts ...descOption) *desc {
	d := &desc{Name: name}
	for _, opt := range opts {
		opt(d)
	}
	return d
}
const (
	// The low 32 bits of an ID hold the metric's index within its group;
	// the high 32 bits hold the group's GID.
	idMask   = (1 << 32) - 1
	gidShift = 32
)

type (
	// GID identifies a measurement group.
	GID uint32
	// ID identifies a single metric, packing its owning GID in the high
	// 32 bits and its per-group index in the low 32 bits.
	ID uint64
)

// newID packs id and gid into a single ID value.
func newID(id int, gid GID) ID {
	return ID(gid)<<gidShift | (ID(id) & idMask)
}

// id extracts the metric's index within its group.
func (id ID) id() int {
	return int(id & idMask)
}

// gid extracts the owning group's identifier.
func (id ID) gid() int {
	return int(id >> gidShift)
}

// setGID stamps gid into the high 32 bits of id.
func (id *ID) setGID(gid GID) {
	*id |= ID(gid) << gidShift
}

View File

@ -0,0 +1,21 @@
package metrics
import (
"testing"
"github.com/influxdata/influxdb/pkg/testing/assert"
)
// TestID_newID verifies that newID packs the group ID into the high 32 bits
// and the metric index into the low 32 bits.
func TestID_newID(t *testing.T) {
	var id = newID(0xff, 0xff0f0fff)
	assert.Equal(t, id, ID(0xff0f0fff000000ff))
	assert.Equal(t, id.id(), 0xff)
	assert.Equal(t, id.gid(), 0xff0f0fff)
}

// TestID_setGID verifies that setGID stamps a group ID into an existing ID
// without disturbing the low 32 bits.
func TestID_setGID(t *testing.T) {
	var id = ID(1)
	assert.Equal(t, id.gid(), 0)
	id.setGID(1)
	assert.Equal(t, id.gid(), 1)
}

6
pkg/metrics/doc.go Normal file
View File

@ -0,0 +1,6 @@
/*
Package metrics provides various measurements that are safe for concurrent access.
Measurements are arranged into groups that are efficient to create and access.
*/
package metrics

37
pkg/metrics/group.go Normal file
View File

@ -0,0 +1,37 @@
package metrics
// The Group type represents an instance of a set of measurements that are used for
// instrumenting a specific request.
type Group struct {
	g *groupRegistry // registry this group was created from

	// counters and timers are indexed by the low 32 bits of the IDs
	// handed out at registration time; see GetCounter / GetTimer.
	counters []Counter
	timers   []Timer
}

// Name returns the name of the group.
func (g *Group) Name() string { return g.g.desc.Name }

// GetCounter returns the counter identified by the id that was returned
// by MustRegisterCounter for the same group.
// Using an id from a different group will result in undefined behavior.
func (g *Group) GetCounter(id ID) *Counter { return &g.counters[id.id()] }

// GetTimer returns the timer identified by the id that was returned
// by MustRegisterTimer for the same group.
// Using an id from a different group will result in undefined behavior.
func (g *Group) GetTimer(id ID) *Timer { return &g.timers[id.id()] }

// The Metric type is the read-only interface common to all measurements
// in a group: anything that can report its name.
type Metric interface {
	Name() string
}

// ForEach calls fn for all measurements of the group:
// first every counter, then every timer.
func (g *Group) ForEach(fn func(v Metric)) {
	for i := range g.counters {
		fn(&g.counters[i])
	}
	for i := range g.timers {
		fn(&g.timers[i])
	}
}

View File

@ -0,0 +1,79 @@
package metrics
import (
"fmt"
"sort"
)
// The groupRegistry type represents a set of metrics that are measured together.
type groupRegistry struct {
	desc *groupDesc

	// descriptors is kept sorted by metric name; register relies on this
	// ordering for its duplicate-name check.
	descriptors []*desc

	// group is the prototype instance that newGroup copies for each caller.
	group Group
}
// register adds desc to the group's sorted descriptor list, returning an
// error if a metric with the same name is already registered.
//
// register is not safe to call from multiple goroutines.
func (g *groupRegistry) register(desc *desc) error {
	// g.descriptors is kept sorted by name, so binary search can locate
	// where desc.Name would sit. The predicate handed to sort.Search must
	// be monotonic (false… then true…); the previous equality predicate
	// violated that contract and could miss existing duplicates.
	p := sort.Search(len(g.descriptors), func(i int) bool {
		return g.descriptors[i].Name >= desc.Name
	})

	if p < len(g.descriptors) && g.descriptors[p].Name == desc.Name {
		return fmt.Errorf("metric name '%s' already in use", desc.Name)
	}

	g.descriptors = append(g.descriptors, desc)
	// Re-sort to restore the by-name ordering after the append.
	sort.Slice(g.descriptors, func(i, j int) bool {
		return g.descriptors[i].Name < g.descriptors[j].Name
	})

	return nil
}
// mustRegister registers desc and panics if the metric name is already in use.
func (g *groupRegistry) mustRegister(desc *desc) {
	if err := g.register(desc); err != nil {
		panic(err.Error())
	}
}
// mustRegisterCounter registers a new counter metric using the provided descriptor.
// If the metric name is not unique, mustRegisterCounter will panic.
//
// mustRegisterCounter is not safe to call from multiple goroutines.
func (g *groupRegistry) mustRegisterCounter(desc *desc) ID {
	desc.mt = counterMetricType
	g.mustRegister(desc)

	// The counter's index within the group forms the low bits of its ID;
	// the group's GID forms the high bits.
	desc.id = newID(len(g.group.counters), g.desc.id)
	g.group.counters = append(g.group.counters, Counter{desc: desc})

	return desc.id
}
// mustRegisterTimer registers a new timer metric using the provided descriptor.
// If the metric name is not unique, mustRegisterTimer will panic.
//
// mustRegisterTimer is not safe to call from multiple goroutines.
func (g *groupRegistry) mustRegisterTimer(desc *desc) ID {
	desc.mt = timerMetricType
	g.mustRegister(desc)

	// The timer's index within the group forms the low bits of its ID;
	// the group's GID forms the high bits.
	desc.id = newID(len(g.group.timers), g.desc.id)
	g.group.timers = append(g.group.timers, Timer{desc: desc})

	return desc.id
}
// newGroup returns a Group with a copy of all the registered counters and
// timers, so each caller gets independent measurement state.
//
// newGroup is safe to call from multiple goroutines.
func (g *groupRegistry) newGroup() *Group {
	c := &Group{
		g:        g,
		counters: make([]Counter, len(g.group.counters)),
		timers:   make([]Timer, len(g.group.timers)),
	}

	copy(c.counters, g.group.counters)
	copy(c.timers, g.group.timers)

	return c
}

87
pkg/metrics/registry.go Normal file
View File

@ -0,0 +1,87 @@
package metrics
import (
"fmt"
"sort"
)
// Registry tracks measurement groups and the metric descriptors registered
// within each group.
type Registry struct {
	// descriptors is kept sorted by group name; register relies on this
	// ordering for its duplicate-name check.
	descriptors []*groupDesc

	// groups is indexed by GID, in registration order.
	groups []groupRegistry
}
const (
	// DefaultGroup is the identifier for the default group.
	// NewRegistry registers its first group ("global") at this ID.
	DefaultGroup = GID(0)
)
// NewRegistry creates a new Registry with a single group identified by DefaultGroup.
func NewRegistry() *Registry {
	r := &Registry{}
	r.MustRegisterGroup("global")
	return r
}
// register adds gd to the registry and assigns it the next group ID. It
// returns an error if the group name is already in use.
//
// register is not safe to call from multiple goroutines.
func (r *Registry) register(gd *groupDesc) error {
	// r.descriptors is kept sorted by name. The predicate handed to
	// sort.Search must be monotonic (false… then true…); the previous
	// equality predicate violated that contract and could miss existing
	// duplicates.
	p := sort.Search(len(r.descriptors), func(i int) bool {
		return r.descriptors[i].Name >= gd.Name
	})

	if p < len(r.descriptors) && r.descriptors[p].Name == gd.Name {
		return fmt.Errorf("group name '%s' already in use", gd.Name)
	}

	r.descriptors = append(r.descriptors, gd)
	sort.Slice(r.descriptors, func(i, j int) bool {
		return r.descriptors[i].Name < r.descriptors[j].Name
	})

	// Group IDs are positional: the new group's registry lives at index
	// gd.id of r.groups.
	gd.id = GID(len(r.groups))
	r.groups = append(r.groups, groupRegistry{desc: gd})

	return nil
}
// mustRegister registers gd and panics if the group name is already in use.
func (r *Registry) mustRegister(gd *groupDesc) {
	if err := r.register(gd); err != nil {
		panic(err.Error())
	}
}
// MustRegisterGroup registers a new group and panics if a group already exists with the same name.
// It returns the GID assigned to the new group.
//
// MustRegisterGroup is not safe to call from concurrent goroutines.
func (r *Registry) MustRegisterGroup(name string) GID {
	gd := &groupDesc{Name: name}
	r.mustRegister(gd)
	return gd.id
}
// mustGetGroupRegistry returns the groupRegistry for id, panicking if id
// was not issued by this registry.
func (r *Registry) mustGetGroupRegistry(id GID) *groupRegistry {
	if int(id) >= len(r.groups) {
		// Include the offending id in the message; the previous
		// fmt.Sprintf call had no verbs or arguments, which is flagged
		// by go vet and hid the bad value.
		panic(fmt.Sprintf("invalid group ID: %d", id))
	}
	return &r.groups[id]
}
// MustRegisterCounter registers a new counter metric using the provided descriptor.
// If the metric name is not unique within the group, MustRegisterCounter will panic.
// The target group defaults to DefaultGroup; override it with WithGroup.
//
// MustRegisterCounter is not safe to call from concurrent goroutines.
func (r *Registry) MustRegisterCounter(name string, opts ...descOption) ID {
	desc := newDesc(name, opts...)
	return r.mustGetGroupRegistry(desc.gid).mustRegisterCounter(desc)
}

// MustRegisterTimer registers a new timer metric using the provided descriptor.
// If the metric name is not unique within the group, MustRegisterTimer will panic.
// The target group defaults to DefaultGroup; override it with WithGroup.
//
// MustRegisterTimer is not safe to call from concurrent goroutines.
func (r *Registry) MustRegisterTimer(name string, opts ...descOption) ID {
	desc := newDesc(name, opts...)
	return r.mustGetGroupRegistry(desc.gid).mustRegisterTimer(desc)
}
// NewGroup returns a new measurement group copied from the group registered
// under gid; it panics if gid was not issued by this registry.
//
// NewGroup is safe to call from multiple goroutines.
func (r *Registry) NewGroup(gid GID) *Group {
	return r.mustGetGroupRegistry(gid).newGroup()
}

View File

@ -0,0 +1,63 @@
package metrics
import (
"testing"
"github.com/influxdata/influxdb/pkg/testing/assert"
)
// TestRegistry_MustRegisterCounter verifies that the first counter in a fresh
// registry receives ID 0.
func TestRegistry_MustRegisterCounter(t *testing.T) {
	r := NewRegistry()
	id := r.MustRegisterCounter("counter")
	assert.Equal(t, id, ID(0), "invalid id")
}

// TestRegistry_MustRegisterCounter_Panics verifies that registering the same
// counter name twice panics.
func TestRegistry_MustRegisterCounter_Panics(t *testing.T) {
	r := NewRegistry()
	r.MustRegisterCounter("counter")
	assert.PanicsWithValue(t, "metric name 'counter' already in use", func() {
		r.MustRegisterCounter("counter")
	})
}

// TestRegistry_NewGroup_CounterIsZero verifies that each NewGroup call yields
// independent counter state: mutating one group's counter does not leak into
// a freshly created group.
func TestRegistry_NewGroup_CounterIsZero(t *testing.T) {
	r := NewRegistry()
	id := r.MustRegisterCounter("counter")

	c := r.NewGroup(DefaultGroup).GetCounter(id)
	c.Add(1)
	assert.Equal(t, int64(1), c.Value())

	c = r.NewGroup(DefaultGroup).GetCounter(id)
	assert.Equal(t, int64(0), c.Value())
}

// TestRegistry_MustRegisterTimer verifies that the first timer in a fresh
// registry receives ID 0.
func TestRegistry_MustRegisterTimer(t *testing.T) {
	r := NewRegistry()
	id := r.MustRegisterTimer("timer")
	assert.Equal(t, ID(0), id, "invalid id")
}
// TestRegistry_MustRegisterTimer_Panics verifies that registering two timers
// with the same name panics. The original registered counters here, which
// left the timer registration path untested.
func TestRegistry_MustRegisterTimer_Panics(t *testing.T) {
	r := NewRegistry()
	r.MustRegisterTimer("timer")
	assert.PanicsWithValue(t, "metric name 'timer' already in use", func() {
		r.MustRegisterTimer("timer")
	})
}
// TestRegistry_MustRegisterMultiple verifies that counters and timers are
// numbered independently: the first metric of each kind receives index 0.
func TestRegistry_MustRegisterMultiple(t *testing.T) {
	r := NewRegistry()
	cnt := r.MustRegisterCounter("counter")
	tmr := r.MustRegisterTimer("timer")
	assert.Equal(t, ID(0), cnt, "invalid id")
	assert.Equal(t, ID(0), tmr, "invalid id")
}
// TestRegistry_MustRegister_Panics_Across_Measurements verifies that metric
// names are unique across measurement kinds: a timer may not reuse a name
// already taken by a counter. The original registered a counter twice, which
// never exercised the cross-kind collision the test name promises.
func TestRegistry_MustRegister_Panics_Across_Measurements(t *testing.T) {
	r := NewRegistry()
	r.MustRegisterCounter("foo")
	assert.PanicsWithValue(t, "metric name 'foo' already in use", func() {
		r.MustRegisterTimer("foo")
	})
}

34
pkg/metrics/timer.go Normal file
View File

@ -0,0 +1,34 @@
package metrics
import (
"sync/atomic"
"time"
)
// The Timer type is used to store a duration and is safe to use from
// concurrent goroutines.
type Timer struct {
	val  int64 // accessed only via sync/atomic
	desc *desc
}

// Name returns the name of the timer.
func (t *Timer) Name() string { return t.desc.Name }

// Value atomically returns the value of the timer.
func (t *Timer) Value() time.Duration { return time.Duration(atomic.LoadInt64(&t.val)) }

// Update sets the timer value to d.
func (t *Timer) Update(d time.Duration) { atomic.StoreInt64(&t.val, int64(d)) }

// UpdateSince sets the timer value to the difference between since and the current time.
func (t *Timer) UpdateSince(since time.Time) { t.Update(time.Since(since)) }

// String returns a string representation using the name and value of the timer.
// It reads the value via Value() so it is safe to call concurrently with
// Update; the previous raw read of t.val was a data race.
func (t *Timer) String() string { return t.desc.Name + ": " + t.Value().String() }

// Time updates the timer to the duration it takes to call f.
func (t *Timer) Time(f func()) {
	s := time.Now()
	f()
	t.UpdateSince(s)
}

14
pkg/metrics/timer_test.go Normal file
View File

@ -0,0 +1,14 @@
package metrics
import (
"testing"
"time"
"github.com/influxdata/influxdb/pkg/testing/assert"
)
// TestTimer_Update verifies that Update stores the exact duration that
// Value then reports.
func TestTimer_Update(t *testing.T) {
	var tm Timer
	tm.Update(100 * time.Millisecond)
	assert.Equal(t, tm.Value(), 100*time.Millisecond, "unexpected value")
}

View File

@ -0,0 +1,96 @@
package assert
import (
"bytes"
"fmt"
"reflect"
)
// TestingT is the subset of *testing.T used by this package's assertion
// helpers; *testing.T satisfies it.
type TestingT interface {
	Helper()
	Errorf(format string, args ...interface{})
}
// Equal asserts that the values are equal and returns
// true if the assertion was successful.
func Equal(t TestingT, got, expected interface{}, msgAndArgs ...interface{}) bool {
	if ValuesAreEqual(got, expected) {
		return true
	}

	t.Helper()
	gotStr, expStr := formatValues(got, expected)
	fail(t, fmt.Sprintf("Not Equal: got=%s, exp=%s", gotStr, expStr), msgAndArgs...)
	return false
}
// NotEqual asserts that the values are not equal and returns
// true if the assertion was successful.
func NotEqual(t TestingT, got, expected interface{}, msgAndArgs ...interface{}) bool {
	if ValuesAreEqual(got, expected) {
		t.Helper()
		_, expStr := formatValues(got, expected)
		fail(t, fmt.Sprintf("Equal: should not be %s", expStr), msgAndArgs...)
		return false
	}
	return true
}
// PanicsWithValue asserts that fn panics, and that
// the recovered panic value equals the expected panic value.
//
// Returns true if the assertion was successful.
func PanicsWithValue(t TestingT, expected interface{}, fn PanicTestFunc, msgAndArgs ...interface{}) bool {
	t.Helper()
	if funcDidPanic, got := didPanic(fn); !funcDidPanic {
		return fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", fn, got), msgAndArgs...)
	} else if got != expected {
		// NOTE(review): this != comparison itself panics if the recovered
		// value is not comparable (e.g. a slice); assumes callers panic
		// with comparable values — confirm.
		return fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", fn, expected, got), msgAndArgs...)
	}
	return true
}
// ValuesAreEqual determines if the values are equal.
//
// A nil on either side matches only another nil. Byte slices receive
// special handling: a nil []byte and an empty []byte are considered
// different, and a []byte never equals a value of any other type.
// Everything else falls back to reflect.DeepEqual.
func ValuesAreEqual(got, expected interface{}) bool {
	if got == nil || expected == nil {
		return got == expected
	}
	if exp, ok := expected.([]byte); ok {
		act, actOK := got.([]byte)
		switch {
		case !actOK:
			return false
		case exp == nil || act == nil:
			return exp == nil && act == nil
		default:
			return bytes.Equal(exp, act)
		}
	}
	return reflect.DeepEqual(expected, got)
}
// ValuesAreExactlyEqual determines if the values are equal and
// their types are the same.
//
// NOTE(review): despite the name, the conversion fallback below makes
// this MORE permissive than ValuesAreEqual for convertible types
// (e.g. int32(1) vs int64(1) compares equal here) — confirm intended.
func ValuesAreExactlyEqual(got, expected interface{}) bool {
	if ValuesAreEqual(got, expected) {
		return true
	}
	actualType := reflect.TypeOf(got)
	if actualType == nil {
		return false
	}
	expectedValue := reflect.ValueOf(expected)
	if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
		// Attempt comparison after type conversion
		return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), got)
	}
	return false
}

// PanicTestFunc defines a function that is called to determine whether a panic occurs.
type PanicTestFunc func()

View File

@ -0,0 +1,4 @@
/*
Package assert provides helper functions that can be used with the standard Go testing package.
*/
package assert

View File

@ -0,0 +1,53 @@
package assert
import (
"fmt"
"reflect"
)
// fail reports an assertion failure through t.Errorf, appending the
// optional user-supplied message. It always returns false so callers
// can simply `return fail(...)`.
func fail(t TestingT, failureMsg string, msgAndArgs ...interface{}) bool {
	t.Helper()
	if msg := formatMsgAndArgs(msgAndArgs...); msg != "" {
		t.Errorf("%s: %s", failureMsg, msg)
	} else {
		t.Errorf("%s", failureMsg)
	}
	return false
}
// formatValues renders got and expected for use in a failure message.
// When the dynamic types differ, each value is prefixed with its type
// so the type mismatch is visible in the output.
func formatValues(got, expected interface{}) (string, string) {
	sameType := reflect.TypeOf(got) == reflect.TypeOf(expected)
	if sameType {
		return fmt.Sprintf("%#v", got), fmt.Sprintf("%#v", expected)
	}
	return fmt.Sprintf("%T(%#v)", got, got), fmt.Sprintf("%T(%#v)", expected, expected)
}
func formatMsgAndArgs(msgAndArgs ...interface{}) string {
if len(msgAndArgs) == 0 || msgAndArgs == nil {
return ""
}
if len(msgAndArgs) == 1 {
return msgAndArgs[0].(string)
}
if len(msgAndArgs) > 1 {
return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
}
return ""
}
// didPanic reports whether fn panics when called, together with the
// recovered panic value (nil when no panic occurred).
func didPanic(fn PanicTestFunc) (panicked bool, message interface{}) {
	defer func() {
		message = recover()
		panicked = message != nil
	}()
	fn()
	return
}

32
pkg/tracing/context.go Normal file
View File

@ -0,0 +1,32 @@
package tracing
import "context"
// key is an unexported context-key type so values stored by this
// package cannot collide with keys defined by other packages.
type key int

const (
	spanKey key = iota
	traceKey
)

// NewContextWithSpan returns a new context with the given Span added.
func NewContextWithSpan(ctx context.Context, c *Span) context.Context {
	return context.WithValue(ctx, spanKey, c)
}

// SpanFromContext returns the Span associated with ctx or nil if no Span has been assigned.
func SpanFromContext(ctx context.Context) *Span {
	c, _ := ctx.Value(spanKey).(*Span)
	return c
}

// NewContextWithTrace returns a new context with the given Trace added.
func NewContextWithTrace(ctx context.Context, t *Trace) context.Context {
	return context.WithValue(ctx, traceKey, t)
}

// TraceFromContext returns the Trace associated with ctx or nil if no Trace has been assigned.
func TraceFromContext(ctx context.Context) *Trace {
	c, _ := ctx.Value(traceKey).(*Trace)
	return c
}

26
pkg/tracing/doc.go Normal file
View File

@ -0,0 +1,26 @@
/*
Package tracing provides a way for capturing hierarchical traces.
To start a new trace with a root span named select
trace, span := tracing.NewTrace("select")
It is recommended that a span be forwarded to callees using the
context package. Firstly, create a new context with the span associated
as follows
ctx = tracing.NewContextWithSpan(ctx, span)
followed by calling the API with the new context
SomeAPI(ctx, ...)
Once the trace is complete, it may be converted to a graph with the Tree method.
tree := t.Tree()
The tree is intended to be used with the Walk function in order to generate
different presentations. The default Tree#String method returns a tree.
*/
package tracing

117
pkg/tracing/fields/field.go Normal file
View File

@ -0,0 +1,117 @@
package fields
import (
"fmt"
"math"
"time"
)
// fieldType discriminates how a Field's value is stored and interpreted.
type fieldType int

const (
	stringType fieldType = iota
	boolType
	int64Type
	uint64Type
	durationType
	float64Type
)

// Field instances are constructed via Bool, String, and so on.
// Non-string values are packed into numericVal (bools as 0/1, floats
// via their IEEE-754 bit pattern) and decoded again in Value.
//
// "heavily influenced by" (i.e., partially stolen from)
// https://github.com/opentracing/opentracing-go/log
type Field struct {
	key        string
	fieldType  fieldType
	numericVal int64
	stringVal  string
}

// String adds a string-valued key:value pair to a Span.LogFields() record
func String(key, val string) Field {
	return Field{
		key:       key,
		fieldType: stringType,
		stringVal: val,
	}
}

// Bool adds a bool-valued key:value pair to a Span.LogFields() record
func Bool(key string, val bool) Field {
	var numericVal int64
	if val {
		numericVal = 1
	}
	return Field{
		key:        key,
		fieldType:  boolType,
		numericVal: numericVal,
	}
}

// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
func Int64(key string, val int64) Field {
	return Field{
		key:        key,
		fieldType:  int64Type,
		numericVal: val,
	}
}

// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
func Uint64(key string, val uint64) Field {
	return Field{
		key:        key,
		fieldType:  uint64Type,
		numericVal: int64(val),
	}
}

// Duration adds a time.Duration-valued key:value pair to a Span.LogFields() record
func Duration(key string, val time.Duration) Field {
	return Field{
		key:        key,
		fieldType:  durationType,
		numericVal: int64(val),
	}
}

// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
func Float64(key string, val float64) Field {
	return Field{
		key:        key,
		fieldType:  float64Type,
		numericVal: int64(math.Float64bits(val)),
	}
}

// Key returns the field's key.
func (lf Field) Key() string {
	return lf.key
}

// Value returns the field's value as interface{}.
func (lf Field) Value() interface{} {
	switch lf.fieldType {
	case stringType:
		return lf.stringVal
	case boolType:
		return lf.numericVal != 0
	case int64Type:
		return int64(lf.numericVal)
	case uint64Type:
		return uint64(lf.numericVal)
	case durationType:
		return time.Duration(lf.numericVal)
	case float64Type:
		return math.Float64frombits(uint64(lf.numericVal))
	default:
		return nil
	}
}

// String returns a string representation of the key and value.
func (lf Field) String() string {
	return fmt.Sprint(lf.key, ": ", lf.Value())
}

View File

@ -0,0 +1,61 @@
package fields
import "sort"
// Fields is a set of Field values; New produces them sorted by key.
type Fields []Field

// Merge merges other with the current set, replacing any matching keys from other.
// Both sets are assumed to be sorted by key (as produced by New); the
// standard sorted-merge below keeps the result sorted.
func (fs *Fields) Merge(other Fields) {
	var list []Field
	i, j := 0, 0
	for i < len(*fs) && j < len(other) {
		if (*fs)[i].key < other[j].key {
			list = append(list, (*fs)[i])
			i++
		} else if (*fs)[i].key > other[j].key {
			list = append(list, other[j])
			j++
		} else {
			// equal, then "other" replaces existing key
			list = append(list, other[j])
			i++
			j++
		}
	}
	// Append whichever side still has remaining (already sorted) entries.
	if i < len(*fs) {
		list = append(list, (*fs)[i:]...)
	} else if j < len(other) {
		list = append(list, other[j:]...)
	}
	*fs = list
}
// New creates a new set of fields, sorted by Key.
// Duplicate keys are removed.
//
// NOTE(review): sort.Slice is not stable, so which of several fields
// sharing a key survives deduplication is unspecified.
func New(args ...Field) Fields {
	fields := Fields(args)
	sort.Slice(fields, func(i, j int) bool {
		return fields[i].key < fields[j].key
	})
	// deduplicate
	// loop invariant: fields[:i] has no duplicates
	for i := 0; i < len(fields)-1; i++ {
		j := i + 1
		// find all duplicate keys
		for j < len(fields) && fields[i].key == fields[j].key {
			j++
		}
		d := (j - 1) - i // number of duplicate keys
		if d > 0 {
			// copy over duplicate keys in order to maintain loop invariant
			copy(fields[i+1:], fields[j:])
			fields = fields[:len(fields)-d]
		}
	}
	return fields
}

View File

@ -0,0 +1,101 @@
package fields
import (
"testing"
"github.com/influxdata/influxdb/pkg/testing/assert"
)
// makeFields builds a Fields slice from alternating key/value strings
// without sorting or deduplicating (unlike New), so expectations can
// state an exact order.
func makeFields(args ...string) Fields {
	if len(args)%2 != 0 {
		panic("uneven number of arguments")
	}
	var f Fields
	for i := 0; i+1 < len(args); i += 2 {
		f = append(f, String(args[i], args[i+1]))
	}
	return f
}

// TestNew verifies that New sorts by key and removes duplicate keys
// regardless of where the duplicates appear in the input.
func TestNew(t *testing.T) {
	cases := []struct {
		n   string
		l   []string
		exp Fields
	}{
		{
			n:   "empty",
			l:   nil,
			exp: makeFields(),
		},
		{
			n:   "not duplicates",
			l:   []string{"k01", "v01", "k03", "v03", "k02", "v02"},
			exp: makeFields("k01", "v01", "k02", "v02", "k03", "v03"),
		},
		{
			n:   "duplicates at end",
			l:   []string{"k01", "v01", "k02", "v02", "k02", "v02"},
			exp: makeFields("k01", "v01", "k02", "v02"),
		},
		{
			n:   "duplicates at start",
			l:   []string{"k01", "v01", "k02", "v02", "k01", "v01"},
			exp: makeFields("k01", "v01", "k02", "v02"),
		},
		{
			n:   "duplicates in middle",
			l:   []string{"k01", "v01", "k02", "v02", "k03", "v03", "k02", "v02", "k02", "v02"},
			exp: makeFields("k01", "v01", "k02", "v02", "k03", "v03"),
		},
	}
	for _, tc := range cases {
		t.Run(tc.n, func(t *testing.T) {
			l := New(makeFields(tc.l...)...)
			assert.Equal(t, tc.exp, l)
		})
	}
}

// TestFields_Merge verifies that Merge keeps the result sorted and that
// matching keys take their value from the argument set.
func TestFields_Merge(t *testing.T) {
	cases := []struct {
		n    string
		l, r Fields
		exp  Fields
	}{
		{
			n:   "no matching keys",
			l:   New(String("k05", "v05"), String("k03", "v03"), String("k01", "v01")),
			r:   New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")),
			exp: New(String("k05", "v05"), String("k03", "v03"), String("k01", "v01"), String("k02", "v02"), String("k04", "v04"), String("k00", "v00")),
		},
		{
			n:   "multiple matching keys",
			l:   New(String("k05", "v05"), String("k03", "v03"), String("k01", "v01")),
			r:   New(String("k02", "v02"), String("k03", "v03a"), String("k05", "v05a")),
			exp: New(String("k05", "v05a"), String("k03", "v03a"), String("k01", "v01"), String("k02", "v02")),
		},
		{
			n:   "source empty",
			l:   New(),
			r:   New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")),
			exp: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")),
		},
		{
			n:   "other empty",
			l:   New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")),
			r:   New(),
			exp: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")),
		},
	}
	for _, tc := range cases {
		t.Run(tc.n, func(t *testing.T) {
			l := tc.l
			l.Merge(tc.r)
			assert.Equal(t, tc.exp, l)
		})
	}
}

View File

@ -0,0 +1,74 @@
package labels
import "sort"
// Label is a single key/value string pair attached to a span.
type Label struct {
	Key, Value string
}

// The Labels type represents a set of labels, sorted by Key.
type Labels []Label

// Merge merges other with the current set, replacing any matching keys from other.
// Both sets are assumed sorted by Key (as produced by New); the
// sorted-merge below keeps the result sorted.
func (ls *Labels) Merge(other Labels) {
	var list []Label
	i, j := 0, 0
	for i < len(*ls) && j < len(other) {
		if (*ls)[i].Key < other[j].Key {
			list = append(list, (*ls)[i])
			i++
		} else if (*ls)[i].Key > other[j].Key {
			list = append(list, other[j])
			j++
		} else {
			// equal, then "other" replaces existing key
			list = append(list, other[j])
			i++
			j++
		}
	}
	// Append whichever side still has remaining (already sorted) entries.
	if i < len(*ls) {
		list = append(list, (*ls)[i:]...)
	} else if j < len(other) {
		list = append(list, other[j:]...)
	}
	*ls = list
}

// New takes an even number of strings representing key-value pairs
// and creates a new slice of Labels. Duplicates are removed, however,
// there is no guarantee which will be removed
// (sort.Slice is not stable).
func New(args ...string) Labels {
	if len(args)%2 != 0 {
		panic("uneven number of arguments to label.Labels")
	}
	var labels Labels
	for i := 0; i+1 < len(args); i += 2 {
		labels = append(labels, Label{Key: args[i], Value: args[i+1]})
	}
	sort.Slice(labels, func(i, j int) bool {
		return labels[i].Key < labels[j].Key
	})
	// deduplicate
	// loop invariant: labels[:i] has no duplicates
	for i := 0; i < len(labels)-1; i++ {
		j := i + 1
		// find all duplicate keys
		for j < len(labels) && labels[i].Key == labels[j].Key {
			j++
		}
		d := (j - 1) - i // number of duplicate keys
		if d > 0 {
			// copy over duplicate keys in order to maintain loop invariant
			copy(labels[i+1:], labels[j:])
			labels = labels[:len(labels)-d]
		}
	}
	return labels
}

View File

@ -0,0 +1,101 @@
package labels
import (
"testing"
"github.com/influxdata/influxdb/pkg/testing/assert"
)
// makeLabels builds a Labels slice from alternating key/value strings
// without sorting or deduplicating (unlike New), so expectations can
// state an exact order.
func makeLabels(args ...string) Labels {
	if len(args)%2 != 0 {
		panic("uneven number of arguments")
	}
	var l Labels
	for i := 0; i+1 < len(args); i += 2 {
		l = append(l, Label{Key: args[i], Value: args[i+1]})
	}
	return l
}

// TestNew verifies that New sorts by key and removes duplicate keys
// regardless of where the duplicates appear in the input.
func TestNew(t *testing.T) {
	cases := []struct {
		n   string
		l   []string
		exp Labels
	}{
		{
			n:   "empty",
			l:   nil,
			exp: makeLabels(),
		},
		{
			n:   "not duplicates",
			l:   []string{"k01", "v01", "k03", "v03", "k02", "v02"},
			exp: makeLabels("k01", "v01", "k02", "v02", "k03", "v03"),
		},
		{
			n:   "duplicates at end",
			l:   []string{"k01", "v01", "k02", "v02", "k02", "v02"},
			exp: makeLabels("k01", "v01", "k02", "v02"),
		},
		{
			n:   "duplicates at start",
			l:   []string{"k01", "v01", "k02", "v02", "k01", "v01"},
			exp: makeLabels("k01", "v01", "k02", "v02"),
		},
		{
			n:   "duplicates in middle",
			l:   []string{"k01", "v01", "k02", "v02", "k03", "v03", "k02", "v02", "k02", "v02"},
			exp: makeLabels("k01", "v01", "k02", "v02", "k03", "v03"),
		},
	}
	for _, tc := range cases {
		t.Run(tc.n, func(t *testing.T) {
			l := New(tc.l...)
			assert.Equal(t, l, tc.exp)
		})
	}
}

// TestLabels_Merge verifies that Merge keeps the result sorted and that
// matching keys take their value from the argument set.
func TestLabels_Merge(t *testing.T) {
	cases := []struct {
		n    string
		l, r Labels
		exp  Labels
	}{
		{
			n:   "no matching keys",
			l:   New("k05", "v05", "k03", "v03", "k01", "v01"),
			r:   New("k02", "v02", "k04", "v04", "k00", "v00"),
			exp: New("k05", "v05", "k03", "v03", "k01", "v01", "k02", "v02", "k04", "v04", "k00", "v00"),
		},
		{
			n:   "multiple matching keys",
			l:   New("k05", "v05", "k03", "v03", "k01", "v01"),
			r:   New("k02", "v02", "k03", "v03a", "k05", "v05a"),
			exp: New("k05", "v05a", "k03", "v03a", "k01", "v01", "k02", "v02"),
		},
		{
			n:   "source empty",
			l:   New(),
			r:   New("k02", "v02", "k04", "v04", "k00", "v00"),
			exp: New("k02", "v02", "k04", "v04", "k00", "v00"),
		},
		{
			n:   "other empty",
			l:   New("k02", "v02", "k04", "v04", "k00", "v00"),
			r:   New(),
			exp: New("k02", "v02", "k04", "v04", "k00", "v00"),
		},
	}
	for _, tc := range cases {
		t.Run(tc.n, func(t *testing.T) {
			l := tc.l
			l.Merge(tc.r)
			assert.Equal(t, l, tc.exp)
		})
	}
}

18
pkg/tracing/rawspan.go Normal file
View File

@ -0,0 +1,18 @@
package tracing
import (
"time"
"github.com/influxdata/influxdb/pkg/tracing/fields"
"github.com/influxdata/influxdb/pkg/tracing/labels"
)
// RawSpan represents the data associated with a span.
// There is no end-time field: a span is simply recorded into its Trace
// when Span.Finish is called.
type RawSpan struct {
	Context      SpanContext
	ParentSpanID uint64        // ParentSpanID identifies the parent of this span or 0 if this is the root span.
	Name         string        // Name is the operation name given to this span.
	Start        time.Time     // Start identifies the start time of the span.
	Labels       labels.Labels // Labels contains additional metadata about this span.
	Fields       fields.Fields // Fields contains typed values associated with this span.
}

84
pkg/tracing/span.go Normal file
View File

@ -0,0 +1,84 @@
package tracing
import (
"sync"
"time"
"github.com/influxdata/influxdb/pkg/tracing/fields"
"github.com/influxdata/influxdb/pkg/tracing/labels"
)
// The Span type denotes a specific operation for a Trace.
// A Span may have one or more children, identifying additional
// details about a trace.
type Span struct {
	tracer *Trace
	mu     sync.Mutex // mu guards mutations of raw (labels/fields) and the Finish hand-off.
	raw    RawSpan
}

// StartSpanOption is implemented by options that customize a span at
// creation time (see StartTime).
type StartSpanOption interface {
	applyStart(*Span)
}

// The StartTime start span option specifies the start time of
// the new span rather than using now.
type StartTime time.Time

// applyStart sets the span's start time from the option value.
func (t StartTime) applyStart(s *Span) {
	s.raw.Start = time.Time(t)
}

// StartSpan creates a new child span using time.Now as the start time.
func (s *Span) StartSpan(name string, opt ...StartSpanOption) *Span {
	return s.tracer.startSpan(name, s.raw.Context, opt)
}

// Context returns a SpanContext that can be serialized and passed to a remote node to continue a trace.
func (s *Span) Context() SpanContext {
	return s.raw.Context
}

// SetLabels replaces any existing labels for the Span with args.
func (s *Span) SetLabels(args ...string) {
	s.mu.Lock()
	s.raw.Labels = labels.New(args...)
	s.mu.Unlock()
}

// MergeLabels merges args with any existing labels defined
// for the Span.
func (s *Span) MergeLabels(args ...string) {
	// Build the new set outside the lock; only the merge itself is guarded.
	ls := labels.New(args...)
	s.mu.Lock()
	s.raw.Labels.Merge(ls)
	s.mu.Unlock()
}

// SetFields replaces any existing fields for the Span with args.
func (s *Span) SetFields(set fields.Fields) {
	s.mu.Lock()
	s.raw.Fields = set
	s.mu.Unlock()
}

// MergeFields merges the provides args with any existing fields defined
// for the Span.
func (s *Span) MergeFields(args ...fields.Field) {
	set := fields.New(args...)
	s.mu.Lock()
	s.raw.Fields.Merge(set)
	s.mu.Unlock()
}

// Finish marks the end of the span and records it to the associated Trace.
// If Finish is not called, the span will not appear in the trace.
func (s *Span) Finish() {
	s.mu.Lock()
	s.tracer.addRawSpan(s.raw)
	s.mu.Unlock()
}

// Tree returns the subtree of the trace rooted at this span.
func (s *Span) Tree() *TreeNode {
	return s.tracer.TreeFrom(s.raw.Context.SpanID)
}

View File

@ -0,0 +1,27 @@
package tracing
import (
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/pkg/tracing/wire"
)
// A SpanContext represents the minimal information to identify a span in a trace.
// This is typically serialized to continue a trace on a remote node.
type SpanContext struct {
	TraceID uint64 // TraceID is assigned a random number to this trace.
	SpanID  uint64 // SpanID is assigned a random number to identify this span.
}

// MarshalBinary serializes the context using the wire protobuf format.
func (s SpanContext) MarshalBinary() ([]byte, error) {
	ws := wire.SpanContext(s)
	return proto.Marshal(&ws)
}

// UnmarshalBinary deserializes a context previously produced by
// MarshalBinary; s is left unchanged on error.
func (s *SpanContext) UnmarshalBinary(data []byte) error {
	var ws wire.SpanContext
	err := proto.Unmarshal(data, &ws)
	if err == nil {
		*s = SpanContext(ws)
	}
	return err
}

141
pkg/tracing/trace.go Normal file
View File

@ -0,0 +1,141 @@
package tracing
import (
"sort"
"sync"
"time"
)
// The Trace type functions as a container for capturing Spans used to
// trace the execution of a request.
type Trace struct {
	mu    sync.Mutex          // mu guards spans.
	spans map[uint64]RawSpan  // spans is keyed by span ID.
}

// NewTrace starts a new trace and returns a root span identified by the provided name.
//
// Additional options may be specified to override the default behavior when creating the span.
func NewTrace(name string, opt ...StartSpanOption) (*Trace, *Span) {
	t := &Trace{spans: make(map[uint64]RawSpan)}
	s := &Span{tracer: t}
	s.raw.Name = name
	s.raw.Context.TraceID, s.raw.Context.SpanID = randomID2()
	setOptions(s, opt)
	return t, s
}

// NewTraceFromSpan starts a new trace and returns the associated span, which is a child of the
// parent span context.
func NewTraceFromSpan(name string, parent SpanContext, opt ...StartSpanOption) (*Trace, *Span) {
	t := &Trace{spans: make(map[uint64]RawSpan)}
	s := &Span{tracer: t}
	s.raw.Name = name
	s.raw.ParentSpanID = parent.SpanID
	s.raw.Context.TraceID = parent.TraceID
	s.raw.Context.SpanID = randomID()
	setOptions(s, opt)
	return t, s
}

// startSpan creates a child span of sc within this trace, inheriting
// the trace ID and recording sc's span as the parent.
func (t *Trace) startSpan(name string, sc SpanContext, opt []StartSpanOption) *Span {
	s := &Span{tracer: t}
	s.raw.Name = name
	s.raw.Context.SpanID = randomID()
	s.raw.Context.TraceID = sc.TraceID
	s.raw.ParentSpanID = sc.SpanID
	setOptions(s, opt)
	return s
}

// setOptions applies the start-span options, defaulting the start time
// to now if no option set it.
func setOptions(s *Span, opt []StartSpanOption) {
	for _, o := range opt {
		o.applyStart(s)
	}
	if s.raw.Start.IsZero() {
		s.raw.Start = time.Now()
	}
}

// addRawSpan records a finished span; safe for concurrent use.
func (t *Trace) addRawSpan(raw RawSpan) {
	t.mu.Lock()
	t.spans[raw.Context.SpanID] = raw
	t.mu.Unlock()
}

// Tree returns a graph of the current trace.
// It locates the root (the span with ParentSpanID == 0) and returns
// nil if no root span has been recorded.
func (t *Trace) Tree() *TreeNode {
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, s := range t.spans {
		if s.ParentSpanID == 0 {
			return t.treeFrom(s.Context.SpanID)
		}
	}
	return nil
}
// Merge combines other with the current trace. This is
// typically necessary when traces are transferred from a remote.
//
// The receiver's lock is held while copying so Merge does not race with
// concurrent addRawSpan/Tree calls (the previous implementation wrote
// t.spans unlocked). other is read without locking: callers are
// expected to pass a trace that is no longer being mutated, e.g. one
// just deserialized from a remote node.
func (t *Trace) Merge(other *Trace) {
	t.mu.Lock()
	defer t.mu.Unlock()
	for k, s := range other.spans {
		t.spans[k] = s
	}
}
// TreeFrom returns a graph of the trace rooted at the span with id
// root, or nil if that span has not been recorded.
func (t *Trace) TreeFrom(root uint64) *TreeNode {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.treeFrom(root)
}

// treeFrom builds the tree; the caller must hold t.mu.
func (t *Trace) treeFrom(root uint64) *TreeNode {
	// Wrap every recorded span in a node, then link children to parents.
	c := map[uint64]*TreeNode{}
	for k, s := range t.spans {
		c[k] = &TreeNode{Raw: s}
	}
	if _, ok := c[root]; !ok {
		return nil
	}
	for _, n := range c {
		if n.Raw.ParentSpanID != 0 {
			// Parents that were never finished are simply absent; their
			// children are dropped from the tree.
			if pn := c[n.Raw.ParentSpanID]; pn != nil {
				pn.Children = append(pn.Children, n)
			}
		}
	}
	// sort nodes
	var v treeSortVisitor
	Walk(&v, c[root])
	return c[root]
}
// treeSortVisitor sorts each node's children so rendered trees are
// deterministic.
type treeSortVisitor struct{}

// Visit sorts the children of node by start time, breaking ties by
// name. It returns v so Walk descends into the (now sorted) children.
func (v *treeSortVisitor) Visit(node *TreeNode) Visitor {
	sort.Slice(node.Children, func(i, j int) bool {
		lt, rt := node.Children[i].Raw.Start.UnixNano(), node.Children[j].Raw.Start.UnixNano()
		if lt != rt {
			return lt < rt
		}
		// Equal start times: fall back to lexicographic name order.
		return node.Children[i].Raw.Name < node.Children[j].Raw.Name
	})
	return v
}

View File

@ -0,0 +1,136 @@
package tracing
import (
"math"
"time"
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/pkg/tracing/fields"
"github.com/influxdata/influxdb/pkg/tracing/labels"
"github.com/influxdata/influxdb/pkg/tracing/wire"
)
// fieldsToWire converts a fields.Fields set to its wire representation.
// Fields whose dynamic value type is not one of the supported kinds are
// silently skipped.
func fieldsToWire(set fields.Fields) []wire.Field {
	var r []wire.Field
	for _, f := range set {
		wf := wire.Field{Key: f.Key()}
		switch val := f.Value().(type) {
		case string:
			wf.FieldType = wire.FieldTypeString
			wf.Value = &wire.Field_StringVal{StringVal: val}
		case bool:
			// Booleans travel as 0/1 in the numeric slot.
			var numericVal int64
			if val {
				numericVal = 1
			}
			wf.FieldType = wire.FieldTypeBool
			wf.Value = &wire.Field_NumericVal{NumericVal: numericVal}
		case int64:
			wf.FieldType = wire.FieldTypeInt64
			wf.Value = &wire.Field_NumericVal{NumericVal: val}
		case uint64:
			wf.FieldType = wire.FieldTypeUint64
			wf.Value = &wire.Field_NumericVal{NumericVal: int64(val)}
		case time.Duration:
			wf.FieldType = wire.FieldTypeDuration
			wf.Value = &wire.Field_NumericVal{NumericVal: int64(val)}
		case float64:
			// Floats travel as their IEEE-754 bit pattern.
			wf.FieldType = wire.FieldTypeFloat64
			wf.Value = &wire.Field_NumericVal{NumericVal: int64(math.Float64bits(val))}
		default:
			continue
		}
		r = append(r, wf)
	}
	return r
}

// labelsToWire flattens labels into the alternating key/value string
// list used on the wire (the inverse of labels.New).
func labelsToWire(set labels.Labels) []string {
	var r []string
	for i := range set {
		r = append(r, set[i].Key, set[i].Value)
	}
	return r
}

// MarshalBinary serializes the trace's recorded spans to the wire
// protobuf format.
// NOTE(review): t.spans is read without holding t.mu here — assumes the
// trace is quiescent when serialized; confirm callers guarantee that.
func (t *Trace) MarshalBinary() ([]byte, error) {
	wt := wire.Trace{}
	for _, sp := range t.spans {
		wt.Spans = append(wt.Spans, &wire.Span{
			Context: wire.SpanContext{
				TraceID: sp.Context.TraceID,
				SpanID:  sp.Context.SpanID,
			},
			ParentSpanID: sp.ParentSpanID,
			Name:         sp.Name,
			Start:        sp.Start,
			Labels:       labelsToWire(sp.Labels),
			Fields:       fieldsToWire(sp.Fields),
		})
	}
	return proto.Marshal(&wt)
}

// wireToFields decodes wire fields back into a fields.Fields set
// (the inverse of fieldsToWire).
func wireToFields(wfs []wire.Field) fields.Fields {
	var fs []fields.Field
	for _, wf := range wfs {
		switch wf.FieldType {
		case wire.FieldTypeString:
			fs = append(fs, fields.String(wf.Key, wf.GetStringVal()))
		case wire.FieldTypeBool:
			var boolVal bool
			if wf.GetNumericVal() != 0 {
				boolVal = true
			}
			fs = append(fs, fields.Bool(wf.Key, boolVal))
		case wire.FieldTypeInt64:
			fs = append(fs, fields.Int64(wf.Key, wf.GetNumericVal()))
		case wire.FieldTypeUint64:
			fs = append(fs, fields.Uint64(wf.Key, uint64(wf.GetNumericVal())))
		case wire.FieldTypeDuration:
			fs = append(fs, fields.Duration(wf.Key, time.Duration(wf.GetNumericVal())))
		case wire.FieldTypeFloat64:
			fs = append(fs, fields.Float64(wf.Key, math.Float64frombits(uint64(wf.GetNumericVal()))))
		}
	}
	return fields.New(fs...)
}

// UnmarshalBinary deserializes a trace produced by MarshalBinary,
// replacing any spans previously held by t.
func (t *Trace) UnmarshalBinary(data []byte) error {
	var wt wire.Trace
	if err := proto.Unmarshal(data, &wt); err != nil {
		return err
	}
	t.spans = make(map[uint64]RawSpan)
	for _, sp := range wt.Spans {
		t.spans[sp.Context.SpanID] = RawSpan{
			Context: SpanContext{
				TraceID: sp.Context.TraceID,
				SpanID:  sp.Context.SpanID,
			},
			ParentSpanID: sp.ParentSpanID,
			Name:         sp.Name,
			Start:        sp.Start,
			Labels:       labels.New(sp.Labels...),
			Fields:       wireToFields(sp.Fields),
		}
	}
	return nil
}

74
pkg/tracing/tree.go Normal file
View File

@ -0,0 +1,74 @@
package tracing
import (
"github.com/xlab/treeprint"
)
// A Visitor's Visit method is invoked for each node encountered by Walk.
// If the result of Visit is not nil, Walk visits each of the children.
type Visitor interface {
	Visit(*TreeNode) Visitor
}

// A TreeNode represents a single node in the graph.
type TreeNode struct {
	Raw      RawSpan
	Children []*TreeNode
}

// String returns the tree as a string.
func (t *TreeNode) String() string {
	if t == nil {
		return ""
	}
	tv := newTreeVisitor()
	Walk(tv, t)
	return tv.root.String()
}

// Walk traverses the graph in a depth-first order, calling v.Visit
// for each node until completion or v.Visit returns nil.
func Walk(v Visitor, node *TreeNode) {
	if v = v.Visit(node); v == nil {
		return
	}
	for _, c := range node.Children {
		Walk(v, c)
	}
}

// treeVisitor renders the span graph using treeprint; trees acts as a
// stack whose top is the branch currently being filled.
type treeVisitor struct {
	root  treeprint.Tree
	trees []treeprint.Tree
}

func newTreeVisitor() *treeVisitor {
	t := treeprint.New()
	return &treeVisitor{root: t, trees: []treeprint.Tree{t}}
}

// Visit renders n and its subtree. It recurses via Walk itself and
// returns nil so the outer Walk does not descend a second time.
func (v *treeVisitor) Visit(n *TreeNode) Visitor {
	t := v.trees[len(v.trees)-1].AddBranch(n.Raw.Name)
	v.trees = append(v.trees, t)

	if labels := n.Raw.Labels; len(labels) > 0 {
		l := t.AddBranch("labels")
		for _, ll := range n.Raw.Labels {
			l.AddNode(ll.Key + ": " + ll.Value)
		}
	}

	for _, k := range n.Raw.Fields {
		t.AddNode(k.String())
	}

	for _, cn := range n.Children {
		Walk(v, cn)
	}

	// Pop the branch stack (nil first so the popped element can be GC'd).
	v.trees[len(v.trees)-1] = nil
	v.trees = v.trees[:len(v.trees)-1]

	return nil
}

26
pkg/tracing/util.go Normal file
View File

@ -0,0 +1,26 @@
package tracing
import (
"math/rand"
"sync"
"time"
)
// A single seeded source, shared under a mutex, generates all span and
// trace identifiers.
var (
	seededIDGen  = rand.New(rand.NewSource(time.Now().UnixNano()))
	seededIDLock sync.Mutex
)

// randomID returns one random identifier in [0, 1<<63).
func randomID() uint64 {
	seededIDLock.Lock()
	defer seededIDLock.Unlock()
	return uint64(seededIDGen.Int63())
}

// randomID2 returns two random identifiers in [0, 1<<63).
func randomID2() (uint64, uint64) {
	seededIDLock.Lock()
	defer seededIDLock.Unlock()
	return uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63())
}

View File

@ -0,0 +1,7 @@
/*
Package wire is used to serialize a trace.
*/
package wire
//go:generate protoc -I$GOPATH/src -I. --gogofaster_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types:. binary.proto

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,44 @@
syntax = "proto3";
package wire;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";

// SpanContext is the minimal information identifying a span in a trace.
message SpanContext {
    uint64 trace_id = 1 [(gogoproto.customname) = "TraceID"];
    uint64 span_id = 2 [(gogoproto.customname) = "SpanID"];
}

// Span is the wire form of a recorded span.
message Span {
    SpanContext context = 1 [(gogoproto.nullable) = false];
    uint64 parent_span_id = 2 [(gogoproto.customname) = "ParentSpanID"];
    string name = 3;
    google.protobuf.Timestamp start_time = 4 [(gogoproto.customname) = "Start", (gogoproto.stdtime) = true, (gogoproto.nullable) = false];
    // labels is a flattened list of alternating key/value strings.
    repeated string labels = 5;
    repeated Field fields = 6 [(gogoproto.nullable) = false];
}

// Trace is a flat collection of spans; parent/child structure is
// reconstructed from ParentSpanID after decoding.
message Trace {
    repeated Span spans = 1;
}

// Field is a typed key/value pair; field_type tells the decoder how to
// interpret the numeric_val slot (bools as 0/1, floats as IEEE-754 bits).
message Field {
    enum FieldType {
        option (gogoproto.goproto_enum_prefix) = false;

        STRING = 0 [(gogoproto.enumvalue_customname) = "FieldTypeString"];
        BOOL = 1 [(gogoproto.enumvalue_customname) = "FieldTypeBool"];
        INT_64 = 2 [(gogoproto.enumvalue_customname) = "FieldTypeInt64"];
        UINT_64 = 3 [(gogoproto.enumvalue_customname) = "FieldTypeUint64"];
        DURATION = 4 [(gogoproto.enumvalue_customname) = "FieldTypeDuration"];
        // Tag 5 is intentionally unused.
        FLOAT_64 = 6 [(gogoproto.enumvalue_customname) = "FieldTypeFloat64"];
    }

    string key = 1;
    FieldType field_type = 2 [(gogoproto.customname) = "FieldType"];
    oneof value {
        sfixed64 numeric_val = 3 [(gogoproto.customname) = "NumericVal"];
        string string_val = 4 [(gogoproto.customname) = "StringVal"];
    }
}

View File

@ -2,6 +2,7 @@ package query
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"strings" "strings"
@ -13,7 +14,7 @@ func (p *preparedStatement) Explain() (string, error) {
// Determine the cost of all iterators created as part of this plan. // Determine the cost of all iterators created as part of this plan.
ic := &explainIteratorCreator{ic: p.ic} ic := &explainIteratorCreator{ic: p.ic}
p.ic = ic p.ic = ic
itrs, _, err := p.Select() itrs, _, err := p.Select(context.Background())
p.ic = ic.ic p.ic = ic.ic
if err != nil { if err != nil {
@ -63,7 +64,7 @@ type explainIteratorCreator struct {
nodes []planNode nodes []planNode
} }
func (e *explainIteratorCreator) CreateIterator(m *influxql.Measurement, opt IteratorOptions) (Iterator, error) { func (e *explainIteratorCreator) CreateIterator(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (Iterator, error) {
cost, err := e.ic.IteratorCost(m, opt) cost, err := e.ic.IteratorCost(m, opt)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -1,6 +1,5 @@
// Code generated by protoc-gen-gogo. // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: internal/internal.proto // source: internal/internal.proto
// DO NOT EDIT!
/* /*
Package query is a generated protocol buffer package. Package query is a generated protocol buffer package.
@ -48,6 +47,7 @@ type Point struct {
BooleanValue *bool `protobuf:"varint,10,opt,name=BooleanValue" json:"BooleanValue,omitempty"` BooleanValue *bool `protobuf:"varint,10,opt,name=BooleanValue" json:"BooleanValue,omitempty"`
UnsignedValue *uint64 `protobuf:"varint,12,opt,name=UnsignedValue" json:"UnsignedValue,omitempty"` UnsignedValue *uint64 `protobuf:"varint,12,opt,name=UnsignedValue" json:"UnsignedValue,omitempty"`
Stats *IteratorStats `protobuf:"bytes,11,opt,name=Stats" json:"Stats,omitempty"` Stats *IteratorStats `protobuf:"bytes,11,opt,name=Stats" json:"Stats,omitempty"`
Trace []byte `protobuf:"bytes,13,opt,name=Trace" json:"Trace,omitempty"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
} }
@ -140,6 +140,13 @@ func (m *Point) GetStats() *IteratorStats {
return nil return nil
} }
func (m *Point) GetTrace() []byte {
if m != nil {
return m.Trace
}
return nil
}
type Aux struct { type Aux struct {
DataType *int32 `protobuf:"varint,1,req,name=DataType" json:"DataType,omitempty"` DataType *int32 `protobuf:"varint,1,req,name=DataType" json:"DataType,omitempty"`
FloatValue *float64 `protobuf:"fixed64,2,opt,name=FloatValue" json:"FloatValue,omitempty"` FloatValue *float64 `protobuf:"fixed64,2,opt,name=FloatValue" json:"FloatValue,omitempty"`
@ -537,54 +544,54 @@ func init() {
func init() { proto.RegisterFile("internal/internal.proto", fileDescriptorInternal) } func init() { proto.RegisterFile("internal/internal.proto", fileDescriptorInternal) }
var fileDescriptorInternal = []byte{ var fileDescriptorInternal = []byte{
// 769 bytes of a gzipped FileDescriptorProto // 780 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xe1, 0x8e, 0xe3, 0x34, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdb, 0x6e, 0xe3, 0x36,
0x10, 0x56, 0x92, 0x4d, 0xb7, 0x71, 0xb7, 0xf4, 0x30, 0x65, 0xb1, 0xd0, 0x09, 0x45, 0x11, 0x48, 0x10, 0x05, 0xa5, 0xc8, 0xb1, 0xe8, 0xb8, 0xd9, 0xb2, 0x69, 0x4a, 0x14, 0x8b, 0x42, 0x10, 0x5a,
0x11, 0xa0, 0x22, 0xed, 0x2f, 0x7e, 0x21, 0xf5, 0xd8, 0x5b, 0x54, 0xe9, 0xae, 0x7b, 0x72, 0x97, 0x40, 0x68, 0x8b, 0x14, 0xc8, 0x53, 0x9f, 0x0a, 0x78, 0x9b, 0x4d, 0x11, 0x60, 0x37, 0x59, 0xd0,
0xfd, 0x6f, 0x9a, 0xd9, 0xc8, 0x52, 0xea, 0x14, 0xc7, 0x41, 0xed, 0x03, 0xf0, 0x10, 0x3c, 0x16, 0x6e, 0xde, 0x59, 0x6b, 0x22, 0x10, 0x90, 0x29, 0x97, 0xa2, 0x0a, 0xfb, 0x03, 0xf6, 0x23, 0xfa,
0x4f, 0xc2, 0x2b, 0x20, 0x8f, 0x9d, 0x34, 0x5d, 0x81, 0xf6, 0x7e, 0x75, 0xbe, 0x6f, 0xa6, 0x63, 0x59, 0xfd, 0xa3, 0x82, 0x43, 0x4a, 0x96, 0x83, 0x16, 0xd9, 0x27, 0xcf, 0x39, 0x33, 0xe6, 0xe5,
0xcf, 0xcc, 0x37, 0x0e, 0xf9, 0x42, 0x2a, 0x03, 0x5a, 0x89, 0xea, 0x87, 0xce, 0x58, 0xec, 0x75, 0xcc, 0x19, 0x8a, 0x7e, 0xa5, 0xb4, 0x05, 0xa3, 0x65, 0xfd, 0x53, 0x1f, 0x5c, 0x6d, 0x4d, 0x63,
0x6d, 0x6a, 0x1a, 0xff, 0xde, 0x82, 0x3e, 0x66, 0xff, 0x84, 0x24, 0xfe, 0x50, 0x4b, 0x65, 0x28, 0x1b, 0x96, 0xfc, 0xd9, 0x81, 0xd9, 0xe7, 0x1f, 0x63, 0x9a, 0x7c, 0x68, 0x94, 0xb6, 0x8c, 0xd1,
0x25, 0x17, 0x6b, 0xb1, 0x03, 0x16, 0xa4, 0x61, 0x9e, 0x70, 0xb4, 0x2d, 0xf7, 0x20, 0xca, 0x86, 0x93, 0x7b, 0xb9, 0x01, 0x4e, 0xb2, 0xa8, 0x48, 0x05, 0xc6, 0x8e, 0x5b, 0xc9, 0xaa, 0xe5, 0x91,
0x85, 0x8e, 0xb3, 0x36, 0x72, 0x72, 0x07, 0x2c, 0x4a, 0xc3, 0x3c, 0xe2, 0x68, 0xd3, 0x57, 0x24, 0xe7, 0x5c, 0x8c, 0x9c, 0xda, 0x00, 0x8f, 0xb3, 0xa8, 0x88, 0x05, 0xc6, 0xec, 0x15, 0x8d, 0xef,
0x5a, 0xcb, 0x8a, 0x5d, 0xa4, 0x61, 0x3e, 0xe6, 0xd6, 0xa4, 0xaf, 0x49, 0xb4, 0x6c, 0x0f, 0x2c, 0x55, 0xcd, 0x4f, 0xb2, 0xa8, 0x98, 0x0a, 0x17, 0xb2, 0xd7, 0x34, 0x5e, 0x74, 0x3b, 0x9e, 0x64,
0x4e, 0xa3, 0x7c, 0x72, 0x43, 0x16, 0x78, 0xd8, 0x62, 0xd9, 0x1e, 0xb8, 0xa5, 0xe9, 0x57, 0x84, 0x71, 0x31, 0xbb, 0xa6, 0x57, 0xb8, 0xd9, 0xd5, 0xa2, 0xdb, 0x09, 0x47, 0xb3, 0x6f, 0x28, 0x5d,
0x2c, 0xcb, 0x52, 0x43, 0x29, 0x0c, 0x14, 0x6c, 0x94, 0x06, 0xf9, 0x94, 0x0f, 0x18, 0xeb, 0xbf, 0x54, 0x95, 0x81, 0x4a, 0x5a, 0x28, 0xf9, 0x24, 0x23, 0xc5, 0x5c, 0x8c, 0x18, 0x97, 0xbf, 0xad,
0xab, 0x6a, 0x61, 0x1e, 0x45, 0xd5, 0x02, 0xbb, 0x4c, 0x83, 0x3c, 0xe0, 0x03, 0x86, 0x66, 0xe4, 0x1b, 0x69, 0x1f, 0x65, 0xdd, 0x01, 0x3f, 0xcd, 0x48, 0x41, 0xc4, 0x88, 0x61, 0x39, 0x3d, 0xbb,
0x6a, 0xa5, 0x0c, 0x94, 0xa0, 0x5d, 0xc4, 0x38, 0x0d, 0xf2, 0x88, 0x9f, 0x71, 0x34, 0x25, 0x93, 0xd3, 0x16, 0x2a, 0x30, 0xbe, 0x62, 0x9a, 0x91, 0x22, 0x16, 0x47, 0x1c, 0xcb, 0xe8, 0x6c, 0x69,
0x8d, 0xd1, 0x52, 0x95, 0x2e, 0x24, 0x49, 0x83, 0x3c, 0xe1, 0x43, 0xca, 0x66, 0x79, 0x53, 0xd7, 0x8d, 0xd2, 0x95, 0x2f, 0x49, 0x33, 0x52, 0xa4, 0x62, 0x4c, 0xb9, 0x55, 0xde, 0x34, 0x4d, 0x0d,
0x15, 0x08, 0xe5, 0x42, 0x48, 0x1a, 0xe4, 0x63, 0x7e, 0xc6, 0xd1, 0xaf, 0xc9, 0xf4, 0x57, 0xd5, 0x52, 0xfb, 0x12, 0x9a, 0x91, 0x62, 0x2a, 0x8e, 0x38, 0xf6, 0x2d, 0x9d, 0xff, 0xae, 0x5b, 0x55,
0xc8, 0x52, 0x41, 0xe1, 0x82, 0xae, 0xd2, 0x20, 0xbf, 0xe0, 0xe7, 0x24, 0xfd, 0x96, 0xc4, 0x1b, 0x69, 0x28, 0x7d, 0xd1, 0x59, 0x46, 0x8a, 0x13, 0x71, 0x4c, 0xb2, 0xef, 0x69, 0xb2, 0xb4, 0xd2,
0x23, 0x4c, 0xc3, 0x26, 0x69, 0x90, 0x4f, 0x6e, 0xe6, 0xbe, 0xde, 0x95, 0x01, 0x2d, 0x4c, 0xad, 0xb6, 0x7c, 0x96, 0x91, 0x62, 0x76, 0x7d, 0x11, 0xee, 0x7b, 0x67, 0xc1, 0x48, 0xdb, 0x18, 0xcc,
0xd1, 0xc7, 0x5d, 0x48, 0xf6, 0x77, 0x80, 0xad, 0xa1, 0x5f, 0x92, 0xf1, 0xad, 0x30, 0xe2, 0xe1, 0x09, 0x5f, 0xc2, 0x2e, 0x68, 0xb2, 0x32, 0x72, 0x0d, 0x7c, 0x9e, 0x91, 0xe2, 0x4c, 0x78, 0x90,
0xb8, 0x77, 0x3d, 0x8f, 0x79, 0x8f, 0x9f, 0xd5, 0x1f, 0xbe, 0x58, 0x7f, 0xf4, 0x72, 0xfd, 0x17, 0xff, 0x43, 0x50, 0x30, 0xf6, 0x35, 0x9d, 0xde, 0x48, 0x2b, 0x57, 0xfb, 0xad, 0xef, 0x44, 0x22,
0x2f, 0xd7, 0x1f, 0x7f, 0x4c, 0xfd, 0xa3, 0xff, 0xa8, 0x3f, 0xfb, 0x33, 0x26, 0xb3, 0xae, 0xd8, 0x06, 0xfc, 0x4c, 0x95, 0xe8, 0x45, 0x55, 0xe2, 0x97, 0x55, 0x39, 0x79, 0x59, 0x95, 0xe4, 0x53,
0xfb, 0xbd, 0x91, 0xb5, 0x42, 0x9d, 0xbc, 0x3d, 0xec, 0x35, 0x0b, 0xf0, 0x60, 0xb4, 0xad, 0x4e, 0x54, 0x99, 0xfc, 0x87, 0x2a, 0xf9, 0xc7, 0x84, 0x9e, 0xf7, 0x12, 0x3c, 0x6c, 0xad, 0x6a, 0x34,
0xac, 0x2a, 0xc2, 0x34, 0xca, 0x13, 0xa7, 0x84, 0x6f, 0xc8, 0xe8, 0x4e, 0x42, 0x55, 0x34, 0xec, 0xba, 0xe7, 0xed, 0x6e, 0x6b, 0x38, 0xc1, 0x8d, 0x31, 0x76, 0xee, 0x71, 0x5e, 0x89, 0xb2, 0xb8,
0x53, 0x94, 0xca, 0xd4, 0xb7, 0xee, 0x51, 0x68, 0x0e, 0x4f, 0xdc, 0x3b, 0xe9, 0xf7, 0xe4, 0x72, 0x48, 0xbd, 0x3f, 0xbe, 0xa3, 0x93, 0x5b, 0x05, 0x75, 0xd9, 0xf2, 0xcf, 0xd1, 0x40, 0xf3, 0x20,
0x53, 0xb7, 0x7a, 0x0b, 0x0d, 0x8b, 0x30, 0x8e, 0xfa, 0xb8, 0xf7, 0x20, 0x9a, 0x56, 0xc3, 0x0e, 0xe8, 0xa3, 0x34, 0x02, 0x9e, 0x44, 0x48, 0xb2, 0x1f, 0xe9, 0xe9, 0xb2, 0xe9, 0xcc, 0x1a, 0x5a,
0x94, 0xe1, 0x5d, 0x08, 0xfd, 0x8e, 0x8c, 0x6d, 0x2b, 0xf4, 0x1f, 0xa2, 0xc2, 0xba, 0x27, 0x37, 0x1e, 0x63, 0x1d, 0x0b, 0x75, 0xef, 0x41, 0xb6, 0x9d, 0x81, 0x0d, 0x68, 0x2b, 0xfa, 0x12, 0xf6,
0xb3, 0x6e, 0x22, 0x9e, 0xe6, 0x7d, 0x80, 0xed, 0xf5, 0xad, 0xdc, 0x81, 0x6a, 0xec, 0xad, 0x51, 0x03, 0x9d, 0x3a, 0x29, 0xcc, 0x5f, 0xb2, 0xc6, 0x7b, 0xcf, 0xae, 0xcf, 0xfb, 0x3e, 0x05, 0x5a,
0xb0, 0x09, 0x1f, 0x30, 0x94, 0x91, 0xcb, 0x5f, 0x74, 0xdd, 0xee, 0xdf, 0x1c, 0xd9, 0x67, 0xe8, 0x0c, 0x05, 0x4e, 0xeb, 0x1b, 0xb5, 0x01, 0xdd, 0xba, 0x53, 0xa3, 0x8d, 0x53, 0x31, 0x62, 0x18,
0xec, 0xa0, 0xad, 0xf0, 0x4e, 0x56, 0x15, 0xb6, 0x24, 0xe6, 0x68, 0xd3, 0xd7, 0x24, 0xb1, 0xbf, 0xa7, 0xa7, 0xbf, 0x99, 0xa6, 0xdb, 0xbe, 0xd9, 0xf3, 0x2f, 0x30, 0xd9, 0x43, 0x77, 0xc3, 0x5b,
0x43, 0xe1, 0x9e, 0x08, 0xeb, 0xfd, 0xb9, 0x56, 0x85, 0xb4, 0x1d, 0x42, 0xd1, 0x26, 0xfc, 0x44, 0x55, 0xd7, 0x28, 0x49, 0x22, 0x30, 0x66, 0xaf, 0x69, 0xea, 0x7e, 0xc7, 0x76, 0x3e, 0x10, 0x2e,
0x58, 0xef, 0xc6, 0x08, 0x6d, 0x70, 0xbd, 0x12, 0x1c, 0xe9, 0x89, 0xb0, 0xf7, 0x78, 0xab, 0x0a, 0xfb, 0x6b, 0xa3, 0x4b, 0xe5, 0x14, 0x42, 0x2b, 0xa7, 0xe2, 0x40, 0xb8, 0xec, 0xd2, 0x4a, 0x63,
0xf4, 0x11, 0xf4, 0x75, 0xd0, 0x2a, 0xe9, 0x5d, 0xbd, 0x15, 0x98, 0xf4, 0x73, 0x4c, 0xda, 0x63, 0x71, 0xe8, 0x52, 0x6c, 0xe9, 0x81, 0x70, 0xe7, 0x78, 0xab, 0x4b, 0xcc, 0x51, 0xcc, 0xf5, 0xd0,
0x9b, 0x73, 0xd9, 0x6c, 0x41, 0x15, 0x52, 0x95, 0xa8, 0xce, 0x31, 0x3f, 0x11, 0x74, 0x4e, 0xe2, 0x39, 0xe9, 0x5d, 0xb3, 0x96, 0xb8, 0xe8, 0x97, 0xb8, 0xe8, 0x80, 0xdd, 0x9a, 0x8b, 0x76, 0x0d,
0x77, 0x72, 0x27, 0x0d, 0xaa, 0x3a, 0xe2, 0x0e, 0xd0, 0x6b, 0x32, 0xba, 0x7f, 0x7a, 0x6a, 0xc0, 0xba, 0x54, 0xba, 0x42, 0xcf, 0x4e, 0xc5, 0x81, 0x70, 0x0e, 0x7d, 0xa7, 0x36, 0xca, 0xa2, 0xd7,
0xb0, 0x29, 0xd2, 0x1e, 0x59, 0x7e, 0xe3, 0xc2, 0x3f, 0x71, 0xbc, 0x43, 0xf6, 0x66, 0x1b, 0xff, 0x63, 0xe1, 0x01, 0xbb, 0xa4, 0x93, 0x87, 0xa7, 0xa7, 0x16, 0x2c, 0x1a, 0x37, 0x16, 0x01, 0x39,
0x87, 0x99, 0xbb, 0x99, 0x87, 0xae, 0x22, 0x2d, 0xf7, 0xf8, 0xb0, 0x5c, 0xbb, 0xd3, 0x7b, 0xc2, 0x7e, 0xe9, 0xcb, 0x3f, 0xf3, 0xbc, 0x47, 0xee, 0x64, 0xcb, 0xf0, 0x87, 0x73, 0x7f, 0xb2, 0x00,
0xe6, 0xbb, 0x85, 0xa2, 0xdd, 0x03, 0x7b, 0x85, 0x2e, 0x8f, 0xec, 0x44, 0xde, 0x8b, 0xc3, 0x06, 0xfd, 0x8d, 0x8c, 0xda, 0xe2, 0x73, 0x73, 0xe9, 0x77, 0x1f, 0x08, 0xb7, 0xde, 0x0d, 0x94, 0xdd,
0xb4, 0x84, 0x66, 0xcd, 0x28, 0xa6, 0x1c, 0x30, 0xf6, 0xbc, 0x7b, 0x5d, 0x80, 0x86, 0x82, 0xcd, 0x16, 0xf8, 0x2b, 0x4c, 0x05, 0xe4, 0x3a, 0xf2, 0x5e, 0xee, 0x96, 0x60, 0x14, 0xb4, 0xf7, 0x9c,
0xf1, 0x8f, 0x1d, 0xcc, 0x7e, 0x24, 0x57, 0x03, 0x41, 0x34, 0x34, 0x27, 0xf1, 0xca, 0xc0, 0xae, 0xe1, 0x92, 0x23, 0xc6, 0xed, 0xf7, 0x60, 0x4a, 0x30, 0x50, 0xf2, 0x0b, 0xfc, 0x63, 0x0f, 0xf3,
0x61, 0xc1, 0xff, 0x8a, 0xc6, 0x05, 0x64, 0x7f, 0x05, 0x64, 0x32, 0xa0, 0xbb, 0xed, 0xfc, 0x4d, 0x9f, 0xe9, 0xd9, 0xc8, 0x10, 0x2d, 0x2b, 0x68, 0x72, 0x67, 0x61, 0xd3, 0x72, 0xf2, 0xbf, 0xa6,
0x34, 0xe0, 0x15, 0xdc, 0x63, 0x9a, 0x93, 0x19, 0x07, 0x03, 0xca, 0x36, 0xf8, 0x43, 0x5d, 0xc9, 0xf1, 0x05, 0xf9, 0xdf, 0x84, 0xce, 0x46, 0x74, 0x3f, 0x9d, 0x7f, 0xc8, 0x16, 0x82, 0x83, 0x07,
0xed, 0x11, 0x57, 0x34, 0xe1, 0xcf, 0xe9, 0xfe, 0x4d, 0x8d, 0xdc, 0x0e, 0x60, 0xd5, 0x73, 0x12, 0xcc, 0x0a, 0x7a, 0x2e, 0xc0, 0x82, 0x76, 0x02, 0x7f, 0x68, 0x6a, 0xb5, 0xde, 0xe3, 0x88, 0xa6,
0x73, 0x28, 0xe1, 0xe0, 0x37, 0xd2, 0x01, 0x7b, 0xde, 0xaa, 0x79, 0x10, 0xba, 0x04, 0xe3, 0xf7, 0xe2, 0x39, 0x3d, 0xbc, 0xb4, 0xb1, 0x9f, 0x01, 0xbc, 0xf5, 0x05, 0x4d, 0x04, 0x54, 0xb0, 0x0b,
0xb0, 0xc7, 0xd9, 0x4f, 0x27, 0x39, 0xe3, 0xbd, 0x5a, 0xed, 0x66, 0x1d, 0x60, 0x67, 0x7a, 0x3c, 0x13, 0xe9, 0x81, 0xdb, 0xef, 0xae, 0x5d, 0x49, 0x53, 0x81, 0x0d, 0x73, 0x38, 0xe0, 0xfc, 0x97,
0x98, 0x5b, 0x38, 0x9c, 0x5b, 0xb6, 0x24, 0xd3, 0xb3, 0x97, 0x08, 0x07, 0xe6, 0xbb, 0x1b, 0xf8, 0x83, 0x9d, 0xf1, 0x5c, 0x9d, 0xf1, 0xbd, 0x26, 0xa8, 0xcc, 0x80, 0x47, 0x7d, 0x8b, 0xc6, 0x7d,
0x81, 0xf9, 0xd6, 0x5e, 0x93, 0x11, 0x7e, 0x0d, 0xd6, 0x5d, 0x0a, 0x87, 0xb2, 0x05, 0x19, 0xb9, 0xcb, 0x17, 0x74, 0x7e, 0xf4, 0x3e, 0x61, 0xc3, 0x82, 0xba, 0x24, 0x34, 0x2c, 0x48, 0x7b, 0x49,
0x8d, 0xb4, 0x2b, 0xfc, 0x28, 0x2a, 0xff, 0x95, 0xb0, 0x26, 0x7e, 0x10, 0xec, 0x23, 0x16, 0xba, 0x27, 0xf8, 0x8d, 0xb8, 0xef, 0x97, 0xf0, 0x28, 0xbf, 0xa2, 0x13, 0x3f, 0x91, 0x6e, 0x84, 0x1f,
0x35, 0xb0, 0xf6, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x55, 0x55, 0xec, 0xe3, 0x77, 0x06, 0x00, 0x65, 0x1d, 0xbe, 0x1d, 0x2e, 0xc4, 0xcf, 0x84, 0x7b, 0xc4, 0x22, 0x3f, 0x06, 0x2e, 0xfe, 0x37,
0x00, 0x00, 0x00, 0xff, 0xff, 0x3d, 0xd1, 0x25, 0x7b, 0x8d, 0x06, 0x00, 0x00,
} }

View File

@ -16,6 +16,7 @@ message Point {
optional uint64 UnsignedValue = 12; optional uint64 UnsignedValue = 12;
optional IteratorStats Stats = 11; optional IteratorStats Stats = 11;
optional bytes Trace = 13;
} }
message Aux { message Aux {

View File

@ -8,8 +8,7 @@ package query
import ( import (
"container/heap" "container/heap"
"encoding/binary" "context"
"fmt"
"io" "io"
"sort" "sort"
"sync" "sync"
@ -17,7 +16,6 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/influxql" "github.com/influxdata/influxdb/influxql"
internal "github.com/influxdata/influxdb/query/internal"
) )
// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval. // DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval.
@ -3384,8 +3382,8 @@ type floatReaderIterator struct {
} }
// newFloatReaderIterator returns a new instance of floatReaderIterator. // newFloatReaderIterator returns a new instance of floatReaderIterator.
func newFloatReaderIterator(r io.Reader, stats IteratorStats) *floatReaderIterator { func newFloatReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *floatReaderIterator {
dec := NewFloatPointDecoder(r) dec := NewFloatPointDecoder(ctx, r)
dec.stats = stats dec.stats = stats
return &floatReaderIterator{ return &floatReaderIterator{
@ -6777,8 +6775,8 @@ type integerReaderIterator struct {
} }
// newIntegerReaderIterator returns a new instance of integerReaderIterator. // newIntegerReaderIterator returns a new instance of integerReaderIterator.
func newIntegerReaderIterator(r io.Reader, stats IteratorStats) *integerReaderIterator { func newIntegerReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *integerReaderIterator {
dec := NewIntegerPointDecoder(r) dec := NewIntegerPointDecoder(ctx, r)
dec.stats = stats dec.stats = stats
return &integerReaderIterator{ return &integerReaderIterator{
@ -10170,8 +10168,8 @@ type unsignedReaderIterator struct {
} }
// newUnsignedReaderIterator returns a new instance of unsignedReaderIterator. // newUnsignedReaderIterator returns a new instance of unsignedReaderIterator.
func newUnsignedReaderIterator(r io.Reader, stats IteratorStats) *unsignedReaderIterator { func newUnsignedReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *unsignedReaderIterator {
dec := NewUnsignedPointDecoder(r) dec := NewUnsignedPointDecoder(ctx, r)
dec.stats = stats dec.stats = stats
return &unsignedReaderIterator{ return &unsignedReaderIterator{
@ -13549,8 +13547,8 @@ type stringReaderIterator struct {
} }
// newStringReaderIterator returns a new instance of stringReaderIterator. // newStringReaderIterator returns a new instance of stringReaderIterator.
func newStringReaderIterator(r io.Reader, stats IteratorStats) *stringReaderIterator { func newStringReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *stringReaderIterator {
dec := NewStringPointDecoder(r) dec := NewStringPointDecoder(ctx, r)
dec.stats = stats dec.stats = stats
return &stringReaderIterator{ return &stringReaderIterator{
@ -16928,8 +16926,8 @@ type booleanReaderIterator struct {
} }
// newBooleanReaderIterator returns a new instance of booleanReaderIterator. // newBooleanReaderIterator returns a new instance of booleanReaderIterator.
func newBooleanReaderIterator(r io.Reader, stats IteratorStats) *booleanReaderIterator { func newBooleanReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *booleanReaderIterator {
dec := NewBooleanPointDecoder(r) dec := NewBooleanPointDecoder(ctx, r)
dec.stats = stats dec.stats = stats
return &booleanReaderIterator{ return &booleanReaderIterator{
@ -16963,39 +16961,6 @@ func (itr *booleanReaderIterator) Next() (*BooleanPoint, error) {
return p, nil return p, nil
} }
// IteratorEncoder is an encoder for encoding an iterator's points to w.
type IteratorEncoder struct {
w io.Writer
// Frequency with which stats are emitted.
StatsInterval time.Duration
}
// NewIteratorEncoder encodes an iterator's points to w.
func NewIteratorEncoder(w io.Writer) *IteratorEncoder {
return &IteratorEncoder{
w: w,
StatsInterval: DefaultStatsInterval,
}
}
// EncodeIterator encodes and writes all of itr's points to the underlying writer.
func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error {
switch itr := itr.(type) {
case FloatIterator:
return enc.encodeFloatIterator(itr)
case IntegerIterator:
return enc.encodeIntegerIterator(itr)
case StringIterator:
return enc.encodeStringIterator(itr)
case BooleanIterator:
return enc.encodeBooleanIterator(itr)
default:
panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr))
}
}
// encodeFloatIterator encodes all points from itr to the underlying writer. // encodeFloatIterator encodes all points from itr to the underlying writer.
func (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error { func (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error {
ticker := time.NewTicker(enc.StatsInterval) ticker := time.NewTicker(enc.StatsInterval)
@ -17210,26 +17175,3 @@ func (enc *IteratorEncoder) encodeBooleanIterator(itr BooleanIterator) error {
} }
return nil return nil
} }
// encode a stats object in the point stream.
func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error {
buf, err := proto.Marshal(&internal.Point{
Name: proto.String(""),
Tags: proto.String(""),
Time: proto.Int64(0),
Nil: proto.Bool(false),
Stats: encodeIteratorStats(&stats),
})
if err != nil {
return err
}
if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
return err
}
if _, err := enc.w.Write(buf); err != nil {
return err
}
return nil
}

View File

@ -1,9 +1,8 @@
package query package query
import ( import (
"context"
"container/heap" "container/heap"
"encoding/binary"
"fmt"
"io" "io"
"sort" "sort"
"sync" "sync"
@ -11,7 +10,6 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/influxql" "github.com/influxdata/influxdb/influxql"
internal "github.com/influxdata/influxdb/query/internal"
) )
// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval. // DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval.
@ -1723,13 +1721,13 @@ type {{$k.name}}ReaderIterator struct {
} }
// new{{$k.Name}}ReaderIterator returns a new instance of {{$k.name}}ReaderIterator. // new{{$k.Name}}ReaderIterator returns a new instance of {{$k.name}}ReaderIterator.
func new{{$k.Name}}ReaderIterator(r io.Reader, stats IteratorStats) *{{$k.name}}ReaderIterator { func new{{$k.Name}}ReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *{{$k.name}}ReaderIterator {
dec := New{{$k.Name}}PointDecoder(r) dec := New{{$k.Name}}PointDecoder(ctx, r)
dec.stats = stats dec.stats = stats
return &{{$k.name}}ReaderIterator{ return &{{$k.name}}ReaderIterator{
r: r, r: r,
dec: dec, dec: dec,
} }
} }
@ -1759,40 +1757,6 @@ func (itr *{{$k.name}}ReaderIterator) Next() (*{{$k.Name}}Point, error) {
} }
{{end}} {{end}}
// IteratorEncoder is an encoder for encoding an iterator's points to w.
type IteratorEncoder struct {
w io.Writer
// Frequency with which stats are emitted.
StatsInterval time.Duration
}
// NewIteratorEncoder encodes an iterator's points to w.
func NewIteratorEncoder(w io.Writer) *IteratorEncoder {
return &IteratorEncoder{
w: w,
StatsInterval: DefaultStatsInterval,
}
}
// EncodeIterator encodes and writes all of itr's points to the underlying writer.
func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error {
switch itr := itr.(type) {
case FloatIterator:
return enc.encodeFloatIterator(itr)
case IntegerIterator:
return enc.encodeIntegerIterator(itr)
case StringIterator:
return enc.encodeStringIterator(itr)
case BooleanIterator:
return enc.encodeBooleanIterator(itr)
default:
panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr))
}
}
{{range .}} {{range .}}
// encode{{.Name}}Iterator encodes all points from itr to the underlying writer. // encode{{.Name}}Iterator encodes all points from itr to the underlying writer.
func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error { func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error {
@ -1839,27 +1803,4 @@ func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error
{{end}} {{end}}
// encode a stats object in the point stream.
func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error {
buf, err := proto.Marshal(&internal.Point{
Name: proto.String(""),
Tags: proto.String(""),
Time: proto.Int64(0),
Nil: proto.Bool(false),
Stats: encodeIteratorStats(&stats),
})
if err != nil {
return err
}
if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
return err
}
if _, err := enc.w.Write(buf); err != nil {
return err
}
return nil
}
{{end}} {{end}}

View File

@ -1,16 +1,18 @@
package query package query
import ( import (
"context"
"encoding/binary"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"regexp"
"sync" "sync"
"time" "time"
"regexp"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/influxql" "github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/pkg/tracing"
internal "github.com/influxdata/influxdb/query/internal" internal "github.com/influxdata/influxdb/query/internal"
) )
@ -640,18 +642,18 @@ func DrainIterators(itrs []Iterator) {
} }
// NewReaderIterator returns an iterator that streams from a reader. // NewReaderIterator returns an iterator that streams from a reader.
func NewReaderIterator(r io.Reader, typ influxql.DataType, stats IteratorStats) Iterator { func NewReaderIterator(ctx context.Context, r io.Reader, typ influxql.DataType, stats IteratorStats) Iterator {
switch typ { switch typ {
case influxql.Float: case influxql.Float:
return newFloatReaderIterator(r, stats) return newFloatReaderIterator(ctx, r, stats)
case influxql.Integer: case influxql.Integer:
return newIntegerReaderIterator(r, stats) return newIntegerReaderIterator(ctx, r, stats)
case influxql.Unsigned: case influxql.Unsigned:
return newUnsignedReaderIterator(r, stats) return newUnsignedReaderIterator(ctx, r, stats)
case influxql.String: case influxql.String:
return newStringReaderIterator(r, stats) return newStringReaderIterator(ctx, r, stats)
case influxql.Boolean: case influxql.Boolean:
return newBooleanReaderIterator(r, stats) return newBooleanReaderIterator(ctx, r, stats)
default: default:
return &nilFloatReaderIterator{r: r} return &nilFloatReaderIterator{r: r}
} }
@ -660,7 +662,7 @@ func NewReaderIterator(r io.Reader, typ influxql.DataType, stats IteratorStats)
// IteratorCreator is an interface to create Iterators. // IteratorCreator is an interface to create Iterators.
type IteratorCreator interface { type IteratorCreator interface {
// Creates a simple iterator for use in an InfluxQL query. // Creates a simple iterator for use in an InfluxQL query.
CreateIterator(source *influxql.Measurement, opt IteratorOptions) (Iterator, error) CreateIterator(ctx context.Context, source *influxql.Measurement, opt IteratorOptions) (Iterator, error)
// Determines the potential cost for creating an iterator. // Determines the potential cost for creating an iterator.
IteratorCost(source *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) IteratorCost(source *influxql.Measurement, opt IteratorOptions) (IteratorCost, error)
@ -1426,6 +1428,22 @@ func decodeIteratorStats(pb *internal.IteratorStats) IteratorStats {
} }
} }
func decodeIteratorTrace(ctx context.Context, data []byte) error {
pt := tracing.TraceFromContext(ctx)
if pt == nil {
return nil
}
var ct tracing.Trace
if err := ct.UnmarshalBinary(data); err != nil {
return err
}
pt.Merge(&ct)
return nil
}
// IteratorCost contains statistics retrieved for explaining what potential // IteratorCost contains statistics retrieved for explaining what potential
// cost may be incurred by instantiating an iterator. // cost may be incurred by instantiating an iterator.
type IteratorCost struct { type IteratorCost struct {
@ -1530,3 +1548,86 @@ func abs(v int64) int64 {
} }
return v return v
} }
// IteratorEncoder is an encoder for encoding an iterator's points to w.
type IteratorEncoder struct {
w io.Writer
// Frequency with which stats are emitted.
StatsInterval time.Duration
}
// NewIteratorEncoder encodes an iterator's points to w.
func NewIteratorEncoder(w io.Writer) *IteratorEncoder {
return &IteratorEncoder{
w: w,
StatsInterval: DefaultStatsInterval,
}
}
// EncodeIterator encodes and writes all of itr's points to the underlying writer.
func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error {
switch itr := itr.(type) {
case FloatIterator:
return enc.encodeFloatIterator(itr)
case IntegerIterator:
return enc.encodeIntegerIterator(itr)
case StringIterator:
return enc.encodeStringIterator(itr)
case BooleanIterator:
return enc.encodeBooleanIterator(itr)
default:
panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr))
}
}
func (enc *IteratorEncoder) EncodeTrace(trace *tracing.Trace) error {
data, err := trace.MarshalBinary()
if err != nil {
return err
}
buf, err := proto.Marshal(&internal.Point{
Name: proto.String(""),
Tags: proto.String(""),
Time: proto.Int64(0),
Nil: proto.Bool(false),
Trace: data,
})
if err != nil {
return err
}
if err = binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
return err
}
if _, err = enc.w.Write(buf); err != nil {
return err
}
return nil
}
// encode a stats object in the point stream.
func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error {
buf, err := proto.Marshal(&internal.Point{
Name: proto.String(""),
Tags: proto.String(""),
Time: proto.Int64(0),
Nil: proto.Bool(false),
Stats: encodeIteratorStats(&stats),
})
if err != nil {
return err
}
if err = binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
return err
}
if _, err = enc.w.Write(buf); err != nil {
return err
}
return nil
}

View File

@ -2,6 +2,7 @@ package query_test
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"math" "math"
"reflect" "reflect"
@ -1525,7 +1526,7 @@ func TestIterator_EncodeDecode(t *testing.T) {
} }
// Decode from the buffer. // Decode from the buffer.
dec := query.NewReaderIterator(&buf, influxql.Float, itr.Stats()) dec := query.NewReaderIterator(context.Background(), &buf, influxql.Float, itr.Stats())
// Initial stats should exist immediately. // Initial stats should exist immediately.
fdec := dec.(query.FloatIterator) fdec := dec.(query.FloatIterator)
@ -1553,12 +1554,12 @@ func TestIterator_EncodeDecode(t *testing.T) {
// IteratorCreator is a mockable implementation of SelectStatementExecutor.IteratorCreator. // IteratorCreator is a mockable implementation of SelectStatementExecutor.IteratorCreator.
type IteratorCreator struct { type IteratorCreator struct {
CreateIteratorFn func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) CreateIteratorFn func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error)
FieldDimensionsFn func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) FieldDimensionsFn func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
} }
func (ic *IteratorCreator) CreateIterator(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { func (ic *IteratorCreator) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
return ic.CreateIteratorFn(m, opt) return ic.CreateIteratorFn(ctx, m, opt)
} }
func (ic *IteratorCreator) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { func (ic *IteratorCreator) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {

View File

@ -7,6 +7,7 @@
package query package query
import ( import (
"context"
"encoding/binary" "encoding/binary"
"io" "io"
@ -181,11 +182,12 @@ func (enc *FloatPointEncoder) EncodeFloatPoint(p *FloatPoint) error {
type FloatPointDecoder struct { type FloatPointDecoder struct {
r io.Reader r io.Reader
stats IteratorStats stats IteratorStats
ctx context.Context
} }
// NewFloatPointDecoder returns a new instance of FloatPointDecoder that reads from r. // NewFloatPointDecoder returns a new instance of FloatPointDecoder that reads from r.
func NewFloatPointDecoder(r io.Reader) *FloatPointDecoder { func NewFloatPointDecoder(ctx context.Context, r io.Reader) *FloatPointDecoder {
return &FloatPointDecoder{r: r} return &FloatPointDecoder{r: r, ctx: ctx}
} }
// Stats returns iterator stats embedded within the stream. // Stats returns iterator stats embedded within the stream.
@ -218,6 +220,15 @@ func (dec *FloatPointDecoder) DecodeFloatPoint(p *FloatPoint) error {
continue continue
} }
if len(pb.Trace) > 0 {
var err error
err = decodeIteratorTrace(dec.ctx, pb.Trace)
if err != nil {
return err
}
continue
}
// Decode into point object. // Decode into point object.
*p = *decodeFloatPoint(&pb) *p = *decodeFloatPoint(&pb)
@ -392,11 +403,12 @@ func (enc *IntegerPointEncoder) EncodeIntegerPoint(p *IntegerPoint) error {
type IntegerPointDecoder struct { type IntegerPointDecoder struct {
r io.Reader r io.Reader
stats IteratorStats stats IteratorStats
ctx context.Context
} }
// NewIntegerPointDecoder returns a new instance of IntegerPointDecoder that reads from r. // NewIntegerPointDecoder returns a new instance of IntegerPointDecoder that reads from r.
func NewIntegerPointDecoder(r io.Reader) *IntegerPointDecoder { func NewIntegerPointDecoder(ctx context.Context, r io.Reader) *IntegerPointDecoder {
return &IntegerPointDecoder{r: r} return &IntegerPointDecoder{r: r, ctx: ctx}
} }
// Stats returns iterator stats embedded within the stream. // Stats returns iterator stats embedded within the stream.
@ -429,6 +441,15 @@ func (dec *IntegerPointDecoder) DecodeIntegerPoint(p *IntegerPoint) error {
continue continue
} }
if len(pb.Trace) > 0 {
var err error
err = decodeIteratorTrace(dec.ctx, pb.Trace)
if err != nil {
return err
}
continue
}
// Decode into point object. // Decode into point object.
*p = *decodeIntegerPoint(&pb) *p = *decodeIntegerPoint(&pb)
@ -601,11 +622,12 @@ func (enc *UnsignedPointEncoder) EncodeUnsignedPoint(p *UnsignedPoint) error {
type UnsignedPointDecoder struct { type UnsignedPointDecoder struct {
r io.Reader r io.Reader
stats IteratorStats stats IteratorStats
ctx context.Context
} }
// NewUnsignedPointDecoder returns a new instance of UnsignedPointDecoder that reads from r. // NewUnsignedPointDecoder returns a new instance of UnsignedPointDecoder that reads from r.
func NewUnsignedPointDecoder(r io.Reader) *UnsignedPointDecoder { func NewUnsignedPointDecoder(ctx context.Context, r io.Reader) *UnsignedPointDecoder {
return &UnsignedPointDecoder{r: r} return &UnsignedPointDecoder{r: r, ctx: ctx}
} }
// Stats returns iterator stats embedded within the stream. // Stats returns iterator stats embedded within the stream.
@ -638,6 +660,15 @@ func (dec *UnsignedPointDecoder) DecodeUnsignedPoint(p *UnsignedPoint) error {
continue continue
} }
if len(pb.Trace) > 0 {
var err error
err = decodeIteratorTrace(dec.ctx, pb.Trace)
if err != nil {
return err
}
continue
}
// Decode into point object. // Decode into point object.
*p = *decodeUnsignedPoint(&pb) *p = *decodeUnsignedPoint(&pb)
@ -812,11 +843,12 @@ func (enc *StringPointEncoder) EncodeStringPoint(p *StringPoint) error {
type StringPointDecoder struct { type StringPointDecoder struct {
r io.Reader r io.Reader
stats IteratorStats stats IteratorStats
ctx context.Context
} }
// NewStringPointDecoder returns a new instance of StringPointDecoder that reads from r. // NewStringPointDecoder returns a new instance of StringPointDecoder that reads from r.
func NewStringPointDecoder(r io.Reader) *StringPointDecoder { func NewStringPointDecoder(ctx context.Context, r io.Reader) *StringPointDecoder {
return &StringPointDecoder{r: r} return &StringPointDecoder{r: r, ctx: ctx}
} }
// Stats returns iterator stats embedded within the stream. // Stats returns iterator stats embedded within the stream.
@ -849,6 +881,15 @@ func (dec *StringPointDecoder) DecodeStringPoint(p *StringPoint) error {
continue continue
} }
if len(pb.Trace) > 0 {
var err error
err = decodeIteratorTrace(dec.ctx, pb.Trace)
if err != nil {
return err
}
continue
}
// Decode into point object. // Decode into point object.
*p = *decodeStringPoint(&pb) *p = *decodeStringPoint(&pb)
@ -1023,11 +1064,12 @@ func (enc *BooleanPointEncoder) EncodeBooleanPoint(p *BooleanPoint) error {
type BooleanPointDecoder struct { type BooleanPointDecoder struct {
r io.Reader r io.Reader
stats IteratorStats stats IteratorStats
ctx context.Context
} }
// NewBooleanPointDecoder returns a new instance of BooleanPointDecoder that reads from r. // NewBooleanPointDecoder returns a new instance of BooleanPointDecoder that reads from r.
func NewBooleanPointDecoder(r io.Reader) *BooleanPointDecoder { func NewBooleanPointDecoder(ctx context.Context, r io.Reader) *BooleanPointDecoder {
return &BooleanPointDecoder{r: r} return &BooleanPointDecoder{r: r, ctx: ctx}
} }
// Stats returns iterator stats embedded within the stream. // Stats returns iterator stats embedded within the stream.
@ -1060,6 +1102,15 @@ func (dec *BooleanPointDecoder) DecodeBooleanPoint(p *BooleanPoint) error {
continue continue
} }
if len(pb.Trace) > 0 {
var err error
err = decodeIteratorTrace(dec.ctx, pb.Trace)
if err != nil {
return err
}
continue
}
// Decode into point object. // Decode into point object.
*p = *decodeBooleanPoint(&pb) *p = *decodeBooleanPoint(&pb)

View File

@ -1,6 +1,7 @@
package query package query
import ( import (
"context"
"encoding/binary" "encoding/binary"
"io" "io"
@ -188,11 +189,12 @@ func (enc *{{.Name}}PointEncoder) Encode{{.Name}}Point(p *{{.Name}}Point) error
type {{.Name}}PointDecoder struct { type {{.Name}}PointDecoder struct {
r io.Reader r io.Reader
stats IteratorStats stats IteratorStats
ctx context.Context
} }
// New{{.Name}}PointDecoder returns a new instance of {{.Name}}PointDecoder that reads from r. // New{{.Name}}PointDecoder returns a new instance of {{.Name}}PointDecoder that reads from r.
func New{{.Name}}PointDecoder(r io.Reader) *{{.Name}}PointDecoder { func New{{.Name}}PointDecoder(ctx context.Context, r io.Reader) *{{.Name}}PointDecoder {
return &{{.Name}}PointDecoder{r: r} return &{{.Name}}PointDecoder{r: r, ctx: ctx}
} }
// Stats returns iterator stats embedded within the stream. // Stats returns iterator stats embedded within the stream.
@ -225,6 +227,15 @@ func (dec *{{.Name}}PointDecoder) Decode{{.Name}}Point(p *{{.Name}}Point) error
continue continue
} }
if len(pb.Trace) > 0 {
var err error
err = decodeIteratorTrace(dec.ctx, pb.Trace)
if err != nil {
return err
}
continue
}
// Decode into point object. // Decode into point object.
*p = *decode{{.Name}}Point(&pb) *p = *decode{{.Name}}Point(&pb)

View File

@ -1,6 +1,7 @@
package query package query
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -8,6 +9,7 @@ import (
"sort" "sort"
"github.com/influxdata/influxdb/influxql" "github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/pkg/tracing"
) )
// SelectOptions are options that customize the select call. // SelectOptions are options that customize the select call.
@ -54,7 +56,7 @@ type ShardGroup interface {
// Select is a prepared statement that is ready to be executed. // Select is a prepared statement that is ready to be executed.
type PreparedStatement interface { type PreparedStatement interface {
// Select creates the Iterators that will be used to read the query. // Select creates the Iterators that will be used to read the query.
Select() ([]Iterator, []string, error) Select(ctx context.Context) ([]Iterator, []string, error)
// Explain outputs the explain plan for this statement. // Explain outputs the explain plan for this statement.
Explain() (string, error) Explain() (string, error)
@ -77,14 +79,14 @@ func Prepare(stmt *influxql.SelectStatement, shardMapper ShardMapper, opt Select
// Select compiles, prepares, and then initiates execution of the query using the // Select compiles, prepares, and then initiates execution of the query using the
// default compile options. // default compile options.
func Select(stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) ([]Iterator, []string, error) { func Select(ctx context.Context, stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) ([]Iterator, []string, error) {
s, err := Prepare(stmt, shardMapper, opt) s, err := Prepare(stmt, shardMapper, opt)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
// Must be deferred so it runs after Select. // Must be deferred so it runs after Select.
defer s.Close() defer s.Close()
return s.Select() return s.Select(ctx)
} }
type preparedStatement struct { type preparedStatement struct {
@ -97,8 +99,8 @@ type preparedStatement struct {
columns []string columns []string
} }
func (p *preparedStatement) Select() ([]Iterator, []string, error) { func (p *preparedStatement) Select(ctx context.Context) ([]Iterator, []string, error) {
itrs, err := buildIterators(p.stmt, p.ic, p.opt) itrs, err := buildIterators(ctx, p.stmt, p.ic, p.opt)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -109,7 +111,8 @@ func (p *preparedStatement) Close() error {
return p.ic.Close() return p.ic.Close()
} }
func buildIterators(stmt *influxql.SelectStatement, ic IteratorCreator, opt IteratorOptions) ([]Iterator, error) { func buildIterators(ctx context.Context, stmt *influxql.SelectStatement, ic IteratorCreator, opt IteratorOptions) ([]Iterator, error) {
span := tracing.SpanFromContext(ctx)
// Retrieve refs for each call and var ref. // Retrieve refs for each call and var ref.
info := newSelectInfo(stmt) info := newSelectInfo(stmt)
if len(info.calls) > 1 && len(info.refs) > 0 { if len(info.calls) > 1 && len(info.refs) > 0 {
@ -125,7 +128,22 @@ func buildIterators(stmt *influxql.SelectStatement, ic IteratorCreator, opt Iter
// If there are multiple auxilary fields and no calls then construct an aux iterator. // If there are multiple auxilary fields and no calls then construct an aux iterator.
if len(info.calls) == 0 && len(info.refs) > 0 { if len(info.calls) == 0 && len(info.refs) > 0 {
return buildAuxIterators(stmt.Fields, ic, stmt.Sources, opt) if span != nil {
span = span.StartSpan("auxiliary_iterators")
defer span.Finish()
span.SetLabels("statement", stmt.String())
ctx = tracing.NewContextWithSpan(ctx, span)
}
return buildAuxIterators(ctx, stmt.Fields, ic, stmt.Sources, opt)
}
if span != nil {
span = span.StartSpan("field_iterators")
defer span.Finish()
span.SetLabels("statement", stmt.String())
ctx = tracing.NewContextWithSpan(ctx, span)
} }
// Include auxiliary fields from top() and bottom() when not writing the results. // Include auxiliary fields from top() and bottom() when not writing the results.
@ -167,18 +185,18 @@ func buildIterators(stmt *influxql.SelectStatement, ic IteratorCreator, opt Iter
} }
} }
return buildFieldIterators(fields, ic, stmt.Sources, opt, selector, stmt.Target != nil) return buildFieldIterators(ctx, fields, ic, stmt.Sources, opt, selector, stmt.Target != nil)
} }
// buildAuxIterators creates a set of iterators from a single combined auxiliary iterator. // buildAuxIterators creates a set of iterators from a single combined auxiliary iterator.
func buildAuxIterators(fields influxql.Fields, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions) ([]Iterator, error) { func buildAuxIterators(ctx context.Context, fields influxql.Fields, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions) ([]Iterator, error) {
// Create the auxiliary iterators for each source. // Create the auxiliary iterators for each source.
inputs := make([]Iterator, 0, len(sources)) inputs := make([]Iterator, 0, len(sources))
if err := func() error { if err := func() error {
for _, source := range sources { for _, source := range sources {
switch source := source.(type) { switch source := source.(type) {
case *influxql.Measurement: case *influxql.Measurement:
input, err := ic.CreateIterator(source, opt) input, err := ic.CreateIterator(ctx, source, opt)
if err != nil { if err != nil {
return err return err
} }
@ -189,7 +207,7 @@ func buildAuxIterators(fields influxql.Fields, ic IteratorCreator, sources influ
stmt: source.Statement, stmt: source.Statement,
} }
input, err := b.buildAuxIterator(opt) input, err := b.buildAuxIterator(ctx, opt)
if err != nil { if err != nil {
return err return err
} }
@ -304,9 +322,10 @@ func buildAuxIterator(expr influxql.Expr, aitr AuxIterator, opt IteratorOptions)
} }
// buildFieldIterators creates an iterator for each field expression. // buildFieldIterators creates an iterator for each field expression.
func buildFieldIterators(fields influxql.Fields, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) ([]Iterator, error) { func buildFieldIterators(ctx context.Context, fields influxql.Fields, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) ([]Iterator, error) {
// Create iterators from fields against the iterator creator. // Create iterators from fields against the iterator creator.
itrs := make([]Iterator, len(fields)) itrs := make([]Iterator, len(fields))
span := tracing.SpanFromContext(ctx)
if err := func() error { if err := func() error {
hasAuxFields := false hasAuxFields := false
@ -321,8 +340,22 @@ func buildFieldIterators(fields influxql.Fields, ic IteratorCreator, sources inf
continue continue
} }
var localSpan *tracing.Span
localContext := ctx
if span != nil {
localSpan = span.StartSpan("expression")
localSpan.SetLabels("expr", f.Expr.String())
localContext = tracing.NewContextWithSpan(ctx, localSpan)
}
expr := influxql.Reduce(f.Expr, nil) expr := influxql.Reduce(f.Expr, nil)
itr, err := buildExprIterator(expr, ic, sources, opt, selector, writeMode) itr, err := buildExprIterator(localContext, expr, ic, sources, opt, selector, writeMode)
if localSpan != nil {
localSpan.Finish()
}
if err != nil { if err != nil {
return err return err
} else if itr == nil { } else if itr == nil {
@ -371,7 +404,7 @@ func buildFieldIterators(fields influxql.Fields, ic IteratorCreator, sources inf
} }
// buildExprIterator creates an iterator for an expression. // buildExprIterator creates an iterator for an expression.
func buildExprIterator(expr influxql.Expr, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) (Iterator, error) { func buildExprIterator(ctx context.Context, expr influxql.Expr, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) (Iterator, error) {
opt.Expr = expr opt.Expr = expr
b := exprIteratorBuilder{ b := exprIteratorBuilder{
ic: ic, ic: ic,
@ -383,13 +416,13 @@ func buildExprIterator(expr influxql.Expr, ic IteratorCreator, sources influxql.
switch expr := expr.(type) { switch expr := expr.(type) {
case *influxql.VarRef: case *influxql.VarRef:
return b.buildVarRefIterator(expr) return b.buildVarRefIterator(ctx, expr)
case *influxql.Call: case *influxql.Call:
return b.buildCallIterator(expr) return b.buildCallIterator(ctx, expr)
case *influxql.BinaryExpr: case *influxql.BinaryExpr:
return b.buildBinaryExprIterator(expr) return b.buildBinaryExprIterator(ctx, expr)
case *influxql.ParenExpr: case *influxql.ParenExpr:
return buildExprIterator(expr.Expr, ic, sources, opt, selector, writeMode) return buildExprIterator(ctx, expr.Expr, ic, sources, opt, selector, writeMode)
case *influxql.NilLiteral: case *influxql.NilLiteral:
return &nilFloatIterator{}, nil return &nilFloatIterator{}, nil
default: default:
@ -405,13 +438,13 @@ type exprIteratorBuilder struct {
writeMode bool writeMode bool
} }
func (b *exprIteratorBuilder) buildVarRefIterator(expr *influxql.VarRef) (Iterator, error) { func (b *exprIteratorBuilder) buildVarRefIterator(ctx context.Context, expr *influxql.VarRef) (Iterator, error) {
inputs := make([]Iterator, 0, len(b.sources)) inputs := make([]Iterator, 0, len(b.sources))
if err := func() error { if err := func() error {
for _, source := range b.sources { for _, source := range b.sources {
switch source := source.(type) { switch source := source.(type) {
case *influxql.Measurement: case *influxql.Measurement:
input, err := b.ic.CreateIterator(source, b.opt) input, err := b.ic.CreateIterator(ctx, source, b.opt)
if err != nil { if err != nil {
return err return err
} }
@ -422,7 +455,7 @@ func (b *exprIteratorBuilder) buildVarRefIterator(expr *influxql.VarRef) (Iterat
stmt: source.Statement, stmt: source.Statement,
} }
input, err := subquery.buildVarRefIterator(expr, b.opt) input, err := subquery.buildVarRefIterator(ctx, expr, b.opt)
if err != nil { if err != nil {
return err return err
} }
@ -448,7 +481,7 @@ func (b *exprIteratorBuilder) buildVarRefIterator(expr *influxql.VarRef) (Iterat
return itr, nil return itr, nil
} }
func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator, error) { func (b *exprIteratorBuilder) buildCallIterator(ctx context.Context, expr *influxql.Call) (Iterator, error) {
// TODO(jsternberg): Refactor this. This section needs to die in a fire. // TODO(jsternberg): Refactor this. This section needs to die in a fire.
opt := b.opt opt := b.opt
// Eliminate limits and offsets if they were previously set. These are handled by the caller. // Eliminate limits and offsets if they were previously set. These are handled by the caller.
@ -456,7 +489,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
switch expr.Name { switch expr.Name {
case "distinct": case "distinct":
opt.Ordered = true opt.Ordered = true
input, err := buildExprIterator(expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, b.selector, false) input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, b.selector, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -467,7 +500,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
return NewIntervalIterator(input, opt), nil return NewIntervalIterator(input, opt), nil
case "sample": case "sample":
opt.Ordered = true opt.Ordered = true
input, err := buildExprIterator(expr.Args[0], b.ic, b.sources, opt, b.selector, false) input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -476,7 +509,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
return newSampleIterator(input, opt, int(size.Val)) return newSampleIterator(input, opt, int(size.Val))
case "holt_winters", "holt_winters_with_fit": case "holt_winters", "holt_winters_with_fit":
opt.Ordered = true opt.Ordered = true
input, err := buildExprIterator(expr.Args[0], b.ic, b.sources, opt, b.selector, false) input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -502,7 +535,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
} }
opt.Ordered = true opt.Ordered = true
input, err := buildExprIterator(expr.Args[0], b.ic, b.sources, opt, b.selector, false) input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -532,14 +565,14 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
panic(fmt.Sprintf("invalid series aggregate function: %s", expr.Name)) panic(fmt.Sprintf("invalid series aggregate function: %s", expr.Name))
case "cumulative_sum": case "cumulative_sum":
opt.Ordered = true opt.Ordered = true
input, err := buildExprIterator(expr.Args[0], b.ic, b.sources, opt, b.selector, false) input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newCumulativeSumIterator(input, opt) return newCumulativeSumIterator(input, opt)
case "integral": case "integral":
opt.Ordered = true opt.Ordered = true
input, err := buildExprIterator(expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -576,7 +609,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
builder.selector = true builder.selector = true
builder.writeMode = false builder.writeMode = false
i, err := builder.callIterator(call, callOpt) i, err := builder.callIterator(ctx, call, callOpt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -589,7 +622,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
builder.writeMode = false builder.writeMode = false
ref := expr.Args[0].(*influxql.VarRef) ref := expr.Args[0].(*influxql.VarRef)
i, err := builder.buildVarRefIterator(ref) i, err := builder.buildVarRefIterator(ctx, ref)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -629,7 +662,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
builder.selector = true builder.selector = true
builder.writeMode = false builder.writeMode = false
i, err := builder.callIterator(call, callOpt) i, err := builder.callIterator(ctx, call, callOpt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -642,7 +675,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
builder.writeMode = false builder.writeMode = false
ref := expr.Args[0].(*influxql.VarRef) ref := expr.Args[0].(*influxql.VarRef)
i, err := builder.buildVarRefIterator(ref) i, err := builder.buildVarRefIterator(ctx, ref)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -659,7 +692,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
switch arg0 := expr.Args[0].(type) { switch arg0 := expr.Args[0].(type) {
case *influxql.Call: case *influxql.Call:
if arg0.Name == "distinct" { if arg0.Name == "distinct" {
input, err := buildExprIterator(arg0, b.ic, b.sources, opt, b.selector, false) input, err := buildExprIterator(ctx, arg0, b.ic, b.sources, opt, b.selector, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -668,36 +701,36 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
} }
fallthrough fallthrough
case "min", "max", "sum", "first", "last", "mean": case "min", "max", "sum", "first", "last", "mean":
return b.callIterator(expr, opt) return b.callIterator(ctx, expr, opt)
case "median": case "median":
opt.Ordered = true opt.Ordered = true
input, err := buildExprIterator(expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newMedianIterator(input, opt) return newMedianIterator(input, opt)
case "mode": case "mode":
input, err := buildExprIterator(expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return NewModeIterator(input, opt) return NewModeIterator(input, opt)
case "stddev": case "stddev":
input, err := buildExprIterator(expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newStddevIterator(input, opt) return newStddevIterator(input, opt)
case "spread": case "spread":
// OPTIMIZE(benbjohnson): convert to map/reduce // OPTIMIZE(benbjohnson): convert to map/reduce
input, err := buildExprIterator(expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newSpreadIterator(input, opt) return newSpreadIterator(input, opt)
case "percentile": case "percentile":
opt.Ordered = true opt.Ordered = true
input, err := buildExprIterator(expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -730,7 +763,7 @@ func (b *exprIteratorBuilder) buildCallIterator(expr *influxql.Call) (Iterator,
return itr, nil return itr, nil
} }
func (b *exprIteratorBuilder) buildBinaryExprIterator(expr *influxql.BinaryExpr) (Iterator, error) { func (b *exprIteratorBuilder) buildBinaryExprIterator(ctx context.Context, expr *influxql.BinaryExpr) (Iterator, error) {
if rhs, ok := expr.RHS.(influxql.Literal); ok { if rhs, ok := expr.RHS.(influxql.Literal); ok {
// The right hand side is a literal. It is more common to have the RHS be a literal, // The right hand side is a literal. It is more common to have the RHS be a literal,
// so we check that one first and have this be the happy path. // so we check that one first and have this be the happy path.
@ -739,24 +772,24 @@ func (b *exprIteratorBuilder) buildBinaryExprIterator(expr *influxql.BinaryExpr)
return nil, fmt.Errorf("unable to construct an iterator from two literals: LHS: %T, RHS: %T", lhs, rhs) return nil, fmt.Errorf("unable to construct an iterator from two literals: LHS: %T, RHS: %T", lhs, rhs)
} }
lhs, err := buildExprIterator(expr.LHS, b.ic, b.sources, b.opt, b.selector, false) lhs, err := buildExprIterator(ctx, expr.LHS, b.ic, b.sources, b.opt, b.selector, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return buildRHSTransformIterator(lhs, rhs, expr.Op, b.opt) return buildRHSTransformIterator(lhs, rhs, expr.Op, b.opt)
} else if lhs, ok := expr.LHS.(influxql.Literal); ok { } else if lhs, ok := expr.LHS.(influxql.Literal); ok {
rhs, err := buildExprIterator(expr.RHS, b.ic, b.sources, b.opt, b.selector, false) rhs, err := buildExprIterator(ctx, expr.RHS, b.ic, b.sources, b.opt, b.selector, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return buildLHSTransformIterator(lhs, rhs, expr.Op, b.opt) return buildLHSTransformIterator(lhs, rhs, expr.Op, b.opt)
} else { } else {
// We have two iterators. Combine them into a single iterator. // We have two iterators. Combine them into a single iterator.
lhs, err := buildExprIterator(expr.LHS, b.ic, b.sources, b.opt, false, false) lhs, err := buildExprIterator(ctx, expr.LHS, b.ic, b.sources, b.opt, false, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rhs, err := buildExprIterator(expr.RHS, b.ic, b.sources, b.opt, false, false) rhs, err := buildExprIterator(ctx, expr.RHS, b.ic, b.sources, b.opt, false, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -764,13 +797,13 @@ func (b *exprIteratorBuilder) buildBinaryExprIterator(expr *influxql.BinaryExpr)
} }
} }
func (b *exprIteratorBuilder) callIterator(expr *influxql.Call, opt IteratorOptions) (Iterator, error) { func (b *exprIteratorBuilder) callIterator(ctx context.Context, expr *influxql.Call, opt IteratorOptions) (Iterator, error) {
inputs := make([]Iterator, 0, len(b.sources)) inputs := make([]Iterator, 0, len(b.sources))
if err := func() error { if err := func() error {
for _, source := range b.sources { for _, source := range b.sources {
switch source := source.(type) { switch source := source.(type) {
case *influxql.Measurement: case *influxql.Measurement:
input, err := b.ic.CreateIterator(source, opt) input, err := b.ic.CreateIterator(ctx, source, opt)
if err != nil { if err != nil {
return err return err
} }
@ -779,7 +812,7 @@ func (b *exprIteratorBuilder) callIterator(expr *influxql.Call, opt IteratorOpti
// Identify the name of the field we are using. // Identify the name of the field we are using.
arg0 := expr.Args[0].(*influxql.VarRef) arg0 := expr.Args[0].(*influxql.VarRef)
input, err := buildExprIterator(arg0, b.ic, []influxql.Source{source}, opt, b.selector, false) input, err := buildExprIterator(ctx, arg0, b.ic, []influxql.Source{source}, opt, b.selector, false)
if err != nil { if err != nil {
return err return err
} }

View File

@ -1,6 +1,7 @@
package query_test package query_test
import ( import (
"context"
"fmt" "fmt"
"math/rand" "math/rand"
"reflect" "reflect"
@ -2765,7 +2766,7 @@ func TestSelect(t *testing.T) {
"value": tt.typ, "value": tt.typ,
}, },
Dimensions: []string{"host", "region"}, Dimensions: []string{"host", "region"},
CreateIteratorFn: func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
if m.Name != "cpu" { if m.Name != "cpu" {
t.Fatalf("unexpected source: %s", m.Name) t.Fatalf("unexpected source: %s", m.Name)
} }
@ -2789,7 +2790,7 @@ func TestSelect(t *testing.T) {
}, },
} }
itrs, _, err := query.Select(MustParseSelectStatement(tt.q), &shardMapper, query.SelectOptions{}) itrs, _, err := query.Select(context.Background(), MustParseSelectStatement(tt.q), &shardMapper, query.SelectOptions{})
if err != nil { if err != nil {
if tt.err == "" { if tt.err == "" {
t.Fatal(err) t.Fatal(err)
@ -2819,7 +2820,7 @@ func TestSelect_Raw(t *testing.T) {
"s": influxql.String, "s": influxql.String,
"b": influxql.Boolean, "b": influxql.Boolean,
}, },
CreateIteratorFn: func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
if m.Name != "cpu" { if m.Name != "cpu" {
t.Fatalf("unexpected source: %s", m.Name) t.Fatalf("unexpected source: %s", m.Name)
} }
@ -2846,7 +2847,7 @@ func TestSelect_Raw(t *testing.T) {
} }
stmt := MustParseSelectStatement(`SELECT f, i, u, s, b FROM cpu`) stmt := MustParseSelectStatement(`SELECT f, i, u, s, b FROM cpu`)
itrs, _, err := query.Select(stmt, &shardMapper, query.SelectOptions{}) itrs, _, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{})
if err != nil { if err != nil {
t.Errorf("parse error: %s", err) t.Errorf("parse error: %s", err)
} else if a, err := Iterators(itrs).ReadAll(); err != nil { } else if a, err := Iterators(itrs).ReadAll(); err != nil {
@ -2888,7 +2889,7 @@ func TestSelect_BinaryExpr(t *testing.T) {
"i": influxql.Integer, "i": influxql.Integer,
"u": influxql.Unsigned, "u": influxql.Unsigned,
}, },
CreateIteratorFn: func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
if m.Name != "cpu" { if m.Name != "cpu" {
t.Fatalf("unexpected source: %s", m.Name) t.Fatalf("unexpected source: %s", m.Name)
} }
@ -3753,7 +3754,7 @@ func TestSelect_BinaryExpr(t *testing.T) {
} { } {
t.Run(test.Name, func(t *testing.T) { t.Run(test.Name, func(t *testing.T) {
stmt := MustParseSelectStatement(test.Statement) stmt := MustParseSelectStatement(test.Statement)
itrs, _, err := query.Select(stmt, &shardMapper, query.SelectOptions{}) itrs, _, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{})
if err != nil { if err != nil {
if have, want := err.Error(), test.Err; want != "" { if have, want := err.Error(), test.Err; want != "" {
if have != want { if have != want {
@ -3782,7 +3783,7 @@ func TestSelect_BinaryExpr_Boolean(t *testing.T) {
"one": influxql.Boolean, "one": influxql.Boolean,
"two": influxql.Boolean, "two": influxql.Boolean,
}, },
CreateIteratorFn: func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
if m.Name != "cpu" { if m.Name != "cpu" {
t.Fatalf("unexpected source: %s", m.Name) t.Fatalf("unexpected source: %s", m.Name)
} }
@ -3838,7 +3839,7 @@ func TestSelect_BinaryExpr_Boolean(t *testing.T) {
} { } {
t.Run(test.Name, func(t *testing.T) { t.Run(test.Name, func(t *testing.T) {
stmt := MustParseSelectStatement(test.Statement) stmt := MustParseSelectStatement(test.Statement)
itrs, _, err := query.Select(stmt, &shardMapper, query.SelectOptions{}) itrs, _, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{})
if err != nil { if err != nil {
t.Errorf("%s: parse error: %s", test.Name, err) t.Errorf("%s: parse error: %s", test.Name, err)
} else if a, err := Iterators(itrs).ReadAll(); err != nil { } else if a, err := Iterators(itrs).ReadAll(); err != nil {
@ -3861,7 +3862,7 @@ func TestSelect_BinaryExpr_NilValues(t *testing.T) {
"total": influxql.Float, "total": influxql.Float,
"value": influxql.Float, "value": influxql.Float,
}, },
CreateIteratorFn: func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
if m.Name != "cpu" { if m.Name != "cpu" {
t.Fatalf("unexpected source: %s", m.Name) t.Fatalf("unexpected source: %s", m.Name)
} }
@ -3919,7 +3920,7 @@ func TestSelect_BinaryExpr_NilValues(t *testing.T) {
} { } {
t.Run(test.Name, func(t *testing.T) { t.Run(test.Name, func(t *testing.T) {
stmt := MustParseSelectStatement(test.Statement) stmt := MustParseSelectStatement(test.Statement)
itrs, _, err := query.Select(stmt, &shardMapper, query.SelectOptions{}) itrs, _, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{})
if err != nil { if err != nil {
t.Errorf("%s: parse error: %s", test.Name, err) t.Errorf("%s: parse error: %s", test.Name, err)
} else if a, err := Iterators(itrs).ReadAll(); err != nil { } else if a, err := Iterators(itrs).ReadAll(); err != nil {
@ -3941,13 +3942,13 @@ func (m *ShardMapper) MapShards(sources influxql.Sources, t influxql.TimeRange,
} }
type ShardGroup struct { type ShardGroup struct {
CreateIteratorFn func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) CreateIteratorFn func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error)
Fields map[string]influxql.DataType Fields map[string]influxql.DataType
Dimensions []string Dimensions []string
} }
func (sh *ShardGroup) CreateIterator(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { func (sh *ShardGroup) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
return sh.CreateIteratorFn(m, opt) return sh.CreateIteratorFn(ctx, m, opt)
} }
func (sh *ShardGroup) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { func (sh *ShardGroup) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) {
@ -3994,7 +3995,7 @@ func benchmarkSelect(b *testing.B, stmt *influxql.SelectStatement, shardMapper q
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
itrs, _, err := query.Select(stmt, shardMapper, query.SelectOptions{}) itrs, _, err := query.Select(context.Background(), stmt, shardMapper, query.SelectOptions{})
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -4010,7 +4011,7 @@ func NewRawBenchmarkIteratorCreator(pointN int) query.ShardMapper {
Fields: map[string]influxql.DataType{ Fields: map[string]influxql.DataType{
"fval": influxql.Float, "fval": influxql.Float,
}, },
CreateIteratorFn: func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
if opt.Expr != nil { if opt.Expr != nil {
panic("unexpected expression") panic("unexpected expression")
} }
@ -4049,7 +4050,7 @@ func benchmarkSelectDedupe(b *testing.B, seriesN, pointsPerSeries int) {
Fields: map[string]influxql.DataType{ Fields: map[string]influxql.DataType{
"sval": influxql.String, "sval": influxql.String,
}, },
CreateIteratorFn: func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
if opt.Expr != nil { if opt.Expr != nil {
panic("unexpected expression") panic("unexpected expression")
} }
@ -4083,7 +4084,7 @@ func benchmarkSelectTop(b *testing.B, seriesN, pointsPerSeries int) {
Fields: map[string]influxql.DataType{ Fields: map[string]influxql.DataType{
"sval": influxql.Float, "sval": influxql.Float,
}, },
CreateIteratorFn: func(m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
if m.Name != "cpu" { if m.Name != "cpu" {
b.Fatalf("unexpected source: %s", m.Name) b.Fatalf("unexpected source: %s", m.Name)
} }

View File

@ -1,6 +1,10 @@
package query package query
import "github.com/influxdata/influxdb/influxql" import (
"context"
"github.com/influxdata/influxdb/influxql"
)
type subqueryBuilder struct { type subqueryBuilder struct {
ic IteratorCreator ic IteratorCreator
@ -8,7 +12,7 @@ type subqueryBuilder struct {
} }
// buildAuxIterator constructs an auxiliary Iterator from a subquery. // buildAuxIterator constructs an auxiliary Iterator from a subquery.
func (b *subqueryBuilder) buildAuxIterator(opt IteratorOptions) (Iterator, error) { func (b *subqueryBuilder) buildAuxIterator(ctx context.Context, opt IteratorOptions) (Iterator, error) {
// Retrieve a list of fields needed for conditions. // Retrieve a list of fields needed for conditions.
auxFields := opt.Aux auxFields := opt.Aux
conds := influxql.ExprNames(opt.Condition) conds := influxql.ExprNames(opt.Condition)
@ -26,7 +30,7 @@ func (b *subqueryBuilder) buildAuxIterator(opt IteratorOptions) (Iterator, error
} }
subOpt.Aux = auxFields subOpt.Aux = auxFields
itrs, err := buildIterators(b.stmt, b.ic, subOpt) itrs, err := buildIterators(ctx, b.stmt, b.ic, subOpt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -85,7 +89,7 @@ func (b *subqueryBuilder) mapAuxField(name *influxql.VarRef) IteratorMap {
return nil return nil
} }
func (b *subqueryBuilder) buildVarRefIterator(expr *influxql.VarRef, opt IteratorOptions) (Iterator, error) { func (b *subqueryBuilder) buildVarRefIterator(ctx context.Context, expr *influxql.VarRef, opt IteratorOptions) (Iterator, error) {
// Look for the field or tag that is driving this query. // Look for the field or tag that is driving this query.
driver := b.mapAuxField(expr) driver := b.mapAuxField(expr)
if driver == nil { if driver == nil {
@ -116,7 +120,7 @@ func (b *subqueryBuilder) buildVarRefIterator(expr *influxql.VarRef, opt Iterato
} }
subOpt.Aux = auxFields subOpt.Aux = auxFields
itrs, err := buildIterators(b.stmt, b.ic, subOpt) itrs, err := buildIterators(ctx, b.stmt, b.ic, subOpt)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -1,6 +1,7 @@
package tsdb package tsdb
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -43,7 +44,7 @@ type Engine interface {
Restore(r io.Reader, basePath string) error Restore(r io.Reader, basePath string) error
Import(r io.Reader, basePath string) error Import(r io.Reader, basePath string) error
CreateIterator(measurement string, opt query.IteratorOptions) (query.Iterator, error) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error)
IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error)
WritePoints(points []models.Point) error WritePoints(points []models.Point) error

View File

@ -4,6 +4,7 @@ package tsm1 // import "github.com/influxdata/influxdb/tsdb/engine/tsm1"
import ( import (
"archive/tar" "archive/tar"
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -12,6 +13,7 @@ import (
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime" "runtime"
"strconv"
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -22,6 +24,8 @@ import (
"github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/bytesutil"
"github.com/influxdata/influxdb/pkg/estimator" "github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/limiter" "github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/pkg/metrics"
"github.com/influxdata/influxdb/pkg/tracing"
"github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb"
_ "github.com/influxdata/influxdb/tsdb/index" _ "github.com/influxdata/influxdb/tsdb/index"
@ -47,6 +51,14 @@ var (
keyFieldSeparatorBytes = []byte(keyFieldSeparator) keyFieldSeparatorBytes = []byte(keyFieldSeparator)
) )
var (
tsmGroup = metrics.MustRegisterGroup("tsm1")
numberOfRefCursorsCounter = metrics.MustRegisterCounter("cursors_ref", metrics.WithGroup(tsmGroup))
numberOfAuxCursorsCounter = metrics.MustRegisterCounter("cursors_aux", metrics.WithGroup(tsmGroup))
numberOfCondCursorsCounter = metrics.MustRegisterCounter("cursors_cond", metrics.WithGroup(tsmGroup))
planningTimer = metrics.MustRegisterTimer("planning_time", metrics.WithGroup(tsmGroup))
)
const ( const (
// keyFieldSeparator separates the series key from the field name in the composite key // keyFieldSeparator separates the series key from the field name in the composite key
// that identifies a specific field in series // that identifies a specific field in series
@ -1670,12 +1682,29 @@ func (e *Engine) cleanupTempTSMFiles() error {
} }
// KeyCursor returns a KeyCursor for the given key starting at time t. // KeyCursor returns a KeyCursor for the given key starting at time t.
func (e *Engine) KeyCursor(key []byte, t int64, ascending bool) *KeyCursor { func (e *Engine) KeyCursor(ctx context.Context, key []byte, t int64, ascending bool) *KeyCursor {
return e.FileStore.KeyCursor(key, t, ascending) return e.FileStore.KeyCursor(ctx, key, t, ascending)
} }
// CreateIterator returns an iterator for the measurement based on opt. // CreateIterator returns an iterator for the measurement based on opt.
func (e *Engine) CreateIterator(measurement string, opt query.IteratorOptions) (query.Iterator, error) { func (e *Engine) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) {
if span := tracing.SpanFromContext(ctx); span != nil {
labels := []string{"shard_id", strconv.Itoa(int(e.id)), "measurement", measurement}
if opt.Condition != nil {
labels = append(labels, "cond", opt.Condition.String())
}
span = span.StartSpan("create_iterator")
span.SetLabels(labels...)
ctx = tracing.NewContextWithSpan(ctx, span)
group := metrics.NewGroup(tsmGroup)
ctx = metrics.NewContextWithGroup(ctx, group)
start := time.Now()
defer group.GetTimer(planningTimer).UpdateSince(start)
}
if call, ok := opt.Expr.(*influxql.Call); ok { if call, ok := opt.Expr.(*influxql.Call); ok {
if opt.Interval.IsZero() { if opt.Interval.IsZero() {
if call.Name == "first" || call.Name == "last" { if call.Name == "first" || call.Name == "last" {
@ -1685,31 +1714,31 @@ func (e *Engine) CreateIterator(measurement string, opt query.IteratorOptions) (
refOpt.Ordered = true refOpt.Ordered = true
refOpt.Expr = call.Args[0] refOpt.Expr = call.Args[0]
itrs, err := e.createVarRefIterator(measurement, refOpt) itrs, err := e.createVarRefIterator(ctx, measurement, refOpt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newMergeFinalizerIterator(itrs, opt, e.logger) return newMergeFinalizerIterator(ctx, itrs, opt, e.logger)
} }
} }
inputs, err := e.createCallIterator(measurement, call, opt) inputs, err := e.createCallIterator(ctx, measurement, call, opt)
if err != nil { if err != nil {
return nil, err return nil, err
} else if len(inputs) == 0 { } else if len(inputs) == 0 {
return nil, nil return nil, nil
} }
return newMergeFinalizerIterator(inputs, opt, e.logger) return newMergeFinalizerIterator(ctx, inputs, opt, e.logger)
} }
itrs, err := e.createVarRefIterator(measurement, opt) itrs, err := e.createVarRefIterator(ctx, measurement, opt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newMergeFinalizerIterator(itrs, opt, e.logger) return newMergeFinalizerIterator(ctx, itrs, opt, e.logger)
} }
func (e *Engine) createCallIterator(measurement string, call *influxql.Call, opt query.IteratorOptions) ([]query.Iterator, error) { func (e *Engine) createCallIterator(ctx context.Context, measurement string, call *influxql.Call, opt query.IteratorOptions) ([]query.Iterator, error) {
ref, _ := call.Args[0].(*influxql.VarRef) ref, _ := call.Args[0].(*influxql.VarRef)
if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil { if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
@ -1745,7 +1774,7 @@ func (e *Engine) createCallIterator(measurement string, call *influxql.Call, opt
default: default:
} }
inputs, err := e.createTagSetIterators(ref, measurement, t, opt) inputs, err := e.createTagSetIterators(ctx, ref, measurement, t, opt)
if err != nil { if err != nil {
return err return err
} else if len(inputs) == 0 { } else if len(inputs) == 0 {
@ -1779,7 +1808,7 @@ func (e *Engine) createCallIterator(measurement string, call *influxql.Call, opt
} }
// createVarRefIterator creates an iterator for a variable reference. // createVarRefIterator creates an iterator for a variable reference.
func (e *Engine) createVarRefIterator(measurement string, opt query.IteratorOptions) ([]query.Iterator, error) { func (e *Engine) createVarRefIterator(ctx context.Context, measurement string, opt query.IteratorOptions) ([]query.Iterator, error) {
ref, _ := opt.Expr.(*influxql.VarRef) ref, _ := opt.Expr.(*influxql.VarRef)
if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil { if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
@ -1807,7 +1836,7 @@ func (e *Engine) createVarRefIterator(measurement string, opt query.IteratorOpti
itrs := make([]query.Iterator, 0, len(tagSets)) itrs := make([]query.Iterator, 0, len(tagSets))
if err := func() error { if err := func() error {
for _, t := range tagSets { for _, t := range tagSets {
inputs, err := e.createTagSetIterators(ref, measurement, t, opt) inputs, err := e.createTagSetIterators(ctx, ref, measurement, t, opt)
if err != nil { if err != nil {
return err return err
} else if len(inputs) == 0 { } else if len(inputs) == 0 {
@ -1857,7 +1886,7 @@ func (e *Engine) createVarRefIterator(measurement string, opt query.IteratorOpti
} }
// createTagSetIterators creates a set of iterators for a tagset. // createTagSetIterators creates a set of iterators for a tagset.
func (e *Engine) createTagSetIterators(ref *influxql.VarRef, name string, t *query.TagSet, opt query.IteratorOptions) ([]query.Iterator, error) { func (e *Engine) createTagSetIterators(ctx context.Context, ref *influxql.VarRef, name string, t *query.TagSet, opt query.IteratorOptions) ([]query.Iterator, error) {
// Set parallelism by number of logical cpus. // Set parallelism by number of logical cpus.
parallelism := runtime.GOMAXPROCS(0) parallelism := runtime.GOMAXPROCS(0)
if parallelism > len(t.SeriesKeys) { if parallelism > len(t.SeriesKeys) {
@ -1892,7 +1921,7 @@ func (e *Engine) createTagSetIterators(ref *influxql.VarRef, name string, t *que
wg.Add(1) wg.Add(1)
go func(i int) { go func(i int) {
defer wg.Done() defer wg.Done()
groups[i].itrs, groups[i].err = e.createTagSetGroupIterators(ref, name, groups[i].keys, t, groups[i].filters, opt) groups[i].itrs, groups[i].err = e.createTagSetGroupIterators(ctx, ref, name, groups[i].keys, t, groups[i].filters, opt)
}(i) }(i)
} }
wg.Wait() wg.Wait()
@ -1923,7 +1952,7 @@ func (e *Engine) createTagSetIterators(ref *influxql.VarRef, name string, t *que
} }
// createTagSetGroupIterators creates a set of iterators for a subset of a tagset's series. // createTagSetGroupIterators creates a set of iterators for a subset of a tagset's series.
func (e *Engine) createTagSetGroupIterators(ref *influxql.VarRef, name string, seriesKeys []string, t *query.TagSet, filters []influxql.Expr, opt query.IteratorOptions) ([]query.Iterator, error) { func (e *Engine) createTagSetGroupIterators(ctx context.Context, ref *influxql.VarRef, name string, seriesKeys []string, t *query.TagSet, filters []influxql.Expr, opt query.IteratorOptions) ([]query.Iterator, error) {
itrs := make([]query.Iterator, 0, len(seriesKeys)) itrs := make([]query.Iterator, 0, len(seriesKeys))
for i, seriesKey := range seriesKeys { for i, seriesKey := range seriesKeys {
var conditionFields []influxql.VarRef var conditionFields []influxql.VarRef
@ -1932,7 +1961,7 @@ func (e *Engine) createTagSetGroupIterators(ref *influxql.VarRef, name string, s
conditionFields = influxql.ExprNames(filters[i]) conditionFields = influxql.ExprNames(filters[i])
} }
itr, err := e.createVarRefSeriesIterator(ref, name, seriesKey, t, filters[i], conditionFields, opt) itr, err := e.createVarRefSeriesIterator(ctx, ref, name, seriesKey, t, filters[i], conditionFields, opt)
if err != nil { if err != nil {
return itrs, err return itrs, err
} else if itr == nil { } else if itr == nil {
@ -1959,7 +1988,7 @@ func (e *Engine) createTagSetGroupIterators(ref *influxql.VarRef, name string, s
} }
// createVarRefSeriesIterator creates an iterator for a variable reference for a series. // createVarRefSeriesIterator creates an iterator for a variable reference for a series.
func (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, name string, seriesKey string, t *query.TagSet, filter influxql.Expr, conditionFields []influxql.VarRef, opt query.IteratorOptions) (query.Iterator, error) { func (e *Engine) createVarRefSeriesIterator(ctx context.Context, ref *influxql.VarRef, name string, seriesKey string, t *query.TagSet, filter influxql.Expr, conditionFields []influxql.VarRef, opt query.IteratorOptions) (query.Iterator, error) {
_, tfs := models.ParseKey([]byte(seriesKey)) _, tfs := models.ParseKey([]byte(seriesKey))
tags := query.NewTags(tfs.Map()) tags := query.NewTags(tfs.Map())
@ -1967,6 +1996,26 @@ func (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, name string, s
itrOpt := opt itrOpt := opt
itrOpt.Condition = filter itrOpt.Condition = filter
var curCounter, auxCounter, condCounter *metrics.Counter
if col := metrics.GroupFromContext(ctx); col != nil {
curCounter = col.GetCounter(numberOfRefCursorsCounter)
auxCounter = col.GetCounter(numberOfAuxCursorsCounter)
condCounter = col.GetCounter(numberOfCondCursorsCounter)
}
// Build main cursor.
var cur cursor
if ref != nil {
cur = e.buildCursor(ctx, name, seriesKey, tfs, ref, opt)
// If the field doesn't exist then don't build an iterator.
if cur == nil {
return nil, nil
}
if curCounter != nil {
curCounter.Add(1)
}
}
// Build auxilary cursors. // Build auxilary cursors.
// Tag values should be returned if the field doesn't exist. // Tag values should be returned if the field doesn't exist.
var aux []cursorAt var aux []cursorAt
@ -1975,8 +2024,11 @@ func (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, name string, s
for i, ref := range opt.Aux { for i, ref := range opt.Aux {
// Create cursor from field if a tag wasn't requested. // Create cursor from field if a tag wasn't requested.
if ref.Type != influxql.Tag { if ref.Type != influxql.Tag {
cur := e.buildCursor(name, seriesKey, tfs, &ref, opt) cur := e.buildCursor(ctx, name, seriesKey, tfs, &ref, opt)
if cur != nil { if cur != nil {
if auxCounter != nil {
auxCounter.Add(1)
}
aux[i] = newBufCursor(cur, opt.Ascending) aux[i] = newBufCursor(cur, opt.Ascending)
continue continue
} }
@ -2039,8 +2091,11 @@ func (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, name string, s
for i, ref := range conditionFields { for i, ref := range conditionFields {
// Create cursor from field if a tag wasn't requested. // Create cursor from field if a tag wasn't requested.
if ref.Type != influxql.Tag { if ref.Type != influxql.Tag {
cur := e.buildCursor(name, seriesKey, tfs, &ref, opt) cur := e.buildCursor(ctx, name, seriesKey, tfs, &ref, opt)
if cur != nil { if cur != nil {
if condCounter != nil {
condCounter.Add(1)
}
conds[i] = newBufCursor(cur, opt.Ascending) conds[i] = newBufCursor(cur, opt.Ascending)
continue continue
} }
@ -2088,16 +2143,6 @@ func (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, name string, s
return newFloatIterator(name, tags, itrOpt, nil, aux, conds, condNames), nil return newFloatIterator(name, tags, itrOpt, nil, aux, conds, condNames), nil
} }
// Build main cursor.
cur := e.buildCursor(name, seriesKey, tfs, ref, opt)
// If the field doesn't exist then don't build an iterator.
if cur == nil {
cursorsAt(aux).close()
cursorsAt(conds).close()
return nil, nil
}
// Remove name if requested. // Remove name if requested.
if opt.StripName { if opt.StripName {
name = "" name = ""
@ -2120,7 +2165,7 @@ func (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, name string, s
} }
// buildCursor creates an untyped cursor for a field. // buildCursor creates an untyped cursor for a field.
func (e *Engine) buildCursor(measurement, seriesKey string, tags models.Tags, ref *influxql.VarRef, opt query.IteratorOptions) cursor { func (e *Engine) buildCursor(ctx context.Context, measurement, seriesKey string, tags models.Tags, ref *influxql.VarRef, opt query.IteratorOptions) cursor {
// Check if this is a system field cursor. // Check if this is a system field cursor.
switch ref.Val { switch ref.Val {
case "_name": case "_name":
@ -2157,28 +2202,28 @@ func (e *Engine) buildCursor(measurement, seriesKey string, tags models.Tags, re
case influxql.Float: case influxql.Float:
switch f.Type { switch f.Type {
case influxql.Integer: case influxql.Integer:
cur := e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt) cur := e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &floatCastIntegerCursor{cursor: cur} return &floatCastIntegerCursor{cursor: cur}
case influxql.Unsigned: case influxql.Unsigned:
cur := e.buildUnsignedCursor(measurement, seriesKey, ref.Val, opt) cur := e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &floatCastUnsignedCursor{cursor: cur} return &floatCastUnsignedCursor{cursor: cur}
} }
case influxql.Integer: case influxql.Integer:
switch f.Type { switch f.Type {
case influxql.Float: case influxql.Float:
cur := e.buildFloatCursor(measurement, seriesKey, ref.Val, opt) cur := e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &integerCastFloatCursor{cursor: cur} return &integerCastFloatCursor{cursor: cur}
case influxql.Unsigned: case influxql.Unsigned:
cur := e.buildUnsignedCursor(measurement, seriesKey, ref.Val, opt) cur := e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &integerCastUnsignedCursor{cursor: cur} return &integerCastUnsignedCursor{cursor: cur}
} }
case influxql.Unsigned: case influxql.Unsigned:
switch f.Type { switch f.Type {
case influxql.Float: case influxql.Float:
cur := e.buildFloatCursor(measurement, seriesKey, ref.Val, opt) cur := e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &unsignedCastFloatCursor{cursor: cur} return &unsignedCastFloatCursor{cursor: cur}
case influxql.Integer: case influxql.Integer:
cur := e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt) cur := e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &unsignedCastIntegerCursor{cursor: cur} return &unsignedCastIntegerCursor{cursor: cur}
} }
} }
@ -2188,15 +2233,15 @@ func (e *Engine) buildCursor(measurement, seriesKey string, tags models.Tags, re
// Return appropriate cursor based on type. // Return appropriate cursor based on type.
switch f.Type { switch f.Type {
case influxql.Float: case influxql.Float:
return e.buildFloatCursor(measurement, seriesKey, ref.Val, opt) return e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt)
case influxql.Integer: case influxql.Integer:
return e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt) return e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt)
case influxql.Unsigned: case influxql.Unsigned:
return e.buildUnsignedCursor(measurement, seriesKey, ref.Val, opt) return e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt)
case influxql.String: case influxql.String:
return e.buildStringCursor(measurement, seriesKey, ref.Val, opt) return e.buildStringCursor(ctx, measurement, seriesKey, ref.Val, opt)
case influxql.Boolean: case influxql.Boolean:
return e.buildBooleanCursor(measurement, seriesKey, ref.Val, opt) return e.buildBooleanCursor(ctx, measurement, seriesKey, ref.Val, opt)
default: default:
panic("unreachable") panic("unreachable")
} }
@ -2225,42 +2270,42 @@ func matchTagValues(tags models.Tags, condition influxql.Expr) []string {
} }
// buildFloatCursor creates a cursor for a float field. // buildFloatCursor creates a cursor for a float field.
func (e *Engine) buildFloatCursor(measurement, seriesKey, field string, opt query.IteratorOptions) floatCursor { func (e *Engine) buildFloatCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) floatCursor {
key := SeriesFieldKeyBytes(seriesKey, field) key := SeriesFieldKeyBytes(seriesKey, field)
cacheValues := e.Cache.Values(key) cacheValues := e.Cache.Values(key)
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending) keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending)
return newFloatCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) return newFloatCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
} }
// buildIntegerCursor creates a cursor for an integer field. // buildIntegerCursor creates a cursor for an integer field.
func (e *Engine) buildIntegerCursor(measurement, seriesKey, field string, opt query.IteratorOptions) integerCursor { func (e *Engine) buildIntegerCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) integerCursor {
key := SeriesFieldKeyBytes(seriesKey, field) key := SeriesFieldKeyBytes(seriesKey, field)
cacheValues := e.Cache.Values(key) cacheValues := e.Cache.Values(key)
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending) keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending)
return newIntegerCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) return newIntegerCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
} }
// buildUnsignedCursor creates a cursor for an unsigned field. // buildUnsignedCursor creates a cursor for an unsigned field.
func (e *Engine) buildUnsignedCursor(measurement, seriesKey, field string, opt query.IteratorOptions) unsignedCursor { func (e *Engine) buildUnsignedCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) unsignedCursor {
key := SeriesFieldKeyBytes(seriesKey, field) key := SeriesFieldKeyBytes(seriesKey, field)
cacheValues := e.Cache.Values(key) cacheValues := e.Cache.Values(key)
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending) keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending)
return newUnsignedCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) return newUnsignedCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
} }
// buildStringCursor creates a cursor for a string field. // buildStringCursor creates a cursor for a string field.
func (e *Engine) buildStringCursor(measurement, seriesKey, field string, opt query.IteratorOptions) stringCursor { func (e *Engine) buildStringCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) stringCursor {
key := SeriesFieldKeyBytes(seriesKey, field) key := SeriesFieldKeyBytes(seriesKey, field)
cacheValues := e.Cache.Values(key) cacheValues := e.Cache.Values(key)
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending) keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending)
return newStringCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) return newStringCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
} }
// buildBooleanCursor creates a cursor for a boolean field. // buildBooleanCursor creates a cursor for a boolean field.
func (e *Engine) buildBooleanCursor(measurement, seriesKey, field string, opt query.IteratorOptions) booleanCursor { func (e *Engine) buildBooleanCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) booleanCursor {
key := SeriesFieldKeyBytes(seriesKey, field) key := SeriesFieldKeyBytes(seriesKey, field)
cacheValues := e.Cache.Values(key) cacheValues := e.Cache.Values(key)
keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending) keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending)
return newBooleanCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) return newBooleanCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
} }

View File

@ -3,6 +3,7 @@ package tsm1_test
import ( import (
"archive/tar" "archive/tar"
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -269,7 +270,7 @@ func TestEngine_CreateIterator_Cache_Ascending(t *testing.T) {
t.Fatalf("failed to write points: %s", err.Error()) t.Fatalf("failed to write points: %s", err.Error())
} }
itr, err := e.CreateIterator("cpu", query.IteratorOptions{ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Dimensions: []string{"host"}, Dimensions: []string{"host"},
StartTime: influxql.MinTime, StartTime: influxql.MinTime,
@ -321,7 +322,7 @@ func TestEngine_CreateIterator_Cache_Descending(t *testing.T) {
t.Fatalf("failed to write points: %s", err.Error()) t.Fatalf("failed to write points: %s", err.Error())
} }
itr, err := e.CreateIterator("cpu", query.IteratorOptions{ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Dimensions: []string{"host"}, Dimensions: []string{"host"},
StartTime: influxql.MinTime, StartTime: influxql.MinTime,
@ -374,7 +375,7 @@ func TestEngine_CreateIterator_TSM_Ascending(t *testing.T) {
} }
e.MustWriteSnapshot() e.MustWriteSnapshot()
itr, err := e.CreateIterator("cpu", query.IteratorOptions{ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Dimensions: []string{"host"}, Dimensions: []string{"host"},
StartTime: influxql.MinTime, StartTime: influxql.MinTime,
@ -427,7 +428,7 @@ func TestEngine_CreateIterator_TSM_Descending(t *testing.T) {
} }
e.MustWriteSnapshot() e.MustWriteSnapshot()
itr, err := e.CreateIterator("cpu", query.IteratorOptions{ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Dimensions: []string{"host"}, Dimensions: []string{"host"},
StartTime: influxql.MinTime, StartTime: influxql.MinTime,
@ -482,7 +483,7 @@ func TestEngine_CreateIterator_Aux(t *testing.T) {
t.Fatalf("failed to write points: %s", err.Error()) t.Fatalf("failed to write points: %s", err.Error())
} }
itr, err := e.CreateIterator("cpu", query.IteratorOptions{ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Aux: []influxql.VarRef{{Val: "F"}}, Aux: []influxql.VarRef{{Val: "F"}},
Dimensions: []string{"host"}, Dimensions: []string{"host"},
@ -545,7 +546,7 @@ func TestEngine_CreateIterator_Condition(t *testing.T) {
t.Fatalf("failed to write points: %s", err.Error()) t.Fatalf("failed to write points: %s", err.Error())
} }
itr, err := e.CreateIterator("cpu", query.IteratorOptions{ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Dimensions: []string{"host"}, Dimensions: []string{"host"},
Condition: influxql.MustParseExpr(`X = 10 OR Y > 150`), Condition: influxql.MustParseExpr(`X = 10 OR Y > 150`),
@ -874,7 +875,7 @@ func benchmarkIterator(b *testing.B, opt query.IteratorOptions, pointN int) {
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
itr, err := e.CreateIterator("cpu", opt) itr, err := e.CreateIterator(context.Background(), "cpu", opt)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }

View File

@ -20,6 +20,10 @@ func (c *KeyCursor) ReadFloatBlock(buf *[]FloatValue) ([]FloatValue, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(floatBlocksDecodedCounter).Add(1)
c.col.GetCounter(floatBlocksSizeCounter).Add(int64(first.entry.Size))
}
// Remove values we already read // Remove values we already read
values = FloatValues(values).Exclude(first.readMin, first.readMax) values = FloatValues(values).Exclude(first.readMin, first.readMax)
@ -88,6 +92,11 @@ func (c *KeyCursor) ReadFloatBlock(buf *[]FloatValue) ([]FloatValue, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(floatBlocksDecodedCounter).Add(1)
c.col.GetCounter(floatBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterFloatValues(tombstones, v) v = c.filterFloatValues(tombstones, v)
@ -147,6 +156,11 @@ func (c *KeyCursor) ReadFloatBlock(buf *[]FloatValue) ([]FloatValue, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(floatBlocksDecodedCounter).Add(1)
c.col.GetCounter(floatBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterFloatValues(tombstones, v) v = c.filterFloatValues(tombstones, v)
@ -183,6 +197,10 @@ func (c *KeyCursor) ReadIntegerBlock(buf *[]IntegerValue) ([]IntegerValue, error
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(integerBlocksDecodedCounter).Add(1)
c.col.GetCounter(integerBlocksSizeCounter).Add(int64(first.entry.Size))
}
// Remove values we already read // Remove values we already read
values = IntegerValues(values).Exclude(first.readMin, first.readMax) values = IntegerValues(values).Exclude(first.readMin, first.readMax)
@ -251,6 +269,11 @@ func (c *KeyCursor) ReadIntegerBlock(buf *[]IntegerValue) ([]IntegerValue, error
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(integerBlocksDecodedCounter).Add(1)
c.col.GetCounter(integerBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterIntegerValues(tombstones, v) v = c.filterIntegerValues(tombstones, v)
@ -310,6 +333,11 @@ func (c *KeyCursor) ReadIntegerBlock(buf *[]IntegerValue) ([]IntegerValue, error
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(integerBlocksDecodedCounter).Add(1)
c.col.GetCounter(integerBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterIntegerValues(tombstones, v) v = c.filterIntegerValues(tombstones, v)
@ -346,6 +374,10 @@ func (c *KeyCursor) ReadUnsignedBlock(buf *[]UnsignedValue) ([]UnsignedValue, er
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1)
c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(first.entry.Size))
}
// Remove values we already read // Remove values we already read
values = UnsignedValues(values).Exclude(first.readMin, first.readMax) values = UnsignedValues(values).Exclude(first.readMin, first.readMax)
@ -414,6 +446,11 @@ func (c *KeyCursor) ReadUnsignedBlock(buf *[]UnsignedValue) ([]UnsignedValue, er
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1)
c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterUnsignedValues(tombstones, v) v = c.filterUnsignedValues(tombstones, v)
@ -473,6 +510,11 @@ func (c *KeyCursor) ReadUnsignedBlock(buf *[]UnsignedValue) ([]UnsignedValue, er
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1)
c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterUnsignedValues(tombstones, v) v = c.filterUnsignedValues(tombstones, v)
@ -509,6 +551,10 @@ func (c *KeyCursor) ReadStringBlock(buf *[]StringValue) ([]StringValue, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(stringBlocksDecodedCounter).Add(1)
c.col.GetCounter(stringBlocksSizeCounter).Add(int64(first.entry.Size))
}
// Remove values we already read // Remove values we already read
values = StringValues(values).Exclude(first.readMin, first.readMax) values = StringValues(values).Exclude(first.readMin, first.readMax)
@ -577,6 +623,11 @@ func (c *KeyCursor) ReadStringBlock(buf *[]StringValue) ([]StringValue, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(stringBlocksDecodedCounter).Add(1)
c.col.GetCounter(stringBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterStringValues(tombstones, v) v = c.filterStringValues(tombstones, v)
@ -636,6 +687,11 @@ func (c *KeyCursor) ReadStringBlock(buf *[]StringValue) ([]StringValue, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(stringBlocksDecodedCounter).Add(1)
c.col.GetCounter(stringBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterStringValues(tombstones, v) v = c.filterStringValues(tombstones, v)
@ -672,6 +728,10 @@ func (c *KeyCursor) ReadBooleanBlock(buf *[]BooleanValue) ([]BooleanValue, error
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(booleanBlocksDecodedCounter).Add(1)
c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(first.entry.Size))
}
// Remove values we already read // Remove values we already read
values = BooleanValues(values).Exclude(first.readMin, first.readMax) values = BooleanValues(values).Exclude(first.readMin, first.readMax)
@ -740,6 +800,11 @@ func (c *KeyCursor) ReadBooleanBlock(buf *[]BooleanValue) ([]BooleanValue, error
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(booleanBlocksDecodedCounter).Add(1)
c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterBooleanValues(tombstones, v) v = c.filterBooleanValues(tombstones, v)
@ -799,6 +864,11 @@ func (c *KeyCursor) ReadBooleanBlock(buf *[]BooleanValue) ([]BooleanValue, error
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter(booleanBlocksDecodedCounter).Add(1)
c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filterBooleanValues(tombstones, v) v = c.filterBooleanValues(tombstones, v)

View File

@ -16,6 +16,10 @@ func (c *KeyCursor) Read{{.Name}}Block(buf *[]{{.Name}}Value) ([]{{.Name}}Value,
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter({{.name}}BlocksDecodedCounter).Add(1)
c.col.GetCounter({{.name}}BlocksSizeCounter).Add(int64(first.entry.Size))
}
// Remove values we already read // Remove values we already read
values = {{.Name}}Values(values).Exclude(first.readMin, first.readMax) values = {{.Name}}Values(values).Exclude(first.readMin, first.readMax)
@ -84,6 +88,11 @@ func (c *KeyCursor) Read{{.Name}}Block(buf *[]{{.Name}}Value) ([]{{.Name}}Value,
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter({{.name}}BlocksDecodedCounter).Add(1)
c.col.GetCounter({{.name}}BlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filter{{.Name}}Values(tombstones, v) v = c.filter{{.Name}}Values(tombstones, v)
@ -143,6 +152,11 @@ func (c *KeyCursor) Read{{.Name}}Block(buf *[]{{.Name}}Value) ([]{{.Name}}Value,
if err != nil { if err != nil {
return nil, err return nil, err
} }
if c.col != nil {
c.col.GetCounter({{.name}}BlocksDecodedCounter).Add(1)
c.col.GetCounter({{.name}}BlocksSizeCounter).Add(int64(cur.entry.Size))
}
// Remove any tombstoned values // Remove any tombstoned values
v = c.filter{{.Name}}Values(tombstones, v) v = c.filter{{.Name}}Values(tombstones, v)

View File

@ -2,6 +2,7 @@ package tsm1
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math" "math"
@ -15,6 +16,7 @@ import (
"time" "time"
"github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/metrics"
"github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/query"
"github.com/uber-go/zap" "github.com/uber-go/zap"
) )
@ -120,6 +122,19 @@ const (
statFileStoreCount = "numFiles" statFileStoreCount = "numFiles"
) )
var (
floatBlocksDecodedCounter = metrics.MustRegisterCounter("float_blocks_decoded", metrics.WithGroup(tsmGroup))
floatBlocksSizeCounter = metrics.MustRegisterCounter("float_blocks_size_bytes", metrics.WithGroup(tsmGroup))
integerBlocksDecodedCounter = metrics.MustRegisterCounter("integer_blocks_decoded", metrics.WithGroup(tsmGroup))
integerBlocksSizeCounter = metrics.MustRegisterCounter("integer_blocks_size_bytes", metrics.WithGroup(tsmGroup))
unsignedBlocksDecodedCounter = metrics.MustRegisterCounter("unsigned_blocks_decoded", metrics.WithGroup(tsmGroup))
unsignedBlocksSizeCounter = metrics.MustRegisterCounter("unsigned_blocks_size_bytes", metrics.WithGroup(tsmGroup))
stringBlocksDecodedCounter = metrics.MustRegisterCounter("string_blocks_decoded", metrics.WithGroup(tsmGroup))
stringBlocksSizeCounter = metrics.MustRegisterCounter("string_blocks_size_bytes", metrics.WithGroup(tsmGroup))
booleanBlocksDecodedCounter = metrics.MustRegisterCounter("boolean_blocks_decoded", metrics.WithGroup(tsmGroup))
booleanBlocksSizeCounter = metrics.MustRegisterCounter("boolean_blocks_size_bytes", metrics.WithGroup(tsmGroup))
)
// FileStore is an abstraction around multiple TSM files. // FileStore is an abstraction around multiple TSM files.
type FileStore struct { type FileStore struct {
mu sync.RWMutex mu sync.RWMutex
@ -509,10 +524,10 @@ func (f *FileStore) TSMReader(path string) *TSMReader {
} }
// KeyCursor returns a KeyCursor for key and t across the files in the FileStore. // KeyCursor returns a KeyCursor for key and t across the files in the FileStore.
func (f *FileStore) KeyCursor(key []byte, t int64, ascending bool) *KeyCursor { func (f *FileStore) KeyCursor(ctx context.Context, key []byte, t int64, ascending bool) *KeyCursor {
f.mu.RLock() f.mu.RLock()
defer f.mu.RUnlock() defer f.mu.RUnlock()
return newKeyCursor(f, key, t, ascending) return newKeyCursor(ctx, f, key, t, ascending)
} }
// Stats returns the stats of the underlying files, preferring the cached version if it is still valid. // Stats returns the stats of the underlying files, preferring the cached version if it is still valid.
@ -952,6 +967,9 @@ type KeyCursor struct {
current []*location current []*location
buf []Value buf []Value
ctx context.Context
col *metrics.Group
// pos is the index within seeks. Based on ascending, it will increment or // pos is the index within seeks. Based on ascending, it will increment or
// decrement through the size of seeks slice. // decrement through the size of seeks slice.
pos int pos int
@ -1011,10 +1029,12 @@ func (a ascLocations) Less(i, j int) bool {
// newKeyCursor returns a new instance of KeyCursor. // newKeyCursor returns a new instance of KeyCursor.
// This function assumes the read-lock has been taken. // This function assumes the read-lock has been taken.
func newKeyCursor(fs *FileStore, key []byte, t int64, ascending bool) *KeyCursor { func newKeyCursor(ctx context.Context, fs *FileStore, key []byte, t int64, ascending bool) *KeyCursor {
c := &KeyCursor{ c := &KeyCursor{
key: key, key: key,
seeks: fs.locations(key, t, ascending), seeks: fs.locations(key, t, ascending),
ctx: ctx,
col: metrics.GroupFromContext(ctx),
ascending: ascending, ascending: ascending,
} }

View File

@ -1,6 +1,7 @@
package tsm1_test package tsm1_test
import ( import (
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
@ -71,7 +72,7 @@ func TestFileStore_SeekToAsc_FromStart(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
@ -111,7 +112,7 @@ func TestFileStore_SeekToAsc_Duplicate(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
@ -184,7 +185,7 @@ func TestFileStore_SeekToAsc_BeforeStart(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -226,7 +227,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapFloat(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -293,7 +294,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapInteger(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.IntegerValue, 1000) buf := make([]tsm1.IntegerValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadIntegerBlock(&buf) values, err := c.ReadIntegerBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -359,7 +360,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapUnsigned(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.UnsignedValue, 1000) buf := make([]tsm1.UnsignedValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadUnsignedBlock(&buf) values, err := c.ReadUnsignedBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -425,7 +426,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapBoolean(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.BooleanValue, 1000) buf := make([]tsm1.BooleanValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadBooleanBlock(&buf) values, err := c.ReadBooleanBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -491,7 +492,7 @@ func TestFileStore_SeekToAsc_BeforeStart_OverlapString(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.StringValue, 1000) buf := make([]tsm1.StringValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadStringBlock(&buf) values, err := c.ReadStringBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -556,7 +557,7 @@ func TestFileStore_SeekToAsc_OverlapMinFloat(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
@ -636,7 +637,7 @@ func TestFileStore_SeekToAsc_OverlapMinInteger(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.IntegerValue, 1000) buf := make([]tsm1.IntegerValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
values, err := c.ReadIntegerBlock(&buf) values, err := c.ReadIntegerBlock(&buf)
if err != nil { if err != nil {
@ -715,7 +716,7 @@ func TestFileStore_SeekToAsc_OverlapMinUnsigned(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.UnsignedValue, 1000) buf := make([]tsm1.UnsignedValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
values, err := c.ReadUnsignedBlock(&buf) values, err := c.ReadUnsignedBlock(&buf)
if err != nil { if err != nil {
@ -794,7 +795,7 @@ func TestFileStore_SeekToAsc_OverlapMinBoolean(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.BooleanValue, 1000) buf := make([]tsm1.BooleanValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
values, err := c.ReadBooleanBlock(&buf) values, err := c.ReadBooleanBlock(&buf)
if err != nil { if err != nil {
@ -873,7 +874,7 @@ func TestFileStore_SeekToAsc_OverlapMinString(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.StringValue, 1000) buf := make([]tsm1.StringValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
values, err := c.ReadStringBlock(&buf) values, err := c.ReadStringBlock(&buf)
if err != nil { if err != nil {
@ -951,7 +952,7 @@ func TestFileStore_SeekToAsc_Middle(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 3, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 3, true)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1007,7 +1008,7 @@ func TestFileStore_SeekToAsc_End(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 2, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 2, true)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1046,7 +1047,7 @@ func TestFileStore_SeekToDesc_FromStart(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, false)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1085,7 +1086,7 @@ func TestFileStore_SeekToDesc_Duplicate(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 2, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 2, false)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1144,7 +1145,7 @@ func TestFileStore_SeekToDesc_OverlapMaxFloat(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 5, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1209,7 +1210,7 @@ func TestFileStore_SeekToDesc_OverlapMaxInteger(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.IntegerValue, 1000) buf := make([]tsm1.IntegerValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 5, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false)
values, err := c.ReadIntegerBlock(&buf) values, err := c.ReadIntegerBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1271,7 +1272,7 @@ func TestFileStore_SeekToDesc_OverlapMaxUnsigned(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.UnsignedValue, 1000) buf := make([]tsm1.UnsignedValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 5, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false)
values, err := c.ReadUnsignedBlock(&buf) values, err := c.ReadUnsignedBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1334,7 +1335,7 @@ func TestFileStore_SeekToDesc_OverlapMaxBoolean(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.BooleanValue, 1000) buf := make([]tsm1.BooleanValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 5, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false)
values, err := c.ReadBooleanBlock(&buf) values, err := c.ReadBooleanBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1397,7 +1398,7 @@ func TestFileStore_SeekToDesc_OverlapMaxString(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.StringValue, 1000) buf := make([]tsm1.StringValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 5, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false)
values, err := c.ReadStringBlock(&buf) values, err := c.ReadStringBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1458,7 +1459,7 @@ func TestFileStore_SeekToDesc_AfterEnd(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 4, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 4, false)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1497,7 +1498,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapFloat(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 10, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 10, false)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1594,7 +1595,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapInteger(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.IntegerValue, 1000) buf := make([]tsm1.IntegerValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 11, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 11, false)
values, err := c.ReadIntegerBlock(&buf) values, err := c.ReadIntegerBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1671,7 +1672,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapUnsigned(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.UnsignedValue, 1000) buf := make([]tsm1.UnsignedValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 11, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 11, false)
values, err := c.ReadUnsignedBlock(&buf) values, err := c.ReadUnsignedBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1748,7 +1749,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapBoolean(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.BooleanValue, 1000) buf := make([]tsm1.BooleanValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 11, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 11, false)
values, err := c.ReadBooleanBlock(&buf) values, err := c.ReadBooleanBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1845,7 +1846,7 @@ func TestFileStore_SeekToDesc_AfterEnd_OverlapString(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.StringValue, 1000) buf := make([]tsm1.StringValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 11, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 11, false)
values, err := c.ReadStringBlock(&buf) values, err := c.ReadStringBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -1945,7 +1946,7 @@ func TestFileStore_SeekToDesc_Middle(t *testing.T) {
// Search for an entry that exists in the second file // Search for an entry that exists in the second file
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 3, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 3, false)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -2018,7 +2019,7 @@ func TestFileStore_SeekToDesc_End(t *testing.T) {
fs.Replace(nil, files) fs.Replace(nil, files)
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 2, false) c := fs.KeyCursor(context.Background(), []byte("cpu"), 2, false)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -2060,7 +2061,7 @@ func TestKeyCursor_TombstoneRange(t *testing.T) {
} }
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
expValues := []int{0, 2} expValues := []int{0, 2}
for _, v := range expValues { for _, v := range expValues {
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
@ -2105,7 +2106,7 @@ func TestKeyCursor_TombstoneRange_PartialFloat(t *testing.T) {
} }
buf := make([]tsm1.FloatValue, 1000) buf := make([]tsm1.FloatValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadFloatBlock(&buf) values, err := c.ReadFloatBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -2149,7 +2150,7 @@ func TestKeyCursor_TombstoneRange_PartialInteger(t *testing.T) {
} }
buf := make([]tsm1.IntegerValue, 1000) buf := make([]tsm1.IntegerValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadIntegerBlock(&buf) values, err := c.ReadIntegerBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -2193,7 +2194,7 @@ func TestKeyCursor_TombstoneRange_PartialUnsigned(t *testing.T) {
} }
buf := make([]tsm1.UnsignedValue, 1000) buf := make([]tsm1.UnsignedValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadUnsignedBlock(&buf) values, err := c.ReadUnsignedBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -2237,7 +2238,7 @@ func TestKeyCursor_TombstoneRange_PartialString(t *testing.T) {
} }
buf := make([]tsm1.StringValue, 1000) buf := make([]tsm1.StringValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadStringBlock(&buf) values, err := c.ReadStringBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -2281,7 +2282,7 @@ func TestKeyCursor_TombstoneRange_PartialBoolean(t *testing.T) {
} }
buf := make([]tsm1.BooleanValue, 1000) buf := make([]tsm1.BooleanValue, 1000)
c := fs.KeyCursor([]byte("cpu"), 0, true) c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
values, err := c.ReadBooleanBlock(&buf) values, err := c.ReadBooleanBlock(&buf)
if err != nil { if err != nil {
t.Fatalf("unexpected error reading values: %v", err) t.Fatalf("unexpected error reading values: %v", err)
@ -2403,7 +2404,7 @@ func TestFileStore_Replace(t *testing.T) {
} }
// Should record references to the two existing TSM files // Should record references to the two existing TSM files
cur := fs.KeyCursor([]byte("cpu"), 0, true) cur := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true)
// Should move the existing files out of the way, but allow query to complete // Should move the existing files out of the way, but allow query to complete
if err := fs.Replace(files[:2], []string{replacement}); err != nil { if err := fs.Replace(files[:2], []string{replacement}); err != nil {

View File

@ -13,6 +13,9 @@ import (
"sync" "sync"
"github.com/influxdata/influxdb/influxql" "github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/pkg/metrics"
"github.com/influxdata/influxdb/pkg/tracing"
"github.com/influxdata/influxdb/pkg/tracing/fields"
"github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb"
"github.com/uber-go/zap" "github.com/uber-go/zap"
@ -142,6 +145,36 @@ func (itr *floatFinalizerIterator) Close() error {
return itr.FloatIterator.Close() return itr.FloatIterator.Close()
} }
type floatInstrumentedIterator struct {
query.FloatIterator
span *tracing.Span
group *metrics.Group
}
func newFloatInstrumentedIterator(inner query.FloatIterator, span *tracing.Span, group *metrics.Group) *floatInstrumentedIterator {
return &floatInstrumentedIterator{FloatIterator: inner, span: span, group: group}
}
func (itr *floatInstrumentedIterator) Close() error {
var f []fields.Field
itr.group.ForEach(func(v metrics.Metric) {
switch m := v.(type) {
case *metrics.Counter:
f = append(f, fields.Int64(m.Name(), m.Value()))
case *metrics.Timer:
f = append(f, fields.Duration(m.Name(), m.Value()))
default:
panic("unexpected metrics")
}
})
itr.span.SetFields(fields.New(f...))
itr.span.Finish()
return itr.FloatIterator.Close()
}
type floatIterator struct { type floatIterator struct {
cur floatCursor cur floatCursor
aux []cursorAt aux []cursorAt
@ -575,6 +608,36 @@ func (itr *integerFinalizerIterator) Close() error {
return itr.IntegerIterator.Close() return itr.IntegerIterator.Close()
} }
type integerInstrumentedIterator struct {
query.IntegerIterator
span *tracing.Span
group *metrics.Group
}
func newIntegerInstrumentedIterator(inner query.IntegerIterator, span *tracing.Span, group *metrics.Group) *integerInstrumentedIterator {
return &integerInstrumentedIterator{IntegerIterator: inner, span: span, group: group}
}
func (itr *integerInstrumentedIterator) Close() error {
var f []fields.Field
itr.group.ForEach(func(v metrics.Metric) {
switch m := v.(type) {
case *metrics.Counter:
f = append(f, fields.Int64(m.Name(), m.Value()))
case *metrics.Timer:
f = append(f, fields.Duration(m.Name(), m.Value()))
default:
panic("unexpected metrics")
}
})
itr.span.SetFields(fields.New(f...))
itr.span.Finish()
return itr.IntegerIterator.Close()
}
type integerIterator struct { type integerIterator struct {
cur integerCursor cur integerCursor
aux []cursorAt aux []cursorAt
@ -1008,6 +1071,36 @@ func (itr *unsignedFinalizerIterator) Close() error {
return itr.UnsignedIterator.Close() return itr.UnsignedIterator.Close()
} }
type unsignedInstrumentedIterator struct {
query.UnsignedIterator
span *tracing.Span
group *metrics.Group
}
func newUnsignedInstrumentedIterator(inner query.UnsignedIterator, span *tracing.Span, group *metrics.Group) *unsignedInstrumentedIterator {
return &unsignedInstrumentedIterator{UnsignedIterator: inner, span: span, group: group}
}
func (itr *unsignedInstrumentedIterator) Close() error {
var f []fields.Field
itr.group.ForEach(func(v metrics.Metric) {
switch m := v.(type) {
case *metrics.Counter:
f = append(f, fields.Int64(m.Name(), m.Value()))
case *metrics.Timer:
f = append(f, fields.Duration(m.Name(), m.Value()))
default:
panic("unexpected metrics")
}
})
itr.span.SetFields(fields.New(f...))
itr.span.Finish()
return itr.UnsignedIterator.Close()
}
type unsignedIterator struct { type unsignedIterator struct {
cur unsignedCursor cur unsignedCursor
aux []cursorAt aux []cursorAt
@ -1441,6 +1534,36 @@ func (itr *stringFinalizerIterator) Close() error {
return itr.StringIterator.Close() return itr.StringIterator.Close()
} }
type stringInstrumentedIterator struct {
query.StringIterator
span *tracing.Span
group *metrics.Group
}
func newStringInstrumentedIterator(inner query.StringIterator, span *tracing.Span, group *metrics.Group) *stringInstrumentedIterator {
return &stringInstrumentedIterator{StringIterator: inner, span: span, group: group}
}
func (itr *stringInstrumentedIterator) Close() error {
var f []fields.Field
itr.group.ForEach(func(v metrics.Metric) {
switch m := v.(type) {
case *metrics.Counter:
f = append(f, fields.Int64(m.Name(), m.Value()))
case *metrics.Timer:
f = append(f, fields.Duration(m.Name(), m.Value()))
default:
panic("unexpected metrics")
}
})
itr.span.SetFields(fields.New(f...))
itr.span.Finish()
return itr.StringIterator.Close()
}
type stringIterator struct { type stringIterator struct {
cur stringCursor cur stringCursor
aux []cursorAt aux []cursorAt
@ -1874,6 +1997,36 @@ func (itr *booleanFinalizerIterator) Close() error {
return itr.BooleanIterator.Close() return itr.BooleanIterator.Close()
} }
// booleanInstrumentedIterator wraps a query.BooleanIterator and, on Close,
// reports the accumulated metrics group as fields on the associated tracing
// span before finishing it (used by the EXPLAIN ANALYZE implementation).
type booleanInstrumentedIterator struct {
	query.BooleanIterator
	span  *tracing.Span
	group *metrics.Group
}

// newBooleanInstrumentedIterator returns inner wrapped so that span and
// group are reported and finalized when the iterator is closed.
func newBooleanInstrumentedIterator(inner query.BooleanIterator, span *tracing.Span, group *metrics.Group) *booleanInstrumentedIterator {
	return &booleanInstrumentedIterator{BooleanIterator: inner, span: span, group: group}
}

// Close converts every metric in the group to a span field (counters as
// int64 values, timers as durations), attaches the fields to the span,
// finishes the span, and then closes the wrapped iterator.
func (itr *booleanInstrumentedIterator) Close() error {
	var f []fields.Field
	itr.group.ForEach(func(v metrics.Metric) {
		switch m := v.(type) {
		case *metrics.Counter:
			f = append(f, fields.Int64(m.Name(), m.Value()))
		case *metrics.Timer:
			f = append(f, fields.Duration(m.Name(), m.Value()))
		default:
			// Only counters and timers are registered by the engine;
			// any other metric type is a programming error.
			panic("unexpected metrics")
		}
	})
	itr.span.SetFields(fields.New(f...))
	itr.span.Finish()
	return itr.BooleanIterator.Close()
}
type booleanIterator struct { type booleanIterator struct {
cur booleanCursor cur booleanCursor
aux []cursorAt aux []cursorAt

View File

@ -7,6 +7,9 @@ import (
"sync" "sync"
"github.com/influxdata/influxdb/influxql" "github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/pkg/metrics"
"github.com/influxdata/influxdb/pkg/tracing"
"github.com/influxdata/influxdb/pkg/tracing/fields"
"github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb"
"github.com/uber-go/zap" "github.com/uber-go/zap"
@ -138,6 +141,38 @@ func (itr *{{.name}}FinalizerIterator) Close() error {
return itr.{{.Name}}Iterator.Close() return itr.{{.Name}}Iterator.Close()
} }
// {{.name}}InstrumentedIterator wraps a query.{{.Name}}Iterator and, on Close,
// reports the accumulated metrics group as fields on the associated tracing
// span before finishing it (used by the EXPLAIN ANALYZE implementation).
type {{.name}}InstrumentedIterator struct {
	query.{{.Name}}Iterator
	span  *tracing.Span
	group *metrics.Group
}

// new{{.Name}}InstrumentedIterator returns inner wrapped so that span and
// group are reported and finalized when the iterator is closed.
func new{{.Name}}InstrumentedIterator(inner query.{{.Name}}Iterator, span *tracing.Span, group *metrics.Group) *{{.name}}InstrumentedIterator {
	return &{{.name}}InstrumentedIterator{ {{.Name}}Iterator: inner, span: span, group: group}
}

// Close converts every metric in the group to a span field (counters as
// int64 values, timers as durations), attaches the fields to the span,
// finishes the span, and then closes the wrapped iterator.
//
// NOTE: uses the pkg/tracing/fields package (fields.Field, fields.Int64,
// fields.Duration, fields.New) and Span.SetFields, matching the committed
// generated iterator files; the previous field.*/SetFieldSet names would
// not have compiled when regenerated.
func (itr *{{.name}}InstrumentedIterator) Close() error {
	var f []fields.Field
	itr.group.ForEach(func(v metrics.Metric) {
		switch m := v.(type) {
		case *metrics.Counter:
			f = append(f, fields.Int64(m.Name(), m.Value()))
		case *metrics.Timer:
			f = append(f, fields.Duration(m.Name(), m.Value()))
		default:
			// Only counters and timers are registered by the engine;
			// any other metric type is a programming error.
			panic("unexpected metrics")
		}
	})
	itr.span.SetFields(fields.New(f...))
	itr.span.Finish()
	return itr.{{.Name}}Iterator.Close()
}
type {{.name}}Iterator struct { type {{.name}}Iterator struct {
cur {{.name}}Cursor cur {{.name}}Cursor
aux []cursorAt aux []cursorAt

View File

@ -1,8 +1,11 @@
package tsm1 package tsm1
import ( import (
"context"
"fmt" "fmt"
"github.com/influxdata/influxdb/pkg/metrics"
"github.com/influxdata/influxdb/pkg/tracing"
"github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb"
"github.com/uber-go/zap" "github.com/uber-go/zap"
@ -153,13 +156,13 @@ func (c cursorsAt) close() {
// newMergeFinalizerIterator creates a new Merge iterator from the inputs. If the call to Merge succeeds, // newMergeFinalizerIterator creates a new Merge iterator from the inputs. If the call to Merge succeeds,
// the resulting Iterator will be wrapped in a finalizer iterator. // the resulting Iterator will be wrapped in a finalizer iterator.
// If Merge returns an error, the inputs will be closed. // If Merge returns an error, the inputs will be closed.
func newMergeFinalizerIterator(inputs []query.Iterator, opt query.IteratorOptions, log zap.Logger) (query.Iterator, error) { func newMergeFinalizerIterator(ctx context.Context, inputs []query.Iterator, opt query.IteratorOptions, log zap.Logger) (query.Iterator, error) {
itr, err := query.Iterators(inputs).Merge(opt) itr, err := query.Iterators(inputs).Merge(opt)
if err != nil { if err != nil {
query.Iterators(inputs).Close() query.Iterators(inputs).Close()
return nil, err return nil, err
} }
return newFinalizerIterator(itr, log), nil return newInstrumentedIterator(ctx, newFinalizerIterator(itr, log)), nil
} }
// newFinalizerIterator creates a new iterator that installs a runtime finalizer // newFinalizerIterator creates a new iterator that installs a runtime finalizer
@ -186,3 +189,30 @@ func newFinalizerIterator(itr query.Iterator, log zap.Logger) query.Iterator {
panic(fmt.Sprintf("unsupported finalizer iterator type: %T", itr)) panic(fmt.Sprintf("unsupported finalizer iterator type: %T", itr))
} }
} }
// newInstrumentedIterator wraps itr so that, when the iterator is closed,
// the metrics group carried by ctx is reported on the tracing span carried
// by ctx. If ctx carries no span or no metrics group (i.e. the query is not
// being profiled), itr is returned unchanged. A nil iterator stays nil.
func newInstrumentedIterator(ctx context.Context, itr query.Iterator) query.Iterator {
	if itr == nil {
		return nil
	}

	span, grp := tracing.SpanFromContext(ctx), metrics.GroupFromContext(ctx)
	if span == nil || grp == nil {
		// Not an instrumented (EXPLAIN ANALYZE) query; no wrapping needed.
		return itr
	}

	// Dispatch on the concrete element type so the typed iterator
	// interface is preserved for downstream consumers.
	switch it := itr.(type) {
	case query.FloatIterator:
		return newFloatInstrumentedIterator(it, span, grp)
	case query.IntegerIterator:
		return newIntegerInstrumentedIterator(it, span, grp)
	case query.UnsignedIterator:
		return newUnsignedInstrumentedIterator(it, span, grp)
	case query.StringIterator:
		return newStringInstrumentedIterator(it, span, grp)
	case query.BooleanIterator:
		return newBooleanInstrumentedIterator(it, span, grp)
	default:
		panic(fmt.Sprintf("unsupported instrumented iterator type: %T", itr))
	}
}

View File

@ -2,6 +2,7 @@ package tsdb
import ( import (
"bytes" "bytes"
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -817,19 +818,18 @@ func (s *Shard) WriteTo(w io.Writer) (int64, error) {
} }
// CreateIterator returns an iterator for the data in the shard. // CreateIterator returns an iterator for the data in the shard.
func (s *Shard) CreateIterator(measurement string, opt query.IteratorOptions) (query.Iterator, error) { func (s *Shard) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) {
engine, err := s.engine() engine, err := s.engine()
if err != nil { if err != nil {
return nil, err return nil, err
} }
if strings.HasPrefix(measurement, "_") { if strings.HasPrefix(measurement, "_") {
if itr, ok, err := s.createSystemIterator(engine, measurement, opt); ok { if itr, ok, err := s.createSystemIterator(engine, measurement, opt); ok {
return itr, err return itr, err
} }
// Unknown system source so pass this to the engine. // Unknown system source so pass this to the engine.
} }
return engine.CreateIterator(measurement, opt) return engine.CreateIterator(ctx, measurement, opt)
} }
// createSystemIterator returns an iterator for a field of system source. // createSystemIterator returns an iterator for a field of system source.
@ -1164,7 +1164,7 @@ type ShardGroup interface {
MeasurementsByRegex(re *regexp.Regexp) []string MeasurementsByRegex(re *regexp.Regexp) []string
FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
MapType(measurement, field string) influxql.DataType MapType(measurement, field string) influxql.DataType
CreateIterator(measurement string, opt query.IteratorOptions) (query.Iterator, error) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error)
IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error)
ExpandSources(sources influxql.Sources) (influxql.Sources, error) ExpandSources(sources influxql.Sources) (influxql.Sources, error)
} }
@ -1245,10 +1245,10 @@ func (a Shards) MapType(measurement, field string) influxql.DataType {
return typ return typ
} }
func (a Shards) CreateIterator(measurement string, opt query.IteratorOptions) (query.Iterator, error) { func (a Shards) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) {
itrs := make([]query.Iterator, 0, len(a)) itrs := make([]query.Iterator, 0, len(a))
for _, sh := range a { for _, sh := range a {
itr, err := sh.CreateIterator(measurement, opt) itr, err := sh.CreateIterator(ctx, measurement, opt)
if err != nil { if err != nil {
query.Iterators(itrs).Close() query.Iterators(itrs).Close()
return nil, err return nil, err

View File

@ -1,6 +1,7 @@
package tsdb_test package tsdb_test
import ( import (
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
@ -464,7 +465,7 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) {
sh.WritePoints(points) sh.WritePoints(points)
iter, err := sh.CreateIterator("cpu", query.IteratorOptions{ iter, err := sh.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Aux: []influxql.VarRef{{Val: "value"}}, Aux: []influxql.VarRef{{Val: "value"}},
Dimensions: []string{}, Dimensions: []string{},
@ -525,7 +526,7 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) {
sh.WritePoints(points) sh.WritePoints(points)
iter, err := sh.CreateIterator("cpu", query.IteratorOptions{ iter, err := sh.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Aux: []influxql.VarRef{{Val: "value"}}, Aux: []influxql.VarRef{{Val: "value"}},
Dimensions: []string{}, Dimensions: []string{},
@ -609,7 +610,7 @@ func TestShard_CreateIterator_Ascending(t *testing.T) {
// Calling CreateIterator when the engine is not open will return // Calling CreateIterator when the engine is not open will return
// ErrEngineClosed. // ErrEngineClosed.
_, got := sh.CreateIterator("cpu", query.IteratorOptions{}) _, got := sh.CreateIterator(context.Background(), "cpu", query.IteratorOptions{})
if exp := tsdb.ErrEngineClosed; got != exp { if exp := tsdb.ErrEngineClosed; got != exp {
t.Fatalf("got %v, expected %v", got, exp) t.Fatalf("got %v, expected %v", got, exp)
} }
@ -626,7 +627,7 @@ cpu,host=serverB,region=uswest value=25 0
// Create iterator. // Create iterator.
var err error var err error
itr, err = sh.CreateIterator("cpu", query.IteratorOptions{ itr, err = sh.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Aux: []influxql.VarRef{{Val: "val2"}}, Aux: []influxql.VarRef{{Val: "val2"}},
Dimensions: []string{"host"}, Dimensions: []string{"host"},
@ -694,7 +695,7 @@ func TestShard_CreateIterator_Descending(t *testing.T) {
// Calling CreateIterator when the engine is not open will return // Calling CreateIterator when the engine is not open will return
// ErrEngineClosed. // ErrEngineClosed.
_, got := sh.CreateIterator("cpu", query.IteratorOptions{}) _, got := sh.CreateIterator(context.Background(), "cpu", query.IteratorOptions{})
if exp := tsdb.ErrEngineClosed; got != exp { if exp := tsdb.ErrEngineClosed; got != exp {
t.Fatalf("got %v, expected %v", got, exp) t.Fatalf("got %v, expected %v", got, exp)
} }
@ -711,7 +712,7 @@ cpu,host=serverB,region=uswest value=25 0
// Create iterator. // Create iterator.
var err error var err error
itr, err = sh.CreateIterator("cpu", query.IteratorOptions{ itr, err = sh.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Aux: []influxql.VarRef{{Val: "val2"}}, Aux: []influxql.VarRef{{Val: "val2"}},
Dimensions: []string{"host"}, Dimensions: []string{"host"},
@ -795,7 +796,7 @@ func TestShard_Disabled_WriteQuery(t *testing.T) {
t.Fatalf(err.Error()) t.Fatalf(err.Error())
} }
_, got := sh.CreateIterator("cpu", query.IteratorOptions{}) _, got := sh.CreateIterator(context.Background(), "cpu", query.IteratorOptions{})
if err == nil { if err == nil {
t.Fatalf("expected shard disabled error") t.Fatalf("expected shard disabled error")
} }
@ -810,7 +811,7 @@ func TestShard_Disabled_WriteQuery(t *testing.T) {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
if _, err = sh.CreateIterator("cpu", query.IteratorOptions{}); err != nil { if _, err = sh.CreateIterator(context.Background(), "cpu", query.IteratorOptions{}); err != nil {
t.Fatalf("unexpected error: %v", got) t.Fatalf("unexpected error: %v", got)
} }
} }

View File

@ -2,6 +2,7 @@ package tsdb_test
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math" "math"
@ -353,7 +354,7 @@ func TestShards_CreateIterator(t *testing.T) {
shards := s.ShardGroup([]uint64{0, 1}) shards := s.ShardGroup([]uint64{0, 1})
// Create iterator. // Create iterator.
itr, err := shards.CreateIterator("cpu", query.IteratorOptions{ itr, err := shards.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Dimensions: []string{"host"}, Dimensions: []string{"host"},
Ascending: true, Ascending: true,
@ -443,7 +444,7 @@ func TestStore_BackupRestoreShard(t *testing.T) {
} }
// Read data from // Read data from
itr, err := s0.Shard(100).CreateIterator("cpu", query.IteratorOptions{ itr, err := s0.Shard(100).CreateIterator(context.Background(), "cpu", query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`), Expr: influxql.MustParseExpr(`value`),
Ascending: true, Ascending: true,
StartTime: influxql.MinTime, StartTime: influxql.MinTime,