influxdb/query/iterator.gen.go


// Generated by tmpl
// https://github.com/benbjohnson/tmpl
//
// DO NOT EDIT!
// Source: iterator.gen.go.tmpl
package query
import (
"container/heap"
"context"
"io"
"sort"
"sync"
"time"
"github.com/influxdata/influxql"
"google.golang.org/protobuf/proto"
)
// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval.
const DefaultStatsInterval = time.Second
// FloatIterator represents a stream of float points.
type FloatIterator interface {
Iterator
Next() (*FloatPoint, error)
}
// newFloatIterators converts a slice of Iterator to a slice of FloatIterator.
// Drops and closes any iterator in itrs that is not a FloatIterator.
func newFloatIterators(itrs []Iterator) []FloatIterator {
a := make([]FloatIterator, 0, len(itrs))
for _, itr := range itrs {
switch itr := itr.(type) {
case FloatIterator:
a = append(a, itr)
default:
itr.Close()
}
}
return a
}
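// The sketch below is illustrative and not part of the generated file. It
// defines two hypothetical helpers: exampleFloatSliceIterator, a minimal
// slice-backed FloatIterator that is reused by the other sketches in this
// file, and exampleClosedOnlyIterator, an Iterator that is not a
// FloatIterator. Together they show that newFloatIterators keeps float
// inputs and drops and closes everything else.
type exampleFloatSliceIterator struct {
    points []FloatPoint
}

func (itr *exampleFloatSliceIterator) Stats() IteratorStats { return IteratorStats{} }
func (itr *exampleFloatSliceIterator) Close() error         { return nil }

// Next pops the next point off the slice, returning nil when it is exhausted.
func (itr *exampleFloatSliceIterator) Next() (*FloatPoint, error) {
    if len(itr.points) == 0 {
        return nil, nil
    }
    p := itr.points[0].Clone()
    itr.points = itr.points[1:]
    return p, nil
}

// exampleClosedOnlyIterator satisfies Iterator but not FloatIterator.
type exampleClosedOnlyIterator struct {
    closed bool
}

func (itr *exampleClosedOnlyIterator) Stats() IteratorStats { return IteratorStats{} }
func (itr *exampleClosedOnlyIterator) Close() error {
    itr.closed = true
    return nil
}

func exampleNewFloatIterators() {
    f := &exampleFloatSliceIterator{points: []FloatPoint{{Name: "cpu", Time: 1, Value: 1}}}
    other := &exampleClosedOnlyIterator{}
    a := newFloatIterators([]Iterator{f, other})
    _ = len(a)       // 1: only the FloatIterator is kept
    _ = other.closed // true: the non-float iterator was dropped and closed
}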
// bufFloatIterator represents a buffered FloatIterator.
type bufFloatIterator struct {
itr FloatIterator
buf *FloatPoint
}
// newBufFloatIterator returns a buffered FloatIterator.
func newBufFloatIterator(itr FloatIterator) *bufFloatIterator {
return &bufFloatIterator{itr: itr}
}
// Stats returns statistics from the input iterator.
func (itr *bufFloatIterator) Stats() IteratorStats { return itr.itr.Stats() }
// Close closes the underlying iterator.
func (itr *bufFloatIterator) Close() error { return itr.itr.Close() }
// peek returns the next point without removing it from the iterator.
func (itr *bufFloatIterator) peek() (*FloatPoint, error) {
p, err := itr.Next()
if err != nil {
return nil, err
}
itr.unread(p)
return p, nil
}
// peekTime returns the time of the next point.
// Returns ZeroTime if no more points are available.
func (itr *bufFloatIterator) peekTime() (int64, error) {
p, err := itr.peek()
if p == nil || err != nil {
return ZeroTime, err
}
return p.Time, nil
}
// Next returns the buffered point, if one exists, or reads the next point from the underlying iterator.
func (itr *bufFloatIterator) Next() (*FloatPoint, error) {
buf := itr.buf
if buf != nil {
itr.buf = nil
return buf, nil
}
return itr.itr.Next()
}
// NextInWindow returns the next value if it is between [startTime, endTime).
// If the next value is outside the range then it is moved to the buffer.
func (itr *bufFloatIterator) NextInWindow(startTime, endTime int64) (*FloatPoint, error) {
v, err := itr.Next()
if v == nil || err != nil {
return nil, err
} else if t := v.Time; t >= endTime || t < startTime {
itr.unread(v)
return nil, nil
}
return v, nil
}
// unread places v in the buffer. It will be returned by the next call to Next().
func (itr *bufFloatIterator) unread(v *FloatPoint) { itr.buf = v }
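// Illustrative sketch (not part of the generated file), reusing the
// hypothetical exampleFloatSliceIterator defined above: peek reads the next
// point and immediately unreads it, so the following Next call returns the
// very same point without touching the underlying iterator again.
func exampleBufPeekThenNext() {
    itr := newBufFloatIterator(&exampleFloatSliceIterator{
        points: []FloatPoint{{Name: "cpu", Time: 1, Value: 1.5}},
    })
    peeked, _ := itr.peek() // point is returned and left in the buffer
    next, _ := itr.Next()   // drains the buffer instead of reading the input
    _ = peeked == next      // true: both calls observed the same point
}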
// floatMergeIterator represents an iterator that combines multiple float iterators.
type floatMergeIterator struct {
inputs []FloatIterator
heap *floatMergeHeap
init bool
closed bool
mu sync.RWMutex
// Current iterator and window.
curr *floatMergeHeapItem
window struct {
name string
tags string
startTime int64
endTime int64
}
}
// newFloatMergeIterator returns a new instance of floatMergeIterator.
func newFloatMergeIterator(inputs []FloatIterator, opt IteratorOptions) *floatMergeIterator {
itr := &floatMergeIterator{
inputs: inputs,
heap: &floatMergeHeap{
items: make([]*floatMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Wrap in a buffer; inputs with no more points are dropped later during heap initialization.
bufInput := newBufFloatIterator(input)
// Append to the heap.
itr.heap.items = append(itr.heap.items, &floatMergeHeapItem{itr: bufInput})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *floatMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *floatMergeIterator) Close() error {
itr.mu.Lock()
defer itr.mu.Unlock()
for _, input := range itr.inputs {
input.Close()
}
itr.curr = nil
itr.inputs = nil
itr.heap.items = nil
itr.closed = true
return nil
}
// Next returns the next point from the iterator.
func (itr *floatMergeIterator) Next() (*FloatPoint, error) {
itr.mu.RLock()
defer itr.mu.RUnlock()
if itr.closed {
return nil, nil
}
// Initialize the heap. This needs to be done lazily on the first call to Next()
// so that iterator initialization done through the Select() call returns quickly.
// Queries can only be interrupted after the Select() call completes, so any work
// done during iterator creation cannot be interrupted. Building the heap here
// instead allows an interrupt to occur while the heap is initialized.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*floatMergeHeapItem, 0, len(items))
for _, item := range items {
if p, err := item.itr.peek(); err != nil {
return nil, err
} else if p == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
for {
// Retrieve the next iterator if we don't have one.
if itr.curr == nil {
if len(itr.heap.items) == 0 {
return nil, nil
}
itr.curr = heap.Pop(itr.heap).(*floatMergeHeapItem)
// Read point and set current window.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
tags := p.Tags.Subset(itr.heap.opt.Dimensions)
itr.window.name, itr.window.tags = p.Name, tags.ID()
itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)
return p, nil
}
// Read the next point from the current iterator.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
// If there are no more points then remove iterator from heap and find next.
if p == nil {
itr.curr = nil
continue
}
// Check if the point is inside of our current window.
inWindow := true
if window := itr.window; window.name != p.Name {
inWindow = false
} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {
inWindow = false
} else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime {
inWindow = false
} else if !opt.Ascending && p.Time < window.startTime {
inWindow = false
}
// If it's outside our window then push iterator back on the heap and find new iterator.
if !inWindow {
itr.curr.itr.unread(p)
heap.Push(itr.heap, itr.curr)
itr.curr = nil
continue
}
return p, nil
}
}
// floatMergeHeap represents a heap of floatMergeHeapItems.
// Items are sorted by their next window and then by name/tags.
type floatMergeHeap struct {
opt IteratorOptions
items []*floatMergeHeapItem
}
func (h *floatMergeHeap) Len() int { return len(h.items) }
func (h *floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *floatMergeHeap) Less(i, j int) bool {
x, err := h.items[i].itr.peek()
if err != nil {
return true
}
y, err := h.items[j].itr.peek()
if err != nil {
return false
}
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() < yTags.ID()
}
} else {
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() > yTags.ID()
}
}
xt, _ := h.opt.Window(x.Time)
yt, _ := h.opt.Window(y.Time)
if h.opt.Ascending {
return xt < yt
}
return xt > yt
}
func (h *floatMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*floatMergeHeapItem))
}
func (h *floatMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
type floatMergeHeapItem struct {
itr *bufFloatIterator
}
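// Illustrative sketch (not part of the generated file), reusing the
// hypothetical exampleFloatSliceIterator defined above. It assumes that with
// no GROUP BY interval, IteratorOptions.Window treats the whole
// [StartTime, EndTime] range as a single window. Under that assumption the
// merge iterator emits each input's points window by window, so here each
// input is drained in turn rather than interleaved by time (contrast with the
// sorted merge sketch further below).
func exampleMergeIterator() {
    a := &exampleFloatSliceIterator{points: []FloatPoint{
        {Name: "cpu", Time: 1, Value: 10},
        {Name: "cpu", Time: 3, Value: 30},
    }}
    b := &exampleFloatSliceIterator{points: []FloatPoint{
        {Name: "cpu", Time: 2, Value: 20},
    }}
    opt := IteratorOptions{
        Ascending: true,
        StartTime: influxql.MinTime,
        EndTime:   influxql.MaxTime,
    }
    itr := newFloatMergeIterator([]FloatIterator{a, b}, opt)
    defer itr.Close()
    for {
        p, err := itr.Next()
        if err != nil || p == nil {
            break
        }
        _ = p // one input's points are returned completely before the other's
    }
}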
// floatSortedMergeIterator is an iterator that sorts and merges multiple iterators into one.
type floatSortedMergeIterator struct {
inputs []FloatIterator
heap *floatSortedMergeHeap
init bool
}
// newFloatSortedMergeIterator returns an instance of floatSortedMergeIterator.
func newFloatSortedMergeIterator(inputs []FloatIterator, opt IteratorOptions) Iterator {
itr := &floatSortedMergeIterator{
inputs: inputs,
heap: &floatSortedMergeHeap{
items: make([]*floatSortedMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Append to the heap.
itr.heap.items = append(itr.heap.items, &floatSortedMergeHeapItem{itr: input})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *floatSortedMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *floatSortedMergeIterator) Close() error {
for _, input := range itr.inputs {
input.Close()
}
return nil
}
// Next returns the next point from the iterator.
func (itr *floatSortedMergeIterator) Next() (*FloatPoint, error) { return itr.pop() }
// pop returns the next point from the heap.
// It reads the following point from the popped item's iterator and, if one
// exists, pushes the item back onto the heap.
func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) {
// Initialize the heap. See the MergeIterator to see why this has to be done lazily.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*floatSortedMergeHeapItem, 0, len(items))
for _, item := range items {
var err error
if item.point, err = item.itr.Next(); err != nil {
return nil, err
} else if item.point == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
if len(itr.heap.items) == 0 {
return nil, nil
}
// Read the next item from the heap.
item := heap.Pop(itr.heap).(*floatSortedMergeHeapItem)
if item.err != nil {
return nil, item.err
} else if item.point == nil {
return nil, nil
}
// Copy the point for return.
p := item.point.Clone()
// Read the next item from the cursor. Push back to heap if one exists.
if item.point, item.err = item.itr.Next(); item.point != nil {
heap.Push(itr.heap, item)
}
return p, nil
}
// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type floatSortedMergeHeap struct {
opt IteratorOptions
items []*floatSortedMergeHeapItem
}
func (h *floatSortedMergeHeap) Len() int { return len(h.items) }
func (h *floatSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *floatSortedMergeHeap) Less(i, j int) bool {
x, y := h.items[i].point, h.items[j].point
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() < yTags.ID()
}
if x.Time != y.Time {
return x.Time < y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 < v2
}
}
return false // Times and/or Aux fields are equal.
}
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() > yTags.ID()
}
if x.Time != y.Time {
return x.Time > y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 > v2
}
}
return false // Times and/or Aux fields are equal.
}
func (h *floatSortedMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*floatSortedMergeHeapItem))
}
func (h *floatSortedMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
type floatSortedMergeHeapItem struct {
point *FloatPoint
err error
itr FloatIterator
}
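// Illustrative sketch (not part of the generated file), reusing the
// hypothetical exampleFloatSliceIterator defined above: unlike the merge
// iterator, the sorted merge iterator orders points across inputs by name,
// tags, and time, so the two inputs below are interleaved into ascending time
// order 1, 2, 3, 4.
func exampleSortedMergeIterator() {
    a := &exampleFloatSliceIterator{points: []FloatPoint{
        {Name: "cpu", Time: 1, Value: 10},
        {Name: "cpu", Time: 3, Value: 30},
    }}
    b := &exampleFloatSliceIterator{points: []FloatPoint{
        {Name: "cpu", Time: 2, Value: 20},
        {Name: "cpu", Time: 4, Value: 40},
    }}
    itr := newFloatSortedMergeIterator([]FloatIterator{a, b}, IteratorOptions{Ascending: true}).(FloatIterator)
    defer itr.Close()
    for {
        p, err := itr.Next()
        if err != nil || p == nil {
            break
        }
        _ = p // points arrive with Time 1, 2, 3, 4
    }
}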
// floatIteratorScanner scans the results of a FloatIterator into a map.
type floatIteratorScanner struct {
input *bufFloatIterator
err error
keys []influxql.VarRef
defaultValue interface{}
}
// newFloatIteratorScanner creates a new IteratorScanner.
func newFloatIteratorScanner(input FloatIterator, keys []influxql.VarRef, defaultValue interface{}) *floatIteratorScanner {
return &floatIteratorScanner{
input: newBufFloatIterator(input),
keys: keys,
defaultValue: defaultValue,
}
}
func (s *floatIteratorScanner) Peek() (int64, string, Tags) {
if s.err != nil {
return ZeroTime, "", Tags{}
}
p, err := s.input.peek()
if err != nil {
s.err = err
return ZeroTime, "", Tags{}
} else if p == nil {
return ZeroTime, "", Tags{}
}
return p.Time, p.Name, p.Tags
}
func (s *floatIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) {
if s.err != nil {
return
}
p, err := s.input.Next()
if err != nil {
s.err = err
return
} else if p == nil {
s.useDefaults(m)
return
} else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) {
s.useDefaults(m)
s.input.unread(p)
return
}
if k := s.keys[0]; k.Val != "" {
if p.Nil {
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
} else {
m[k.Val] = p.Value
}
}
for i, v := range p.Aux {
k := s.keys[i+1]
switch v.(type) {
case float64, int64, uint64, string, bool:
m[k.Val] = v
default:
// Insert the fill value if one was specified.
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
}
}
func (s *floatIteratorScanner) useDefaults(m map[string]interface{}) {
if s.defaultValue == SkipDefault {
return
}
for _, k := range s.keys {
if k.Val == "" {
continue
}
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
func (s *floatIteratorScanner) Stats() IteratorStats { return s.input.Stats() }
func (s *floatIteratorScanner) Err() error { return s.err }
func (s *floatIteratorScanner) Close() error { return s.input.Close() }
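// Illustrative sketch (not part of the generated file), reusing the
// hypothetical exampleFloatSliceIterator defined above: ScanAt fills the
// supplied map when the buffered point matches the requested time, name, and
// tags, keying the primary value by the first VarRef.
func exampleScanAt() {
    input := &exampleFloatSliceIterator{points: []FloatPoint{{Name: "cpu", Time: 10, Value: 0.5}}}
    s := newFloatIteratorScanner(input, []influxql.VarRef{{Val: "value"}}, SkipDefault)
    m := map[string]interface{}{}
    s.ScanAt(10, "cpu", Tags{}, m) // m["value"] == 0.5
    s.Close()
}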
// floatParallelIterator represents an iterator that pulls data in a separate goroutine.
type floatParallelIterator struct {
input FloatIterator
ch chan floatPointError
once sync.Once
closing chan struct{}
wg sync.WaitGroup
}
// newFloatParallelIterator returns a new instance of floatParallelIterator.
func newFloatParallelIterator(input FloatIterator) *floatParallelIterator {
itr := &floatParallelIterator{
input: input,
ch: make(chan floatPointError, 256),
closing: make(chan struct{}),
}
itr.wg.Add(1)
go itr.monitor()
return itr
}
// Stats returns stats from the underlying iterator.
func (itr *floatParallelIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *floatParallelIterator) Close() error {
itr.once.Do(func() { close(itr.closing) })
itr.wg.Wait()
return itr.input.Close()
}
// Next returns the next point from the iterator.
func (itr *floatParallelIterator) Next() (*FloatPoint, error) {
v, ok := <-itr.ch
if !ok {
return nil, io.EOF
}
return v.point, v.err
}
// monitor runs in a separate goroutine and actively pulls the next point.
func (itr *floatParallelIterator) monitor() {
defer close(itr.ch)
defer itr.wg.Done()
for {
// Read next point.
p, err := itr.input.Next()
if p != nil {
p = p.Clone()
}
select {
case <-itr.closing:
return
case itr.ch <- floatPointError{point: p, err: err}:
}
}
}
type floatPointError struct {
point *FloatPoint
err error
}
// floatLimitIterator represents an iterator that limits points per group.
type floatLimitIterator struct {
input FloatIterator
opt IteratorOptions
n int
prev struct {
name string
tags Tags
}
}
// newFloatLimitIterator returns a new instance of floatLimitIterator.
func newFloatLimitIterator(input FloatIterator, opt IteratorOptions) *floatLimitIterator {
return &floatLimitIterator{
input: input,
opt: opt,
}
}
// Stats returns stats from the underlying iterator.
func (itr *floatLimitIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *floatLimitIterator) Close() error { return itr.input.Close() }
// Next returns the next point from the iterator.
func (itr *floatLimitIterator) Next() (*FloatPoint, error) {
for {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Reset the counter if a new series (name/tag set) is encountered.
if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {
itr.prev.name = p.Name
itr.prev.tags = p.Tags
itr.n = 0
}
// Increment counter.
itr.n++
// Skip this point if we have not yet passed the offset.
if itr.n <= itr.opt.Offset {
continue
}
// Skip this point if we are beyond the limit.
if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {
continue
}
return p, nil
}
}
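// Illustrative sketch (not part of the generated file), reusing the
// hypothetical exampleFloatSliceIterator defined above: with Offset 1 and
// Limit 1 the limit iterator skips the first point of the series, returns the
// second, and drops the rest.
func exampleLimitOffset() {
    input := &exampleFloatSliceIterator{points: []FloatPoint{
        {Name: "cpu", Time: 1, Value: 10},
        {Name: "cpu", Time: 2, Value: 20},
        {Name: "cpu", Time: 3, Value: 30},
    }}
    itr := newFloatLimitIterator(input, IteratorOptions{Limit: 1, Offset: 1})
    defer itr.Close()
    p, _ := itr.Next()    // the first point is skipped; p.Value == 20
    rest, _ := itr.Next() // the limit is exhausted; rest == nil
    _, _ = p, rest
}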
type floatFillIterator struct {
input *bufFloatIterator
prev FloatPoint
startTime int64
endTime int64
auxFields []interface{}
init bool
opt IteratorOptions
window struct {
name string
tags Tags
time int64
offset int64
}
}
func newFloatFillIterator(input FloatIterator, expr influxql.Expr, opt IteratorOptions) *floatFillIterator {
if opt.Fill == influxql.NullFill {
if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" {
opt.Fill = influxql.NumberFill
opt.FillValue = float64(0)
}
}
var startTime, endTime int64
if opt.Ascending {
startTime, _ = opt.Window(opt.StartTime)
endTime, _ = opt.Window(opt.EndTime)
} else {
startTime, _ = opt.Window(opt.EndTime)
endTime, _ = opt.Window(opt.StartTime)
}
var auxFields []interface{}
if len(opt.Aux) > 0 {
auxFields = make([]interface{}, len(opt.Aux))
}
return &floatFillIterator{
input: newBufFloatIterator(input),
prev: FloatPoint{Nil: true},
startTime: startTime,
endTime: endTime,
auxFields: auxFields,
opt: opt,
}
}
func (itr *floatFillIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *floatFillIterator) Close() error { return itr.input.Close() }
func (itr *floatFillIterator) Next() (*FloatPoint, error) {
if !itr.init {
p, err := itr.input.peek()
if p == nil || err != nil {
return nil, err
}
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.startTime == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.init = true
}
p, err := itr.input.Next()
if err != nil {
return nil, err
}
// Check if the next point is outside of our window or is nil.
if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {
// If we are inside of an interval, unread the point and continue below to
// construct a new point.
if itr.opt.Ascending && itr.window.time <= itr.endTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
} else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
}
// We are *not* in a current interval. If there is no next point,
// we are at the end of all intervals.
if p == nil {
return nil, nil
}
// Set the new interval.
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.window.time == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.prev = FloatPoint{Nil: true}
}
// Check if the point is our next expected point.
CONSTRUCT:
if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {
if p != nil {
itr.input.unread(p)
}
p = &FloatPoint{
Name: itr.window.name,
Tags: itr.window.tags,
Time: itr.window.time,
Aux: itr.auxFields,
}
switch itr.opt.Fill {
case influxql.LinearFill:
if !itr.prev.Nil {
next, err := itr.input.peek()
if err != nil {
return nil, err
} else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() {
interval := int64(itr.opt.Interval.Duration)
start := itr.window.time / interval
p.Value = linearFloat(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value)
} else {
p.Nil = true
}
} else {
p.Nil = true
}
case influxql.NullFill:
p.Nil = true
case influxql.NumberFill:
p.Value, _ = castToFloat(itr.opt.FillValue)
case influxql.PreviousFill:
if !itr.prev.Nil {
p.Value = itr.prev.Value
p.Nil = itr.prev.Nil
} else {
p.Nil = true
}
}
} else {
itr.prev = *p
}
// Advance the expected time. Do not advance to a new window here
// as there may be lingering points with the same timestamp in the previous
// window.
if itr.opt.Ascending {
itr.window.time += int64(itr.opt.Interval.Duration)
} else {
itr.window.time -= int64(itr.opt.Interval.Duration)
}
// Check to see if we have passed over an offset change and adjust the time
// to account for this new offset.
if itr.opt.Location != nil {
if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {
diff := itr.window.offset - offset
if abs(diff) < int64(itr.opt.Interval.Duration) {
itr.window.time += diff
}
itr.window.offset = offset
}
}
return p, nil
}
// floatIntervalIterator represents a float implementation of IntervalIterator.
type floatIntervalIterator struct {
input FloatIterator
opt IteratorOptions
}
func newFloatIntervalIterator(input FloatIterator, opt IteratorOptions) *floatIntervalIterator {
return &floatIntervalIterator{input: input, opt: opt}
}
func (itr *floatIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *floatIntervalIterator) Close() error { return itr.input.Close() }
func (itr *floatIntervalIterator) Next() (*FloatPoint, error) {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
p.Time, _ = itr.opt.Window(p.Time)
// If the windowed time is the minimum allowable time, reset it to zero so we
// don't break the default time returned for aggregate queries without times.
if p.Time == influxql.MinTime {
p.Time = 0
}
return p, nil
}
// floatInterruptIterator represents a float implementation of InterruptIterator.
type floatInterruptIterator struct {
input FloatIterator
closing <-chan struct{}
count int
}
func newFloatInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatInterruptIterator {
return &floatInterruptIterator{input: input, closing: closing}
}
func (itr *floatInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *floatInterruptIterator) Close() error { return itr.input.Close() }
func (itr *floatInterruptIterator) Next() (*FloatPoint, error) {
// Only check whether the channel has been closed every 256 points. The check
// fires when the low byte of the counter rolls over (count&0xFF == 0xFF), so
// at most 255 points are emitted between checks before an interrupt is honored.
if itr.count&0xFF == 0xFF {
select {
case <-itr.closing:
return nil, itr.Close()
default:
// Reset iterator count to zero and fall through to emit the next point.
itr.count = 0
}
}
// Increment the counter for every point read.
itr.count++
return itr.input.Next()
}
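// Illustrative sketch (not part of the generated file), reusing the
// hypothetical exampleFloatSliceIterator defined above: because the closing
// channel is only checked when the counter's low byte rolls over, an iterator
// that is interrupted before the first read still emits up to 255 points
// before it stops.
func exampleInterrupt() {
    points := make([]FloatPoint, 300)
    for i := range points {
        points[i] = FloatPoint{Name: "cpu", Time: int64(i), Value: float64(i)}
    }
    closing := make(chan struct{})
    close(closing) // interrupt before any point is read
    itr := newFloatInterruptIterator(&exampleFloatSliceIterator{points: points}, closing)
    n := 0
    for {
        p, err := itr.Next()
        if err != nil || p == nil {
            break
        }
        n++
    }
    _ = n // 255: the interrupt fires on the 256th call to Next
}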
// floatCloseInterruptIterator represents a float implementation of CloseInterruptIterator.
type floatCloseInterruptIterator struct {
input FloatIterator
closing <-chan struct{}
done chan struct{}
once sync.Once
}
func newFloatCloseInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatCloseInterruptIterator {
itr := &floatCloseInterruptIterator{
input: input,
closing: closing,
done: make(chan struct{}),
}
go itr.monitor()
return itr
}
func (itr *floatCloseInterruptIterator) monitor() {
select {
case <-itr.closing:
itr.Close()
case <-itr.done:
}
}
func (itr *floatCloseInterruptIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *floatCloseInterruptIterator) Close() error {
itr.once.Do(func() {
close(itr.done)
itr.input.Close()
})
return nil
}
func (itr *floatCloseInterruptIterator) Next() (*FloatPoint, error) {
p, err := itr.input.Next()
if err != nil {
// Check if the iterator was closed.
select {
case <-itr.done:
return nil, nil
default:
return nil, err
}
}
return p, nil
}
// floatReduceFloatIterator executes a reducer for every interval and buffers the result.
type floatReduceFloatIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
points []FloatPoint
keepTags bool
}
func newFloatReduceFloatIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, FloatPointEmitter)) *floatReduceFloatIterator {
return &floatReduceFloatIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *floatReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatReduceFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the current interval.
func (itr *floatReduceFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// floatReduceFloatPoint stores the reduced data for a name/tag combination.
type floatReduceFloatPoint struct {
Name string
Tags Tags
Aggregator FloatPointAggregator
Emitter FloatPointEmitter
}
// reduce aggregates every point in the next window and returns the points
// emitted for each name/tag combination.
func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*floatReduceFloatPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]FloatPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = floatPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
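// Illustrative sketch (not part of the generated file), reusing the
// hypothetical exampleFloatSliceIterator defined above. exampleSumReducer is
// a hypothetical aggregator/emitter pair; with no GROUP BY interval the whole
// time range is assumed to form a single window, so the reduce iterator emits
// one summed point per series.
type exampleSumReducer struct {
    sum float64
}

// AggregateFloat accumulates a running sum of the point values.
func (r *exampleSumReducer) AggregateFloat(p *FloatPoint) { r.sum += p.Value }

// Emit returns the running sum as a single point with no time set, so the
// reduce iterator assigns the window start time.
func (r *exampleSumReducer) Emit() []FloatPoint {
    return []FloatPoint{{Time: ZeroTime, Value: r.sum}}
}

func exampleReduceSum() {
    input := &exampleFloatSliceIterator{points: []FloatPoint{
        {Name: "cpu", Time: 1, Value: 1},
        {Name: "cpu", Time: 2, Value: 2},
    }}
    opt := IteratorOptions{Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime}
    itr := newFloatReduceFloatIterator(input, opt, func() (FloatPointAggregator, FloatPointEmitter) {
        r := &exampleSumReducer{}
        return r, r
    })
    defer itr.Close()
    p, _ := itr.Next() // single window for the series: p.Value == 3
    _ = p
}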
// floatStreamFloatIterator streams inputs into the iterator and emits points gradually.
type floatStreamFloatIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
m map[string]*floatReduceFloatPoint
points []FloatPoint
}
// newFloatStreamFloatIterator returns a new instance of floatStreamFloatIterator.
func newFloatStreamFloatIterator(input FloatIterator, createFn func() (FloatPointAggregator, FloatPointEmitter), opt IteratorOptions) *floatStreamFloatIterator {
return &floatStreamFloatIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*floatReduceFloatPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *floatStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatStreamFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *floatStreamFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *floatStreamFloatIterator) reduce() ([]FloatPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []FloatPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
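// Illustrative sketch (not part of the generated file), reusing the
// hypothetical exampleFloatSliceIterator and exampleSumReducer defined above:
// unlike the reduce iterator, the stream iterator calls Emit after every
// aggregated point, so the running sum is emitted incrementally.
func exampleStreamSum() {
    input := &exampleFloatSliceIterator{points: []FloatPoint{
        {Name: "cpu", Time: 1, Value: 1},
        {Name: "cpu", Time: 2, Value: 2},
    }}
    itr := newFloatStreamFloatIterator(input, func() (FloatPointAggregator, FloatPointEmitter) {
        r := &exampleSumReducer{}
        return r, r
    }, IteratorOptions{Ascending: true})
    defer itr.Close()
    p1, _ := itr.Next() // running sum after the first point: p1.Value == 1
    p2, _ := itr.Next() // running sum after the second point: p2.Value == 3
    _, _ = p1, p2
}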
// floatReduceIntegerIterator executes a reducer for every interval and buffers the result.
type floatReduceIntegerIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
points []IntegerPoint
keepTags bool
}
func newFloatReduceIntegerIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, IntegerPointEmitter)) *floatReduceIntegerIterator {
return &floatReduceIntegerIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *floatReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatReduceIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the current interval.
func (itr *floatReduceIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// floatReduceIntegerPoint stores the reduced data for a name/tag combination.
type floatReduceIntegerPoint struct {
Name string
Tags Tags
Aggregator FloatPointAggregator
Emitter IntegerPointEmitter
}
// reduce aggregates every point in the next window and returns the points
// emitted for each name/tag combination.
func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*floatReduceIntegerPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]IntegerPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = integerPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// floatStreamIntegerIterator streams inputs into the iterator and emits points gradually.
type floatStreamIntegerIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
m map[string]*floatReduceIntegerPoint
points []IntegerPoint
}
// newFloatStreamIntegerIterator returns a new instance of floatStreamIntegerIterator.
func newFloatStreamIntegerIterator(input FloatIterator, createFn func() (FloatPointAggregator, IntegerPointEmitter), opt IteratorOptions) *floatStreamIntegerIterator {
return &floatStreamIntegerIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*floatReduceIntegerPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *floatStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatStreamIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *floatStreamIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *floatStreamIntegerIterator) reduce() ([]IntegerPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []IntegerPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// floatReduceUnsignedIterator executes a reducer for every interval and buffers the result.
type floatReduceUnsignedIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
points []UnsignedPoint
keepTags bool
}
func newFloatReduceUnsignedIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, UnsignedPointEmitter)) *floatReduceUnsignedIterator {
return &floatReduceUnsignedIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *floatReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatReduceUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the current interval.
func (itr *floatReduceUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// floatReduceUnsignedPoint stores the reduced data for a name/tag combination.
type floatReduceUnsignedPoint struct {
Name string
Tags Tags
Aggregator FloatPointAggregator
Emitter UnsignedPointEmitter
}
// reduce aggregates every point in the next window and returns the points
// emitted for each name/tag combination.
func (itr *floatReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*floatReduceUnsignedPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]UnsignedPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = unsignedPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// floatStreamUnsignedIterator streams inputs into the iterator and emits points gradually.
type floatStreamUnsignedIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
m map[string]*floatReduceUnsignedPoint
points []UnsignedPoint
}
// newFloatStreamUnsignedIterator returns a new instance of floatStreamUnsignedIterator.
func newFloatStreamUnsignedIterator(input FloatIterator, createFn func() (FloatPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *floatStreamUnsignedIterator {
return &floatStreamUnsignedIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*floatReduceUnsignedPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *floatStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatStreamUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *floatStreamUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *floatStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []UnsignedPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// floatReduceStringIterator executes a reducer for every interval and buffers the result.
type floatReduceStringIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
points []StringPoint
keepTags bool
}
func newFloatReduceStringIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, StringPointEmitter)) *floatReduceStringIterator {
return &floatReduceStringIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *floatReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatReduceStringIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the current interval.
func (itr *floatReduceStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// floatReduceStringPoint stores the reduced data for a name/tag combination.
type floatReduceStringPoint struct {
Name string
Tags Tags
Aggregator FloatPointAggregator
Emitter StringPointEmitter
}
// reduce aggregates every point in the next window and returns the points
// emitted for each name/tag combination.
func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*floatReduceStringPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]StringPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = stringPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// floatStreamStringIterator streams inputs into the iterator and emits points gradually.
type floatStreamStringIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
m map[string]*floatReduceStringPoint
points []StringPoint
}
// newFloatStreamStringIterator returns a new instance of floatStreamStringIterator.
func newFloatStreamStringIterator(input FloatIterator, createFn func() (FloatPointAggregator, StringPointEmitter), opt IteratorOptions) *floatStreamStringIterator {
return &floatStreamStringIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*floatReduceStringPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *floatStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatStreamStringIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *floatStreamStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *floatStreamStringIterator) reduce() ([]StringPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []StringPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// floatReduceBooleanIterator executes a reducer for every interval and buffers the result.
type floatReduceBooleanIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
points []BooleanPoint
keepTags bool
}
func newFloatReduceBooleanIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, BooleanPointEmitter)) *floatReduceBooleanIterator {
return &floatReduceBooleanIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *floatReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatReduceBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the current interval.
func (itr *floatReduceBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// floatReduceBooleanPoint stores the reduced data for a name/tag combination.
type floatReduceBooleanPoint struct {
Name string
Tags Tags
Aggregator FloatPointAggregator
Emitter BooleanPointEmitter
}
// reduce aggregates every point in the next window and returns the points
// emitted for each name/tag combination.
func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*floatReduceBooleanPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]BooleanPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = booleanPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// floatStreamBooleanIterator streams inputs into the iterator and emits points gradually.
type floatStreamBooleanIterator struct {
input *bufFloatIterator
create func() (FloatPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
m map[string]*floatReduceBooleanPoint
points []BooleanPoint
}
// newFloatStreamBooleanIterator returns a new instance of floatStreamBooleanIterator.
func newFloatStreamBooleanIterator(input FloatIterator, createFn func() (FloatPointAggregator, BooleanPointEmitter), opt IteratorOptions) *floatStreamBooleanIterator {
return &floatStreamBooleanIterator{
input: newBufFloatIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*floatReduceBooleanPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *floatStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatStreamBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *floatStreamBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *floatStreamBooleanIterator) reduce() ([]BooleanPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []BooleanPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &floatReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateFloat(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
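// Illustration: the stream variant keeps one aggregator/emitter pair per
// "name\x00tagID" key and attempts to emit after every aggregated point, so
// results appear as soon as the pair produces them rather than at window
// boundaries. A minimal, hypothetical reducer (not part of this package) that
// satisfies both interfaces:
//
//	type aboveThreshold struct{ out []BooleanPoint }
//
//	func (a *aboveThreshold) AggregateFloat(p *FloatPoint) {
//		a.out = append(a.out, BooleanPoint{Time: p.Time, Value: p.Value > 10})
//	}
//
//	func (a *aboveThreshold) Emit() []BooleanPoint {
//		pts := a.out
//		a.out = nil
//		return pts
//	}
//
//	itr := newFloatStreamBooleanIterator(input, func() (FloatPointAggregator, BooleanPointEmitter) {
//		a := &aboveThreshold{}
//		return a, a
//	}, opt)
//
// where input and opt are the FloatIterator and IteratorOptions passed to the
// surrounding query.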
// floatDedupeIterator only outputs unique points.
// This differs from the DistinctIterator in that it compares all aux fields too.
// This iterator is relatively inefficient and should only be used on small
// datasets such as meta query results.
type floatDedupeIterator struct {
input FloatIterator
m map[string]struct{} // lookup of points already sent
}
type floatIteratorMapper struct {
cur Cursor
row Row
driver IteratorMap // which iterator to use for the primary value, can be nil
fields []IteratorMap // which iterator to use for an aux field
point FloatPoint
}
func newFloatIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *floatIteratorMapper {
return &floatIteratorMapper{
cur: cur,
driver: driver,
fields: fields,
point: FloatPoint{
Aux: make([]interface{}, len(fields)),
},
}
}
func (itr *floatIteratorMapper) Next() (*FloatPoint, error) {
if !itr.cur.Scan(&itr.row) {
if err := itr.cur.Err(); err != nil {
return nil, err
}
return nil, nil
}
itr.point.Time = itr.row.Time
itr.point.Name = itr.row.Series.Name
itr.point.Tags = itr.row.Series.Tags
if itr.driver != nil {
if v := itr.driver.Value(&itr.row); v != nil {
if v, ok := castToFloat(v); ok {
itr.point.Value = v
itr.point.Nil = false
} else {
itr.point.Value = 0
itr.point.Nil = true
}
} else {
itr.point.Value = 0
itr.point.Nil = true
}
}
for i, f := range itr.fields {
itr.point.Aux[i] = f.Value(&itr.row)
}
return &itr.point, nil
}
func (itr *floatIteratorMapper) Stats() IteratorStats {
return itr.cur.Stats()
}
func (itr *floatIteratorMapper) Close() error {
return itr.cur.Close()
}
type floatFilterIterator struct {
input FloatIterator
cond influxql.Expr
opt IteratorOptions
m map[string]interface{}
}
func newFloatFilterIterator(input FloatIterator, cond influxql.Expr, opt IteratorOptions) FloatIterator {
// Strip out time conditions from the WHERE clause.
// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.
n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node {
switch n := n.(type) {
case *influxql.BinaryExpr:
if n.LHS.String() == "time" {
return &influxql.BooleanLiteral{Val: true}
}
}
return n
})
cond, _ = n.(influxql.Expr)
if cond == nil {
return input
} else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val {
return input
}
return &floatFilterIterator{
input: input,
cond: cond,
opt: opt,
m: make(map[string]interface{}),
}
}
func (itr *floatFilterIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *floatFilterIterator) Close() error { return itr.input.Close() }
func (itr *floatFilterIterator) Next() (*FloatPoint, error) {
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
}
for i, ref := range itr.opt.Aux {
itr.m[ref.Val] = p.Aux[i]
}
for k, v := range p.Tags.KeyValues() {
itr.m[k] = v
}
if !influxql.EvalBool(itr.cond, itr.m) {
continue
}
return p, nil
}
}
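// Illustration of the filter evaluation: with a condition parsed from a
// hypothetical fragment `region = 'us-west' AND status > 0` and opt.Aux
// containing the status field, each point is evaluated against a map such as
//
//	m := map[string]interface{}{
//		"status": p.Aux[0],   // from the aux fields listed in opt.Aux
//		"region": "us-west",  // from p.Tags.KeyValues()
//	}
//
// and is emitted only when influxql.EvalBool(itr.cond, m) returns true. Time
// comparisons never reach this point because newFloatFilterIterator rewrites
// them to a true BooleanLiteral.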
type floatTagSubsetIterator struct {
input FloatIterator
point FloatPoint
lastTags Tags
dimensions []string
}
func newFloatTagSubsetIterator(input FloatIterator, opt IteratorOptions) *floatTagSubsetIterator {
return &floatTagSubsetIterator{
input: input,
dimensions: opt.GetDimensions(),
}
}
func (itr *floatTagSubsetIterator) Next() (*FloatPoint, error) {
p, err := itr.input.Next()
if err != nil {
return nil, err
} else if p == nil {
return nil, nil
}
itr.point.Name = p.Name
if !p.Tags.Equal(itr.lastTags) {
itr.point.Tags = p.Tags.Subset(itr.dimensions)
itr.lastTags = p.Tags
}
itr.point.Time = p.Time
itr.point.Value = p.Value
itr.point.Aux = p.Aux
itr.point.Aggregated = p.Aggregated
itr.point.Nil = p.Nil
return &itr.point, nil
}
func (itr *floatTagSubsetIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *floatTagSubsetIterator) Close() error {
return itr.input.Close()
}
// newFloatDedupeIterator returns a new instance of floatDedupeIterator.
func newFloatDedupeIterator(input FloatIterator) *floatDedupeIterator {
return &floatDedupeIterator{
input: input,
m: make(map[string]struct{}),
}
}
// Stats returns stats from the input iterator.
func (itr *floatDedupeIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *floatDedupeIterator) Close() error { return itr.input.Close() }
// Next returns the next unique point from the input iterator.
func (itr *floatDedupeIterator) Next() (*FloatPoint, error) {
for {
// Read next point.
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Serialize to bytes to store in lookup.
buf, err := proto.Marshal(encodeFloatPoint(p))
if err != nil {
return nil, err
}
// If the point has already been output then move to the next point.
if _, ok := itr.m[string(buf)]; ok {
continue
}
// Otherwise mark it as emitted and return point.
itr.m[string(buf)] = struct{}{}
return p, nil
}
}
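// Illustration of the dedupe key: two points that marshal to the same protobuf
// bytes are duplicates, e.g. the hypothetical pair
//
//	p1 := &FloatPoint{Name: "cpu", Time: 10, Value: 1.5}
//	p2 := &FloatPoint{Name: "cpu", Time: 10, Value: 1.5}
//
// yields only p1, while a point differing in any encoded field (name, tags,
// time, value, or aux values) is treated as unique. The lookup map grows by
// one entry per unique point, which is why this iterator is reserved for small
// result sets such as meta query results.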
// floatReaderIterator represents an iterator that streams from a reader.
type floatReaderIterator struct {
r io.Reader
dec *FloatPointDecoder
}
// newFloatReaderIterator returns a new instance of floatReaderIterator.
func newFloatReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *floatReaderIterator {
dec := NewFloatPointDecoder(ctx, r)
dec.stats = stats
return &floatReaderIterator{
r: r,
dec: dec,
}
}
// Stats returns stats about points processed.
func (itr *floatReaderIterator) Stats() IteratorStats { return itr.dec.stats }
// Close closes the underlying reader, if applicable.
func (itr *floatReaderIterator) Close() error {
if r, ok := itr.r.(io.ReadCloser); ok {
return r.Close()
}
return nil
}
// Next returns the next point from the iterator.
func (itr *floatReaderIterator) Next() (*FloatPoint, error) {
// OPTIMIZE(benbjohnson): Reuse point on iterator.
// Unmarshal next point.
p := &FloatPoint{}
if err := itr.dec.DecodeFloatPoint(p); err == io.EOF {
return nil, nil
} else if err != nil {
return nil, err
}
return p, nil
}
// IntegerIterator represents a stream of integer points.
type IntegerIterator interface {
Iterator
Next() (*IntegerPoint, error)
}
// newIntegerIterators converts a slice of Iterator to a slice of IntegerIterator.
// Drops and closes any iterator in itrs that is not an IntegerIterator and cannot
// be cast to an IntegerIterator.
func newIntegerIterators(itrs []Iterator) []IntegerIterator {
a := make([]IntegerIterator, 0, len(itrs))
for _, itr := range itrs {
switch itr := itr.(type) {
case IntegerIterator:
a = append(a, itr)
default:
itr.Close()
}
}
return a
}
// bufIntegerIterator represents a buffered IntegerIterator.
type bufIntegerIterator struct {
itr IntegerIterator
buf *IntegerPoint
}
// newBufIntegerIterator returns a buffered IntegerIterator.
func newBufIntegerIterator(itr IntegerIterator) *bufIntegerIterator {
return &bufIntegerIterator{itr: itr}
}
// Stats returns statistics from the input iterator.
func (itr *bufIntegerIterator) Stats() IteratorStats { return itr.itr.Stats() }
// Close closes the underlying iterator.
func (itr *bufIntegerIterator) Close() error { return itr.itr.Close() }
// peek returns the next point without removing it from the iterator.
func (itr *bufIntegerIterator) peek() (*IntegerPoint, error) {
p, err := itr.Next()
if err != nil {
return nil, err
}
itr.unread(p)
return p, nil
}
// peekTime returns the time of the next point.
// Returns zero time if no more points available.
func (itr *bufIntegerIterator) peekTime() (int64, error) {
p, err := itr.peek()
if p == nil || err != nil {
return ZeroTime, err
}
return p.Time, nil
}
// Next returns the current buffer, if it exists, or calls the underlying iterator.
func (itr *bufIntegerIterator) Next() (*IntegerPoint, error) {
buf := itr.buf
if buf != nil {
itr.buf = nil
return buf, nil
}
return itr.itr.Next()
}
// NextInWindow returns the next value if it is between [startTime, endTime).
// If the next value is outside the range then it is moved to the buffer.
func (itr *bufIntegerIterator) NextInWindow(startTime, endTime int64) (*IntegerPoint, error) {
v, err := itr.Next()
if v == nil || err != nil {
return nil, err
} else if t := v.Time; t >= endTime || t < startTime {
itr.unread(v)
return nil, nil
}
return v, nil
}
// unread stores v in the buffer. It is returned on the next call to Next().
func (itr *bufIntegerIterator) unread(v *IntegerPoint) { itr.buf = v }
// integerMergeIterator represents an iterator that combines multiple integer iterators.
type integerMergeIterator struct {
inputs []IntegerIterator
heap *integerMergeHeap
init bool
closed bool
mu sync.RWMutex
// Current iterator and window.
curr *integerMergeHeapItem
window struct {
name string
tags string
startTime int64
endTime int64
}
}
// newIntegerMergeIterator returns a new instance of integerMergeIterator.
func newIntegerMergeIterator(inputs []IntegerIterator, opt IteratorOptions) *integerMergeIterator {
itr := &integerMergeIterator{
inputs: inputs,
heap: &integerMergeHeap{
items: make([]*integerMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Wrap in buffer, ignore any inputs without any more points.
bufInput := newBufIntegerIterator(input)
// Append to the heap.
itr.heap.items = append(itr.heap.items, &integerMergeHeapItem{itr: bufInput})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *integerMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *integerMergeIterator) Close() error {
itr.mu.Lock()
defer itr.mu.Unlock()
for _, input := range itr.inputs {
input.Close()
}
itr.curr = nil
itr.inputs = nil
itr.heap.items = nil
itr.closed = true
return nil
}
// Next returns the next point from the iterator.
func (itr *integerMergeIterator) Next() (*IntegerPoint, error) {
itr.mu.RLock()
defer itr.mu.RUnlock()
if itr.closed {
return nil, nil
}
// Initialize the heap lazily on the first call to this iterator so that
// iterator initialization done through the Select() call returns quickly.
// Queries can only be interrupted after the Select() call completes, so any
// work done during iterator creation cannot be interrupted. Deferring the
// heap initialization to here allows it to be interrupted.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*integerMergeHeapItem, 0, len(items))
for _, item := range items {
if p, err := item.itr.peek(); err != nil {
return nil, err
} else if p == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
for {
// Retrieve the next iterator if we don't have one.
if itr.curr == nil {
if len(itr.heap.items) == 0 {
return nil, nil
}
itr.curr = heap.Pop(itr.heap).(*integerMergeHeapItem)
// Read point and set current window.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
tags := p.Tags.Subset(itr.heap.opt.Dimensions)
itr.window.name, itr.window.tags = p.Name, tags.ID()
itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)
return p, nil
}
// Read the next point from the current iterator.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
// If there are no more points then remove iterator from heap and find next.
if p == nil {
itr.curr = nil
continue
}
// Check if the point is inside of our current window.
inWindow := true
if window := itr.window; window.name != p.Name {
inWindow = false
} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {
inWindow = false
} else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime {
inWindow = false
} else if !opt.Ascending && p.Time < window.startTime {
inWindow = false
}
// If it's outside our window then push iterator back on the heap and find new iterator.
if !inWindow {
itr.curr.itr.unread(p)
heap.Push(itr.heap, itr.curr)
itr.curr = nil
continue
}
return p, nil
}
}
// integerMergeHeap represents a heap of integerMergeHeapItems.
// Items are sorted by name/tags and then by the start of their next window.
type integerMergeHeap struct {
opt IteratorOptions
items []*integerMergeHeapItem
}
func (h *integerMergeHeap) Len() int { return len(h.items) }
func (h *integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *integerMergeHeap) Less(i, j int) bool {
x, err := h.items[i].itr.peek()
if err != nil {
return true
}
y, err := h.items[j].itr.peek()
if err != nil {
return false
}
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() < yTags.ID()
}
} else {
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() > yTags.ID()
}
}
xt, _ := h.opt.Window(x.Time)
yt, _ := h.opt.Window(y.Time)
if h.opt.Ascending {
return xt < yt
}
return xt > yt
}
func (h *integerMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*integerMergeHeapItem))
}
func (h *integerMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
type integerMergeHeapItem struct {
itr *bufIntegerIterator
}
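// Illustration of the heap ordering: Less compares the peeked points by
// measurement name, then by the tag subset for opt.Dimensions, and only then
// by the start of the window containing their timestamps. With ascending
// output and a hypothetical window size of 10, a peeked point (cpu, host=a,
// t=25) is popped before (cpu, host=b, t=5) because tags are compared before
// time; only when name and tags match does the earlier window win. Within one
// name/tag/window group, points are returned in input order, not re-sorted.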
// integerSortedMergeIterator is an iterator that sorts and merges multiple iterators into one.
type integerSortedMergeIterator struct {
inputs []IntegerIterator
heap *integerSortedMergeHeap
init bool
}
// newIntegerSortedMergeIterator returns an instance of integerSortedMergeIterator.
func newIntegerSortedMergeIterator(inputs []IntegerIterator, opt IteratorOptions) Iterator {
itr := &integerSortedMergeIterator{
inputs: inputs,
heap: &integerSortedMergeHeap{
items: make([]*integerSortedMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Append to the heap.
itr.heap.items = append(itr.heap.items, &integerSortedMergeHeapItem{itr: input})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *integerSortedMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *integerSortedMergeIterator) Close() error {
for _, input := range itr.inputs {
input.Close()
}
return nil
}
// Next returns the next point from the iterator.
func (itr *integerSortedMergeIterator) Next() (*IntegerPoint, error) { return itr.pop() }
// pop returns the next point from the heap.
// Reads the next point from the popped item's cursor and pushes the item back onto the heap if more points remain.
func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) {
// Initialize the heap. See the MergeIterator to see why this has to be done lazily.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*integerSortedMergeHeapItem, 0, len(items))
for _, item := range items {
var err error
if item.point, err = item.itr.Next(); err != nil {
return nil, err
} else if item.point == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
if len(itr.heap.items) == 0 {
return nil, nil
}
// Read the next item from the heap.
item := heap.Pop(itr.heap).(*integerSortedMergeHeapItem)
if item.err != nil {
return nil, item.err
} else if item.point == nil {
return nil, nil
}
// Copy the point for return.
p := item.point.Clone()
// Read the next item from the cursor. Push back to heap if one exists.
if item.point, item.err = item.itr.Next(); item.point != nil {
heap.Push(itr.heap, item)
}
return p, nil
}
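// Illustration: each pop clones the smallest buffered point, reads the next
// point from the same input, and pushes the item back onto the heap only if
// that input has more data. With two hypothetical ascending inputs, A yielding
// t=1 and t=3 and B yielding t=2, the output is t=1, t=2, t=3: a fully
// time-ordered interleaving within each name/tag group, unlike the plain merge
// iterator, which only groups output by name, tags, and window.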
// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type integerSortedMergeHeap struct {
opt IteratorOptions
items []*integerSortedMergeHeapItem
}
func (h *integerSortedMergeHeap) Len() int { return len(h.items) }
func (h *integerSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *integerSortedMergeHeap) Less(i, j int) bool {
x, y := h.items[i].point, h.items[j].point
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() < yTags.ID()
}
if x.Time != y.Time {
return x.Time < y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 < v2
}
}
return false // Times and/or Aux fields are equal.
}
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() > yTags.ID()
}
if x.Time != y.Time {
return x.Time > y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 > v2
}
}
return false // Times and/or Aux fields are equal.
}
func (h *integerSortedMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*integerSortedMergeHeapItem))
}
func (h *integerSortedMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
type integerSortedMergeHeapItem struct {
point *IntegerPoint
err error
itr IntegerIterator
}
// integerIteratorScanner scans the results of a IntegerIterator into a map.
type integerIteratorScanner struct {
input *bufIntegerIterator
err error
keys []influxql.VarRef
defaultValue interface{}
}
// newIntegerIteratorScanner creates a new IteratorScanner.
func newIntegerIteratorScanner(input IntegerIterator, keys []influxql.VarRef, defaultValue interface{}) *integerIteratorScanner {
return &integerIteratorScanner{
input: newBufIntegerIterator(input),
keys: keys,
defaultValue: defaultValue,
}
}
func (s *integerIteratorScanner) Peek() (int64, string, Tags) {
if s.err != nil {
return ZeroTime, "", Tags{}
}
p, err := s.input.peek()
if err != nil {
s.err = err
return ZeroTime, "", Tags{}
} else if p == nil {
return ZeroTime, "", Tags{}
}
return p.Time, p.Name, p.Tags
}
func (s *integerIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) {
if s.err != nil {
return
}
p, err := s.input.Next()
if err != nil {
s.err = err
return
} else if p == nil {
s.useDefaults(m)
return
} else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) {
s.useDefaults(m)
s.input.unread(p)
return
}
if k := s.keys[0]; k.Val != "" {
if p.Nil {
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
} else {
m[k.Val] = p.Value
}
}
for i, v := range p.Aux {
k := s.keys[i+1]
switch v.(type) {
case float64, int64, uint64, string, bool:
m[k.Val] = v
default:
// Insert the fill value if one was specified.
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
}
}
func (s *integerIteratorScanner) useDefaults(m map[string]interface{}) {
if s.defaultValue == SkipDefault {
return
}
for _, k := range s.keys {
if k.Val == "" {
continue
}
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
func (s *integerIteratorScanner) Stats() IteratorStats { return s.input.Stats() }
func (s *integerIteratorScanner) Err() error { return s.err }
func (s *integerIteratorScanner) Close() error { return s.input.Close() }
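// Illustration of ScanAt: the scanner only consumes its buffered point when it
// matches the requested timestamp, name, and tags; otherwise the point is
// unread and defaults are substituted. With hypothetical keys
//
//	s.keys = []influxql.VarRef{{Val: "value"}, {Val: "host"}}
//
// a matching point fills m["value"] from p.Value and m["host"] from p.Aux[0]
// (for supported aux types), while a missing or mismatched point fills both
// from defaultValue, unless the default is SkipDefault, in which case the map
// is left untouched.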
// integerParallelIterator represents an iterator that pulls data in a separate goroutine.
type integerParallelIterator struct {
input IntegerIterator
ch chan integerPointError
once sync.Once
closing chan struct{}
wg sync.WaitGroup
}
// newIntegerParallelIterator returns a new instance of integerParallelIterator.
func newIntegerParallelIterator(input IntegerIterator) *integerParallelIterator {
itr := &integerParallelIterator{
input: input,
ch: make(chan integerPointError, 256),
closing: make(chan struct{}),
}
itr.wg.Add(1)
go itr.monitor()
return itr
}
// Stats returns stats from the underlying iterator.
func (itr *integerParallelIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *integerParallelIterator) Close() error {
itr.once.Do(func() { close(itr.closing) })
itr.wg.Wait()
return itr.input.Close()
}
// Next returns the next point from the iterator.
func (itr *integerParallelIterator) Next() (*IntegerPoint, error) {
v, ok := <-itr.ch
if !ok {
return nil, io.EOF
}
return v.point, v.err
}
// monitor runs in a separate goroutine and actively pulls the next point.
func (itr *integerParallelIterator) monitor() {
defer close(itr.ch)
defer itr.wg.Done()
for {
// Read next point.
p, err := itr.input.Next()
if p != nil {
p = p.Clone()
}
select {
case <-itr.closing:
return
case itr.ch <- integerPointError{point: p, err: err}:
}
}
}
type integerPointError struct {
point *IntegerPoint
err error
}
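// Illustration of the parallel iterator: monitor runs in its own goroutine and
// stays ahead of the consumer through a buffered channel of 256 points. A
// minimal usage sketch (input is any IntegerIterator; handling elided):
//
//	itr := newIntegerParallelIterator(input)
//	for {
//		p, err := itr.Next()
//		if err != nil || p == nil {
//			break // p == nil marks exhaustion; io.EOF appears only after Close closes the channel.
//		}
//		// use p
//	}
//	itr.Close()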
// integerLimitIterator represents an iterator that limits points per group.
type integerLimitIterator struct {
input IntegerIterator
opt IteratorOptions
n int
prev struct {
name string
tags Tags
}
}
// newIntegerLimitIterator returns a new instance of integerLimitIterator.
func newIntegerLimitIterator(input IntegerIterator, opt IteratorOptions) *integerLimitIterator {
return &integerLimitIterator{
input: input,
opt: opt,
}
}
// Stats returns stats from the underlying iterator.
func (itr *integerLimitIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *integerLimitIterator) Close() error { return itr.input.Close() }
// Next returns the next point from the iterator.
func (itr *integerLimitIterator) Next() (*IntegerPoint, error) {
for {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Reset the counter if a new series (name/tag set) is encountered.
if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {
itr.prev.name = p.Name
itr.prev.tags = p.Tags
itr.n = 0
}
// Increment counter.
itr.n++
// Read next point if not beyond the offset.
if itr.n <= itr.opt.Offset {
continue
}
// Read next point if we're beyond the limit.
if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {
continue
}
return p, nil
}
}
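// Illustration: the counter is kept per series (name plus tag set) and the
// offset is applied before the limit. With hypothetical options Offset=1 and
// Limit=2, a series emitting points at t=1..t=5 passes only the points at t=2
// and t=3; the counter resets when a point with a different name or tag set
// arrives, so every series contributes at most its Offset+1 through
// Offset+Limit points.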
type integerFillIterator struct {
input *bufIntegerIterator
prev IntegerPoint
startTime int64
endTime int64
auxFields []interface{}
init bool
opt IteratorOptions
window struct {
name string
tags Tags
time int64
offset int64
}
}
func newIntegerFillIterator(input IntegerIterator, expr influxql.Expr, opt IteratorOptions) *integerFillIterator {
if opt.Fill == influxql.NullFill {
if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" {
opt.Fill = influxql.NumberFill
opt.FillValue = int64(0)
}
}
var startTime, endTime int64
if opt.Ascending {
startTime, _ = opt.Window(opt.StartTime)
endTime, _ = opt.Window(opt.EndTime)
} else {
startTime, _ = opt.Window(opt.EndTime)
endTime, _ = opt.Window(opt.StartTime)
}
var auxFields []interface{}
if len(opt.Aux) > 0 {
auxFields = make([]interface{}, len(opt.Aux))
}
return &integerFillIterator{
input: newBufIntegerIterator(input),
prev: IntegerPoint{Nil: true},
startTime: startTime,
endTime: endTime,
auxFields: auxFields,
opt: opt,
}
}
func (itr *integerFillIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *integerFillIterator) Close() error { return itr.input.Close() }
func (itr *integerFillIterator) Next() (*IntegerPoint, error) {
if !itr.init {
p, err := itr.input.peek()
if p == nil || err != nil {
return nil, err
}
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.startTime == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.init = true
}
p, err := itr.input.Next()
if err != nil {
return nil, err
}
// Check if the next point is outside of our window or is nil.
if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {
// If we are inside an interval, unread the point and continue below to
// construct a new point.
if itr.opt.Ascending && itr.window.time <= itr.endTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
} else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
}
// We are *not* in a current interval. If there is no next point,
// we are at the end of all intervals.
if p == nil {
return nil, nil
}
// Set the new interval.
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.window.time == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.prev = IntegerPoint{Nil: true}
}
// Check if the point is our next expected point.
CONSTRUCT:
if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {
if p != nil {
itr.input.unread(p)
}
p = &IntegerPoint{
Name: itr.window.name,
Tags: itr.window.tags,
Time: itr.window.time,
Aux: itr.auxFields,
}
switch itr.opt.Fill {
case influxql.LinearFill:
if !itr.prev.Nil {
next, err := itr.input.peek()
if err != nil {
return nil, err
} else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() {
interval := int64(itr.opt.Interval.Duration)
start := itr.window.time / interval
p.Value = linearInteger(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value)
} else {
p.Nil = true
}
} else {
p.Nil = true
}
case influxql.NullFill:
p.Nil = true
case influxql.NumberFill:
p.Value, _ = castToInteger(itr.opt.FillValue)
case influxql.PreviousFill:
if !itr.prev.Nil {
p.Value = itr.prev.Value
p.Nil = itr.prev.Nil
} else {
p.Nil = true
}
}
} else {
itr.prev = *p
}
// Advance the expected time. Do not advance to a new window here
// as there may be lingering points with the same timestamp in the previous
// window.
if itr.opt.Ascending {
itr.window.time += int64(itr.opt.Interval.Duration)
} else {
itr.window.time -= int64(itr.opt.Interval.Duration)
}
// Check to see if we have passed over an offset change and adjust the time
// to account for this new offset.
if itr.opt.Location != nil {
if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {
diff := itr.window.offset - offset
if abs(diff) < int64(itr.opt.Interval.Duration) {
itr.window.time += diff
}
itr.window.offset = offset
}
}
return p, nil
}
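// Illustration of the fill modes: with a hypothetical 10s interval and input
// points only at t=0 and t=30, the windows at t=10 and t=20 are synthesized
// and their value depends on opt.Fill:
//
//	influxql.NumberFill   -> opt.FillValue cast to an integer
//	influxql.PreviousFill -> the last real value (from t=0)
//	influxql.NullFill     -> a nil point
//	influxql.LinearFill   -> linearInteger interpolation between t=0 and t=30
//
// Linear fill only interpolates when both a previous point and an upcoming
// point exist for the same series; otherwise the synthesized point is nil.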
// integerIntervalIterator represents an integer implementation of IntervalIterator.
type integerIntervalIterator struct {
input IntegerIterator
opt IteratorOptions
}
func newIntegerIntervalIterator(input IntegerIterator, opt IteratorOptions) *integerIntervalIterator {
return &integerIntervalIterator{input: input, opt: opt}
}
func (itr *integerIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *integerIntervalIterator) Close() error { return itr.input.Close() }
func (itr *integerIntervalIterator) Next() (*IntegerPoint, error) {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
p.Time, _ = itr.opt.Window(p.Time)
// If we see the minimum allowable time, set the time to zero so we don't
// break the default returned time for aggregate queries without times.
if p.Time == influxql.MinTime {
p.Time = 0
}
return p, nil
}
// integerInterruptIterator represents an integer implementation of InterruptIterator.
type integerInterruptIterator struct {
input IntegerIterator
closing <-chan struct{}
count int
}
func newIntegerInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerInterruptIterator {
return &integerInterruptIterator{input: input, closing: closing}
}
func (itr *integerInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *integerInterruptIterator) Close() error { return itr.input.Close() }
func (itr *integerInterruptIterator) Next() (*IntegerPoint, error) {
// Only check if the channel is closed every N points. The check runs when
// the low byte of the counter reaches 0xFF (roughly every 256 points read),
// which keeps the cost of polling the closing channel low while still
// stopping the iterator shortly after an interrupt.
if itr.count&0xFF == 0xFF {
select {
case <-itr.closing:
return nil, itr.Close()
default:
// Reset iterator count to zero and fall through to emit the next point.
itr.count = 0
}
}
// Increment the counter for every point read.
itr.count++
return itr.input.Next()
}
// integerCloseInterruptIterator represents an integer implementation of CloseInterruptIterator.
type integerCloseInterruptIterator struct {
input IntegerIterator
closing <-chan struct{}
done chan struct{}
once sync.Once
}
func newIntegerCloseInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerCloseInterruptIterator {
itr := &integerCloseInterruptIterator{
input: input,
closing: closing,
done: make(chan struct{}),
}
go itr.monitor()
return itr
}
func (itr *integerCloseInterruptIterator) monitor() {
select {
case <-itr.closing:
itr.Close()
case <-itr.done:
}
}
func (itr *integerCloseInterruptIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *integerCloseInterruptIterator) Close() error {
itr.once.Do(func() {
close(itr.done)
itr.input.Close()
})
return nil
}
func (itr *integerCloseInterruptIterator) Next() (*IntegerPoint, error) {
p, err := itr.input.Next()
if err != nil {
// Check if the iterator was closed.
select {
case <-itr.done:
return nil, nil
default:
return nil, err
}
}
return p, nil
}
// integerReduceFloatIterator executes a reducer for every interval and buffers the result.
type integerReduceFloatIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
points []FloatPoint
keepTags bool
}
func newIntegerReduceFloatIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, FloatPointEmitter)) *integerReduceFloatIterator {
return &integerReduceFloatIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *integerReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerReduceFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next point from the reduced set for the next available interval.
func (itr *integerReduceFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// integerReduceFloatPoint stores the reduced data for a name/tag combination.
type integerReduceFloatPoint struct {
Name string
Tags Tags
Aggregator IntegerPointAggregator
Emitter FloatPointEmitter
}
// reduce runs the aggregator for every point in the next window and returns
// the points produced by the emitter for each name/tag combination.
func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*integerReduceFloatPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]FloatPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = floatPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// integerStreamFloatIterator streams inputs into the iterator and emits points gradually.
type integerStreamFloatIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
m map[string]*integerReduceFloatPoint
points []FloatPoint
}
// newIntegerStreamFloatIterator returns a new instance of integerStreamFloatIterator.
func newIntegerStreamFloatIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, FloatPointEmitter), opt IteratorOptions) *integerStreamFloatIterator {
return &integerStreamFloatIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*integerReduceFloatPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *integerStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerStreamFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *integerStreamFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *integerStreamFloatIterator) reduce() ([]FloatPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []FloatPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// integerReduceIntegerIterator executes a reducer for every interval and buffers the result.
type integerReduceIntegerIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
points []IntegerPoint
keepTags bool
}
func newIntegerReduceIntegerIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, IntegerPointEmitter)) *integerReduceIntegerIterator {
return &integerReduceIntegerIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *integerReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerReduceIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next point from the reduced set for the next available interval.
func (itr *integerReduceIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// integerReduceIntegerPoint stores the reduced data for a name/tag combination.
type integerReduceIntegerPoint struct {
Name string
Tags Tags
Aggregator IntegerPointAggregator
Emitter IntegerPointEmitter
}
// reduce runs the aggregator for every point in the next window and returns
// the points produced by the emitter for each name/tag combination.
func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*integerReduceIntegerPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]IntegerPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = integerPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// integerStreamIntegerIterator streams inputs into the iterator and emits points gradually.
type integerStreamIntegerIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
m map[string]*integerReduceIntegerPoint
points []IntegerPoint
}
// newIntegerStreamIntegerIterator returns a new instance of integerStreamIntegerIterator.
func newIntegerStreamIntegerIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, IntegerPointEmitter), opt IteratorOptions) *integerStreamIntegerIterator {
return &integerStreamIntegerIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*integerReduceIntegerPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *integerStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerStreamIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *integerStreamIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *integerStreamIntegerIterator) reduce() ([]IntegerPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []IntegerPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// integerReduceUnsignedIterator executes a reducer for every interval and buffers the result.
type integerReduceUnsignedIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
points []UnsignedPoint
keepTags bool
}
func newIntegerReduceUnsignedIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, UnsignedPointEmitter)) *integerReduceUnsignedIterator {
return &integerReduceUnsignedIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *integerReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerReduceUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next point from the reduced set for the next available interval.
func (itr *integerReduceUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// integerReduceUnsignedPoint stores the reduced data for a name/tag combination.
type integerReduceUnsignedPoint struct {
Name string
Tags Tags
Aggregator IntegerPointAggregator
Emitter UnsignedPointEmitter
}
// reduce runs the aggregator for every point in the next window and returns
// the points produced by the emitter for each name/tag combination.
func (itr *integerReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*integerReduceUnsignedPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]UnsignedPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = unsignedPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// integerStreamUnsignedIterator streams inputs into the iterator and emits points gradually.
type integerStreamUnsignedIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
m map[string]*integerReduceUnsignedPoint
points []UnsignedPoint
}
// newIntegerStreamUnsignedIterator returns a new instance of integerStreamUnsignedIterator.
func newIntegerStreamUnsignedIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *integerStreamUnsignedIterator {
return &integerStreamUnsignedIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*integerReduceUnsignedPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *integerStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerStreamUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *integerStreamUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *integerStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []UnsignedPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// integerReduceStringIterator executes a reducer for every interval and buffers the result.
type integerReduceStringIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
points []StringPoint
keepTags bool
}
func newIntegerReduceStringIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, StringPointEmitter)) *integerReduceStringIterator {
return &integerReduceStringIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *integerReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerReduceStringIterator) Close() error { return itr.input.Close() }
// Next returns the next point from the reduced set for the next available interval.
func (itr *integerReduceStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// integerReduceStringPoint stores the reduced data for a name/tag combination.
type integerReduceStringPoint struct {
Name string
Tags Tags
Aggregator IntegerPointAggregator
Emitter StringPointEmitter
}
// reduce runs the aggregator for every point in the next window and returns
// the points produced by the emitter for each name/tag combination.
func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*integerReduceStringPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys in reverse of the desired output order. Points are later
// popped off the end of the buffered slice, which restores the correct order
// and keeps the output consistent.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]StringPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = stringPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// integerStreamStringIterator streams inputs into the iterator and emits points gradually.
type integerStreamStringIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
m map[string]*integerReduceStringPoint
points []StringPoint
}
// newIntegerStreamStringIterator returns a new instance of integerStreamStringIterator.
func newIntegerStreamStringIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, StringPointEmitter), opt IteratorOptions) *integerStreamStringIterator {
return &integerStreamStringIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*integerReduceStringPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *integerStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerStreamStringIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *integerStreamStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *integerStreamStringIterator) reduce() ([]StringPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []StringPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// integerReduceBooleanIterator executes a reducer for every interval and buffers the result.
type integerReduceBooleanIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
points []BooleanPoint
keepTags bool
}
func newIntegerReduceBooleanIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, BooleanPointEmitter)) *integerReduceBooleanIterator {
return &integerReduceBooleanIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *integerReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerReduceBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *integerReduceBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// integerReduceBooleanPoint stores the reduced data for a name/tag combination.
type integerReduceBooleanPoint struct {
Name string
Tags Tags
Aggregator IntegerPointAggregator
Emitter BooleanPointEmitter
}
// reduce aggregates every point in the next window with the configured
// aggregator and emits the reduced points for each name/tag combination.
func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*integerReduceBooleanPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys in reverse of the desired output order. Points are later
// popped off the end of the buffered slice, which restores the correct order
// and keeps the output consistent.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]BooleanPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = booleanPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// integerStreamBooleanIterator streams inputs into the iterator and emits points gradually.
type integerStreamBooleanIterator struct {
input *bufIntegerIterator
create func() (IntegerPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
m map[string]*integerReduceBooleanPoint
points []BooleanPoint
}
// newIntegerStreamBooleanIterator returns a new instance of integerStreamBooleanIterator.
func newIntegerStreamBooleanIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, BooleanPointEmitter), opt IteratorOptions) *integerStreamBooleanIterator {
return &integerStreamBooleanIterator{
input: newBufIntegerIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*integerReduceBooleanPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *integerStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerStreamBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *integerStreamBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *integerStreamBooleanIterator) reduce() ([]BooleanPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []BooleanPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &integerReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateInteger(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// integerDedupeIterator only outputs unique points.
// This differs from the DistinctIterator in that it compares all aux fields too.
// This iterator is relatively inefficient and should only be used on small
// datasets such as meta query results.
type integerDedupeIterator struct {
input IntegerIterator
m map[string]struct{} // lookup of points already sent
}
type integerIteratorMapper struct {
cur Cursor
row Row
driver IteratorMap // which iterator to use for the primary value, can be nil
fields []IteratorMap // which iterator to use for an aux field
point IntegerPoint
}
func newIntegerIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *integerIteratorMapper {
return &integerIteratorMapper{
cur: cur,
driver: driver,
fields: fields,
point: IntegerPoint{
Aux: make([]interface{}, len(fields)),
},
}
}
func (itr *integerIteratorMapper) Next() (*IntegerPoint, error) {
if !itr.cur.Scan(&itr.row) {
if err := itr.cur.Err(); err != nil {
return nil, err
}
return nil, nil
}
itr.point.Time = itr.row.Time
itr.point.Name = itr.row.Series.Name
itr.point.Tags = itr.row.Series.Tags
if itr.driver != nil {
if v := itr.driver.Value(&itr.row); v != nil {
if v, ok := castToInteger(v); ok {
itr.point.Value = v
itr.point.Nil = false
} else {
itr.point.Value = 0
itr.point.Nil = true
}
} else {
itr.point.Value = 0
itr.point.Nil = true
}
}
for i, f := range itr.fields {
itr.point.Aux[i] = f.Value(&itr.row)
}
return &itr.point, nil
}
func (itr *integerIteratorMapper) Stats() IteratorStats {
return itr.cur.Stats()
}
func (itr *integerIteratorMapper) Close() error {
return itr.cur.Close()
}
type integerFilterIterator struct {
input IntegerIterator
cond influxql.Expr
opt IteratorOptions
m map[string]interface{}
}
func newIntegerFilterIterator(input IntegerIterator, cond influxql.Expr, opt IteratorOptions) IntegerIterator {
// Strip out time conditions from the WHERE clause.
// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.
n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node {
switch n := n.(type) {
case *influxql.BinaryExpr:
if n.LHS.String() == "time" {
return &influxql.BooleanLiteral{Val: true}
}
}
return n
})
cond, _ = n.(influxql.Expr)
if cond == nil {
return input
} else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val {
return input
}
return &integerFilterIterator{
input: input,
cond: cond,
opt: opt,
m: make(map[string]interface{}),
}
}
func (itr *integerFilterIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *integerFilterIterator) Close() error { return itr.input.Close() }
func (itr *integerFilterIterator) Next() (*IntegerPoint, error) {
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
}
for i, ref := range itr.opt.Aux {
itr.m[ref.Val] = p.Aux[i]
}
for k, v := range p.Tags.KeyValues() {
itr.m[k] = v
}
if !influxql.EvalBool(itr.cond, itr.m) {
continue
}
return p, nil
}
}
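// filterIntegerExample is an illustrative sketch, not part of the generated
// template. It shows how newIntegerFilterIterator drops points whose tags do
// not satisfy a WHERE-style condition; tag values are exposed to the
// expression by key. The condition string and the input iterator are
// assumptions supplied for the example.
func filterIntegerExample(input IntegerIterator) IntegerIterator {
	// Keep only points from the hypothetical series tagged host=server01.
	cond := influxql.MustParseExpr(`host = 'server01'`)
	return newIntegerFilterIterator(input, cond, IteratorOptions{})
}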
type integerTagSubsetIterator struct {
input IntegerIterator
point IntegerPoint
lastTags Tags
dimensions []string
}
func newIntegerTagSubsetIterator(input IntegerIterator, opt IteratorOptions) *integerTagSubsetIterator {
return &integerTagSubsetIterator{
input: input,
dimensions: opt.GetDimensions(),
}
}
func (itr *integerTagSubsetIterator) Next() (*IntegerPoint, error) {
p, err := itr.input.Next()
if err != nil {
return nil, err
} else if p == nil {
return nil, nil
}
itr.point.Name = p.Name
if !p.Tags.Equal(itr.lastTags) {
itr.point.Tags = p.Tags.Subset(itr.dimensions)
itr.lastTags = p.Tags
}
itr.point.Time = p.Time
itr.point.Value = p.Value
itr.point.Aux = p.Aux
itr.point.Aggregated = p.Aggregated
itr.point.Nil = p.Nil
return &itr.point, nil
}
func (itr *integerTagSubsetIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *integerTagSubsetIterator) Close() error {
return itr.input.Close()
}
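// tagSubsetIntegerExample is an illustrative sketch, not part of the generated
// template. It shows how the tag-subset iterator rewrites each point's tags so
// that only the GROUP BY dimensions remain; the "host" dimension is an
// assumption made for the example.
func tagSubsetIntegerExample(input IntegerIterator) *integerTagSubsetIterator {
	return newIntegerTagSubsetIterator(input, IteratorOptions{Dimensions: []string{"host"}})
}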
// newIntegerDedupeIterator returns a new instance of integerDedupeIterator.
func newIntegerDedupeIterator(input IntegerIterator) *integerDedupeIterator {
return &integerDedupeIterator{
input: input,
m: make(map[string]struct{}),
}
}
// Stats returns stats from the input iterator.
func (itr *integerDedupeIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *integerDedupeIterator) Close() error { return itr.input.Close() }
// Next returns the next unique point from the input iterator.
func (itr *integerDedupeIterator) Next() (*IntegerPoint, error) {
for {
// Read next point.
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Serialize to bytes to store in lookup.
buf, err := proto.Marshal(encodeIntegerPoint(p))
if err != nil {
return nil, err
}
// If the point has already been output then move to the next point.
if _, ok := itr.m[string(buf)]; ok {
continue
}
// Otherwise mark it as emitted and return point.
itr.m[string(buf)] = struct{}{}
return p, nil
}
}
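// dedupeIntegerExample is an illustrative sketch, not part of the generated
// template. It drains a dedupe iterator, which suppresses any point whose
// encoded form has already been emitted; the input iterator is assumed to be
// supplied by the caller and should be small (e.g. meta query results).
func dedupeIntegerExample(input IntegerIterator) ([]IntegerPoint, error) {
	itr := newIntegerDedupeIterator(input)
	defer itr.Close()

	var out []IntegerPoint
	for {
		p, err := itr.Next()
		if err != nil {
			return out, err
		} else if p == nil {
			return out, nil
		}
		out = append(out, *p)
	}
}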
// integerReaderIterator represents an iterator that streams from a reader.
type integerReaderIterator struct {
r io.Reader
dec *IntegerPointDecoder
}
// newIntegerReaderIterator returns a new instance of integerReaderIterator.
func newIntegerReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *integerReaderIterator {
dec := NewIntegerPointDecoder(ctx, r)
dec.stats = stats
return &integerReaderIterator{
r: r,
dec: dec,
}
}
// Stats returns stats about points processed.
func (itr *integerReaderIterator) Stats() IteratorStats { return itr.dec.stats }
// Close closes the underlying reader, if applicable.
func (itr *integerReaderIterator) Close() error {
if r, ok := itr.r.(io.ReadCloser); ok {
return r.Close()
}
return nil
}
// Next returns the next point from the iterator.
func (itr *integerReaderIterator) Next() (*IntegerPoint, error) {
// OPTIMIZE(benbjohnson): Reuse point on iterator.
// Unmarshal next point.
p := &IntegerPoint{}
if err := itr.dec.DecodeIntegerPoint(p); err == io.EOF {
return nil, nil
} else if err != nil {
return nil, err
}
return p, nil
}
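// readerIntegerExample is an illustrative sketch, not part of the generated
// template. It decodes a stream of encoded integer points back into memory by
// wrapping an io.Reader; the context, reader, and empty starting stats are
// assumptions supplied for the example.
func readerIntegerExample(ctx context.Context, r io.Reader) ([]IntegerPoint, error) {
	itr := newIntegerReaderIterator(ctx, r, IteratorStats{})
	defer itr.Close()

	var out []IntegerPoint
	for {
		p, err := itr.Next()
		if err != nil {
			return out, err
		} else if p == nil {
			// The decoder reached io.EOF.
			return out, nil
		}
		out = append(out, *p)
	}
}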
// UnsignedIterator represents a stream of unsigned points.
type UnsignedIterator interface {
Iterator
Next() (*UnsignedPoint, error)
}
// newUnsignedIterators converts a slice of Iterator to a slice of UnsignedIterator.
// Drops and closes any iterator in itrs that is not an UnsignedIterator and
// cannot be cast to one.
func newUnsignedIterators(itrs []Iterator) []UnsignedIterator {
a := make([]UnsignedIterator, 0, len(itrs))
for _, itr := range itrs {
switch itr := itr.(type) {
case UnsignedIterator:
a = append(a, itr)
default:
itr.Close()
}
}
return a
}
// bufUnsignedIterator represents a buffered UnsignedIterator.
type bufUnsignedIterator struct {
itr UnsignedIterator
buf *UnsignedPoint
}
// newBufUnsignedIterator returns a buffered UnsignedIterator.
func newBufUnsignedIterator(itr UnsignedIterator) *bufUnsignedIterator {
return &bufUnsignedIterator{itr: itr}
}
// Stats returns statistics from the input iterator.
func (itr *bufUnsignedIterator) Stats() IteratorStats { return itr.itr.Stats() }
// Close closes the underlying iterator.
func (itr *bufUnsignedIterator) Close() error { return itr.itr.Close() }
// peek returns the next point without removing it from the iterator.
func (itr *bufUnsignedIterator) peek() (*UnsignedPoint, error) {
p, err := itr.Next()
if err != nil {
return nil, err
}
itr.unread(p)
return p, nil
}
// peekTime returns the time of the next point.
// Returns zero time if no more points available.
func (itr *bufUnsignedIterator) peekTime() (int64, error) {
p, err := itr.peek()
if p == nil || err != nil {
return ZeroTime, err
}
return p.Time, nil
}
// Next returns the current buffer, if exists, or calls the underlying iterator.
func (itr *bufUnsignedIterator) Next() (*UnsignedPoint, error) {
buf := itr.buf
if buf != nil {
itr.buf = nil
return buf, nil
}
return itr.itr.Next()
}
// NextInWindow returns the next value if it is between [startTime, endTime).
// If the next value is outside the range then it is moved to the buffer.
func (itr *bufUnsignedIterator) NextInWindow(startTime, endTime int64) (*UnsignedPoint, error) {
v, err := itr.Next()
if v == nil || err != nil {
return nil, err
} else if t := v.Time; t >= endTime || t < startTime {
itr.unread(v)
return nil, nil
}
return v, nil
}
// unread sets v to the buffer. It is read on the next call to Next().
func (itr *bufUnsignedIterator) unread(v *UnsignedPoint) { itr.buf = v }
// unsignedMergeIterator represents an iterator that combines multiple unsigned iterators.
type unsignedMergeIterator struct {
inputs []UnsignedIterator
heap *unsignedMergeHeap
init bool
closed bool
mu sync.RWMutex
// Current iterator and window.
curr *unsignedMergeHeapItem
window struct {
name string
tags string
startTime int64
endTime int64
}
}
// newUnsignedMergeIterator returns a new instance of unsignedMergeIterator.
func newUnsignedMergeIterator(inputs []UnsignedIterator, opt IteratorOptions) *unsignedMergeIterator {
itr := &unsignedMergeIterator{
inputs: inputs,
heap: &unsignedMergeHeap{
items: make([]*unsignedMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Wrap in buffer, ignore any inputs with no more points.
bufInput := newBufUnsignedIterator(input)
// Append to the heap.
itr.heap.items = append(itr.heap.items, &unsignedMergeHeapItem{itr: bufInput})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *unsignedMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *unsignedMergeIterator) Close() error {
itr.mu.Lock()
defer itr.mu.Unlock()
for _, input := range itr.inputs {
input.Close()
}
itr.curr = nil
itr.inputs = nil
itr.heap.items = nil
itr.closed = true
return nil
}
// Next returns the next point from the iterator.
func (itr *unsignedMergeIterator) Next() (*UnsignedPoint, error) {
itr.mu.RLock()
defer itr.mu.RUnlock()
if itr.closed {
return nil, nil
}
// Initialize the heap. This needs to be done lazily on the first call to this iterator
// so that iterator initialization done through the Select() call returns quickly.
// Queries can only be interrupted after the Select() call completes so any operations
// done during iterator creation cannot be interrupted, which is why we do it here
// instead so an interrupt can happen while initializing the heap.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*unsignedMergeHeapItem, 0, len(items))
for _, item := range items {
if p, err := item.itr.peek(); err != nil {
return nil, err
} else if p == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
for {
// Retrieve the next iterator if we don't have one.
if itr.curr == nil {
if len(itr.heap.items) == 0 {
return nil, nil
}
itr.curr = heap.Pop(itr.heap).(*unsignedMergeHeapItem)
// Read point and set current window.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
tags := p.Tags.Subset(itr.heap.opt.Dimensions)
itr.window.name, itr.window.tags = p.Name, tags.ID()
itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)
return p, nil
}
// Read the next point from the current iterator.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
// If there are no more points then remove iterator from heap and find next.
if p == nil {
itr.curr = nil
continue
}
// Check if the point is inside of our current window.
inWindow := true
if window := itr.window; window.name != p.Name {
inWindow = false
} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {
inWindow = false
} else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime {
inWindow = false
} else if !opt.Ascending && p.Time < window.startTime {
inWindow = false
}
// If it's outside our window then push iterator back on the heap and find new iterator.
if !inWindow {
itr.curr.itr.unread(p)
heap.Push(itr.heap, itr.curr)
itr.curr = nil
continue
}
return p, nil
}
}
// unsignedMergeHeap represents a heap of unsignedMergeHeapItems.
// Items are sorted by name/tags and then by the start of their next window.
type unsignedMergeHeap struct {
opt IteratorOptions
items []*unsignedMergeHeapItem
}
func (h *unsignedMergeHeap) Len() int { return len(h.items) }
func (h *unsignedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *unsignedMergeHeap) Less(i, j int) bool {
x, err := h.items[i].itr.peek()
if err != nil {
return true
}
y, err := h.items[j].itr.peek()
if err != nil {
return false
}
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() < yTags.ID()
}
} else {
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() > yTags.ID()
}
}
xt, _ := h.opt.Window(x.Time)
yt, _ := h.opt.Window(y.Time)
if h.opt.Ascending {
return xt < yt
}
return xt > yt
}
func (h *unsignedMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*unsignedMergeHeapItem))
}
func (h *unsignedMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
type unsignedMergeHeapItem struct {
itr *bufUnsignedIterator
}
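// mergeUnsignedExample is an illustrative sketch, not part of the generated
// template. It drains a merge iterator, which interleaves several inputs by
// window and name/tags using the heap above. The inputs and the ascending,
// windowless options are assumptions supplied for the example.
func mergeUnsignedExample(inputs []UnsignedIterator) ([]UnsignedPoint, error) {
	itr := newUnsignedMergeIterator(inputs, IteratorOptions{Ascending: true})
	defer itr.Close()

	var out []UnsignedPoint
	for {
		p, err := itr.Next()
		if err != nil {
			return out, err
		} else if p == nil {
			return out, nil
		}
		out = append(out, *p)
	}
}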
// unsignedSortedMergeIterator is an iterator that sorts and merges multiple iterators into one.
type unsignedSortedMergeIterator struct {
inputs []UnsignedIterator
heap *unsignedSortedMergeHeap
init bool
}
// newUnsignedSortedMergeIterator returns an instance of unsignedSortedMergeIterator.
func newUnsignedSortedMergeIterator(inputs []UnsignedIterator, opt IteratorOptions) Iterator {
itr := &unsignedSortedMergeIterator{
inputs: inputs,
heap: &unsignedSortedMergeHeap{
items: make([]*unsignedSortedMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Append to the heap.
itr.heap.items = append(itr.heap.items, &unsignedSortedMergeHeapItem{itr: input})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *unsignedSortedMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *unsignedSortedMergeIterator) Close() error {
for _, input := range itr.inputs {
input.Close()
}
return nil
}
// Next returns the next points from the iterator.
func (itr *unsignedSortedMergeIterator) Next() (*UnsignedPoint, error) { return itr.pop() }
// pop returns the next point from the heap.
// It reads the following point from the popped item's iterator and pushes the
// item back onto the heap if more points remain.
func (itr *unsignedSortedMergeIterator) pop() (*UnsignedPoint, error) {
// Initialize the heap. See the MergeIterator to see why this has to be done lazily.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*unsignedSortedMergeHeapItem, 0, len(items))
for _, item := range items {
var err error
if item.point, err = item.itr.Next(); err != nil {
return nil, err
} else if item.point == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
if len(itr.heap.items) == 0 {
return nil, nil
}
// Read the next item from the heap.
item := heap.Pop(itr.heap).(*unsignedSortedMergeHeapItem)
if item.err != nil {
return nil, item.err
} else if item.point == nil {
return nil, nil
}
// Copy the point for return.
p := item.point.Clone()
// Read the next item from the cursor. Push back to heap if one exists.
if item.point, item.err = item.itr.Next(); item.point != nil {
heap.Push(itr.heap, item)
}
return p, nil
}
// unsignedSortedMergeHeap represents a heap of unsignedSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type unsignedSortedMergeHeap struct {
opt IteratorOptions
items []*unsignedSortedMergeHeapItem
}
func (h *unsignedSortedMergeHeap) Len() int { return len(h.items) }
func (h *unsignedSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *unsignedSortedMergeHeap) Less(i, j int) bool {
x, y := h.items[i].point, h.items[j].point
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() < yTags.ID()
}
if x.Time != y.Time {
return x.Time < y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 < v2
}
}
return false // Times and/or Aux fields are equal.
}
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() > yTags.ID()
}
if x.Time != y.Time {
return x.Time > y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 > v2
}
}
return false // Times and/or Aux fields are equal.
}
func (h *unsignedSortedMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*unsignedSortedMergeHeapItem))
}
func (h *unsignedSortedMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
type unsignedSortedMergeHeapItem struct {
point *UnsignedPoint
err error
itr UnsignedIterator
}
// unsignedIteratorScanner scans the results of a UnsignedIterator into a map.
type unsignedIteratorScanner struct {
input *bufUnsignedIterator
err error
keys []influxql.VarRef
defaultValue interface{}
}
// newUnsignedIteratorScanner creates a new IteratorScanner.
func newUnsignedIteratorScanner(input UnsignedIterator, keys []influxql.VarRef, defaultValue interface{}) *unsignedIteratorScanner {
return &unsignedIteratorScanner{
input: newBufUnsignedIterator(input),
keys: keys,
defaultValue: defaultValue,
}
}
func (s *unsignedIteratorScanner) Peek() (int64, string, Tags) {
if s.err != nil {
return ZeroTime, "", Tags{}
}
p, err := s.input.peek()
if err != nil {
s.err = err
return ZeroTime, "", Tags{}
} else if p == nil {
return ZeroTime, "", Tags{}
}
return p.Time, p.Name, p.Tags
}
func (s *unsignedIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) {
if s.err != nil {
return
}
p, err := s.input.Next()
if err != nil {
s.err = err
return
} else if p == nil {
s.useDefaults(m)
return
} else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) {
s.useDefaults(m)
s.input.unread(p)
return
}
if k := s.keys[0]; k.Val != "" {
if p.Nil {
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
} else {
m[k.Val] = p.Value
}
}
for i, v := range p.Aux {
k := s.keys[i+1]
switch v.(type) {
case float64, int64, uint64, string, bool:
m[k.Val] = v
default:
// Insert the fill value if one was specified.
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
}
}
func (s *unsignedIteratorScanner) useDefaults(m map[string]interface{}) {
if s.defaultValue == SkipDefault {
return
}
for _, k := range s.keys {
if k.Val == "" {
continue
}
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
func (s *unsignedIteratorScanner) Stats() IteratorStats { return s.input.Stats() }
func (s *unsignedIteratorScanner) Err() error { return s.err }
func (s *unsignedIteratorScanner) Close() error { return s.input.Close() }
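// scanUnsignedExample is an illustrative sketch, not part of the generated
// template. It shows the intended Peek/ScanAt pattern of the iterator scanner:
// Peek reports the next timestamp and series, and ScanAt fills a row map with
// the primary value (keys[0]) and any aux fields (remaining keys). The input
// and keys are assumptions supplied by the caller; keys must name at least
// the primary value.
func scanUnsignedExample(input UnsignedIterator, keys []influxql.VarRef) (map[string]interface{}, error) {
	s := newUnsignedIteratorScanner(input, keys, SkipDefault)
	defer s.Close()

	ts, name, tags := s.Peek()
	if ts == ZeroTime {
		return nil, s.Err()
	}
	m := make(map[string]interface{})
	s.ScanAt(ts, name, tags, m)
	return m, s.Err()
}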
// unsignedParallelIterator represents an iterator that pulls data in a separate goroutine.
type unsignedParallelIterator struct {
input UnsignedIterator
ch chan unsignedPointError
once sync.Once
closing chan struct{}
wg sync.WaitGroup
}
// newUnsignedParallelIterator returns a new instance of unsignedParallelIterator.
func newUnsignedParallelIterator(input UnsignedIterator) *unsignedParallelIterator {
itr := &unsignedParallelIterator{
input: input,
ch: make(chan unsignedPointError, 256),
closing: make(chan struct{}),
}
itr.wg.Add(1)
go itr.monitor()
return itr
}
// Stats returns stats from the underlying iterator.
func (itr *unsignedParallelIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *unsignedParallelIterator) Close() error {
itr.once.Do(func() { close(itr.closing) })
itr.wg.Wait()
return itr.input.Close()
}
// Next returns the next point from the iterator.
func (itr *unsignedParallelIterator) Next() (*UnsignedPoint, error) {
v, ok := <-itr.ch
if !ok {
return nil, io.EOF
}
return v.point, v.err
}
// monitor runs in a separate goroutine and actively pulls the next point.
func (itr *unsignedParallelIterator) monitor() {
defer close(itr.ch)
defer itr.wg.Done()
for {
// Read next point.
p, err := itr.input.Next()
if p != nil {
p = p.Clone()
}
select {
case <-itr.closing:
return
case itr.ch <- unsignedPointError{point: p, err: err}:
}
}
}
type unsignedPointError struct {
point *UnsignedPoint
err error
}
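// parallelUnsignedExample is an illustrative sketch, not part of the generated
// template. It counts the points produced by a parallel iterator, whose
// background goroutine pre-fetches points into a channel; io.EOF from Next
// means the channel was closed. The input iterator is an assumption supplied
// by the caller.
func parallelUnsignedExample(input UnsignedIterator) (int, error) {
	itr := newUnsignedParallelIterator(input)
	defer itr.Close()

	n := 0
	for {
		p, err := itr.Next()
		if err == io.EOF {
			return n, nil
		} else if err != nil {
			return n, err
		} else if p == nil {
			return n, nil
		}
		n++
	}
}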
// unsignedLimitIterator represents an iterator that limits points per group.
type unsignedLimitIterator struct {
input UnsignedIterator
opt IteratorOptions
n int
prev struct {
name string
tags Tags
}
}
// newUnsignedLimitIterator returns a new instance of unsignedLimitIterator.
func newUnsignedLimitIterator(input UnsignedIterator, opt IteratorOptions) *unsignedLimitIterator {
return &unsignedLimitIterator{
input: input,
opt: opt,
}
}
// Stats returns stats from the underlying iterator.
func (itr *unsignedLimitIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *unsignedLimitIterator) Close() error { return itr.input.Close() }
// Next returns the next point from the iterator.
func (itr *unsignedLimitIterator) Next() (*UnsignedPoint, error) {
for {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Reset window and counter if a new window is encountered.
if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {
itr.prev.name = p.Name
itr.prev.tags = p.Tags
itr.n = 0
}
// Increment counter.
itr.n++
// Skip this point if we have not yet moved past the offset.
if itr.n <= itr.opt.Offset {
continue
}
// Skip this point if we are beyond the limit.
if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {
continue
}
return p, nil
}
}
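// limitUnsignedExample is an illustrative sketch, not part of the generated
// template. It shows how the limit iterator applies OFFSET and LIMIT per
// name/tag group: with Offset: 1 and Limit: 2 the first point of each series
// is skipped and the next two are passed through. The input iterator and the
// option values are assumptions supplied for the example.
func limitUnsignedExample(input UnsignedIterator) *unsignedLimitIterator {
	return newUnsignedLimitIterator(input, IteratorOptions{Offset: 1, Limit: 2})
}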
type unsignedFillIterator struct {
input *bufUnsignedIterator
prev UnsignedPoint
startTime int64
endTime int64
auxFields []interface{}
init bool
opt IteratorOptions
window struct {
name string
tags Tags
time int64
offset int64
}
}
func newUnsignedFillIterator(input UnsignedIterator, expr influxql.Expr, opt IteratorOptions) *unsignedFillIterator {
if opt.Fill == influxql.NullFill {
if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" {
opt.Fill = influxql.NumberFill
opt.FillValue = uint64(0)
}
}
var startTime, endTime int64
if opt.Ascending {
startTime, _ = opt.Window(opt.StartTime)
endTime, _ = opt.Window(opt.EndTime)
} else {
startTime, _ = opt.Window(opt.EndTime)
endTime, _ = opt.Window(opt.StartTime)
}
var auxFields []interface{}
if len(opt.Aux) > 0 {
auxFields = make([]interface{}, len(opt.Aux))
}
return &unsignedFillIterator{
input: newBufUnsignedIterator(input),
prev: UnsignedPoint{Nil: true},
startTime: startTime,
endTime: endTime,
auxFields: auxFields,
opt: opt,
}
}
func (itr *unsignedFillIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *unsignedFillIterator) Close() error { return itr.input.Close() }
func (itr *unsignedFillIterator) Next() (*UnsignedPoint, error) {
if !itr.init {
p, err := itr.input.peek()
if p == nil || err != nil {
return nil, err
}
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.startTime == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.init = true
}
p, err := itr.input.Next()
if err != nil {
return nil, err
}
// Check if the next point is outside of our window or is nil.
if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {
// If we are inside of an interval, unread the point and continue below to
// construct a new point.
if itr.opt.Ascending && itr.window.time <= itr.endTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
} else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
}
// We are *not* in a current interval. If there is no next point,
// we are at the end of all intervals.
if p == nil {
return nil, nil
}
// Set the new interval.
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.window.time == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.prev = UnsignedPoint{Nil: true}
}
// Check if the point is our next expected point.
CONSTRUCT:
if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {
if p != nil {
itr.input.unread(p)
}
p = &UnsignedPoint{
Name: itr.window.name,
Tags: itr.window.tags,
Time: itr.window.time,
Aux: itr.auxFields,
}
switch itr.opt.Fill {
case influxql.LinearFill:
if !itr.prev.Nil {
next, err := itr.input.peek()
if err != nil {
return nil, err
} else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() {
interval := int64(itr.opt.Interval.Duration)
start := itr.window.time / interval
p.Value = linearUnsigned(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value)
} else {
p.Nil = true
}
} else {
p.Nil = true
}
case influxql.NullFill:
p.Nil = true
case influxql.NumberFill:
p.Value, _ = castToUnsigned(itr.opt.FillValue)
case influxql.PreviousFill:
if !itr.prev.Nil {
p.Value = itr.prev.Value
p.Nil = itr.prev.Nil
} else {
p.Nil = true
}
}
} else {
itr.prev = *p
}
// Advance the expected time. Do not advance to a new window here
// as there may be lingering points with the same timestamp in the previous
// window.
if itr.opt.Ascending {
itr.window.time += int64(itr.opt.Interval.Duration)
} else {
itr.window.time -= int64(itr.opt.Interval.Duration)
}
// Check to see if we have passed over an offset change and adjust the time
// to account for this new offset.
if itr.opt.Location != nil {
if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {
diff := itr.window.offset - offset
if abs(diff) < int64(itr.opt.Interval.Duration) {
itr.window.time += diff
}
itr.window.offset = offset
}
}
return p, nil
}
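// fillUnsignedExample is an illustrative sketch, not part of the generated
// template. It wraps an input so that one point is produced for every interval
// of the query's time range, carrying the previous value forward through empty
// intervals. The input iterator and options (which must define a window
// interval and time range) are assumptions supplied by the caller.
func fillUnsignedExample(input UnsignedIterator, opt IteratorOptions) *unsignedFillIterator {
	opt.Fill = influxql.PreviousFill
	return newUnsignedFillIterator(input, nil, opt)
}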
// unsignedIntervalIterator represents an unsigned implementation of IntervalIterator.
type unsignedIntervalIterator struct {
input UnsignedIterator
opt IteratorOptions
}
func newUnsignedIntervalIterator(input UnsignedIterator, opt IteratorOptions) *unsignedIntervalIterator {
return &unsignedIntervalIterator{input: input, opt: opt}
}
func (itr *unsignedIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *unsignedIntervalIterator) Close() error { return itr.input.Close() }
func (itr *unsignedIntervalIterator) Next() (*UnsignedPoint, error) {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
p.Time, _ = itr.opt.Window(p.Time)
// If we see the minimum allowable time, set the time to zero so we don't
// break the default returned time for aggregate queries without times.
if p.Time == influxql.MinTime {
p.Time = 0
}
return p, nil
}
// unsignedInterruptIterator represents an unsigned implementation of InterruptIterator.
type unsignedInterruptIterator struct {
input UnsignedIterator
closing <-chan struct{}
count int
}
func newUnsignedInterruptIterator(input UnsignedIterator, closing <-chan struct{}) *unsignedInterruptIterator {
return &unsignedInterruptIterator{input: input, closing: closing}
}
func (itr *unsignedInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *unsignedInterruptIterator) Close() error { return itr.input.Close() }
func (itr *unsignedInterruptIterator) Next() (*UnsignedPoint, error) {
// Only check whether the closing channel has been closed once every 256
// points (when the low byte of the counter reaches 0xFF). This keeps the
// cost of the select off the hot path while still allowing long-running
// reads to be interrupted reasonably quickly.
if itr.count&0xFF == 0xFF {
select {
case <-itr.closing:
return nil, itr.Close()
default:
// Reset iterator count to zero and fall through to emit the next point.
itr.count = 0
}
}
// Increment the counter for every point read.
itr.count++
return itr.input.Next()
}
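// interruptUnsignedExample is an illustrative sketch, not part of the
// generated template. It wires an interrupt iterator to a closing channel and
// returns a cancel function; once cancel is called, the iterator shuts down at
// its next interrupt check. The input iterator is an assumption supplied by
// the caller.
func interruptUnsignedExample(input UnsignedIterator) (UnsignedIterator, func()) {
	closing := make(chan struct{})
	itr := newUnsignedInterruptIterator(input, closing)
	return itr, func() { close(closing) }
}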
// unsignedCloseInterruptIterator represents an unsigned implementation of CloseInterruptIterator.
type unsignedCloseInterruptIterator struct {
input UnsignedIterator
closing <-chan struct{}
done chan struct{}
once sync.Once
}
func newUnsignedCloseInterruptIterator(input UnsignedIterator, closing <-chan struct{}) *unsignedCloseInterruptIterator {
itr := &unsignedCloseInterruptIterator{
input: input,
closing: closing,
done: make(chan struct{}),
}
go itr.monitor()
return itr
}
func (itr *unsignedCloseInterruptIterator) monitor() {
select {
case <-itr.closing:
itr.Close()
case <-itr.done:
}
}
func (itr *unsignedCloseInterruptIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *unsignedCloseInterruptIterator) Close() error {
itr.once.Do(func() {
close(itr.done)
itr.input.Close()
})
return nil
}
func (itr *unsignedCloseInterruptIterator) Next() (*UnsignedPoint, error) {
p, err := itr.input.Next()
if err != nil {
// Check if the iterator was closed.
select {
case <-itr.done:
return nil, nil
default:
return nil, err
}
}
return p, nil
}
// unsignedReduceFloatIterator executes a reducer for every interval and buffers the result.
type unsignedReduceFloatIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
points []FloatPoint
keepTags bool
}
func newUnsignedReduceFloatIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, FloatPointEmitter)) *unsignedReduceFloatIterator {
return &unsignedReduceFloatIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedReduceFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *unsignedReduceFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// unsignedReduceFloatPoint stores the reduced data for a name/tag combination.
type unsignedReduceFloatPoint struct {
Name string
Tags Tags
Aggregator UnsignedPointAggregator
Emitter FloatPointEmitter
}
// reduce aggregates every point in the next window with the configured
// aggregator and emits the reduced points for each name/tag combination.
func (itr *unsignedReduceFloatIterator) reduce() ([]FloatPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*unsignedReduceFloatPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys in reverse of the desired output order. Points are later
// popped off the end of the buffered slice, which restores the correct order
// and keeps the output consistent.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]FloatPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = floatPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
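// unsignedMeanFloatExample is an illustrative sketch, not part of the
// generated template. It is a minimal reducer, implementing both
// UnsignedPointAggregator and FloatPointEmitter, that averages the unsigned
// values in a window and emits the mean as a float. Emitting ZeroTime lets
// the reduce iterator above stamp the point with the window's start time.
type unsignedMeanFloatExample struct {
	sum   uint64
	count uint64
}

func (r *unsignedMeanFloatExample) AggregateUnsigned(p *UnsignedPoint) {
	r.sum += p.Value
	r.count++
}

func (r *unsignedMeanFloatExample) Emit() []FloatPoint {
	if r.count == 0 {
		return nil
	}
	return []FloatPoint{{Time: ZeroTime, Value: float64(r.sum) / float64(r.count)}}
}

// newUnsignedMeanFloatExampleIterator shows how such a reducer is handed to
// newUnsignedReduceFloatIterator: the create function returns the same value
// as both aggregator and emitter. The input and options are assumptions
// supplied by the caller.
func newUnsignedMeanFloatExampleIterator(input UnsignedIterator, opt IteratorOptions) *unsignedReduceFloatIterator {
	return newUnsignedReduceFloatIterator(input, opt, func() (UnsignedPointAggregator, FloatPointEmitter) {
		r := &unsignedMeanFloatExample{}
		return r, r
	})
}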
// unsignedStreamFloatIterator streams inputs into the iterator and emits points gradually.
type unsignedStreamFloatIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
m map[string]*unsignedReduceFloatPoint
points []FloatPoint
}
// newUnsignedStreamFloatIterator returns a new instance of unsignedStreamFloatIterator.
func newUnsignedStreamFloatIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, FloatPointEmitter), opt IteratorOptions) *unsignedStreamFloatIterator {
return &unsignedStreamFloatIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*unsignedReduceFloatPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedStreamFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *unsignedStreamFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *unsignedStreamFloatIterator) reduce() ([]FloatPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []FloatPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
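// unsignedRunningSumFloatExample is an illustrative sketch, not part of the
// generated template. It is a minimal streaming reducer that emits the running
// sum as a float after every aggregated point, which is what the stream
// iterator expects: Emit is attempted after each AggregateUnsigned call.
// Reducers that buffer output instead should also implement io.Closer so the
// final flush above can drain them.
type unsignedRunningSumFloatExample struct {
	sum   uint64
	time  int64
	ready bool
}

func (r *unsignedRunningSumFloatExample) AggregateUnsigned(p *UnsignedPoint) {
	r.sum += p.Value
	r.time = p.Time
	r.ready = true
}

func (r *unsignedRunningSumFloatExample) Emit() []FloatPoint {
	if !r.ready {
		return nil
	}
	r.ready = false
	return []FloatPoint{{Time: r.time, Value: float64(r.sum)}}
}

// newUnsignedRunningSumExampleIterator wires the reducer into the stream
// iterator; the input and options are assumptions supplied by the caller.
func newUnsignedRunningSumExampleIterator(input UnsignedIterator, opt IteratorOptions) *unsignedStreamFloatIterator {
	return newUnsignedStreamFloatIterator(input, func() (UnsignedPointAggregator, FloatPointEmitter) {
		r := &unsignedRunningSumFloatExample{}
		return r, r
	}, opt)
}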
// unsignedReduceIntegerIterator executes a reducer for every interval and buffers the result.
type unsignedReduceIntegerIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
points []IntegerPoint
keepTags bool
}
func newUnsignedReduceIntegerIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, IntegerPointEmitter)) *unsignedReduceIntegerIterator {
return &unsignedReduceIntegerIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedReduceIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *unsignedReduceIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// unsignedReduceIntegerPoint stores the reduced data for a name/tag combination.
type unsignedReduceIntegerPoint struct {
Name string
Tags Tags
Aggregator UnsignedPointAggregator
Emitter IntegerPointEmitter
}
// reduce aggregates every point in the next window with the configured
// aggregator and emits the reduced points for each name/tag combination.
func (itr *unsignedReduceIntegerIterator) reduce() ([]IntegerPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*unsignedReduceIntegerPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys in reverse of the desired output order. Points are later
// popped off the end of the buffered slice, which restores the correct order
// and keeps the output consistent.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]IntegerPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = integerPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// unsignedStreamIntegerIterator streams inputs into the iterator and emits points gradually.
type unsignedStreamIntegerIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
m map[string]*unsignedReduceIntegerPoint
points []IntegerPoint
}
// newUnsignedStreamIntegerIterator returns a new instance of unsignedStreamIntegerIterator.
func newUnsignedStreamIntegerIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, IntegerPointEmitter), opt IteratorOptions) *unsignedStreamIntegerIterator {
return &unsignedStreamIntegerIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*unsignedReduceIntegerPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedStreamIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *unsignedStreamIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *unsignedStreamIntegerIterator) reduce() ([]IntegerPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []IntegerPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// unsignedReduceUnsignedIterator executes a reducer for every interval and buffers the result.
type unsignedReduceUnsignedIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
points []UnsignedPoint
keepTags bool
}
func newUnsignedReduceUnsignedIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, UnsignedPointEmitter)) *unsignedReduceUnsignedIterator {
return &unsignedReduceUnsignedIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedReduceUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next aggregated point for the next available interval.
func (itr *unsignedReduceUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// unsignedReduceUnsignedPoint stores the reduced data for a name/tag combination.
type unsignedReduceUnsignedPoint struct {
Name string
Tags Tags
Aggregator UnsignedPointAggregator
Emitter UnsignedPointEmitter
}
// reduce executes the aggregator once for every point in the next window
// and returns the points emitted for that window.
func (itr *unsignedReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*unsignedReduceUnsignedPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]UnsignedPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = unsignedPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// unsignedStreamUnsignedIterator streams inputs into the iterator and emits points gradually.
type unsignedStreamUnsignedIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
m map[string]*unsignedReduceUnsignedPoint
points []UnsignedPoint
}
// newUnsignedStreamUnsignedIterator returns a new instance of unsignedStreamUnsignedIterator.
func newUnsignedStreamUnsignedIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *unsignedStreamUnsignedIterator {
return &unsignedStreamUnsignedIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*unsignedReduceUnsignedPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedStreamUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *unsignedStreamUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *unsignedStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []UnsignedPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
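
// The createFn passed to the reduce and stream iterators above supplies an
// aggregator/emitter pair for each name/tag combination. The type below is a
// hand-written, minimal sketch (not produced by the template) of such a pair
// that counts points; it assumes only the AggregateUnsigned/Emit method shapes
// already used by the iterators in this file.
type exampleUnsignedCountReducer struct {
	n uint64
}

// AggregateUnsigned accumulates a single input point.
func (r *exampleUnsignedCountReducer) AggregateUnsigned(p *UnsignedPoint) { r.n++ }

// Emit returns the reduced value. The calling iterator fills in Name, Tags,
// and, when the time is ZeroTime, the window start time.
func (r *exampleUnsignedCountReducer) Emit() []UnsignedPoint {
	return []UnsignedPoint{{Value: r.n}}
}

// It would be supplied through createFn, for example:
//
//	newUnsignedReduceUnsignedIterator(input, opt, func() (UnsignedPointAggregator, UnsignedPointEmitter) {
//		r := &exampleUnsignedCountReducer{}
//		return r, r
//	})
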
// unsignedReduceStringIterator executes a reducer for every interval and buffers the result.
type unsignedReduceStringIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
points []StringPoint
keepTags bool
}
func newUnsignedReduceStringIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, StringPointEmitter)) *unsignedReduceStringIterator {
return &unsignedReduceStringIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedReduceStringIterator) Close() error { return itr.input.Close() }
// Next returns the next aggregated point for the next available interval.
func (itr *unsignedReduceStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// unsignedReduceStringPoint stores the reduced data for a name/tag combination.
type unsignedReduceStringPoint struct {
Name string
Tags Tags
Aggregator UnsignedPointAggregator
Emitter StringPointEmitter
}
// reduce executes the aggregator once for every point in the next window
// and returns the points emitted for that window.
func (itr *unsignedReduceStringIterator) reduce() ([]StringPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*unsignedReduceStringPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]StringPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = stringPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// unsignedStreamStringIterator streams inputs into the iterator and emits points gradually.
type unsignedStreamStringIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
m map[string]*unsignedReduceStringPoint
points []StringPoint
}
// newUnsignedStreamStringIterator returns a new instance of unsignedStreamStringIterator.
func newUnsignedStreamStringIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, StringPointEmitter), opt IteratorOptions) *unsignedStreamStringIterator {
return &unsignedStreamStringIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*unsignedReduceStringPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedStreamStringIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *unsignedStreamStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *unsignedStreamStringIterator) reduce() ([]StringPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []StringPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// unsignedReduceBooleanIterator executes a reducer for every interval and buffers the result.
type unsignedReduceBooleanIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
points []BooleanPoint
keepTags bool
}
func newUnsignedReduceBooleanIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, BooleanPointEmitter)) *unsignedReduceBooleanIterator {
return &unsignedReduceBooleanIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedReduceBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next aggregated point for the next available interval.
func (itr *unsignedReduceBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// unsignedReduceBooleanPoint stores the reduced data for a name/tag combination.
type unsignedReduceBooleanPoint struct {
Name string
Tags Tags
Aggregator UnsignedPointAggregator
Emitter BooleanPointEmitter
}
// reduce executes the aggregator once for every point in the next window
// and returns the points emitted for that window.
func (itr *unsignedReduceBooleanIterator) reduce() ([]BooleanPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*unsignedReduceBooleanPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]BooleanPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = booleanPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// unsignedStreamBooleanIterator streams inputs into the iterator and emits points gradually.
type unsignedStreamBooleanIterator struct {
input *bufUnsignedIterator
create func() (UnsignedPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
m map[string]*unsignedReduceBooleanPoint
points []BooleanPoint
}
// newUnsignedStreamBooleanIterator returns a new instance of unsignedStreamBooleanIterator.
func newUnsignedStreamBooleanIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, BooleanPointEmitter), opt IteratorOptions) *unsignedStreamBooleanIterator {
return &unsignedStreamBooleanIterator{
input: newBufUnsignedIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*unsignedReduceBooleanPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedStreamBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *unsignedStreamBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *unsignedStreamBooleanIterator) reduce() ([]BooleanPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []BooleanPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &unsignedReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateUnsigned(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// unsignedDedupeIterator only outputs unique points.
// This differs from the DistinctIterator in that it compares all aux fields too.
// This iterator is relatively inefficient and should only be used on small
// datasets such as meta query results.
type unsignedDedupeIterator struct {
input UnsignedIterator
m map[string]struct{} // lookup of points already sent
}
type unsignedIteratorMapper struct {
cur Cursor
row Row
driver IteratorMap // which iterator to use for the primary value, can be nil
fields []IteratorMap // which iterator to use for an aux field
point UnsignedPoint
}
func newUnsignedIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *unsignedIteratorMapper {
return &unsignedIteratorMapper{
cur: cur,
driver: driver,
fields: fields,
point: UnsignedPoint{
Aux: make([]interface{}, len(fields)),
},
}
}
func (itr *unsignedIteratorMapper) Next() (*UnsignedPoint, error) {
if !itr.cur.Scan(&itr.row) {
if err := itr.cur.Err(); err != nil {
return nil, err
}
return nil, nil
}
itr.point.Time = itr.row.Time
itr.point.Name = itr.row.Series.Name
itr.point.Tags = itr.row.Series.Tags
if itr.driver != nil {
if v := itr.driver.Value(&itr.row); v != nil {
if v, ok := castToUnsigned(v); ok {
itr.point.Value = v
itr.point.Nil = false
} else {
itr.point.Value = 0
itr.point.Nil = true
}
} else {
itr.point.Value = 0
itr.point.Nil = true
}
}
for i, f := range itr.fields {
itr.point.Aux[i] = f.Value(&itr.row)
}
return &itr.point, nil
}
func (itr *unsignedIteratorMapper) Stats() IteratorStats {
return itr.cur.Stats()
}
func (itr *unsignedIteratorMapper) Close() error {
return itr.cur.Close()
}
type unsignedFilterIterator struct {
input UnsignedIterator
cond influxql.Expr
opt IteratorOptions
m map[string]interface{}
}
func newUnsignedFilterIterator(input UnsignedIterator, cond influxql.Expr, opt IteratorOptions) UnsignedIterator {
// Strip out time conditions from the WHERE clause.
// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.
n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node {
switch n := n.(type) {
case *influxql.BinaryExpr:
if n.LHS.String() == "time" {
return &influxql.BooleanLiteral{Val: true}
}
}
return n
})
cond, _ = n.(influxql.Expr)
if cond == nil {
return input
} else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val {
return input
}
return &unsignedFilterIterator{
input: input,
cond: cond,
opt: opt,
m: make(map[string]interface{}),
}
}
func (itr *unsignedFilterIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *unsignedFilterIterator) Close() error { return itr.input.Close() }
func (itr *unsignedFilterIterator) Next() (*UnsignedPoint, error) {
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
}
for i, ref := range itr.opt.Aux {
itr.m[ref.Val] = p.Aux[i]
}
for k, v := range p.Tags.KeyValues() {
itr.m[k] = v
}
if !influxql.EvalBool(itr.cond, itr.m) {
continue
}
return p, nil
}
}
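
// A hedged sketch of the constructor's behavior above (the expression text is
// illustrative, and influxql.MustParseExpr is assumed to be available for
// parsing):
//
//	cond := influxql.MustParseExpr("host = 'server01' AND time > 0")
//	itr := newUnsignedFilterIterator(input, cond, opt)
//	// The time comparison is rewritten to TRUE, so itr filters on host only.
//	// A condition that rewrites down to the literal TRUE (for example, a lone
//	// time comparison) causes the input iterator to be returned unmodified.
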
type unsignedTagSubsetIterator struct {
input UnsignedIterator
point UnsignedPoint
lastTags Tags
dimensions []string
}
func newUnsignedTagSubsetIterator(input UnsignedIterator, opt IteratorOptions) *unsignedTagSubsetIterator {
return &unsignedTagSubsetIterator{
input: input,
dimensions: opt.GetDimensions(),
}
}
func (itr *unsignedTagSubsetIterator) Next() (*UnsignedPoint, error) {
p, err := itr.input.Next()
if err != nil {
return nil, err
} else if p == nil {
return nil, nil
}
itr.point.Name = p.Name
if !p.Tags.Equal(itr.lastTags) {
itr.point.Tags = p.Tags.Subset(itr.dimensions)
itr.lastTags = p.Tags
}
itr.point.Time = p.Time
itr.point.Value = p.Value
itr.point.Aux = p.Aux
itr.point.Aggregated = p.Aggregated
itr.point.Nil = p.Nil
return &itr.point, nil
}
func (itr *unsignedTagSubsetIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *unsignedTagSubsetIterator) Close() error {
return itr.input.Close()
}
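
// A hedged usage sketch (assuming GetDimensions resolves to the GROUP BY
// dimensions, here []string{"host"}): a point tagged host=server01,region=uswest
// is emitted carrying only the host tag. The iterator reuses a single internal
// point, so callers that retain points across calls should copy them first.
//
//	subset := newUnsignedTagSubsetIterator(input, IteratorOptions{Dimensions: []string{"host"}})
//	p, _ := subset.Next() // p.Tags now contains only the host tag
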
// newUnsignedDedupeIterator returns a new instance of unsignedDedupeIterator.
func newUnsignedDedupeIterator(input UnsignedIterator) *unsignedDedupeIterator {
return &unsignedDedupeIterator{
input: input,
m: make(map[string]struct{}),
}
}
// Stats returns stats from the input iterator.
func (itr *unsignedDedupeIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *unsignedDedupeIterator) Close() error { return itr.input.Close() }
// Next returns the next unique point from the input iterator.
func (itr *unsignedDedupeIterator) Next() (*UnsignedPoint, error) {
for {
// Read next point.
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Serialize to bytes to store in lookup.
buf, err := proto.Marshal(encodeUnsignedPoint(p))
if err != nil {
return nil, err
}
// If the point has already been output then move to the next point.
if _, ok := itr.m[string(buf)]; ok {
continue
}
// Otherwise mark it as emitted and return point.
itr.m[string(buf)] = struct{}{}
return p, nil
}
}
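
// drainUnsignedDedupe is a hand-written sketch (not template output) showing
// how the dedupe iterator is typically consumed on small result sets such as
// meta queries: it simply drains the distinct points into a slice.
func drainUnsignedDedupe(input UnsignedIterator) ([]UnsignedPoint, error) {
	itr := newUnsignedDedupeIterator(input)
	defer itr.Close()

	var a []UnsignedPoint
	for {
		p, err := itr.Next()
		if err != nil {
			return a, err
		} else if p == nil {
			return a, nil
		}
		// Each distinct encoded point is returned exactly once.
		a = append(a, *p)
	}
}
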
// unsignedReaderIterator represents an iterator that streams from a reader.
type unsignedReaderIterator struct {
r io.Reader
dec *UnsignedPointDecoder
}
// newUnsignedReaderIterator returns a new instance of unsignedReaderIterator.
func newUnsignedReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *unsignedReaderIterator {
dec := NewUnsignedPointDecoder(ctx, r)
dec.stats = stats
return &unsignedReaderIterator{
r: r,
dec: dec,
}
}
// Stats returns stats about points processed.
func (itr *unsignedReaderIterator) Stats() IteratorStats { return itr.dec.stats }
// Close closes the underlying reader, if applicable.
func (itr *unsignedReaderIterator) Close() error {
if r, ok := itr.r.(io.ReadCloser); ok {
return r.Close()
}
return nil
}
// Next returns the next point from the iterator.
func (itr *unsignedReaderIterator) Next() (*UnsignedPoint, error) {
// OPTIMIZE(benbjohnson): Reuse point on iterator.
// Unmarshal next point.
p := &UnsignedPoint{}
if err := itr.dec.DecodeUnsignedPoint(p); err == io.EOF {
return nil, nil
} else if err != nil {
return nil, err
}
return p, nil
}
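
// A hedged decoding sketch (r is an assumed reader carrying points written by
// the matching point encoder, and the zero-value stats are illustrative):
//
//	itr := newUnsignedReaderIterator(context.Background(), r, IteratorStats{})
//	defer itr.Close()
//	for {
//		p, err := itr.Next()
//		if p == nil || err != nil {
//			break // io.EOF from the decoder surfaces here as a nil point
//		}
//	}
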
// StringIterator represents a stream of string points.
type StringIterator interface {
Iterator
Next() (*StringPoint, error)
}
// newStringIterators converts a slice of Iterator to a slice of StringIterator.
// Any iterator in itrs that cannot be cast to a StringIterator is dropped
// and closed.
func newStringIterators(itrs []Iterator) []StringIterator {
a := make([]StringIterator, 0, len(itrs))
for _, itr := range itrs {
switch itr := itr.(type) {
case StringIterator:
a = append(a, itr)
default:
itr.Close()
}
}
return a
}
// bufStringIterator represents a buffered StringIterator.
type bufStringIterator struct {
itr StringIterator
buf *StringPoint
}
// newBufStringIterator returns a buffered StringIterator.
func newBufStringIterator(itr StringIterator) *bufStringIterator {
return &bufStringIterator{itr: itr}
}
// Stats returns statistics from the input iterator.
func (itr *bufStringIterator) Stats() IteratorStats { return itr.itr.Stats() }
// Close closes the underlying iterator.
func (itr *bufStringIterator) Close() error { return itr.itr.Close() }
// peek returns the next point without removing it from the iterator.
func (itr *bufStringIterator) peek() (*StringPoint, error) {
p, err := itr.Next()
if err != nil {
return nil, err
}
itr.unread(p)
return p, nil
}
// peekTime returns the time of the next point.
// Returns ZeroTime if no more points are available.
func (itr *bufStringIterator) peekTime() (int64, error) {
p, err := itr.peek()
if p == nil || err != nil {
return ZeroTime, err
}
return p.Time, nil
}
// Next returns the buffered point, if one exists, otherwise it calls the underlying iterator.
func (itr *bufStringIterator) Next() (*StringPoint, error) {
buf := itr.buf
if buf != nil {
itr.buf = nil
return buf, nil
}
return itr.itr.Next()
}
// NextInWindow returns the next value if it is between [startTime, endTime).
// If the next value is outside the range then it is moved to the buffer.
func (itr *bufStringIterator) NextInWindow(startTime, endTime int64) (*StringPoint, error) {
v, err := itr.Next()
if v == nil || err != nil {
return nil, err
} else if t := v.Time; t >= endTime || t < startTime {
itr.unread(v)
return nil, nil
}
return v, nil
}
// unread places v in the buffer. It is returned on the next call to Next().
func (itr *bufStringIterator) unread(v *StringPoint) { itr.buf = v }
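
// A hedged sketch of the peek/unread pattern the buffered iterators provide
// (startTime and endTime are assumed window bounds in Unix nanoseconds):
//
//	buf := newBufStringIterator(input)
//	if t, err := buf.peekTime(); err == nil && t != ZeroTime {
//		// Decide which window t falls into, then consume points from it.
//		if p, err := buf.NextInWindow(startTime, endTime); err == nil && p != nil {
//			buf.unread(p) // push the point back if it is not wanted yet
//		}
//	}
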
// stringMergeIterator represents an iterator that combines multiple string iterators.
type stringMergeIterator struct {
inputs []StringIterator
heap *stringMergeHeap
init bool
closed bool
mu sync.RWMutex
// Current iterator and window.
curr *stringMergeHeapItem
window struct {
name string
tags string
startTime int64
endTime int64
}
}
// newStringMergeIterator returns a new instance of stringMergeIterator.
func newStringMergeIterator(inputs []StringIterator, opt IteratorOptions) *stringMergeIterator {
itr := &stringMergeIterator{
inputs: inputs,
heap: &stringMergeHeap{
items: make([]*stringMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Wrap each input in a buffer; inputs with no more points are skipped when the heap is initialized.
bufInput := newBufStringIterator(input)
// Append to the heap.
itr.heap.items = append(itr.heap.items, &stringMergeHeapItem{itr: bufInput})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *stringMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *stringMergeIterator) Close() error {
itr.mu.Lock()
defer itr.mu.Unlock()
for _, input := range itr.inputs {
input.Close()
}
itr.curr = nil
itr.inputs = nil
itr.heap.items = nil
itr.closed = true
return nil
}
// Next returns the next point from the iterator.
func (itr *stringMergeIterator) Next() (*StringPoint, error) {
itr.mu.RLock()
defer itr.mu.RUnlock()
if itr.closed {
return nil, nil
}
// Initialize the heap. This needs to be done lazily on the first call to this iterator
// so that iterator initialization done through the Select() call returns quickly.
// Queries can only be interrupted after the Select() call completes so any operations
// done during iterator creation cannot be interrupted, which is why we do it here
// instead so an interrupt can happen while initializing the heap.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*stringMergeHeapItem, 0, len(items))
for _, item := range items {
if p, err := item.itr.peek(); err != nil {
return nil, err
} else if p == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
for {
// Retrieve the next iterator if we don't have one.
if itr.curr == nil {
if len(itr.heap.items) == 0 {
return nil, nil
}
itr.curr = heap.Pop(itr.heap).(*stringMergeHeapItem)
// Read point and set current window.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
tags := p.Tags.Subset(itr.heap.opt.Dimensions)
itr.window.name, itr.window.tags = p.Name, tags.ID()
itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)
return p, nil
}
// Read the next point from the current iterator.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
// If there are no more points then remove iterator from heap and find next.
if p == nil {
itr.curr = nil
continue
}
// Check if the point is inside of our current window.
inWindow := true
if window := itr.window; window.name != p.Name {
inWindow = false
} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {
inWindow = false
} else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime {
inWindow = false
} else if !opt.Ascending && p.Time < window.startTime {
inWindow = false
}
// If it's outside our window then push iterator back on the heap and find new iterator.
if !inWindow {
itr.curr.itr.unread(p)
heap.Push(itr.heap, itr.curr)
itr.curr = nil
continue
}
return p, nil
}
}
// stringMergeHeap represents a heap of stringMergeHeapItems.
// Items are sorted by their next window and then by name/tags.
type stringMergeHeap struct {
opt IteratorOptions
items []*stringMergeHeapItem
}
func (h *stringMergeHeap) Len() int { return len(h.items) }
func (h *stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *stringMergeHeap) Less(i, j int) bool {
x, err := h.items[i].itr.peek()
if err != nil {
return true
}
y, err := h.items[j].itr.peek()
if err != nil {
return false
}
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() < yTags.ID()
}
} else {
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() > yTags.ID()
}
}
xt, _ := h.opt.Window(x.Time)
yt, _ := h.opt.Window(y.Time)
if h.opt.Ascending {
return xt < yt
}
return xt > yt
}
func (h *stringMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*stringMergeHeapItem))
}
func (h *stringMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
type stringMergeHeapItem struct {
itr *bufStringIterator
}
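
// A hedged usage note: the merge iterator above returns points one window at a
// time, ordered by name, then tags, then window start; points inside a window
// are not re-sorted by time. When strict point-by-point time ordering across
// inputs is required, the sorted-merge iterator defined next is used instead.
// (a and b are assumed StringIterator inputs.)
//
//	itr := newStringMergeIterator([]StringIterator{a, b}, IteratorOptions{Ascending: true})
//	defer itr.Close()
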
// stringSortedMergeIterator is an iterator that sorts and merges multiple iterators into one.
type stringSortedMergeIterator struct {
inputs []StringIterator
heap *stringSortedMergeHeap
init bool
}
// newStringSortedMergeIterator returns an instance of stringSortedMergeIterator.
func newStringSortedMergeIterator(inputs []StringIterator, opt IteratorOptions) Iterator {
itr := &stringSortedMergeIterator{
inputs: inputs,
heap: &stringSortedMergeHeap{
items: make([]*stringSortedMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Append to the heap.
itr.heap.items = append(itr.heap.items, &stringSortedMergeHeapItem{itr: input})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *stringSortedMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *stringSortedMergeIterator) Close() error {
for _, input := range itr.inputs {
input.Close()
}
return nil
}
// Next returns the next points from the iterator.
func (itr *stringSortedMergeIterator) Next() (*StringPoint, error) { return itr.pop() }
// pop returns the next point from the heap.
// It reads the following point from the item's iterator and, if one exists,
// pushes the item back onto the heap.
func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) {
// Initialize the heap. See the MergeIterator to see why this has to be done lazily.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*stringSortedMergeHeapItem, 0, len(items))
for _, item := range items {
var err error
if item.point, err = item.itr.Next(); err != nil {
return nil, err
} else if item.point == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
if len(itr.heap.items) == 0 {
return nil, nil
}
// Read the next item from the heap.
item := heap.Pop(itr.heap).(*stringSortedMergeHeapItem)
if item.err != nil {
return nil, item.err
} else if item.point == nil {
return nil, nil
}
// Copy the point for return.
p := item.point.Clone()
// Read the next item from the cursor. Push back to heap if one exists.
if item.point, item.err = item.itr.Next(); item.point != nil {
heap.Push(itr.heap, item)
}
return p, nil
}
// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type stringSortedMergeHeap struct {
opt IteratorOptions
items []*stringSortedMergeHeapItem
}
func (h *stringSortedMergeHeap) Len() int { return len(h.items) }
func (h *stringSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *stringSortedMergeHeap) Less(i, j int) bool {
x, y := h.items[i].point, h.items[j].point
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() < yTags.ID()
}
if x.Time != y.Time {
return x.Time < y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 < v2
}
}
return false // Times and/or Aux fields are equal.
}
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() > yTags.ID()
}
if x.Time != y.Time {
return x.Time > y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 > v2
}
}
return false // Times and/or Aux fields are equal.
}
func (h *stringSortedMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*stringSortedMergeHeapItem))
}
func (h *stringSortedMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
type stringSortedMergeHeapItem struct {
point *StringPoint
err error
itr StringIterator
}
// stringIteratorScanner scans the results of a StringIterator into a map.
type stringIteratorScanner struct {
input *bufStringIterator
err error
keys []influxql.VarRef
defaultValue interface{}
}
// newStringIteratorScanner creates a new IteratorScanner.
func newStringIteratorScanner(input StringIterator, keys []influxql.VarRef, defaultValue interface{}) *stringIteratorScanner {
return &stringIteratorScanner{
input: newBufStringIterator(input),
keys: keys,
defaultValue: defaultValue,
}
}
func (s *stringIteratorScanner) Peek() (int64, string, Tags) {
if s.err != nil {
return ZeroTime, "", Tags{}
}
p, err := s.input.peek()
if err != nil {
s.err = err
return ZeroTime, "", Tags{}
} else if p == nil {
return ZeroTime, "", Tags{}
}
return p.Time, p.Name, p.Tags
}
func (s *stringIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) {
if s.err != nil {
return
}
p, err := s.input.Next()
if err != nil {
s.err = err
return
} else if p == nil {
s.useDefaults(m)
return
} else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) {
s.useDefaults(m)
s.input.unread(p)
return
}
if k := s.keys[0]; k.Val != "" {
if p.Nil {
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
} else {
m[k.Val] = p.Value
}
}
for i, v := range p.Aux {
k := s.keys[i+1]
switch v.(type) {
case float64, int64, uint64, string, bool:
m[k.Val] = v
default:
// Insert the fill value if one was specified.
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
}
}
func (s *stringIteratorScanner) useDefaults(m map[string]interface{}) {
if s.defaultValue == SkipDefault {
return
}
for _, k := range s.keys {
if k.Val == "" {
continue
}
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
func (s *stringIteratorScanner) Stats() IteratorStats { return s.input.Stats() }
func (s *stringIteratorScanner) Err() error { return s.err }
func (s *stringIteratorScanner) Close() error { return s.input.Close() }
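
// A hedged sketch of how a scanner is driven (the key names are illustrative;
// keys needs one entry for the primary value plus one per aux field):
//
//	keys := []influxql.VarRef{{Val: "value", Type: influxql.String}}
//	s := newStringIteratorScanner(input, keys, SkipDefault)
//	m := map[string]interface{}{}
//	ts, name, tags := s.Peek()
//	s.ScanAt(ts, name, tags, m) // fills m["value"] for the row at ts
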
// stringParallelIterator represents an iterator that pulls data in a separate goroutine.
type stringParallelIterator struct {
input StringIterator
ch chan stringPointError
once sync.Once
closing chan struct{}
wg sync.WaitGroup
}
// newStringParallelIterator returns a new instance of stringParallelIterator.
func newStringParallelIterator(input StringIterator) *stringParallelIterator {
itr := &stringParallelIterator{
input: input,
ch: make(chan stringPointError, 256),
closing: make(chan struct{}),
}
itr.wg.Add(1)
go itr.monitor()
return itr
}
// Stats returns stats from the underlying iterator.
func (itr *stringParallelIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *stringParallelIterator) Close() error {
itr.once.Do(func() { close(itr.closing) })
itr.wg.Wait()
return itr.input.Close()
}
// Next returns the next point from the iterator.
func (itr *stringParallelIterator) Next() (*StringPoint, error) {
v, ok := <-itr.ch
if !ok {
return nil, io.EOF
}
return v.point, v.err
}
// monitor runs in a separate goroutine and actively pulls the next point.
func (itr *stringParallelIterator) monitor() {
defer close(itr.ch)
defer itr.wg.Done()
for {
// Read next point.
p, err := itr.input.Next()
if p != nil {
p = p.Clone()
}
select {
case <-itr.closing:
return
case itr.ch <- stringPointError{point: p, err: err}:
}
}
}
type stringPointError struct {
point *StringPoint
err error
}
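
// A hedged consumption note: the parallel iterator prefetches points on its own
// goroutine. Exhaustion of the input still surfaces as a nil point with a nil
// error, while io.EOF is returned once the iterator has been closed and its
// channel drained.
//
//	par := newStringParallelIterator(input)
//	defer par.Close()
//	for {
//		p, err := par.Next()
//		if p == nil || err != nil {
//			break
//		}
//	}
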
// stringLimitIterator represents an iterator that limits points per group.
type stringLimitIterator struct {
input StringIterator
opt IteratorOptions
n int
prev struct {
name string
tags Tags
}
}
// newStringLimitIterator returns a new instance of stringLimitIterator.
func newStringLimitIterator(input StringIterator, opt IteratorOptions) *stringLimitIterator {
return &stringLimitIterator{
input: input,
opt: opt,
}
}
// Stats returns stats from the underlying iterator.
func (itr *stringLimitIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *stringLimitIterator) Close() error { return itr.input.Close() }
// Next returns the next point from the iterator.
func (itr *stringLimitIterator) Next() (*StringPoint, error) {
for {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Reset window and counter if a new window is encountered.
if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {
itr.prev.name = p.Name
itr.prev.tags = p.Tags
itr.n = 0
}
// Increment counter.
itr.n++
// Read next point if not beyond the offset.
if itr.n <= itr.opt.Offset {
continue
}
// Read next point if we're beyond the limit.
if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {
continue
}
return p, nil
}
}
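
// collectStringLimited is a hand-written sketch (not template output) showing
// how the limit iterator applies Offset and Limit per name/tag series: with
// Offset 1 and Limit 2, each series contributes at most its 2nd and 3rd points.
// The option values are illustrative.
func collectStringLimited(input StringIterator) ([]StringPoint, error) {
	itr := newStringLimitIterator(input, IteratorOptions{Offset: 1, Limit: 2})
	defer itr.Close()

	var a []StringPoint
	for {
		p, err := itr.Next()
		if err != nil {
			return a, err
		} else if p == nil {
			return a, nil
		}
		a = append(a, *p)
	}
}
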
type stringFillIterator struct {
input *bufStringIterator
prev StringPoint
startTime int64
endTime int64
auxFields []interface{}
init bool
opt IteratorOptions
window struct {
name string
tags Tags
time int64
offset int64
}
}
func newStringFillIterator(input StringIterator, expr influxql.Expr, opt IteratorOptions) *stringFillIterator {
if opt.Fill == influxql.NullFill {
if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" {
opt.Fill = influxql.NumberFill
opt.FillValue = ""
}
}
var startTime, endTime int64
if opt.Ascending {
startTime, _ = opt.Window(opt.StartTime)
endTime, _ = opt.Window(opt.EndTime)
} else {
startTime, _ = opt.Window(opt.EndTime)
endTime, _ = opt.Window(opt.StartTime)
}
var auxFields []interface{}
if len(opt.Aux) > 0 {
auxFields = make([]interface{}, len(opt.Aux))
}
return &stringFillIterator{
input: newBufStringIterator(input),
prev: StringPoint{Nil: true},
startTime: startTime,
endTime: endTime,
auxFields: auxFields,
opt: opt,
}
}
func (itr *stringFillIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *stringFillIterator) Close() error { return itr.input.Close() }
func (itr *stringFillIterator) Next() (*StringPoint, error) {
if !itr.init {
p, err := itr.input.peek()
if p == nil || err != nil {
return nil, err
}
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.startTime == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.init = true
}
p, err := itr.input.Next()
if err != nil {
return nil, err
}
// Check if the next point is outside of our window or is nil.
if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {
// If we are inside of an interval, unread the point and continue below to
// construct a new point.
if itr.opt.Ascending && itr.window.time <= itr.endTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
} else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
}
// We are *not* in a current interval. If there is no next point,
// we are at the end of all intervals.
if p == nil {
return nil, nil
}
// Set the new interval.
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.window.time == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.prev = StringPoint{Nil: true}
}
// Check if the point is our next expected point.
CONSTRUCT:
if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {
if p != nil {
itr.input.unread(p)
}
p = &StringPoint{
Name: itr.window.name,
Tags: itr.window.tags,
Time: itr.window.time,
Aux: itr.auxFields,
}
switch itr.opt.Fill {
case influxql.LinearFill:
fallthrough
case influxql.NullFill:
p.Nil = true
case influxql.NumberFill:
p.Value, _ = castToString(itr.opt.FillValue)
case influxql.PreviousFill:
if !itr.prev.Nil {
p.Value = itr.prev.Value
p.Nil = itr.prev.Nil
} else {
p.Nil = true
}
}
} else {
itr.prev = *p
}
// Advance the expected time. Do not advance to a new window here
// as there may be lingering points with the same timestamp in the previous
// window.
if itr.opt.Ascending {
itr.window.time += int64(itr.opt.Interval.Duration)
} else {
itr.window.time -= int64(itr.opt.Interval.Duration)
}
// Check to see if we have passed over an offset change and adjust the time
// to account for this new offset.
if itr.opt.Location != nil {
if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {
diff := itr.window.offset - offset
if abs(diff) < int64(itr.opt.Interval.Duration) {
itr.window.time += diff
}
itr.window.offset = offset
}
}
return p, nil
}
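
// A hedged illustration of the fill behavior above (the interval, timestamps,
// and option literals are assumptions): with a one-minute interval and
// influxql.PreviousFill, a series reporting at 10:00 and 10:02 gains a
// synthesized 10:01 point carrying the 10:00 value; with influxql.NullFill the
// 10:01 point is emitted with Nil set instead.
//
//	opt := IteratorOptions{
//		StartTime: start, EndTime: end, // assumed Unix-nanosecond bounds
//		Interval:  Interval{Duration: time.Minute},
//		Ascending: true,
//		Fill:      influxql.PreviousFill,
//	}
//	itr := newStringFillIterator(input, nil, opt)
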
// stringIntervalIterator represents a string implementation of IntervalIterator.
type stringIntervalIterator struct {
input StringIterator
opt IteratorOptions
}
func newStringIntervalIterator(input StringIterator, opt IteratorOptions) *stringIntervalIterator {
return &stringIntervalIterator{input: input, opt: opt}
}
func (itr *stringIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *stringIntervalIterator) Close() error { return itr.input.Close() }
func (itr *stringIntervalIterator) Next() (*StringPoint, error) {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
p.Time, _ = itr.opt.Window(p.Time)
// If we see the minimum allowable time, set the time to zero so we don't
// break the default returned time for aggregate queries without times.
if p.Time == influxql.MinTime {
p.Time = 0
}
return p, nil
}
// stringInterruptIterator represents a string implementation of InterruptIterator.
type stringInterruptIterator struct {
input StringIterator
closing <-chan struct{}
count int
}
func newStringInterruptIterator(input StringIterator, closing <-chan struct{}) *stringInterruptIterator {
return &stringInterruptIterator{input: input, closing: closing}
}
func (itr *stringInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *stringInterruptIterator) Close() error { return itr.input.Close() }
func (itr *stringInterruptIterator) Next() (*StringPoint, error) {
// Only check if the channel is closed every 256 points. The bitmask check
// keeps per-point overhead low while still allowing a long-running
// iterator to be interrupted promptly.
if itr.count&0xFF == 0xFF {
select {
case <-itr.closing:
return nil, itr.Close()
default:
// Reset iterator count to zero and fall through to emit the next point.
itr.count = 0
}
}
// Increment the counter for every point read.
itr.count++
return itr.input.Next()
}
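
// A hedged wiring sketch: the closing channel typically comes from the query
// executor; closing it stops iteration within roughly 256 points.
//
//	closing := make(chan struct{})
//	itr := newStringInterruptIterator(input, closing)
//	// close(closing) from another goroutine to interrupt the iterator.
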
// stringCloseInterruptIterator represents a string implementation of CloseInterruptIterator.
type stringCloseInterruptIterator struct {
input StringIterator
closing <-chan struct{}
done chan struct{}
once sync.Once
}
func newStringCloseInterruptIterator(input StringIterator, closing <-chan struct{}) *stringCloseInterruptIterator {
itr := &stringCloseInterruptIterator{
input: input,
closing: closing,
done: make(chan struct{}),
}
go itr.monitor()
return itr
}
func (itr *stringCloseInterruptIterator) monitor() {
select {
case <-itr.closing:
itr.Close()
case <-itr.done:
}
}
func (itr *stringCloseInterruptIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *stringCloseInterruptIterator) Close() error {
itr.once.Do(func() {
close(itr.done)
itr.input.Close()
})
return nil
}
func (itr *stringCloseInterruptIterator) Next() (*StringPoint, error) {
p, err := itr.input.Next()
if err != nil {
// Check if the iterator was closed.
select {
case <-itr.done:
return nil, nil
default:
return nil, err
}
}
return p, nil
}
// stringReduceFloatIterator executes a reducer for every interval and buffers the result.
type stringReduceFloatIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
points []FloatPoint
keepTags bool
}
func newStringReduceFloatIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, FloatPointEmitter)) *stringReduceFloatIterator {
return &stringReduceFloatIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *stringReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringReduceFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next aggregated point for the next available interval.
func (itr *stringReduceFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// stringReduceFloatPoint stores the reduced data for a name/tag combination.
type stringReduceFloatPoint struct {
Name string
Tags Tags
Aggregator StringPointAggregator
Emitter FloatPointEmitter
}
// reduce executes the aggregator once for every point in the next window
// and returns the points emitted for that window.
func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*stringReduceFloatPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateString(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Reverse sort points by name & tag.
// This ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]FloatPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = floatPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// stringStreamFloatIterator streams inputs into the iterator and emits points gradually.
type stringStreamFloatIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
m map[string]*stringReduceFloatPoint
points []FloatPoint
}
// newStringStreamFloatIterator returns a new instance of stringStreamFloatIterator.
func newStringStreamFloatIterator(input StringIterator, createFn func() (StringPointAggregator, FloatPointEmitter), opt IteratorOptions) *stringStreamFloatIterator {
return &stringStreamFloatIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*stringReduceFloatPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *stringStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringStreamFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *stringStreamFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *stringStreamFloatIterator) reduce() ([]FloatPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []FloatPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateString(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// stringReduceIntegerIterator executes a reducer for every interval and buffers the result.
type stringReduceIntegerIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
points []IntegerPoint
keepTags bool
}
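// newStringReduceIntegerIterator returns a new instance of stringReduceIntegerIterator.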
func newStringReduceIntegerIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, IntegerPointEmitter)) *stringReduceIntegerIterator {
return &stringReduceIntegerIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *stringReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringReduceIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *stringReduceIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// stringReduceIntegerPoint stores the reduced data for a name/tag combination.
type stringReduceIntegerPoint struct {
Name string
Tags Tags
Aggregator StringPointAggregator
Emitter IntegerPointEmitter
}
// reduce creates an aggregator for each name/tag combination in the next window,
// aggregates every point in that window, and returns the emitted points.
func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*stringReduceIntegerPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateString(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys in the reverse of the output order; Next() pops points off the
// end of the slice, so this ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]IntegerPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = integerPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// stringStreamIntegerIterator streams inputs into the iterator and emits points gradually.
type stringStreamIntegerIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
m map[string]*stringReduceIntegerPoint
points []IntegerPoint
}
// newStringStreamIntegerIterator returns a new instance of stringStreamIntegerIterator.
func newStringStreamIntegerIterator(input StringIterator, createFn func() (StringPointAggregator, IntegerPointEmitter), opt IteratorOptions) *stringStreamIntegerIterator {
return &stringStreamIntegerIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*stringReduceIntegerPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *stringStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringStreamIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *stringStreamIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *stringStreamIntegerIterator) reduce() ([]IntegerPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []IntegerPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateString(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// stringReduceUnsignedIterator executes a reducer for every interval and buffers the result.
type stringReduceUnsignedIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
points []UnsignedPoint
keepTags bool
}
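// newStringReduceUnsignedIterator returns a new instance of stringReduceUnsignedIterator.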
func newStringReduceUnsignedIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, UnsignedPointEmitter)) *stringReduceUnsignedIterator {
return &stringReduceUnsignedIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *stringReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringReduceUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *stringReduceUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// stringReduceUnsignedPoint stores the reduced data for a name/tag combination.
type stringReduceUnsignedPoint struct {
Name string
Tags Tags
Aggregator StringPointAggregator
Emitter UnsignedPointEmitter
}
// reduce creates an aggregator for each name/tag combination in the next window,
// aggregates every point in that window, and returns the emitted points.
func (itr *stringReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*stringReduceUnsignedPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateString(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys in the reverse of the output order; Next() pops points off the
// end of the slice, so this ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]UnsignedPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = unsignedPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// stringStreamUnsignedIterator streams inputs into the iterator and emits points gradually.
type stringStreamUnsignedIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
m map[string]*stringReduceUnsignedPoint
points []UnsignedPoint
}
// newStringStreamUnsignedIterator returns a new instance of stringStreamUnsignedIterator.
func newStringStreamUnsignedIterator(input StringIterator, createFn func() (StringPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *stringStreamUnsignedIterator {
return &stringStreamUnsignedIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*stringReduceUnsignedPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *stringStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringStreamUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *stringStreamUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *stringStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []UnsignedPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateString(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// stringReduceStringIterator executes a reducer for every interval and buffers the result.
type stringReduceStringIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
points []StringPoint
keepTags bool
}
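// newStringReduceStringIterator returns a new instance of stringReduceStringIterator.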
func newStringReduceStringIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, StringPointEmitter)) *stringReduceStringIterator {
return &stringReduceStringIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *stringReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringReduceStringIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *stringReduceStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// stringReduceStringPoint stores the reduced data for a name/tag combination.
type stringReduceStringPoint struct {
Name string
Tags Tags
Aggregator StringPointAggregator
Emitter StringPointEmitter
}
// reduce creates an aggregator for each name/tag combination in the next window,
// aggregates every point in that window, and returns the emitted points.
func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*stringReduceStringPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateString(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys in the reverse of the output order; Next() pops points off the
// end of the slice, so this ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]StringPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = stringPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// stringStreamStringIterator streams inputs into the iterator and emits points gradually.
type stringStreamStringIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
m map[string]*stringReduceStringPoint
points []StringPoint
}
// newStringStreamStringIterator returns a new instance of stringStreamStringIterator.
func newStringStreamStringIterator(input StringIterator, createFn func() (StringPointAggregator, StringPointEmitter), opt IteratorOptions) *stringStreamStringIterator {
return &stringStreamStringIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*stringReduceStringPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *stringStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringStreamStringIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *stringStreamStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *stringStreamStringIterator) reduce() ([]StringPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []StringPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateString(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// stringReduceBooleanIterator executes a reducer for every interval and buffers the result.
type stringReduceBooleanIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
points []BooleanPoint
keepTags bool
}
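// newStringReduceBooleanIterator returns a new instance of stringReduceBooleanIterator.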
func newStringReduceBooleanIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, BooleanPointEmitter)) *stringReduceBooleanIterator {
return &stringReduceBooleanIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *stringReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringReduceBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *stringReduceBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// stringReduceBooleanPoint stores the reduced data for a name/tag combination.
type stringReduceBooleanPoint struct {
Name string
Tags Tags
Aggregator StringPointAggregator
Emitter BooleanPointEmitter
}
// reduce creates an aggregator for each name/tag combination in the next window,
// aggregates every point in that window, and returns the emitted points.
func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*stringReduceBooleanPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
}
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateString(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys in the reverse of the output order; Next() pops points off the
// end of the slice, so this ensures a consistent order of output.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]BooleanPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = booleanPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// stringStreamBooleanIterator streams inputs into the iterator and emits points gradually.
type stringStreamBooleanIterator struct {
input *bufStringIterator
create func() (StringPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
m map[string]*stringReduceBooleanPoint
points []BooleanPoint
}
// newStringStreamBooleanIterator returns a new instance of stringStreamBooleanIterator.
func newStringStreamBooleanIterator(input StringIterator, createFn func() (StringPointAggregator, BooleanPointEmitter), opt IteratorOptions) *stringStreamBooleanIterator {
return &stringStreamBooleanIterator{
input: newBufStringIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*stringReduceBooleanPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *stringStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringStreamBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *stringStreamBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *stringStreamBooleanIterator) reduce() ([]BooleanPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []BooleanPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &stringReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateString(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// stringDedupeIterator only outputs unique points.
// This differs from the DistinctIterator in that it compares all aux fields too.
// This iterator is relatively inefficient and should only be used on small
// datasets such as meta query results.
type stringDedupeIterator struct {
input StringIterator
m map[string]struct{} // lookup of points already sent
}
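// stringIteratorMapper reads rows from a Cursor and maps them to StringPoints.
// The driver IteratorMap supplies the primary value and the fields IteratorMaps
// supply the auxiliary values.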
type stringIteratorMapper struct {
cur Cursor
row Row
driver IteratorMap // which iterator to use for the primary value, can be nil
fields []IteratorMap // which iterator to use for an aux field
point StringPoint
}
func newStringIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *stringIteratorMapper {
return &stringIteratorMapper{
cur: cur,
driver: driver,
fields: fields,
point: StringPoint{
Aux: make([]interface{}, len(fields)),
},
}
}
func (itr *stringIteratorMapper) Next() (*StringPoint, error) {
if !itr.cur.Scan(&itr.row) {
if err := itr.cur.Err(); err != nil {
return nil, err
}
return nil, nil
}
itr.point.Time = itr.row.Time
itr.point.Name = itr.row.Series.Name
itr.point.Tags = itr.row.Series.Tags
if itr.driver != nil {
if v := itr.driver.Value(&itr.row); v != nil {
if v, ok := castToString(v); ok {
itr.point.Value = v
itr.point.Nil = false
} else {
itr.point.Value = ""
itr.point.Nil = true
}
} else {
itr.point.Value = ""
itr.point.Nil = true
}
}
for i, f := range itr.fields {
itr.point.Aux[i] = f.Value(&itr.row)
}
return &itr.point, nil
}
func (itr *stringIteratorMapper) Stats() IteratorStats {
return itr.cur.Stats()
}
func (itr *stringIteratorMapper) Close() error {
return itr.cur.Close()
}
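// stringFilterIterator filters points from the input iterator using a condition
// expression evaluated against each point's tags and auxiliary fields.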
type stringFilterIterator struct {
input StringIterator
cond influxql.Expr
opt IteratorOptions
m map[string]interface{}
}
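// newStringFilterIterator returns an iterator that filters points by cond.
// Time conditions are stripped from cond first; if nothing remains to evaluate,
// the input iterator is returned unmodified.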
func newStringFilterIterator(input StringIterator, cond influxql.Expr, opt IteratorOptions) StringIterator {
// Strip out time conditions from the WHERE clause.
// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.
n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node {
switch n := n.(type) {
case *influxql.BinaryExpr:
if n.LHS.String() == "time" {
return &influxql.BooleanLiteral{Val: true}
}
}
return n
})
cond, _ = n.(influxql.Expr)
if cond == nil {
return input
} else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val {
return input
}
return &stringFilterIterator{
input: input,
cond: cond,
opt: opt,
m: make(map[string]interface{}),
}
}
func (itr *stringFilterIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *stringFilterIterator) Close() error { return itr.input.Close() }
func (itr *stringFilterIterator) Next() (*StringPoint, error) {
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
}
for i, ref := range itr.opt.Aux {
itr.m[ref.Val] = p.Aux[i]
}
for k, v := range p.Tags.KeyValues() {
itr.m[k] = v
}
if !influxql.EvalBool(itr.cond, itr.m) {
continue
}
return p, nil
}
}
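// stringTagSubsetIterator restricts each point's tags to the query dimensions,
// recomputing the subset only when the tags differ from the previous point.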
type stringTagSubsetIterator struct {
input StringIterator
point StringPoint
lastTags Tags
dimensions []string
}
func newStringTagSubsetIterator(input StringIterator, opt IteratorOptions) *stringTagSubsetIterator {
return &stringTagSubsetIterator{
input: input,
dimensions: opt.GetDimensions(),
}
}
func (itr *stringTagSubsetIterator) Next() (*StringPoint, error) {
p, err := itr.input.Next()
if err != nil {
return nil, err
} else if p == nil {
return nil, nil
}
itr.point.Name = p.Name
if !p.Tags.Equal(itr.lastTags) {
itr.point.Tags = p.Tags.Subset(itr.dimensions)
itr.lastTags = p.Tags
}
itr.point.Time = p.Time
itr.point.Value = p.Value
itr.point.Aux = p.Aux
itr.point.Aggregated = p.Aggregated
itr.point.Nil = p.Nil
return &itr.point, nil
}
func (itr *stringTagSubsetIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *stringTagSubsetIterator) Close() error {
return itr.input.Close()
}
// newStringDedupeIterator returns a new instance of stringDedupeIterator.
func newStringDedupeIterator(input StringIterator) *stringDedupeIterator {
return &stringDedupeIterator{
input: input,
m: make(map[string]struct{}),
}
}
// Stats returns stats from the input iterator.
func (itr *stringDedupeIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *stringDedupeIterator) Close() error { return itr.input.Close() }
// Next returns the next unique point from the input iterator.
func (itr *stringDedupeIterator) Next() (*StringPoint, error) {
for {
// Read next point.
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Serialize to bytes to store in lookup.
buf, err := proto.Marshal(encodeStringPoint(p))
if err != nil {
return nil, err
}
// If the point has already been output then move to the next point.
if _, ok := itr.m[string(buf)]; ok {
continue
}
// Otherwise mark it as emitted and return point.
itr.m[string(buf)] = struct{}{}
return p, nil
}
}
// stringReaderIterator represents an iterator that streams from a reader.
type stringReaderIterator struct {
r io.Reader
dec *StringPointDecoder
}
// newStringReaderIterator returns a new instance of stringReaderIterator.
func newStringReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *stringReaderIterator {
dec := NewStringPointDecoder(ctx, r)
dec.stats = stats
return &stringReaderIterator{
r: r,
dec: dec,
}
}
// Stats returns stats about points processed.
func (itr *stringReaderIterator) Stats() IteratorStats { return itr.dec.stats }
// Close closes the underlying reader, if applicable.
func (itr *stringReaderIterator) Close() error {
if r, ok := itr.r.(io.ReadCloser); ok {
return r.Close()
}
return nil
}
// Next returns the next point from the iterator.
func (itr *stringReaderIterator) Next() (*StringPoint, error) {
// OPTIMIZE(benbjohnson): Reuse point on iterator.
// Unmarshal next point.
p := &StringPoint{}
if err := itr.dec.DecodeStringPoint(p); err == io.EOF {
return nil, nil
} else if err != nil {
return nil, err
}
return p, nil
}
// BooleanIterator represents a stream of boolean points.
type BooleanIterator interface {
Iterator
Next() (*BooleanPoint, error)
}
// newBooleanIterators converts a slice of Iterator to a slice of BooleanIterator.
// It drops and closes any iterator in itrs that cannot be cast to a BooleanIterator.
func newBooleanIterators(itrs []Iterator) []BooleanIterator {
a := make([]BooleanIterator, 0, len(itrs))
for _, itr := range itrs {
switch itr := itr.(type) {
case BooleanIterator:
a = append(a, itr)
default:
itr.Close()
}
}
return a
}
// bufBooleanIterator represents a buffered BooleanIterator.
type bufBooleanIterator struct {
itr BooleanIterator
buf *BooleanPoint
}
// newBufBooleanIterator returns a buffered BooleanIterator.
func newBufBooleanIterator(itr BooleanIterator) *bufBooleanIterator {
return &bufBooleanIterator{itr: itr}
}
// Stats returns statistics from the input iterator.
func (itr *bufBooleanIterator) Stats() IteratorStats { return itr.itr.Stats() }
// Close closes the underlying iterator.
func (itr *bufBooleanIterator) Close() error { return itr.itr.Close() }
// peek returns the next point without removing it from the iterator.
func (itr *bufBooleanIterator) peek() (*BooleanPoint, error) {
p, err := itr.Next()
if err != nil {
return nil, err
}
itr.unread(p)
return p, nil
}
// peekTime returns the time of the next point.
// Returns zero time if no more points available.
func (itr *bufBooleanIterator) peekTime() (int64, error) {
p, err := itr.peek()
if p == nil || err != nil {
return ZeroTime, err
}
return p.Time, nil
}
// Next returns the current buffer, if exists, or calls the underlying iterator.
func (itr *bufBooleanIterator) Next() (*BooleanPoint, error) {
buf := itr.buf
if buf != nil {
itr.buf = nil
return buf, nil
}
return itr.itr.Next()
}
// NextInWindow returns the next value if it is between [startTime, endTime).
// If the next value is outside the range then it is moved to the buffer.
func (itr *bufBooleanIterator) NextInWindow(startTime, endTime int64) (*BooleanPoint, error) {
v, err := itr.Next()
if v == nil || err != nil {
return nil, err
} else if t := v.Time; t >= endTime || t < startTime {
itr.unread(v)
return nil, nil
}
return v, nil
}
// unread sets v to the buffer. It is read on the next call to Next().
func (itr *bufBooleanIterator) unread(v *BooleanPoint) { itr.buf = v }
// booleanMergeIterator represents an iterator that combines multiple boolean iterators.
type booleanMergeIterator struct {
inputs []BooleanIterator
heap *booleanMergeHeap
init bool
closed bool
mu sync.RWMutex
// Current iterator and window.
curr *booleanMergeHeapItem
window struct {
name string
tags string
startTime int64
endTime int64
}
}
// newBooleanMergeIterator returns a new instance of booleanMergeIterator.
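//
// A minimal usage sketch, assuming itrA and itrB are placeholder BooleanIterator
// values and opt is an IteratorOptions configured for the query:
//
//	merged := newBooleanMergeIterator([]BooleanIterator{itrA, itrB}, opt)
//	defer merged.Close()
//	for {
//		p, err := merged.Next()
//		if err != nil || p == nil {
//			break
//		}
//		// consume p
//	}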
func newBooleanMergeIterator(inputs []BooleanIterator, opt IteratorOptions) *booleanMergeIterator {
itr := &booleanMergeIterator{
inputs: inputs,
heap: &booleanMergeHeap{
items: make([]*booleanMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Wrap in buffer, ignore any inputs without any more points.
bufInput := newBufBooleanIterator(input)
// Append to the heap.
itr.heap.items = append(itr.heap.items, &booleanMergeHeapItem{itr: bufInput})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *booleanMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *booleanMergeIterator) Close() error {
itr.mu.Lock()
defer itr.mu.Unlock()
for _, input := range itr.inputs {
input.Close()
}
itr.curr = nil
itr.inputs = nil
itr.heap.items = nil
itr.closed = true
return nil
}
// Next returns the next point from the iterator.
func (itr *booleanMergeIterator) Next() (*BooleanPoint, error) {
itr.mu.RLock()
defer itr.mu.RUnlock()
if itr.closed {
return nil, nil
}
// Initialize the heap. This needs to be done lazily on the first call to this iterator
// so that iterator initialization done through the Select() call returns quickly.
// Queries can only be interrupted after the Select() call completes, so any operations
// done during iterator creation cannot be interrupted. Initializing the heap here instead
// allows an interrupt to occur while the heap is being built.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*booleanMergeHeapItem, 0, len(items))
for _, item := range items {
if p, err := item.itr.peek(); err != nil {
return nil, err
} else if p == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
for {
// Retrieve the next iterator if we don't have one.
if itr.curr == nil {
if len(itr.heap.items) == 0 {
return nil, nil
}
itr.curr = heap.Pop(itr.heap).(*booleanMergeHeapItem)
// Read point and set current window.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
tags := p.Tags.Subset(itr.heap.opt.Dimensions)
itr.window.name, itr.window.tags = p.Name, tags.ID()
itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)
return p, nil
}
// Read the next point from the current iterator.
p, err := itr.curr.itr.Next()
if err != nil {
return nil, err
}
// If there are no more points then remove iterator from heap and find next.
if p == nil {
itr.curr = nil
continue
}
// Check if the point is inside of our current window.
inWindow := true
if window := itr.window; window.name != p.Name {
inWindow = false
} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {
inWindow = false
} else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime {
inWindow = false
} else if !opt.Ascending && p.Time < window.startTime {
inWindow = false
}
// If it's outside our window then push iterator back on the heap and find new iterator.
if !inWindow {
itr.curr.itr.unread(p)
heap.Push(itr.heap, itr.curr)
itr.curr = nil
continue
}
return p, nil
}
}
// booleanMergeHeap represents a heap of booleanMergeHeapItems.
// Items are sorted by their next window and then by name/tags.
type booleanMergeHeap struct {
opt IteratorOptions
items []*booleanMergeHeapItem
}
func (h *booleanMergeHeap) Len() int { return len(h.items) }
func (h *booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *booleanMergeHeap) Less(i, j int) bool {
x, err := h.items[i].itr.peek()
if err != nil {
return true
}
y, err := h.items[j].itr.peek()
if err != nil {
return false
}
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() < yTags.ID()
}
} else {
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {
return xTags.ID() > yTags.ID()
}
}
xt, _ := h.opt.Window(x.Time)
yt, _ := h.opt.Window(y.Time)
if h.opt.Ascending {
return xt < yt
}
return xt > yt
}
func (h *booleanMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*booleanMergeHeapItem))
}
func (h *booleanMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
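// booleanMergeHeapItem is a heap entry that wraps a buffered boolean iterator.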
type booleanMergeHeapItem struct {
itr *bufBooleanIterator
}
// booleanSortedMergeIterator is an iterator that sorts and merges multiple iterators into one.
type booleanSortedMergeIterator struct {
inputs []BooleanIterator
heap *booleanSortedMergeHeap
init bool
}
// newBooleanSortedMergeIterator returns an instance of booleanSortedMergeIterator.
func newBooleanSortedMergeIterator(inputs []BooleanIterator, opt IteratorOptions) Iterator {
itr := &booleanSortedMergeIterator{
inputs: inputs,
heap: &booleanSortedMergeHeap{
items: make([]*booleanSortedMergeHeapItem, 0, len(inputs)),
opt: opt,
},
}
// Initialize heap items.
for _, input := range inputs {
// Append to the heap.
itr.heap.items = append(itr.heap.items, &booleanSortedMergeHeapItem{itr: input})
}
return itr
}
// Stats returns an aggregation of stats from the underlying iterators.
func (itr *booleanSortedMergeIterator) Stats() IteratorStats {
var stats IteratorStats
for _, input := range itr.inputs {
stats.Add(input.Stats())
}
return stats
}
// Close closes the underlying iterators.
func (itr *booleanSortedMergeIterator) Close() error {
for _, input := range itr.inputs {
input.Close()
}
return nil
}
// Next returns the next points from the iterator.
func (itr *booleanSortedMergeIterator) Next() (*BooleanPoint, error) { return itr.pop() }
// pop returns the next point from the heap.
// Reads the next point from item's cursor and puts it back on the heap.
func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) {
// Initialize the heap. See the MergeIterator to see why this has to be done lazily.
if !itr.init {
items := itr.heap.items
itr.heap.items = make([]*booleanSortedMergeHeapItem, 0, len(items))
for _, item := range items {
var err error
if item.point, err = item.itr.Next(); err != nil {
return nil, err
} else if item.point == nil {
continue
}
itr.heap.items = append(itr.heap.items, item)
}
heap.Init(itr.heap)
itr.init = true
}
if len(itr.heap.items) == 0 {
return nil, nil
}
// Read the next item from the heap.
item := heap.Pop(itr.heap).(*booleanSortedMergeHeapItem)
if item.err != nil {
return nil, item.err
} else if item.point == nil {
return nil, nil
}
// Copy the point for return.
p := item.point.Clone()
// Read the next item from the cursor. Push back to heap if one exists.
if item.point, item.err = item.itr.Next(); item.point != nil {
heap.Push(itr.heap, item)
}
return p, nil
}
// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems.
// Items are sorted with the following priority:
// - By their measurement name;
// - By their tag keys/values;
// - By time; or
// - By their Aux field values.
type booleanSortedMergeHeap struct {
opt IteratorOptions
items []*booleanSortedMergeHeapItem
}
func (h *booleanSortedMergeHeap) Len() int { return len(h.items) }
func (h *booleanSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }
func (h *booleanSortedMergeHeap) Less(i, j int) bool {
x, y := h.items[i].point, h.items[j].point
if h.opt.Ascending {
if x.Name != y.Name {
return x.Name < y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() < yTags.ID()
}
if x.Time != y.Time {
return x.Time < y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 < v2
}
}
return false // Times and/or Aux fields are equal.
}
if x.Name != y.Name {
return x.Name > y.Name
} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {
return xTags.ID() > yTags.ID()
}
if x.Time != y.Time {
return x.Time > y.Time
}
if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) {
for i := 0; i < len(x.Aux); i++ {
v1, ok1 := x.Aux[i].(string)
v2, ok2 := y.Aux[i].(string)
if !ok1 || !ok2 {
// Unsupported types used in Aux fields. Maybe they
// need to be added here?
return false
} else if v1 == v2 {
continue
}
return v1 > v2
}
}
return false // Times and/or Aux fields are equal.
}
func (h *booleanSortedMergeHeap) Push(x interface{}) {
h.items = append(h.items, x.(*booleanSortedMergeHeapItem))
}
func (h *booleanSortedMergeHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
h.items = old[0 : n-1]
return item
}
type booleanSortedMergeHeapItem struct {
point *BooleanPoint
err error
itr BooleanIterator
}
// booleanIteratorScanner scans the results of a BooleanIterator into a map.
type booleanIteratorScanner struct {
input *bufBooleanIterator
err error
keys []influxql.VarRef
defaultValue interface{}
}
// newBooleanIteratorScanner creates a new IteratorScanner.
func newBooleanIteratorScanner(input BooleanIterator, keys []influxql.VarRef, defaultValue interface{}) *booleanIteratorScanner {
return &booleanIteratorScanner{
input: newBufBooleanIterator(input),
keys: keys,
defaultValue: defaultValue,
}
}
func (s *booleanIteratorScanner) Peek() (int64, string, Tags) {
if s.err != nil {
return ZeroTime, "", Tags{}
}
p, err := s.input.peek()
if err != nil {
s.err = err
return ZeroTime, "", Tags{}
} else if p == nil {
return ZeroTime, "", Tags{}
}
return p.Time, p.Name, p.Tags
}
func (s *booleanIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) {
if s.err != nil {
return
}
p, err := s.input.Next()
if err != nil {
s.err = err
return
} else if p == nil {
s.useDefaults(m)
return
} else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) {
s.useDefaults(m)
s.input.unread(p)
return
}
if k := s.keys[0]; k.Val != "" {
if p.Nil {
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
} else {
m[k.Val] = p.Value
}
}
for i, v := range p.Aux {
k := s.keys[i+1]
switch v.(type) {
case float64, int64, uint64, string, bool:
m[k.Val] = v
default:
// Insert the fill value if one was specified.
if s.defaultValue != SkipDefault {
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
}
}
func (s *booleanIteratorScanner) useDefaults(m map[string]interface{}) {
if s.defaultValue == SkipDefault {
return
}
for _, k := range s.keys {
if k.Val == "" {
continue
}
m[k.Val] = castToType(s.defaultValue, k.Type)
}
}
func (s *booleanIteratorScanner) Stats() IteratorStats { return s.input.Stats() }
func (s *booleanIteratorScanner) Err() error { return s.err }
func (s *booleanIteratorScanner) Close() error { return s.input.Close() }
// booleanParallelIterator represents an iterator that pulls data in a separate goroutine.
type booleanParallelIterator struct {
input BooleanIterator
ch chan booleanPointError
once sync.Once
closing chan struct{}
wg sync.WaitGroup
}
// newBooleanParallelIterator returns a new instance of booleanParallelIterator.
func newBooleanParallelIterator(input BooleanIterator) *booleanParallelIterator {
itr := &booleanParallelIterator{
input: input,
ch: make(chan booleanPointError, 256),
closing: make(chan struct{}),
}
itr.wg.Add(1)
go itr.monitor()
return itr
}
// Stats returns stats from the underlying iterator.
func (itr *booleanParallelIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *booleanParallelIterator) Close() error {
itr.once.Do(func() { close(itr.closing) })
itr.wg.Wait()
return itr.input.Close()
}
// Next returns the next point from the iterator.
func (itr *booleanParallelIterator) Next() (*BooleanPoint, error) {
v, ok := <-itr.ch
if !ok {
return nil, io.EOF
}
return v.point, v.err
}
// monitor runs in a separate goroutine and actively pulls the next point.
func (itr *booleanParallelIterator) monitor() {
defer close(itr.ch)
defer itr.wg.Done()
for {
// Read next point.
p, err := itr.input.Next()
if p != nil {
p = p.Clone()
}
select {
case <-itr.closing:
return
case itr.ch <- booleanPointError{point: p, err: err}:
}
}
}
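// booleanPointError pairs a BooleanPoint with an error so both can be sent over
// the parallel iterator's channel.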
type booleanPointError struct {
point *BooleanPoint
err error
}
// booleanLimitIterator represents an iterator that limits points per group.
type booleanLimitIterator struct {
input BooleanIterator
opt IteratorOptions
n int
prev struct {
name string
tags Tags
}
}
// newBooleanLimitIterator returns a new instance of booleanLimitIterator.
func newBooleanLimitIterator(input BooleanIterator, opt IteratorOptions) *booleanLimitIterator {
return &booleanLimitIterator{
input: input,
opt: opt,
}
}
// Stats returns stats from the underlying iterator.
func (itr *booleanLimitIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the underlying iterators.
func (itr *booleanLimitIterator) Close() error { return itr.input.Close() }
// Next returns the next point from the iterator.
func (itr *booleanLimitIterator) Next() (*BooleanPoint, error) {
for {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Reset window and counter if a new window is encountered.
if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {
itr.prev.name = p.Name
itr.prev.tags = p.Tags
itr.n = 0
}
// Increment counter.
itr.n++
// Read next point if not beyond the offset.
if itr.n <= itr.opt.Offset {
continue
}
// Read next point if we're beyond the limit.
if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {
continue
}
return p, nil
}
}
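// booleanFillIterator fills gaps in each interval window according to the fill
// option in IteratorOptions.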
type booleanFillIterator struct {
input *bufBooleanIterator
prev BooleanPoint
startTime int64
endTime int64
auxFields []interface{}
init bool
opt IteratorOptions
window struct {
name string
tags Tags
time int64
offset int64
}
}
func newBooleanFillIterator(input BooleanIterator, expr influxql.Expr, opt IteratorOptions) *booleanFillIterator {
if opt.Fill == influxql.NullFill {
if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" {
opt.Fill = influxql.NumberFill
opt.FillValue = false
}
}
var startTime, endTime int64
if opt.Ascending {
startTime, _ = opt.Window(opt.StartTime)
endTime, _ = opt.Window(opt.EndTime)
} else {
startTime, _ = opt.Window(opt.EndTime)
endTime, _ = opt.Window(opt.StartTime)
}
var auxFields []interface{}
if len(opt.Aux) > 0 {
auxFields = make([]interface{}, len(opt.Aux))
}
return &booleanFillIterator{
input: newBufBooleanIterator(input),
prev: BooleanPoint{Nil: true},
startTime: startTime,
endTime: endTime,
auxFields: auxFields,
opt: opt,
}
}
func (itr *booleanFillIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *booleanFillIterator) Close() error { return itr.input.Close() }
func (itr *booleanFillIterator) Next() (*BooleanPoint, error) {
if !itr.init {
p, err := itr.input.peek()
if p == nil || err != nil {
return nil, err
}
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.startTime == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.init = true
}
p, err := itr.input.Next()
if err != nil {
return nil, err
}
// Check if the next point is outside of our window or is nil.
if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {
// If we are inside of an interval, unread the point and continue below to
// construct a new point.
if itr.opt.Ascending && itr.window.time <= itr.endTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
} else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime {
itr.input.unread(p)
p = nil
goto CONSTRUCT
}
// We are *not* in a current interval. If there is no next point,
// we are at the end of all intervals.
if p == nil {
return nil, nil
}
// Set the new interval.
itr.window.name, itr.window.tags = p.Name, p.Tags
itr.window.time = itr.startTime
if itr.window.time == influxql.MinTime {
itr.window.time, _ = itr.opt.Window(p.Time)
}
if itr.opt.Location != nil {
_, itr.window.offset = itr.opt.Zone(itr.window.time)
}
itr.prev = BooleanPoint{Nil: true}
}
// Check if the point is our next expected point.
CONSTRUCT:
if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {
if p != nil {
itr.input.unread(p)
}
p = &BooleanPoint{
Name: itr.window.name,
Tags: itr.window.tags,
Time: itr.window.time,
Aux: itr.auxFields,
}
switch itr.opt.Fill {
case influxql.LinearFill:
fallthrough
case influxql.NullFill:
p.Nil = true
case influxql.NumberFill:
p.Value, _ = castToBoolean(itr.opt.FillValue)
case influxql.PreviousFill:
if !itr.prev.Nil {
p.Value = itr.prev.Value
p.Nil = itr.prev.Nil
} else {
p.Nil = true
}
}
} else {
itr.prev = *p
}
// Advance the expected time. Do not advance to a new window here
// as there may be lingering points with the same timestamp in the previous
// window.
if itr.opt.Ascending {
itr.window.time += int64(itr.opt.Interval.Duration)
} else {
itr.window.time -= int64(itr.opt.Interval.Duration)
}
// Check to see if we have passed over an offset change and adjust the time
// to account for this new offset.
if itr.opt.Location != nil {
if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {
diff := itr.window.offset - offset
if abs(diff) < int64(itr.opt.Interval.Duration) {
itr.window.time += diff
}
itr.window.offset = offset
}
}
return p, nil
}
// booleanIntervalIterator represents a boolean implementation of IntervalIterator.
type booleanIntervalIterator struct {
input BooleanIterator
opt IteratorOptions
}
func newBooleanIntervalIterator(input BooleanIterator, opt IteratorOptions) *booleanIntervalIterator {
return &booleanIntervalIterator{input: input, opt: opt}
}
func (itr *booleanIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *booleanIntervalIterator) Close() error { return itr.input.Close() }
func (itr *booleanIntervalIterator) Next() (*BooleanPoint, error) {
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
p.Time, _ = itr.opt.Window(p.Time)
// If we see the minimum allowable time, set the time to zero so we don't
// break the default returned time for aggregate queries without times.
if p.Time == influxql.MinTime {
p.Time = 0
}
return p, nil
}
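// A minimal illustration of the interval iterator, assuming a hypothetical
// 10-second GROUP BY interval configured on opt (the values are illustrative):
//
//	in:  &BooleanPoint{Time: 12 * int64(time.Second), Value: true}
//	out: &BooleanPoint{Time: 10 * int64(time.Second), Value: true} // truncated to the window start by opt.Window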
// booleanInterruptIterator represents a boolean implementation of InterruptIterator.
type booleanInterruptIterator struct {
input BooleanIterator
closing <-chan struct{}
count int
}
func newBooleanInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanInterruptIterator {
return &booleanInterruptIterator{input: input, closing: closing}
}
func (itr *booleanInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *booleanInterruptIterator) Close() error { return itr.input.Close() }
func (itr *booleanInterruptIterator) Next() (*BooleanPoint, error) {
// Only poll the closing channel periodically: the check fires whenever the
// low byte of the counter reaches 0xFF (roughly every 256 points), which
// keeps the per-point cost of the select low while still stopping a
// long-running iterator shortly after it is interrupted.
if itr.count&0xFF == 0xFF {
select {
case <-itr.closing:
return nil, itr.Close()
default:
// Reset iterator count to zero and fall through to emit the next point.
itr.count = 0
}
}
// Increment the counter for every point read.
itr.count++
return itr.input.Next()
}
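// Hypothetical wiring for the interrupt iterator; the closing channel would
// normally be supplied by the query executor, so the setup below is only a
// sketch:
//
//	closing := make(chan struct{})
//	itr := newBooleanInterruptIterator(input, closing)
//	defer itr.Close()
//	// ...
//	close(closing) // Next returns nil within roughly the next 256 points.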
// booleanCloseInterruptIterator represents a boolean implementation of CloseInterruptIterator.
type booleanCloseInterruptIterator struct {
input BooleanIterator
closing <-chan struct{}
done chan struct{}
once sync.Once
}
func newBooleanCloseInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanCloseInterruptIterator {
itr := &booleanCloseInterruptIterator{
input: input,
closing: closing,
done: make(chan struct{}),
}
go itr.monitor()
return itr
}
func (itr *booleanCloseInterruptIterator) monitor() {
select {
case <-itr.closing:
itr.Close()
case <-itr.done:
}
}
func (itr *booleanCloseInterruptIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *booleanCloseInterruptIterator) Close() error {
itr.once.Do(func() {
close(itr.done)
itr.input.Close()
})
return nil
}
func (itr *booleanCloseInterruptIterator) Next() (*BooleanPoint, error) {
p, err := itr.input.Next()
if err != nil {
// Check if the iterator was closed.
select {
case <-itr.done:
return nil, nil
default:
return nil, err
}
}
return p, nil
}
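// Unlike booleanInterruptIterator, which only polls the closing channel
// between points, this variant runs a monitor goroutine that closes the
// underlying input as soon as the closing channel fires, which also unblocks
// a Next call that is currently waiting on the input. An error returned by
// the input after done has been closed is treated as a normal end of stream.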
// booleanReduceFloatIterator executes a reducer for every interval and buffers the result.
type booleanReduceFloatIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
points []FloatPoint
keepTags bool
}
func newBooleanReduceFloatIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, FloatPointEmitter)) *booleanReduceFloatIterator {
return &booleanReduceFloatIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *booleanReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanReduceFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *booleanReduceFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// booleanReduceFloatPoint stores the reduced data for a name/tag combination.
type booleanReduceFloatPoint struct {
Name string
Tags Tags
Aggregator BooleanPointAggregator
Emitter FloatPointEmitter
}
// reduce runs the aggregator over every point in the next window and returns
// the points emitted for each name/tag combination.
func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*booleanReduceFloatPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same window's tag set.
if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys by name & tag so the output order is consistent. The sort is
// reversed for ascending output because Next() pops points off the end of
// the buffered slice.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]FloatPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = floatPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
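// The createFn passed to newBooleanReduceFloatIterator supplies the
// aggregator/emitter pair used per name/tag combination. A minimal,
// hypothetical reducer that counts true values (countTrueReducer is not part
// of this package; it only illustrates the contract exercised by reduce):
//
//	type countTrueReducer struct{ n float64 }
//
//	func (r *countTrueReducer) AggregateBoolean(p *BooleanPoint) {
//		if p.Value {
//			r.n++
//		}
//	}
//
//	func (r *countTrueReducer) Emit() []FloatPoint {
//		return []FloatPoint{{Value: r.n}} // zero Time, so reduce assigns the interval start
//	}
//
//	itr := newBooleanReduceFloatIterator(input, opt, func() (BooleanPointAggregator, FloatPointEmitter) {
//		r := &countTrueReducer{}
//		return r, r
//	})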
// booleanStreamFloatIterator streams inputs into the iterator and emits points gradually.
type booleanStreamFloatIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, FloatPointEmitter)
dims []string
opt IteratorOptions
m map[string]*booleanReduceFloatPoint
points []FloatPoint
}
// newBooleanStreamFloatIterator returns a new instance of booleanStreamFloatIterator.
func newBooleanStreamFloatIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, FloatPointEmitter), opt IteratorOptions) *booleanStreamFloatIterator {
return &booleanStreamFloatIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*booleanReduceFloatPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *booleanStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanStreamFloatIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *booleanStreamFloatIterator) Next() (*FloatPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *booleanStreamFloatIterator) reduce() ([]FloatPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []FloatPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceFloatPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
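// The stream variant differs from booleanReduceFloatIterator in two ways: it
// calls Emit after every aggregated point instead of waiting for a window
// boundary, and at end of input it only flushes aggregators that implement
// io.Closer. Reducers intended for streaming therefore typically buffer
// internally and release their remaining points on Close.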
// booleanReduceIntegerIterator executes a reducer for every interval and buffers the result.
type booleanReduceIntegerIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
points []IntegerPoint
keepTags bool
}
func newBooleanReduceIntegerIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, IntegerPointEmitter)) *booleanReduceIntegerIterator {
return &booleanReduceIntegerIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *booleanReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanReduceIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *booleanReduceIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// booleanReduceIntegerPoint stores the reduced data for a name/tag combination.
type booleanReduceIntegerPoint struct {
Name string
Tags Tags
Aggregator BooleanPointAggregator
Emitter IntegerPointEmitter
}
// reduce runs the aggregator over every point in the next window and returns
// the points emitted for each name/tag combination.
func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*booleanReduceIntegerPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same window's tag set.
if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys by name & tag so the output order is consistent. The sort is
// reversed for ascending output because Next() pops points off the end of
// the buffered slice.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]IntegerPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = integerPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// booleanStreamIntegerIterator streams inputs into the iterator and emits points gradually.
type booleanStreamIntegerIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, IntegerPointEmitter)
dims []string
opt IteratorOptions
m map[string]*booleanReduceIntegerPoint
points []IntegerPoint
}
// newBooleanStreamIntegerIterator returns a new instance of booleanStreamIntegerIterator.
func newBooleanStreamIntegerIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, IntegerPointEmitter), opt IteratorOptions) *booleanStreamIntegerIterator {
return &booleanStreamIntegerIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*booleanReduceIntegerPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *booleanStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanStreamIntegerIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *booleanStreamIntegerIterator) Next() (*IntegerPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *booleanStreamIntegerIterator) reduce() ([]IntegerPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []IntegerPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceIntegerPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// booleanReduceUnsignedIterator executes a reducer for every interval and buffers the result.
type booleanReduceUnsignedIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
points []UnsignedPoint
keepTags bool
}
func newBooleanReduceUnsignedIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, UnsignedPointEmitter)) *booleanReduceUnsignedIterator {
return &booleanReduceUnsignedIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *booleanReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanReduceUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *booleanReduceUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// booleanReduceUnsignedPoint stores the reduced data for a name/tag combination.
type booleanReduceUnsignedPoint struct {
Name string
Tags Tags
Aggregator BooleanPointAggregator
Emitter UnsignedPointEmitter
}
// reduce runs the aggregator over every point in the next window and returns
// the points emitted for each name/tag combination.
func (itr *booleanReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*booleanReduceUnsignedPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same window's tag set.
if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys by name & tag so the output order is consistent. The sort is
// reversed for ascending output because Next() pops points off the end of
// the buffered slice.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]UnsignedPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = unsignedPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// booleanStreamUnsignedIterator streams inputs into the iterator and emits points gradually.
type booleanStreamUnsignedIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, UnsignedPointEmitter)
dims []string
opt IteratorOptions
m map[string]*booleanReduceUnsignedPoint
points []UnsignedPoint
}
// newBooleanStreamUnsignedIterator returns a new instance of booleanStreamUnsignedIterator.
func newBooleanStreamUnsignedIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *booleanStreamUnsignedIterator {
return &booleanStreamUnsignedIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*booleanReduceUnsignedPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *booleanStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanStreamUnsignedIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *booleanStreamUnsignedIterator) Next() (*UnsignedPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *booleanStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []UnsignedPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceUnsignedPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// booleanReduceStringIterator executes a reducer for every interval and buffers the result.
type booleanReduceStringIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
points []StringPoint
keepTags bool
}
func newBooleanReduceStringIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, StringPointEmitter)) *booleanReduceStringIterator {
return &booleanReduceStringIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *booleanReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanReduceStringIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *booleanReduceStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// booleanReduceStringPoint stores the reduced data for a name/tag combination.
type booleanReduceStringPoint struct {
Name string
Tags Tags
Aggregator BooleanPointAggregator
Emitter StringPointEmitter
}
// reduce runs the aggregator over every point in the next window and returns
// the points emitted for each name/tag combination.
func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*booleanReduceStringPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same window's tag set.
if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys by name & tag so the output order is consistent. The sort is
// reversed for ascending output because Next() pops points off the end of
// the buffered slice.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]StringPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = stringPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// booleanStreamStringIterator streams inputs into the iterator and emits points gradually.
type booleanStreamStringIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, StringPointEmitter)
dims []string
opt IteratorOptions
m map[string]*booleanReduceStringPoint
points []StringPoint
}
// newBooleanStreamStringIterator returns a new instance of booleanStreamStringIterator.
func newBooleanStreamStringIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, StringPointEmitter), opt IteratorOptions) *booleanStreamStringIterator {
return &booleanStreamStringIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*booleanReduceStringPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *booleanStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanStreamStringIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *booleanStreamStringIterator) Next() (*StringPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *booleanStreamStringIterator) reduce() ([]StringPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []StringPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceStringPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// booleanReduceBooleanIterator executes a reducer for every interval and buffers the result.
type booleanReduceBooleanIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
points []BooleanPoint
keepTags bool
}
func newBooleanReduceBooleanIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, BooleanPointEmitter)) *booleanReduceBooleanIterator {
return &booleanReduceBooleanIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
}
}
// Stats returns stats from the input iterator.
func (itr *booleanReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanReduceBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next reduced point for the next available interval.
func (itr *booleanReduceBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// booleanReduceBooleanPoint stores the reduced data for a name/tag combination.
type booleanReduceBooleanPoint struct {
Name string
Tags Tags
Aggregator BooleanPointAggregator
Emitter BooleanPointEmitter
}
// reduce runs the aggregator over every point in the next window and returns
// the points emitted for each name/tag combination.
func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) {
// Calculate next window.
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
} else if p.Nil {
continue
}
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
// Create points by tags.
m := make(map[string]*booleanReduceBooleanPoint)
for {
// Read next point.
curr, err := itr.input.NextInWindow(startTime, endTime)
if err != nil {
return nil, err
} else if curr == nil {
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
// Ensure this point is within the same window's tag set.
if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
// Sort the keys by name & tag so the output order is consistent. The sort is
// reversed for ascending output because Next() pops points off the end of
// the buffered slice.
if len(keys) > 0 {
var sorted sort.Interface = sort.StringSlice(keys)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Sort(sorted)
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]BooleanPoint, 0, len(m))
for _, k := range keys {
rp := m[k]
points := rp.Emitter.Emit()
for i := len(points) - 1; i >= 0; i-- {
points[i].Name = rp.Name
if !itr.keepTags {
points[i].Tags = rp.Tags
}
// Set the point's time to the interval time if the reducer didn't provide one.
if points[i].Time == ZeroTime {
points[i].Time = startTime
} else {
sortedByTime = false
}
a = append(a, points[i])
}
}
// Points may be out of order. Perform a stable sort by time if requested.
if !sortedByTime && itr.opt.Ordered {
var sorted sort.Interface = booleanPointsByTime(a)
if itr.opt.Ascending {
sorted = sort.Reverse(sorted)
}
sort.Stable(sorted)
}
return a, nil
}
// booleanStreamBooleanIterator streams inputs into the iterator and emits points gradually.
type booleanStreamBooleanIterator struct {
input *bufBooleanIterator
create func() (BooleanPointAggregator, BooleanPointEmitter)
dims []string
opt IteratorOptions
m map[string]*booleanReduceBooleanPoint
points []BooleanPoint
}
// newBooleanStreamBooleanIterator returns a new instance of booleanStreamBooleanIterator.
func newBooleanStreamBooleanIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, BooleanPointEmitter), opt IteratorOptions) *booleanStreamBooleanIterator {
return &booleanStreamBooleanIterator{
input: newBufBooleanIterator(input),
create: createFn,
dims: opt.GetDimensions(),
opt: opt,
m: make(map[string]*booleanReduceBooleanPoint),
}
}
// Stats returns stats from the input iterator.
func (itr *booleanStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanStreamBooleanIterator) Close() error { return itr.input.Close() }
// Next returns the next value for the stream iterator.
func (itr *booleanStreamBooleanIterator) Next() (*BooleanPoint, error) {
// Calculate next window if we have no more points.
if len(itr.points) == 0 {
var err error
itr.points, err = itr.reduce()
if len(itr.points) == 0 {
return nil, err
}
}
// Pop next point off the stack.
p := &itr.points[len(itr.points)-1]
itr.points = itr.points[:len(itr.points)-1]
return p, nil
}
// reduce creates and manages aggregators for every point from the input.
// After aggregating a point, it always tries to emit a value using the emitter.
func (itr *booleanStreamBooleanIterator) reduce() ([]BooleanPoint, error) {
// We have already read all of the input points.
if itr.m == nil {
return nil, nil
}
for {
// Read next point.
curr, err := itr.input.Next()
if err != nil {
return nil, err
} else if curr == nil {
// Close all of the aggregators to flush any remaining points to emit.
var points []BooleanPoint
for _, rp := range itr.m {
if aggregator, ok := rp.Aggregator.(io.Closer); ok {
if err := aggregator.Close(); err != nil {
return nil, err
}
pts := rp.Emitter.Emit()
if len(pts) == 0 {
continue
}
for i := range pts {
pts[i].Name = rp.Name
pts[i].Tags = rp.Tags
}
points = append(points, pts...)
}
}
// Eliminate the aggregators and emitters.
itr.m = nil
return points, nil
} else if curr.Nil {
continue
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
}
// Retrieve the aggregator for this name/tag combination or create one.
rp := itr.m[id]
if rp == nil {
aggregator, emitter := itr.create()
rp = &booleanReduceBooleanPoint{
Name: curr.Name,
Tags: tags,
Aggregator: aggregator,
Emitter: emitter,
}
itr.m[id] = rp
}
rp.Aggregator.AggregateBoolean(curr)
// Attempt to emit points from the aggregator.
points := rp.Emitter.Emit()
if len(points) == 0 {
continue
}
for i := range points {
points[i].Name = rp.Name
points[i].Tags = rp.Tags
}
return points, nil
}
}
// booleanDedupeIterator only outputs unique points.
// This differs from the DistinctIterator in that it compares all aux fields too.
// This iterator is relatively inefficient and should only be used on small
// datasets such as meta query results.
type booleanDedupeIterator struct {
input BooleanIterator
m map[string]struct{} // lookup of points already sent
}
type booleanIteratorMapper struct {
cur Cursor
row Row
driver IteratorMap // which iterator to use for the primary value, can be nil
fields []IteratorMap // which iterator to use for an aux field
point BooleanPoint
}
func newBooleanIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *booleanIteratorMapper {
return &booleanIteratorMapper{
cur: cur,
driver: driver,
fields: fields,
point: BooleanPoint{
Aux: make([]interface{}, len(fields)),
},
}
}
func (itr *booleanIteratorMapper) Next() (*BooleanPoint, error) {
if !itr.cur.Scan(&itr.row) {
if err := itr.cur.Err(); err != nil {
return nil, err
}
return nil, nil
}
itr.point.Time = itr.row.Time
itr.point.Name = itr.row.Series.Name
itr.point.Tags = itr.row.Series.Tags
if itr.driver != nil {
if v := itr.driver.Value(&itr.row); v != nil {
if v, ok := castToBoolean(v); ok {
itr.point.Value = v
itr.point.Nil = false
} else {
itr.point.Value = false
itr.point.Nil = true
}
} else {
itr.point.Value = false
itr.point.Nil = true
}
}
for i, f := range itr.fields {
itr.point.Aux[i] = f.Value(&itr.row)
}
return &itr.point, nil
}
func (itr *booleanIteratorMapper) Stats() IteratorStats {
return itr.cur.Stats()
}
func (itr *booleanIteratorMapper) Close() error {
return itr.cur.Close()
}
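// The mapper adapts a Cursor to the BooleanIterator interface: driver picks
// the column used for the point's Value (a nil driver leaves the value as a
// null boolean), and fields picks the columns copied into Aux. As written,
// the opt argument to the constructor is not used. Next returns a pointer to
// the same reused BooleanPoint on every call, so callers that retain points
// must copy them first.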
type booleanFilterIterator struct {
input BooleanIterator
cond influxql.Expr
opt IteratorOptions
m map[string]interface{}
}
func newBooleanFilterIterator(input BooleanIterator, cond influxql.Expr, opt IteratorOptions) BooleanIterator {
// Strip out time conditions from the WHERE clause.
// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.
n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node {
switch n := n.(type) {
case *influxql.BinaryExpr:
if n.LHS.String() == "time" {
return &influxql.BooleanLiteral{Val: true}
}
}
return n
})
cond, _ = n.(influxql.Expr)
if cond == nil {
return input
} else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val {
return input
}
return &booleanFilterIterator{
input: input,
cond: cond,
opt: opt,
m: make(map[string]interface{}),
}
}
func (itr *booleanFilterIterator) Stats() IteratorStats { return itr.input.Stats() }
func (itr *booleanFilterIterator) Close() error { return itr.input.Close() }
func (itr *booleanFilterIterator) Next() (*BooleanPoint, error) {
for {
p, err := itr.input.Next()
if err != nil || p == nil {
return nil, err
}
for i, ref := range itr.opt.Aux {
itr.m[ref.Val] = p.Aux[i]
}
for k, v := range p.Tags.KeyValues() {
itr.m[k] = v
}
if !influxql.EvalBool(itr.cond, itr.m) {
continue
}
return p, nil
}
}
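// A sketch of the filter's behavior: time comparisons are stripped from the
// condition (they are expected to be handled elsewhere) and the remainder is
// evaluated against each point's aux fields and tags. The condition below is
// illustrative only:
//
//	WHERE host = 'serverA' AND time > now() - 1h
//	  becomes: host = 'serverA' AND true
//	  and a point is emitted only if influxql.EvalBool returns true for it.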
type booleanTagSubsetIterator struct {
input BooleanIterator
point BooleanPoint
lastTags Tags
dimensions []string
}
func newBooleanTagSubsetIterator(input BooleanIterator, opt IteratorOptions) *booleanTagSubsetIterator {
return &booleanTagSubsetIterator{
input: input,
dimensions: opt.GetDimensions(),
}
}
func (itr *booleanTagSubsetIterator) Next() (*BooleanPoint, error) {
p, err := itr.input.Next()
if err != nil {
return nil, err
} else if p == nil {
return nil, nil
}
itr.point.Name = p.Name
if !p.Tags.Equal(itr.lastTags) {
itr.point.Tags = p.Tags.Subset(itr.dimensions)
itr.lastTags = p.Tags
}
itr.point.Time = p.Time
itr.point.Value = p.Value
itr.point.Aux = p.Aux
itr.point.Aggregated = p.Aggregated
itr.point.Nil = p.Nil
return &itr.point, nil
}
func (itr *booleanTagSubsetIterator) Stats() IteratorStats {
return itr.input.Stats()
}
func (itr *booleanTagSubsetIterator) Close() error {
return itr.input.Close()
}
// newBooleanDedupeIterator returns a new instance of booleanDedupeIterator.
func newBooleanDedupeIterator(input BooleanIterator) *booleanDedupeIterator {
return &booleanDedupeIterator{
input: input,
m: make(map[string]struct{}),
}
}
// Stats returns stats from the input iterator.
func (itr *booleanDedupeIterator) Stats() IteratorStats { return itr.input.Stats() }
// Close closes the iterator and all child iterators.
func (itr *booleanDedupeIterator) Close() error { return itr.input.Close() }
// Next returns the next unique point from the input iterator.
func (itr *booleanDedupeIterator) Next() (*BooleanPoint, error) {
for {
// Read next point.
p, err := itr.input.Next()
if p == nil || err != nil {
return nil, err
}
// Serialize to bytes to store in lookup.
buf, err := proto.Marshal(encodeBooleanPoint(p))
if err != nil {
return nil, err
}
// If the point has already been output then move to the next point.
if _, ok := itr.m[string(buf)]; ok {
continue
}
// Otherwise mark it as emitted and return point.
itr.m[string(buf)] = struct{}{}
return p, nil
}
}
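// The dedupe key is the protobuf encoding of the entire point (value, time,
// name, tags, and aux fields), so two points are treated as duplicates only
// when every encoded field matches. Every unique key is retained in memory
// for the lifetime of the iterator, which is why the type comment restricts
// its use to small datasets such as meta query results.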
// booleanReaderIterator represents an iterator that streams from a reader.
type booleanReaderIterator struct {
r io.Reader
dec *BooleanPointDecoder
}
// newBooleanReaderIterator returns a new instance of booleanReaderIterator.
func newBooleanReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *booleanReaderIterator {
dec := NewBooleanPointDecoder(ctx, r)
dec.stats = stats
return &booleanReaderIterator{
r: r,
dec: dec,
}
}
// Stats returns stats about points processed.
func (itr *booleanReaderIterator) Stats() IteratorStats { return itr.dec.stats }
// Close closes the underlying reader, if applicable.
func (itr *booleanReaderIterator) Close() error {
if r, ok := itr.r.(io.ReadCloser); ok {
return r.Close()
}
return nil
}
// Next returns the next point from the iterator.
func (itr *booleanReaderIterator) Next() (*BooleanPoint, error) {
// OPTIMIZE(benbjohnson): Reuse point on iterator.
// Unmarshal next point.
p := &BooleanPoint{}
if err := itr.dec.DecodeBooleanPoint(p); err == io.EOF {
return nil, nil
} else if err != nil {
return nil, err
}
return p, nil
}
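// A hypothetical round trip between the point encoder and this reader
// iterator over an in-process pipe; production callers stream points between
// processes, so nothing below is required wiring:
//
//	r, w := io.Pipe()
//	go func() {
//		enc := NewBooleanPointEncoder(w)
//		_ = enc.EncodeBooleanPoint(&BooleanPoint{Name: "cpu", Value: true})
//		w.Close()
//	}()
//	itr := newBooleanReaderIterator(context.Background(), r, IteratorStats{})
//	p, _ := itr.Next() // decodes the point written above; returns nil at EOF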
// encodeFloatIterator encodes all points from itr to the underlying writer.
func (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error {
ticker := time.NewTicker(enc.StatsInterval)
defer ticker.Stop()
// Emit initial stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
// Continually stream points from the iterator into the encoder.
penc := NewFloatPointEncoder(enc.w)
for {
// Emit stats periodically.
select {
case <-ticker.C:
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
default:
}
// Retrieve the next point from the iterator.
p, err := itr.Next()
if err != nil {
return err
} else if p == nil {
break
}
// Write the point to the point encoder.
if err := penc.EncodeFloatPoint(p); err != nil {
return err
}
}
// Emit final stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
return nil
}
// encodeIntegerIterator encodes all points from itr to the underlying writer.
func (enc *IteratorEncoder) encodeIntegerIterator(itr IntegerIterator) error {
ticker := time.NewTicker(enc.StatsInterval)
defer ticker.Stop()
// Emit initial stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
// Continually stream points from the iterator into the encoder.
penc := NewIntegerPointEncoder(enc.w)
for {
// Emit stats periodically.
select {
case <-ticker.C:
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
default:
}
// Retrieve the next point from the iterator.
p, err := itr.Next()
if err != nil {
return err
} else if p == nil {
break
}
// Write the point to the point encoder.
if err := penc.EncodeIntegerPoint(p); err != nil {
return err
}
}
// Emit final stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
return nil
}
// encodeUnsignedIterator encodes all points from itr to the underlying writer.
func (enc *IteratorEncoder) encodeUnsignedIterator(itr UnsignedIterator) error {
ticker := time.NewTicker(enc.StatsInterval)
defer ticker.Stop()
// Emit initial stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
// Continually stream points from the iterator into the encoder.
penc := NewUnsignedPointEncoder(enc.w)
for {
// Emit stats periodically.
select {
case <-ticker.C:
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
default:
}
// Retrieve the next point from the iterator.
p, err := itr.Next()
if err != nil {
return err
} else if p == nil {
break
}
// Write the point to the point encoder.
if err := penc.EncodeUnsignedPoint(p); err != nil {
return err
}
}
// Emit final stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
return nil
}
// encodeStringIterator encodes all points from itr to the underlying writer.
func (enc *IteratorEncoder) encodeStringIterator(itr StringIterator) error {
ticker := time.NewTicker(enc.StatsInterval)
defer ticker.Stop()
// Emit initial stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
// Continually stream points from the iterator into the encoder.
penc := NewStringPointEncoder(enc.w)
for {
// Emit stats periodically.
select {
case <-ticker.C:
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
default:
}
// Retrieve the next point from the iterator.
p, err := itr.Next()
if err != nil {
return err
} else if p == nil {
break
}
// Write the point to the point encoder.
if err := penc.EncodeStringPoint(p); err != nil {
return err
}
}
// Emit final stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
return nil
}
// encodeBooleanIterator encodes all points from itr to the underlying writer.
func (enc *IteratorEncoder) encodeBooleanIterator(itr BooleanIterator) error {
ticker := time.NewTicker(enc.StatsInterval)
defer ticker.Stop()
// Emit initial stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
// Continually stream points from the iterator into the encoder.
penc := NewBooleanPointEncoder(enc.w)
for {
// Emit stats periodically.
select {
case <-ticker.C:
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
default:
}
// Retrieve the next point from the iterator.
p, err := itr.Next()
if err != nil {
return err
} else if p == nil {
break
}
// Write the point to the point encoder.
if err := penc.EncodeBooleanPoint(p); err != nil {
return err
}
}
// Emit final stats.
if err := enc.encodeStats(itr.Stats()); err != nil {
return err
}
return nil
}
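// All five encode*Iterator methods share the same framing: the iterator's
// stats are written first, re-emitted periodically (roughly once per
// StatsInterval) while points are streamed, and written one final time once
// the iterator is exhausted. Presumably the point decoders on the receiving
// side fold these stats frames into the IteratorStats they report (see the
// dec.stats field initialized in newBooleanReaderIterator); that behavior is
// inferred here rather than shown.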