package coordinator_test

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"reflect"
	"regexp"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/influxdata/influxdb/coordinator"
	"github.com/influxdata/influxdb/internal"
	"github.com/influxdata/influxdb/logger"
	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/query"
	"github.com/influxdata/influxdb/services/meta"
	"github.com/influxdata/influxdb/tsdb"
	"github.com/influxdata/influxql"
)

const (
	// DefaultDatabase is the default database name used in tests.
	DefaultDatabase = "db0"

	// DefaultRetentionPolicy is the default retention policy name used in tests.
	DefaultRetentionPolicy = "rp0"
)

// Ensure query executor can execute a simple SELECT statement.
func TestQueryExecutor_ExecuteQuery_SelectStatement(t *testing.T) {
	e := DefaultQueryExecutor()

	// The meta client should return a single shard owned by the local node.
	e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
		return []meta.ShardGroupInfo{
			{ID: 1, Shards: []meta.ShardInfo{
				{ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}},
			}},
		}, nil
	}

	// The TSDB store should return a shard group for the requested shard.
	// Its shard returns a single iterator with "value" in the aux fields.
	e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup {
		if !reflect.DeepEqual(ids, []uint64{100}) {
			t.Fatalf("unexpected shard ids: %v", ids)
		}

		var sh MockShard
		sh.CreateIteratorFn = func(_ context.Context, _ *influxql.Measurement, _ query.IteratorOptions) (query.Iterator, error) {
			return &FloatIterator{Points: []query.FloatPoint{
				{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}},
				{Name: "cpu", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}},
			}}, nil
		}
		sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
			if !reflect.DeepEqual(measurements, []string{"cpu"}) {
				t.Fatalf("unexpected source: %#v", measurements)
			}
			return map[string]influxql.DataType{"value": influxql.Float}, nil, nil
		}
		return &sh
	}

	// Verify all results from the query.
	if a := ReadAllResults(e.ExecuteQuery(`SELECT * FROM cpu`, "db0", 0)); !reflect.DeepEqual(a, []*query.Result{
		{
			StatementID: 0,
			Series: []*models.Row{{
				Name:    "cpu",
				Columns: []string{"time", "value"},
				Values: [][]interface{}{
					{time.Unix(0, 0).UTC(), float64(100)},
					{time.Unix(1, 0).UTC(), float64(200)},
				},
			}},
		},
	}) {
		t.Fatalf("unexpected results: %s", spew.Sdump(a))
	}
}

// Ensure query executor can enforce a maximum bucket selection count.
func TestQueryExecutor_ExecuteQuery_MaxSelectBucketsN(t *testing.T) {
	e := DefaultQueryExecutor()
	e.StatementExecutor.MaxSelectBucketsN = 3

	// The meta client should return a single shard on the local node.
	e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
		return []meta.ShardGroupInfo{
			{ID: 1, Shards: []meta.ShardInfo{
				{ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}},
			}},
		}, nil
	}

	e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup {
		if !reflect.DeepEqual(ids, []uint64{100}) {
			t.Fatalf("unexpected shard ids: %v", ids)
		}

		var sh MockShard
		sh.CreateIteratorFn = func(_ context.Context, _ *influxql.Measurement, _ query.IteratorOptions) (query.Iterator, error) {
			return &FloatIterator{
				Points: []query.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}},
			}, nil
		}
		sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
			if !reflect.DeepEqual(measurements, []string{"cpu"}) {
				t.Fatalf("unexpected source: %#v", measurements)
			}
			return map[string]influxql.DataType{"value": influxql.Float}, nil, nil
		}
		return &sh
	}

	// Verify all results from the query.
	if a := ReadAllResults(e.ExecuteQuery(`SELECT count(value) FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`, "db0", 0)); !reflect.DeepEqual(a, []*query.Result{
		{
			StatementID: 0,
			Err:         errors.New("max-select-buckets limit exceeded: (4/3)"),
		},
	}) {
		t.Fatalf("unexpected results: %s", spew.Sdump(a))
	}
}
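
// Ensure SELECT ... INTO statements write their results through the
// statement executor's PointsWriter and report how many points were written.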
func TestStatementExecutor_ExecuteQuery_WriteInto(t *testing.T) {
	for _, tt := range []struct {
		name    string
		pw      func(t *testing.T, req *coordinator.IntoWriteRequest) error
		query   string
		source  func() query.Iterator
		written int64
	}{
		{
			name: "DropNullPoints",
			pw: func(t *testing.T, req *coordinator.IntoWriteRequest) error {
				if want, got := len(req.Points), 0; want != got {
					t.Errorf("unexpected written points: %d != %d", want, got)
				}
				return nil
			},
			query: `SELECT stddev(value) INTO cpu_stddev FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`,
			source: func() query.Iterator {
				return &FloatIterator{
					Points: []query.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Value: 100}},
				}
			},
			written: 0,
		},
		{
			name: "PartialDrop",
			pw: func(t *testing.T, req *coordinator.IntoWriteRequest) error {
				if want, got := len(req.Points), 1; want != got {
					t.Errorf("unexpected written points: %d != %d", want, got)
				} else {
					fields, err := req.Points[0].Fields()
					if err != nil {
						return err
					} else if want, got := len(fields), 1; want != got {
						t.Errorf("unexpected number of fields: %d != %d", want, got)
					}
				}
				return nil
			},
			query: `SELECT max(value), stddev(value) INTO cpu_agg FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`,
			source: func() query.Iterator {
				return &FloatIterator{
					Points: []query.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Value: 100}},
				}
			},
			written: 1,
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			e := DefaultQueryExecutor()
			e.StatementExecutor.PointsWriter = writePointsIntoFunc(func(req *coordinator.IntoWriteRequest) error {
				return tt.pw(t, req)
			})

			// The meta client should return a single shard on the local node.
			e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
				return []meta.ShardGroupInfo{
					{ID: 1, Shards: []meta.ShardInfo{
						{ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}},
					}},
				}, nil
			}

			e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup {
				if !reflect.DeepEqual(ids, []uint64{100}) {
					t.Fatalf("unexpected shard ids: %v", ids)
				}

				var sh MockShard
				sh.CreateIteratorFn = func(_ context.Context, _ *influxql.Measurement, _ query.IteratorOptions) (query.Iterator, error) {
					return tt.source(), nil
				}
				sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
					if !reflect.DeepEqual(measurements, []string{"cpu"}) {
						t.Fatalf("unexpected source: %#v", measurements)
					}
					return map[string]influxql.DataType{"value": influxql.Float}, nil, nil
				}
				return &sh
			}

			// Verify all results from the query.
			if a := ReadAllResults(e.ExecuteQuery(tt.query, "db0", 0)); !reflect.DeepEqual(a, []*query.Result{
				{
					StatementID: 0,
					Series: models.Rows{
						{
							Name:    "result",
							Columns: []string{"time", "written"},
							Values: [][]interface{}{
								{ts("1970-01-01T00:00:00Z"), int64(tt.written)},
							},
						},
					},
				},
			}) {
				t.Fatalf("unexpected results: %s", spew.Sdump(a))
			}
		})
	}
}
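
// Ensure NormalizeStatement fills in the measurement's database and retention
// policy, preferring names given in the query over the provided defaults.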
func TestStatementExecutor_NormalizeStatement(t *testing.T) {
	testCases := []struct {
		name       string
		query      string
		defaultDB  string
		defaultRP  string
		expectedDB string
		expectedRP string
	}{
		{
			name:       "defaults",
			query:      "SELECT f FROM m",
			defaultDB:  DefaultDatabase,
			defaultRP:  "",
			expectedDB: DefaultDatabase,
			expectedRP: DefaultRetentionPolicy,
		},
		{
			name:       "alternate database via param",
			query:      "SELECT f FROM m",
			defaultDB:  "dbalt",
			defaultRP:  "",
			expectedDB: "dbalt",
			expectedRP: DefaultRetentionPolicy,
		},
		{
			name:       "alternate database via query",
			query:      fmt.Sprintf("SELECT f FROM dbalt.%s.m", DefaultRetentionPolicy),
			defaultDB:  DefaultDatabase,
			defaultRP:  "",
			expectedDB: "dbalt",
			expectedRP: DefaultRetentionPolicy,
		},
		{
			name:       "alternate RP via param",
			query:      "SELECT f FROM m",
			defaultDB:  DefaultDatabase,
			defaultRP:  "rpalt",
			expectedDB: DefaultDatabase,
			expectedRP: "rpalt",
		},
		{
			name:       "alternate RP via query",
			query:      fmt.Sprintf("SELECT f FROM %s.rpalt.m", DefaultDatabase),
			defaultDB:  DefaultDatabase,
			defaultRP:  "",
			expectedDB: DefaultDatabase,
			expectedRP: "rpalt",
		},
		{
			name:       "alternate RP query disagrees with param and query wins",
			query:      fmt.Sprintf("SELECT f FROM %s.rpquery.m", DefaultDatabase),
			defaultDB:  DefaultDatabase,
			defaultRP:  "rpparam",
			expectedDB: DefaultDatabase,
			expectedRP: "rpquery",
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			q, err := influxql.ParseQuery(testCase.query)
			if err != nil {
				t.Fatalf("unexpected error parsing query: %v", err)
			}

			stmt := q.Statements[0].(*influxql.SelectStatement)
			err = DefaultQueryExecutor().StatementExecutor.NormalizeStatement(stmt, testCase.defaultDB, testCase.defaultRP)
			if err != nil {
				t.Fatalf("unexpected error normalizing statement: %v", err)
			}

			m := stmt.Sources[0].(*influxql.Measurement)
			if m.Database != testCase.expectedDB {
				t.Errorf("database got %v, want %v", m.Database, testCase.expectedDB)
			}
			if m.RetentionPolicy != testCase.expectedRP {
				t.Errorf("retention policy got %v, want %v", m.RetentionPolicy, testCase.expectedRP)
			}
		})
	}
}
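
// Ensure NormalizeStatement leaves DROP SERIES statements untouched and does
// not consult the meta client.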
func TestStatementExecutor_NormalizeDropSeries(t *testing.T) {
	q, err := influxql.ParseQuery("DROP SERIES FROM cpu")
	if err != nil {
		t.Fatalf("unexpected error parsing query: %v", err)
	}

	stmt := q.Statements[0].(*influxql.DropSeriesStatement)

	s := &coordinator.StatementExecutor{
		MetaClient: &internal.MetaClientMock{
			DatabaseFn: func(name string) *meta.DatabaseInfo {
				t.Fatal("meta client should not be called")
				return nil
			},
		},
	}
	if err := s.NormalizeStatement(stmt, "foo", "bar"); err != nil {
		t.Fatalf("unexpected error normalizing statement: %v", err)
	}

	m := stmt.Sources[0].(*influxql.Measurement)
	if m.Database != "" {
		t.Fatalf("database rewritten when not supposed to: %v", m.Database)
	}
	if m.RetentionPolicy != "" {
		t.Fatalf("retention policy rewritten when not supposed to: %v", m.RetentionPolicy)
	}

	if exp, got := "DROP SERIES FROM cpu", q.String(); exp != got {
		t.Fatalf("generated query does not match parsed: exp %v, got %v", exp, got)
	}
}
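
// Ensure NormalizeStatement leaves DELETE statements untouched and does not
// consult the meta client.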
func TestStatementExecutor_NormalizeDeleteSeries(t *testing.T) {
	q, err := influxql.ParseQuery("DELETE FROM cpu")
	if err != nil {
		t.Fatalf("unexpected error parsing query: %v", err)
	}

	stmt := q.Statements[0].(*influxql.DeleteSeriesStatement)

	s := &coordinator.StatementExecutor{
		MetaClient: &internal.MetaClientMock{
			DatabaseFn: func(name string) *meta.DatabaseInfo {
				t.Fatal("meta client should not be called")
				return nil
			},
		},
	}
	if err := s.NormalizeStatement(stmt, "foo", "bar"); err != nil {
		t.Fatalf("unexpected error normalizing statement: %v", err)
	}

	m := stmt.Sources[0].(*influxql.Measurement)
	if m.Database != "" {
		t.Fatalf("database rewritten when not supposed to: %v", m.Database)
	}
	if m.RetentionPolicy != "" {
		t.Fatalf("retention policy rewritten when not supposed to: %v", m.RetentionPolicy)
	}

	if exp, got := "DELETE FROM cpu", q.String(); exp != got {
		t.Fatalf("generated query does not match parsed: exp %v, got %v", exp, got)
	}
}
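
// mockCoarseAuthorizer is a coarse authorizer for tests that delegates
// database authorization to AuthorizeDatabaseFn.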
type mockCoarseAuthorizer struct {
	AuthorizeDatabaseFn func(influxql.Privilege, string) bool
}

func (a *mockCoarseAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool {
	return a.AuthorizeDatabaseFn(p, name)
}
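
// Ensure SHOW DATABASES lists only the databases the authorizer permits.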
func TestQueryExecutor_ExecuteQuery_ShowDatabases(t *testing.T) {
	qe := query.NewExecutor()
	qe.StatementExecutor = &coordinator.StatementExecutor{
		MetaClient: &internal.MetaClientMock{
			DatabasesFn: func() []meta.DatabaseInfo {
				return []meta.DatabaseInfo{
					{Name: "db1"}, {Name: "db2"}, {Name: "db3"}, {Name: "db4"},
				}
			},
		},
	}

	opt := query.ExecutionOptions{
		CoarseAuthorizer: &mockCoarseAuthorizer{
			AuthorizeDatabaseFn: func(p influxql.Privilege, name string) bool {
				return name == "db2" || name == "db4"
			},
		},
	}

	q, err := influxql.ParseQuery("SHOW DATABASES")
	if err != nil {
		t.Fatal(err)
	}

	results := ReadAllResults(qe.ExecuteQuery(q, opt, make(chan struct{})))
	exp := []*query.Result{
		{
			StatementID: 0,
			Series: []*models.Row{{
				Name:    "databases",
				Columns: []string{"name"},
				Values: [][]interface{}{
					{"db2"}, {"db4"},
				},
			}},
		},
	}
	if !reflect.DeepEqual(results, exp) {
		t.Fatalf("unexpected results: exp %s, got %s", spew.Sdump(exp), spew.Sdump(results))
	}
}
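
// Ensure SHOW CONTINUOUS QUERIES lists continuous queries only for the
// databases the authorizer permits.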
func TestQueryExecutor_ExecuteQuery_ShowContinuousQueries(t *testing.T) {
	qe := query.NewExecutor()
	qe.StatementExecutor = &coordinator.StatementExecutor{
		MetaClient: &internal.MetaClientMock{
			DatabasesFn: func() []meta.DatabaseInfo {
				return []meta.DatabaseInfo{
					{
						Name:              "db1",
						ContinuousQueries: []meta.ContinuousQueryInfo{{Name: "db1_query_name", Query: "db1_query"}},
					},
					{
						Name: "db2",
						ContinuousQueries: []meta.ContinuousQueryInfo{
							{Name: "db2_query_name", Query: "db2_query"},
							{Name: "db2_query2_name", Query: "db2_query2"},
						},
					},
					{
						Name: "db3",
						ContinuousQueries: []meta.ContinuousQueryInfo{
							{Name: "db3_query_name", Query: "db3_query"},
							{Name: "db3_query2_name", Query: "db3_query2"},
						},
					},
					{
						Name: "db4",
						ContinuousQueries: []meta.ContinuousQueryInfo{
							{Name: "db4_query_name", Query: "db4_query"},
							{Name: "db4_query2_name", Query: "db4_query2"},
							{Name: "db4_query3_name", Query: "db4_query3"},
						},
					},
				}
			},
		},
	}

	opt := query.ExecutionOptions{
		CoarseAuthorizer: &mockCoarseAuthorizer{
			AuthorizeDatabaseFn: func(p influxql.Privilege, name string) bool {
				return name == "db2" || name == "db4"
			},
		},
	}

	q, err := influxql.ParseQuery("SHOW CONTINUOUS QUERIES")
	if err != nil {
		t.Fatal(err)
	}

	results := ReadAllResults(qe.ExecuteQuery(q, opt, make(chan struct{})))
	exp := []*query.Result{
		{
			StatementID: 0,
			Series: []*models.Row{
				{
					Name:    "db2",
					Columns: []string{"name", "query"},
					Values: [][]interface{}{
						{"db2_query_name", "db2_query"},
						{"db2_query2_name", "db2_query2"},
					},
				},
				{
					Name:    "db4",
					Columns: []string{"name", "query"},
					Values: [][]interface{}{
						{"db4_query_name", "db4_query"},
						{"db4_query2_name", "db4_query2"},
						{"db4_query3_name", "db4_query3"},
					},
				},
			},
		},
	}
	if !reflect.DeepEqual(results, exp) {
		t.Fatalf("unexpected results: exp %s, got %s", spew.Sdump(exp), spew.Sdump(results))
	}
}

// QueryExecutor is a test wrapper for query.Executor that wires in a mock
// meta client, TSDB store, and coordinator.StatementExecutor.
type QueryExecutor struct {
	*query.Executor

	MetaClient        MetaClient
	TSDBStore         *internal.TSDBStoreMock
	StatementExecutor *coordinator.StatementExecutor
	LogOutput         bytes.Buffer
}

// NewQueryExecutor returns a new instance of QueryExecutor.
// This query executor always has a node id of 0.
func NewQueryExecutor() *QueryExecutor {
	e := &QueryExecutor{
		Executor:  query.NewExecutor(),
		TSDBStore: &internal.TSDBStoreMock{},
	}
	e.TSDBStore.CreateShardFn = func(database, policy string, shardID uint64, enabled bool) error {
		return nil
	}
	e.TSDBStore.MeasurementNamesFn = func(auth query.FineAuthorizer, database string, cond influxql.Expr) ([][]byte, error) {
		return nil, nil
	}
	e.TSDBStore.TagValuesFn = func(_ query.FineAuthorizer, _ []uint64, _ influxql.Expr) ([]tsdb.TagValues, error) {
		return nil, nil
	}

	e.StatementExecutor = &coordinator.StatementExecutor{
		MetaClient: &e.MetaClient,
		TSDBStore:  e.TSDBStore,
		ShardMapper: &coordinator.LocalShardMapper{
			MetaClient: &e.MetaClient,
			TSDBStore:  e.TSDBStore,
		},
	}
	e.Executor.StatementExecutor = e.StatementExecutor

	var out io.Writer = &e.LogOutput
	if testing.Verbose() {
		out = io.MultiWriter(out, os.Stderr)
	}
	e.Executor.WithLogger(logger.New(out))

	return e
}

// DefaultQueryExecutor returns a QueryExecutor with a default database (db0) and retention policy (rp0).
func DefaultQueryExecutor() *QueryExecutor {
	e := NewQueryExecutor()
	e.MetaClient.DatabaseFn = DefaultMetaClientDatabaseFn
	return e
}

// ExecuteQuery parses q and executes it against the given database.
func (e *QueryExecutor) ExecuteQuery(q, database string, chunkSize int) <-chan *query.Result {
	return e.Executor.ExecuteQuery(MustParseQuery(q), query.ExecutionOptions{
		Database:  database,
		ChunkSize: chunkSize,
	}, make(chan struct{}))
}
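
// MockShard is a mock tsdb.ShardGroup whose behavior is controlled through
// its *Fn fields.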
type MockShard struct {
	Measurements             []string
	FieldDimensionsFn        func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
	FieldKeysByMeasurementFn func(name []byte) []string
	CreateIteratorFn         func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error)
	IteratorCostFn           func(m string, opt query.IteratorOptions) (query.IteratorCost, error)
	ExpandSourcesFn          func(sources influxql.Sources) (influxql.Sources, error)
}

func (sh *MockShard) MeasurementsByRegex(re *regexp.Regexp) []string {
	names := make([]string, 0, len(sh.Measurements))
	for _, name := range sh.Measurements {
		if re.MatchString(name) {
			names = append(names, name)
		}
	}
	return names
}

func (sh *MockShard) FieldKeysByMeasurement(name []byte) []string {
	return sh.FieldKeysByMeasurementFn(name)
}

func (sh *MockShard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
	return sh.FieldDimensionsFn(measurements)
}

func (sh *MockShard) MapType(measurement, field string) influxql.DataType {
	f, d, err := sh.FieldDimensions([]string{measurement})
	if err != nil {
		return influxql.Unknown
	}

	if typ, ok := f[field]; ok {
		return typ
	} else if _, ok := d[field]; ok {
		return influxql.Tag
	}
	return influxql.Unknown
}

func (sh *MockShard) CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
	return sh.CreateIteratorFn(ctx, measurement, opt)
}

func (sh *MockShard) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) {
	return sh.IteratorCostFn(measurement, opt)
}

func (sh *MockShard) ExpandSources(sources influxql.Sources) (influxql.Sources, error) {
	return sh.ExpandSourcesFn(sources)
}

// MustParseQuery parses s into a query. Panics on error.
func MustParseQuery(s string) *influxql.Query {
	q, err := influxql.ParseQuery(s)
	if err != nil {
		panic(err)
	}
	return q
}

// ReadAllResults reads all results from c and returns them as a slice.
func ReadAllResults(c <-chan *query.Result) []*query.Result {
	var a []*query.Result
	for result := range c {
		a = append(a, result)
	}
	return a
}

// FloatIterator is a test iterator that reads points from a slice.
type FloatIterator struct {
	Points []query.FloatPoint
	stats  query.IteratorStats
}

func (itr *FloatIterator) Stats() query.IteratorStats { return itr.stats }
func (itr *FloatIterator) Close() error               { return nil }

// Next returns the next value and shifts it off the beginning of the points slice.
func (itr *FloatIterator) Next() (*query.FloatPoint, error) {
	if len(itr.Points) == 0 {
		return nil, nil
	}

	v := &itr.Points[0]
	itr.Points = itr.Points[1:]
	return v, nil
}
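
// ts parses s as an RFC3339 timestamp and panics on error.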
func ts(s string) time.Time {
	t, err := time.Parse(time.RFC3339, s)
	if err != nil {
		panic(err)
	}
	return t
}
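
// writePointsIntoFunc adapts a plain function to the PointsWriter interface
// expected by coordinator.StatementExecutor.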
type writePointsIntoFunc func(req *coordinator.IntoWriteRequest) error

func (fn writePointsIntoFunc) WritePointsInto(req *coordinator.IntoWriteRequest) error {
	return fn(req)
}