feat(influxql): Initial implementation of InfluxQL query engine

* Some tests are broken or do not pass; follow up PRs will resolve that
pull/19446/head
Stuart Carnie 2020-08-14 12:37:30 -07:00
parent b73340a4b1
commit 8a7dcc21fb
No known key found for this signature in database
GPG Key ID: 848D9C9718D78B4F
49 changed files with 3409 additions and 2211 deletions

View File

@ -27,6 +27,8 @@ import (
"github.com/influxdata/influxdb/v2/endpoints"
"github.com/influxdata/influxdb/v2/gather"
"github.com/influxdata/influxdb/v2/http"
iqlcontrol "github.com/influxdata/influxdb/v2/influxql/control"
iqlquery "github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/inmem"
"github.com/influxdata/influxdb/v2/internal/fs"
"github.com/influxdata/influxdb/v2/kit/cli"
@ -64,6 +66,7 @@ import (
"github.com/influxdata/influxdb/v2/task/backend/scheduler"
"github.com/influxdata/influxdb/v2/telemetry"
"github.com/influxdata/influxdb/v2/tenant"
iqlcoordinator "github.com/influxdata/influxdb/v2/v1/coordinator"
"github.com/influxdata/influxdb/v2/v1/services/meta"
storage2 "github.com/influxdata/influxdb/v2/v1/services/storage"
_ "github.com/influxdata/influxdb/v2/v1/tsdb/engine/tsm1" // needed for tsm1
@ -845,6 +848,25 @@ func (m *Launcher) run(ctx context.Context) (err error) {
dbrpSvc := dbrp.NewService(ctx, authorizer.NewBucketService(ts.BucketService), m.kvStore)
dbrpSvc = dbrp.NewAuthorizedService(dbrpSvc)
cm := iqlcontrol.NewControllerMetrics([]string{})
m.reg.MustRegister(cm.PrometheusCollectors()...)
mapper := &iqlcoordinator.LocalShardMapper{
MetaClient: metaClient,
TSDBStore: m.engine.TSDBStore,
DBRP: dbrpSvc,
}
qe := iqlquery.NewExecutor(m.log, cm)
se := &iqlcoordinator.StatementExecutor{
MetaClient: metaClient,
TSDBStore: m.engine.TSDBStore,
ShardMapper: mapper,
DBRP: dbrpSvc,
}
qe.StatementExecutor = se
qe.StatementNormalizer = se
var checkSvc platform.CheckService
{
coordinator := coordinator.NewCoordinator(m.log, m.scheduler, m.executor)
@ -1022,6 +1044,7 @@ func (m *Launcher) run(ctx context.Context) (err error) {
VariableService: variableSvc,
PasswordsService: ts.PasswordsService,
InfluxQLService: storageQueryService,
InfluxqldService: iqlquery.NewProxyExecutor(m.log, qe),
FluxService: storageQueryService,
FluxLanguageService: fluxlang.DefaultService,
TaskService: taskSvc,

2
go.mod
View File

@ -88,7 +88,7 @@ require (
github.com/stretchr/testify v1.5.1
github.com/tcnksm/go-input v0.0.0-20180404061846-548a7d7a8ee8
github.com/testcontainers/testcontainers-go v0.0.0-20190108154635-47c0da630f72
github.com/tinylib/msgp v1.1.0 // indirect
github.com/tinylib/msgp v1.1.0
github.com/tylerb/graceful v1.2.15
github.com/uber-go/atomic v1.3.2 // indirect
github.com/uber/jaeger-client-go v2.16.0+incompatible

View File

@ -9,6 +9,7 @@ import (
"github.com/influxdata/influxdb/v2/chronograf/server"
"github.com/influxdata/influxdb/v2/dbrp"
"github.com/influxdata/influxdb/v2/http/metric"
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/feature"
"github.com/influxdata/influxdb/v2/kit/prom"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
@ -76,6 +77,7 @@ type APIBackend struct {
VariableService influxdb.VariableService
PasswordsService influxdb.PasswordsService
InfluxQLService query.ProxyQueryService
InfluxqldService influxql.ProxyQueryService
FluxService query.ProxyQueryService
FluxLanguageService influxdb.FluxLanguageService
TaskService influxdb.TaskService

View File

@ -0,0 +1,158 @@
package http
import (
"fmt"
"net/http"
"strings"
"github.com/influxdata/influxdb/v2"
platcontext "github.com/influxdata/influxdb/v2/context"
"github.com/opentracing/opentracing-go"
)
type Influx1xAuthenticationHandler struct {
influxdb.HTTPErrorHandler
next http.Handler
auth influxdb.AuthorizationService
user influxdb.UserService
}
// NewInflux1xAuthenticationHandler creates an authentication handler to process
// InfluxDB 1.x authentication requests.
func NewInflux1xAuthenticationHandler(next http.Handler, auth influxdb.AuthorizationService, user influxdb.UserService, h influxdb.HTTPErrorHandler) *Influx1xAuthenticationHandler {
return &Influx1xAuthenticationHandler{
HTTPErrorHandler: h,
next: next,
auth: auth,
user: user,
}
}
// ServeHTTP extracts the session or token from the http request and places the resulting authorizer on the request context.
func (h *Influx1xAuthenticationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// The ping endpoint does not need authorization
if r.URL.Path == "/ping" {
h.next.ServeHTTP(w, r)
return
}
ctx := r.Context()
creds, err := h.parseCredentials(r)
if err != nil {
UnauthorizedError(ctx, h, w)
return
}
auth, err := h.auth.FindAuthorizationByToken(ctx, creds.Token)
if err != nil {
UnauthorizedError(ctx, h, w)
return
}
var user *influxdb.User
if creds.Username != "" {
user, err = h.user.FindUser(ctx, influxdb.UserFilter{Name: &creds.Username})
if err != nil {
UnauthorizedError(ctx, h, w)
return
}
if user.ID != auth.UserID {
h.HandleHTTPError(ctx, &influxdb.Error{
Code: influxdb.EForbidden,
Msg: "Username and Token do not match",
}, w)
return
}
} else {
user, err = h.user.FindUserByID(ctx, auth.UserID)
if err != nil {
UnauthorizedError(ctx, h, w)
return
}
}
if err = h.isUserActive(user); err != nil {
InactiveUserError(ctx, h, w)
return
}
ctx = platcontext.SetAuthorizer(ctx, auth)
if span := opentracing.SpanFromContext(ctx); span != nil {
span.SetTag("user_id", auth.GetUserID().String())
}
h.next.ServeHTTP(w, r.WithContext(ctx))
}
func (h *Influx1xAuthenticationHandler) isUserActive(u *influxdb.User) error {
if u.Status != "inactive" {
return nil
}
return &influxdb.Error{Code: influxdb.EForbidden, Msg: "User is inactive"}
}
type credentials struct {
Username string
Token string
}
func parseToken(token string) (user, pass string, ok bool) {
s := strings.IndexByte(token, ':')
if s < 0 {
// Token <token>
return "", token, true
}
// Token <username>:<token>
return token[:s], token[s+1:], true
}
// parseCredentials parses a request and returns the authentication credentials.
// The credentials may be present as URL query params, or as a Basic
// Authentication header.
// As params: http://127.0.0.1/query?u=username&p=token
// As basic auth: http://username:token@127.0.0.1
// As Token in Authorization header: Token <username:token>
func (h *Influx1xAuthenticationHandler) parseCredentials(r *http.Request) (*credentials, error) {
q := r.URL.Query()
// Check for username and password in URL params.
if u, p := q.Get("u"), q.Get("p"); u != "" && p != "" {
return &credentials{
Username: u,
Token: p,
}, nil
}
// Check for the HTTP Authorization header.
if s := r.Header.Get("Authorization"); s != "" {
// Check for Bearer token.
strs := strings.Split(s, " ")
if len(strs) == 2 {
switch strs[0] {
case "Token":
if u, p, ok := parseToken(strs[1]); ok {
return &credentials{
Username: u,
Token: p,
}, nil
}
// fallback to only a token
}
}
// Check for basic auth.
if u, p, ok := r.BasicAuth(); ok {
return &credentials{
Username: u,
Token: p,
}, nil
}
}
return nil, fmt.Errorf("unable to parse authentication credentials")
}

View File

@ -0,0 +1,192 @@
package http
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/influxdb/v2"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/mock"
)
func TestInflux1xAuthenticationHandler(t *testing.T) {
var one = influxdb.ID(1)
type fields struct {
FindAuthorizationByTokenFn func(context.Context, string) (*influxdb.Authorization, error)
FindUserFn func(context.Context, influxdb.UserFilter) (*influxdb.User, error)
FindUserByIDFn func(context.Context, influxdb.ID) (*influxdb.User, error)
}
type exp struct {
code int
}
basic := func(u, p string) func(r *http.Request) {
return func(r *http.Request) {
r.SetBasicAuth(u, p)
}
}
token := func(u, p string) func(r *http.Request) {
return func(r *http.Request) {
if u == "" {
SetToken(p, r)
} else {
SetToken(u+":"+p, r)
}
}
}
query := func(u, p string) func(r *http.Request) {
return func(r *http.Request) {
v := r.URL.Query()
v.Add("u", u)
v.Add("p", p)
r.URL.RawQuery = v.Encode()
}
}
const (
User = "sydney"
Token = "my-token"
)
tests := []struct {
name string
fields fields
auth func(r *http.Request)
exp exp
}{
// successful requests
{
name: "basic auth",
fields: fields{},
auth: basic(User, Token),
exp: exp{
code: http.StatusOK,
},
},
{
name: "query string",
fields: fields{},
auth: query(User, Token),
exp: exp{
code: http.StatusOK,
},
},
{
name: "Token as user:token",
fields: fields{},
auth: token(User, Token),
exp: exp{
code: http.StatusOK,
},
},
{
name: "Token as token",
fields: fields{},
auth: token("", Token),
exp: exp{
code: http.StatusOK,
},
},
{
name: "token does not exist",
fields: fields{
FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) {
return nil, fmt.Errorf("authorization not found")
},
},
exp: exp{
code: http.StatusUnauthorized,
},
},
{
name: "user is inactive",
fields: fields{
FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) {
return &influxdb.Authorization{UserID: one}, nil
},
FindUserFn: func(ctx context.Context, f influxdb.UserFilter) (*influxdb.User, error) {
return &influxdb.User{ID: one, Status: "inactive"}, nil
},
},
auth: basic(User, Token),
exp: exp{
code: http.StatusForbidden,
},
},
{
name: "username and token mismatch",
fields: fields{
FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) {
return &influxdb.Authorization{UserID: one}, nil
},
FindUserFn: func(ctx context.Context, f influxdb.UserFilter) (*influxdb.User, error) {
return &influxdb.User{ID: influxdb.ID(2)}, nil
},
},
auth: basic(User, Token),
exp: exp{
code: http.StatusForbidden,
},
},
{
name: "no auth provided",
fields: fields{
FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) {
return &influxdb.Authorization{}, nil
},
},
exp: exp{
code: http.StatusUnauthorized,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var h *Influx1xAuthenticationHandler
{
auth := &mock.AuthorizationService{FindAuthorizationByTokenFn: tt.fields.FindAuthorizationByTokenFn}
if auth.FindAuthorizationByTokenFn == nil {
auth.FindAuthorizationByTokenFn = func(ctx context.Context, token string) (*influxdb.Authorization, error) {
return &influxdb.Authorization{UserID: one}, nil
}
}
user := &mock.UserService{FindUserFn: tt.fields.FindUserFn, FindUserByIDFn: tt.fields.FindUserByIDFn}
if user.FindUserFn == nil {
user.FindUserFn = func(context.Context, influxdb.UserFilter) (*influxdb.User, error) {
return &influxdb.User{ID: one}, nil
}
}
if user.FindUserByIDFn == nil {
user.FindUserByIDFn = func(_ context.Context, id influxdb.ID) (*influxdb.User, error) {
return &influxdb.User{ID: id}, nil
}
}
next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
})
h = NewInflux1xAuthenticationHandler(next, auth, user, kithttp.ErrorHandler(0))
}
w := httptest.NewRecorder()
r := httptest.NewRequest("POST", "http://any.url", nil)
if tt.auth != nil {
tt.auth(r)
}
h.ServeHTTP(w, r)
if got, want := w.Code, tt.exp.code; got != want {
t.Errorf("expected status code to be %d got %d", want, got)
}
})
}
}

56
http/influxql_handler.go Normal file
View File

@ -0,0 +1,56 @@
package http
import (
"net/http"
platform "github.com/influxdata/influxdb/v2"
influxqld "github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/influxql/control"
"github.com/influxdata/influxdb/v2/query"
"go.uber.org/zap"
)
// InfluxqlHandler mimics the /query handler from influxdb, but, enriches
// with org and forwards requests to the transpiler service.
type InfluxqlHandler struct {
*InfluxQLBackend
LegacyHandlerConfig
Metrics *control.ControllerMetrics
}
type InfluxQLBackend struct {
platform.HTTPErrorHandler
Logger *zap.Logger
AuthorizationService platform.AuthorizationService
OrganizationService platform.OrganizationService
ProxyQueryService query.ProxyQueryService
InfluxqldQueryService influxqld.ProxyQueryService
}
// NewInfluxQLBackend constructs an InfluxQLBackend from a LegacyBackend.
func NewInfluxQLBackend(b *LegacyBackend) *InfluxQLBackend {
return &InfluxQLBackend{
HTTPErrorHandler: b.HTTPErrorHandler,
Logger: b.Logger.With(zap.String("handler", "influxql")),
AuthorizationService: b.AuthorizationService,
OrganizationService: b.OrganizationService,
InfluxqldQueryService: b.InfluxqldQueryService,
}
}
// NewInfluxQLHandler returns a new instance of InfluxqlHandler to handle influxql v1 queries
func NewInfluxQLHandler(b *InfluxQLBackend, config LegacyHandlerConfig) *InfluxqlHandler {
return &InfluxqlHandler{
InfluxQLBackend: b,
LegacyHandlerConfig: config,
Metrics: control.NewControllerMetrics([]string{}),
}
}
func (h *InfluxqlHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
h.handleInfluxqldQuery(w, req)
}
// DefaultChunkSize is the default number of points to write in
// one chunk.
const DefaultChunkSize = 10000

194
http/influxqld_handler.go Normal file
View File

@ -0,0 +1,194 @@
package http
import (
"context"
"encoding/json"
"io/ioutil"
"mime"
"net/http"
"strconv"
"strings"
"github.com/influxdata/flux/iocounter"
"github.com/influxdata/influxdb/v2"
pcontext "github.com/influxdata/influxdb/v2/context"
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
func (h *InfluxqlHandler) PrometheusCollectors() []prometheus.Collector {
return []prometheus.Collector{
h.Metrics.Requests,
h.Metrics.RequestsLatency,
}
}
// HandleQuery mimics the influxdb 1.0 /query
func (h *InfluxqlHandler) handleInfluxqldQuery(w http.ResponseWriter, r *http.Request) {
span, r := tracing.ExtractFromHTTPRequest(r, "handleInfluxqldQuery")
defer span.Finish()
if id, _, found := tracing.InfoFromSpan(span); found {
w.Header().Set(traceIDHeader, id)
}
ctx := r.Context()
defer r.Body.Close()
auth, err := getAuthorization(ctx)
if err != nil {
h.HandleHTTPError(ctx, err, w)
return
}
if !auth.IsActive() {
h.HandleHTTPError(ctx, &influxdb.Error{
Code: influxdb.EForbidden,
Msg: "insufficient permissions",
}, w)
return
}
o, err := h.OrganizationService.FindOrganization(ctx, influxdb.OrganizationFilter{
ID: &auth.OrgID,
})
if err != nil {
h.HandleHTTPError(ctx, err, w)
return
}
var query string
// Attempt to read the form value from the "q" form value.
if qp := strings.TrimSpace(r.FormValue("q")); qp != "" {
query = qp
} else if r.MultipartForm != nil && r.MultipartForm.File != nil {
// If we have a multipart/form-data, try to retrieve a file from 'q'.
if fhs := r.MultipartForm.File["q"]; len(fhs) > 0 {
d, err := ioutil.ReadFile(fhs[0].Filename)
if err != nil {
h.HandleHTTPError(ctx, err, w)
return
}
query = string(d)
}
} else {
ct := r.Header.Get("Content-Type")
mt, _, err := mime.ParseMediaType(ct)
if err != nil {
h.HandleHTTPError(ctx, &influxdb.Error{
Code: influxdb.EInvalid,
Err: err,
}, w)
return
}
if mt == "application/vnd.influxql" {
if d, err := ioutil.ReadAll(r.Body); err != nil {
h.HandleHTTPError(ctx, err, w)
return
} else {
query = string(d)
}
}
}
// parse the parameters
rawParams := r.FormValue("params")
var params map[string]interface{}
if rawParams != "" {
decoder := json.NewDecoder(strings.NewReader(rawParams))
decoder.UseNumber()
if err := decoder.Decode(&params); err != nil {
h.HandleHTTPError(ctx, &influxdb.Error{
Code: influxdb.EInvalid,
Msg: "error parsing query parameters",
Err: err,
}, w)
return
}
// Convert json.Number into int64 and float64 values
for k, v := range params {
if v, ok := v.(json.Number); ok {
var err error
if strings.Contains(string(v), ".") {
params[k], err = v.Float64()
} else {
params[k], err = v.Int64()
}
if err != nil {
h.HandleHTTPError(ctx, &influxdb.Error{
Code: influxdb.EInvalid,
Msg: "error parsing json value",
Err: err,
}, w)
return
}
}
}
}
// Parse chunk size. Use default if not provided or cannot be parsed
chunked := r.FormValue("chunked") == "true"
chunkSize := DefaultChunkSize
if chunked {
if n, err := strconv.ParseInt(r.FormValue("chunk_size"), 10, 64); err == nil && int(n) > 0 {
chunkSize = int(n)
}
}
req := &influxql.QueryRequest{
DB: r.FormValue("db"),
RP: r.FormValue("rp"),
Epoch: r.FormValue("epoch"),
EncodingFormat: influxql.EncodingFormatFromMimeType(r.Header.Get("Accept")),
OrganizationID: o.ID,
Query: query,
Params: params,
Source: r.Header.Get("User-Agent"),
Authorization: auth,
Chunked: chunked,
ChunkSize: chunkSize,
}
var respSize int64
cw := iocounter.Writer{Writer: w}
_, err = h.InfluxqldQueryService.Query(ctx, &cw, req)
respSize = cw.Count()
if err != nil {
if respSize == 0 {
// Only record the error headers IFF nothing has been written to w.
h.HandleHTTPError(ctx, err, w)
return
}
h.Logger.Info("error writing response to client",
zap.String("org", o.Name),
zap.String("handler", "influxql"),
zap.Error(err),
)
}
}
// getAuthorization extracts authorization information from a context.Context.
// It guards against non influxdb.Authorization values for authorization and
// InfluxQL feature flag not enabled.
func getAuthorization(ctx context.Context) (*influxdb.Authorization, error) {
authorizer, err := pcontext.GetAuthorizer(ctx)
if err != nil {
return nil, err
}
a, ok := authorizer.(*influxdb.Authorization)
if !ok {
return nil, &influxdb.Error{
Code: influxdb.EForbidden,
Msg: "insufficient permissions; session not supported",
}
}
return a, nil
}

View File

@ -0,0 +1,265 @@
package http
import (
"context"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/google/go-cmp/cmp"
platform "github.com/influxdata/influxdb/v2"
pcontext "github.com/influxdata/influxdb/v2/context"
"github.com/influxdata/influxdb/v2/influxql"
imock "github.com/influxdata/influxdb/v2/influxql/mock"
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
"github.com/influxdata/influxdb/v2/mock"
)
var cmpOpts = []cmp.Option{
// Ignore request ID when comparing headers.
cmp.Comparer(func(h1, h2 http.Header) bool {
for k, v1 := range h1 {
if k == "X-Request-Id" || k == "Request-Id" {
continue
}
if v2, ok := h2[k]; !ok || !cmp.Equal(v1, v2) {
return false
}
}
for k, v2 := range h2 {
if k == "X-Request-Id" || k == "Request-Id" {
continue
}
if v1, ok := h1[k]; !ok || !cmp.Equal(v2, v1) {
return false
}
}
return true
}),
}
func TestInfluxQLdHandler_HandleQuery(t *testing.T) {
t.Skip("almost good to go, only unexpected content types")
ctx := context.Background()
type fields struct {
OrganizationService platform.OrganizationService
ProxyQueryService influxql.ProxyQueryService
}
type args struct {
w *httptest.ResponseRecorder
r *http.Request
}
tests := []struct {
name string
fields fields
args args
context context.Context
wantCode int
wantHeader http.Header
wantBody []byte
wantLogs []string
}{
{
name: "no token causes http error",
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantCode: http.StatusInternalServerError,
wantHeader: http.Header{
"X-Platform-Error-Code": {"internal error"},
"Content-Type": {"application/json; charset=utf-8"},
},
wantBody: []byte(`{"code":"internal error","message":"authorizer not found on context"}`),
},
{
name: "inactive authorizer",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Inactive}),
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantCode: http.StatusForbidden,
wantHeader: http.Header{
"Content-Type": {"application/json; charset=utf-8"},
"X-Platform-Error-Code": {"forbidden"},
},
wantBody: []byte(`{"code":"forbidden","message":"insufficient permissions"}`),
},
{
name: "unknown organization",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return nil, &platform.Error{
Code: platform.EForbidden,
Msg: "nope",
}
},
},
},
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantCode: http.StatusForbidden,
wantHeader: http.Header{
"Content-Type": {"application/json; charset=utf-8"},
"X-Platform-Error-Code": {"forbidden"},
},
wantBody: []byte(`{"code":"forbidden","message":"nope"}`),
},
{
name: "bad query",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return &platform.Organization{}, nil
},
},
ProxyQueryService: &imock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
return influxql.Statistics{}, &platform.Error{
Code: platform.EUnprocessableEntity,
Msg: "bad query",
}
},
},
},
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantCode: http.StatusUnprocessableEntity,
wantHeader: http.Header{
"X-Platform-Error-Code": {"unprocessable entity"},
"Content-Type": {"application/json; charset=utf-8"},
},
wantBody: []byte(`{"code":"unprocessable entity","message":"bad query"}`),
},
{
name: "query fails during write",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return &platform.Organization{}, nil
},
},
ProxyQueryService: &imock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
_, _ = io.WriteString(w, "fail")
return influxql.Statistics{}, &platform.Error{
Code: platform.EInternal,
Msg: "during query",
}
},
},
},
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantBody: []byte("fail"),
wantCode: http.StatusOK,
wantHeader: http.Header{
"Content-Type": {"application/json"},
},
},
{
name: "good query unknown accept header",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return &platform.Organization{}, nil
},
},
ProxyQueryService: &imock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
_, err := io.WriteString(w, "good")
return influxql.Statistics{}, err
},
},
},
args: args{
r: WithHeader(httptest.NewRequest("POST", "/query", nil).WithContext(ctx), "Accept", "text/csv"),
w: httptest.NewRecorder(),
},
wantBody: []byte("good"),
wantCode: http.StatusOK,
wantHeader: http.Header{
"Content-Type": {"text/csv"},
},
wantLogs: []string{"text/csv"},
},
{
name: "good query",
context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}),
fields: fields{
OrganizationService: &mock.OrganizationService{
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return &platform.Organization{}, nil
},
},
ProxyQueryService: &imock.ProxyQueryService{
QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
_, err := io.WriteString(w, "good")
return influxql.Statistics{}, err
},
},
},
args: args{
r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx),
w: httptest.NewRecorder(),
},
wantBody: []byte("good"),
wantCode: http.StatusOK,
wantHeader: http.Header{
"Content-Type": {"application/json"},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := &InfluxQLBackend{
HTTPErrorHandler: kithttp.ErrorHandler(0),
OrganizationService: tt.fields.OrganizationService,
InfluxqldQueryService: tt.fields.ProxyQueryService,
}
h := NewInfluxQLHandler(b, LegacyHandlerConfig{})
if tt.context != nil {
tt.args.r = tt.args.r.WithContext(tt.context)
}
tt.args.r.Header.Add("Content-Type", "application/vnd.influxql")
h.handleInfluxqldQuery(tt.args.w, tt.args.r)
if got, want := tt.args.w.Code, tt.wantCode; got != want {
t.Errorf("HandleQuery() status code = got %d / want %d", got, want)
}
if got, want := tt.args.w.Result().Header, tt.wantHeader; !cmp.Equal(got, want, cmpOpts...) {
t.Errorf("HandleQuery() headers = got(-)/want(+) %s", cmp.Diff(got, want))
}
if got, want := tt.args.w.Body.Bytes(), tt.wantBody; !cmp.Equal(got, want) {
t.Errorf("HandleQuery() body = got(-)/want(+) %s", cmp.Diff(string(got), string(want)))
}
})
}
}
func WithHeader(r *http.Request, key, value string) *http.Request {
r.Header.Set(key, value)
return r
}

119
http/legacy.go Normal file
View File

@ -0,0 +1,119 @@
package http
import (
"net/http"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/http/metric"
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/cli"
"github.com/influxdata/influxdb/v2/query"
"github.com/influxdata/influxdb/v2/storage"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
// LegacyHandler is a collection of all the service handlers.
type LegacyHandler struct {
influxdb.HTTPErrorHandler
PointsWriterHandler *WriteHandler
PingHandler *PingHandler
InfluxQLHandler *InfluxqlHandler
}
type LegacyBackend struct {
influxdb.HTTPErrorHandler
Logger *zap.Logger
MaxBatchSizeBytes int64
WriteEventRecorder metric.EventRecorder
AuthorizationService influxdb.AuthorizationService
OrganizationService influxdb.OrganizationService
BucketService influxdb.BucketService
PointsWriter storage.PointsWriter
DBRPMappingServiceV2 influxdb.DBRPMappingServiceV2
ProxyQueryService query.ProxyQueryService
InfluxqldQueryService influxql.ProxyQueryService
}
// NewLegacyBackend constructs a legacy backend from an api backend.
func NewLegacyBackend(b *APIBackend) *LegacyBackend {
return &LegacyBackend{
HTTPErrorHandler: b.HTTPErrorHandler,
Logger: b.Logger,
// TODO(sgc): /write support
//MaxBatchSizeBytes: b.APIBackend.MaxBatchSizeBytes,
AuthorizationService: b.AuthorizationService,
OrganizationService: b.OrganizationService,
BucketService: b.BucketService,
PointsWriter: b.PointsWriter,
DBRPMappingServiceV2: b.DBRPService,
ProxyQueryService: b.InfluxQLService,
InfluxqldQueryService: b.InfluxqldService,
WriteEventRecorder: b.WriteEventRecorder,
}
}
// LegacyHandlerConfig provides configuration for the legacy handler.
type LegacyHandlerConfig struct {
Version string
DefaultRoutingKey string
}
func NewLegacyHandlerConfig() *LegacyHandlerConfig {
return &LegacyHandlerConfig{}
}
// Opts returns the CLI options for use with kit/cli.
// Currently set values on c are provided as the defaults.
func (c *LegacyHandlerConfig) Opts() []cli.Opt {
return []cli.Opt{
{
DestP: &c.DefaultRoutingKey,
Flag: "influxql-default-routing-key",
Default: "defaultQueue",
Desc: "Default routing key for publishing new query requests",
},
}
}
// NewLegacyHandler constructs a legacy handler from a backend.
func NewLegacyHandler(b *LegacyBackend, config LegacyHandlerConfig) *LegacyHandler {
h := &LegacyHandler{
HTTPErrorHandler: b.HTTPErrorHandler,
}
//pointsWriterBackend := NewPointsWriterBackend(b)
//h.PointsWriterHandler = NewWriterHandler(pointsWriterBackend,
// WithMaxBatchSizeBytes(b.MaxBatchSizeBytes))
influxqlBackend := NewInfluxQLBackend(b)
// TODO(desa): what to do for auth here?
h.InfluxQLHandler = NewInfluxQLHandler(influxqlBackend, config)
h.PingHandler = NewPingHandler(config.Version)
return h
}
func (h *LegacyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/write" {
h.PointsWriterHandler.ServeHTTP(w, r)
return
}
if r.URL.Path == "/ping" {
h.PingHandler.ServeHTTP(w, r)
return
}
if r.URL.Path == "/query" {
h.InfluxQLHandler.ServeHTTP(w, r)
return
}
w.WriteHeader(http.StatusNotFound)
}
func (h *LegacyHandler) PrometheusCollectors() []prometheus.Collector {
return h.InfluxQLHandler.PrometheusCollectors()
}

View File

@ -0,0 +1,144 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2 (interfaces: BucketService)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
gomock "github.com/golang/mock/gomock"
influxdb "github.com/influxdata/influxdb/v2"
reflect "reflect"
)
// MockBucketService is a mock of BucketService interface
type MockBucketService struct {
ctrl *gomock.Controller
recorder *MockBucketServiceMockRecorder
}
// MockBucketServiceMockRecorder is the mock recorder for MockBucketService
type MockBucketServiceMockRecorder struct {
mock *MockBucketService
}
// NewMockBucketService creates a new mock instance
func NewMockBucketService(ctrl *gomock.Controller) *MockBucketService {
mock := &MockBucketService{ctrl: ctrl}
mock.recorder = &MockBucketServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockBucketService) EXPECT() *MockBucketServiceMockRecorder {
return m.recorder
}
// CreateBucket mocks base method
func (m *MockBucketService) CreateBucket(arg0 context.Context, arg1 *influxdb.Bucket) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateBucket", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateBucket indicates an expected call of CreateBucket
func (mr *MockBucketServiceMockRecorder) CreateBucket(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockBucketService)(nil).CreateBucket), arg0, arg1)
}
// DeleteBucket mocks base method
func (m *MockBucketService) DeleteBucket(arg0 context.Context, arg1 influxdb.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteBucket", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteBucket indicates an expected call of DeleteBucket
func (mr *MockBucketServiceMockRecorder) DeleteBucket(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockBucketService)(nil).DeleteBucket), arg0, arg1)
}
// FindBucket mocks base method
func (m *MockBucketService) FindBucket(arg0 context.Context, arg1 influxdb.BucketFilter) (*influxdb.Bucket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindBucket", arg0, arg1)
ret0, _ := ret[0].(*influxdb.Bucket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindBucket indicates an expected call of FindBucket
func (mr *MockBucketServiceMockRecorder) FindBucket(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucket", reflect.TypeOf((*MockBucketService)(nil).FindBucket), arg0, arg1)
}
// FindBucketByID mocks base method
func (m *MockBucketService) FindBucketByID(arg0 context.Context, arg1 influxdb.ID) (*influxdb.Bucket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindBucketByID", arg0, arg1)
ret0, _ := ret[0].(*influxdb.Bucket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindBucketByID indicates an expected call of FindBucketByID
func (mr *MockBucketServiceMockRecorder) FindBucketByID(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByID", reflect.TypeOf((*MockBucketService)(nil).FindBucketByID), arg0, arg1)
}
// FindBucketByName mocks base method
func (m *MockBucketService) FindBucketByName(arg0 context.Context, arg1 influxdb.ID, arg2 string) (*influxdb.Bucket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindBucketByName", arg0, arg1, arg2)
ret0, _ := ret[0].(*influxdb.Bucket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindBucketByName indicates an expected call of FindBucketByName
func (mr *MockBucketServiceMockRecorder) FindBucketByName(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByName", reflect.TypeOf((*MockBucketService)(nil).FindBucketByName), arg0, arg1, arg2)
}
// FindBuckets mocks base method
func (m *MockBucketService) FindBuckets(arg0 context.Context, arg1 influxdb.BucketFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FindBuckets", varargs...)
ret0, _ := ret[0].([]*influxdb.Bucket)
ret1, _ := ret[1].(int)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FindBuckets indicates an expected call of FindBuckets
func (mr *MockBucketServiceMockRecorder) FindBuckets(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBuckets", reflect.TypeOf((*MockBucketService)(nil).FindBuckets), varargs...)
}
// UpdateBucket mocks base method
func (m *MockBucketService) UpdateBucket(arg0 context.Context, arg1 influxdb.ID, arg2 influxdb.BucketUpdate) (*influxdb.Bucket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateBucket", arg0, arg1, arg2)
ret0, _ := ret[0].(*influxdb.Bucket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateBucket indicates an expected call of UpdateBucket
func (mr *MockBucketServiceMockRecorder) UpdateBucket(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBucket", reflect.TypeOf((*MockBucketService)(nil).UpdateBucket), arg0, arg1, arg2)
}

View File

@ -0,0 +1,114 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2 (interfaces: DBRPMappingService)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
gomock "github.com/golang/mock/gomock"
influxdb "github.com/influxdata/influxdb/v2"
reflect "reflect"
)
// MockDBRPMappingService is a mock of DBRPMappingService interface
type MockDBRPMappingService struct {
ctrl *gomock.Controller
recorder *MockDBRPMappingServiceMockRecorder
}
// MockDBRPMappingServiceMockRecorder is the mock recorder for MockDBRPMappingService
type MockDBRPMappingServiceMockRecorder struct {
mock *MockDBRPMappingService
}
// NewMockDBRPMappingService creates a new mock instance
func NewMockDBRPMappingService(ctrl *gomock.Controller) *MockDBRPMappingService {
mock := &MockDBRPMappingService{ctrl: ctrl}
mock.recorder = &MockDBRPMappingServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockDBRPMappingService) EXPECT() *MockDBRPMappingServiceMockRecorder {
return m.recorder
}
// Create mocks base method
func (m *MockDBRPMappingService) Create(arg0 context.Context, arg1 *influxdb.DBRPMapping) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Create", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Create indicates an expected call of Create
func (mr *MockDBRPMappingServiceMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDBRPMappingService)(nil).Create), arg0, arg1)
}
// Delete mocks base method
func (m *MockDBRPMappingService) Delete(arg0 context.Context, arg1, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete
func (mr *MockDBRPMappingServiceMockRecorder) Delete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDBRPMappingService)(nil).Delete), arg0, arg1, arg2, arg3)
}
// Find mocks base method
func (m *MockDBRPMappingService) Find(arg0 context.Context, arg1 influxdb.DBRPMappingFilter) (*influxdb.DBRPMapping, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Find", arg0, arg1)
ret0, _ := ret[0].(*influxdb.DBRPMapping)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Find indicates an expected call of Find
func (mr *MockDBRPMappingServiceMockRecorder) Find(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Find", reflect.TypeOf((*MockDBRPMappingService)(nil).Find), arg0, arg1)
}
// FindBy mocks base method
func (m *MockDBRPMappingService) FindBy(arg0 context.Context, arg1, arg2, arg3 string) (*influxdb.DBRPMapping, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindBy", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*influxdb.DBRPMapping)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindBy indicates an expected call of FindBy
func (mr *MockDBRPMappingServiceMockRecorder) FindBy(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBy", reflect.TypeOf((*MockDBRPMappingService)(nil).FindBy), arg0, arg1, arg2, arg3)
}
// FindMany mocks base method
func (m *MockDBRPMappingService) FindMany(arg0 context.Context, arg1 influxdb.DBRPMappingFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FindMany", varargs...)
ret0, _ := ret[0].([]*influxdb.DBRPMapping)
ret1, _ := ret[1].(int)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FindMany indicates an expected call of FindMany
func (mr *MockDBRPMappingServiceMockRecorder) FindMany(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMany", reflect.TypeOf((*MockDBRPMappingService)(nil).FindMany), varargs...)
}

View File

@ -0,0 +1,113 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2 (interfaces: DBRPMappingServiceV2)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
gomock "github.com/golang/mock/gomock"
influxdb "github.com/influxdata/influxdb/v2"
reflect "reflect"
)
// MockDBRPMappingServiceV2 is a mock of DBRPMappingServiceV2 interface
type MockDBRPMappingServiceV2 struct {
ctrl *gomock.Controller
recorder *MockDBRPMappingServiceV2MockRecorder
}
// MockDBRPMappingServiceV2MockRecorder is the mock recorder for MockDBRPMappingServiceV2
type MockDBRPMappingServiceV2MockRecorder struct {
mock *MockDBRPMappingServiceV2
}
// NewMockDBRPMappingServiceV2 creates a new mock instance
func NewMockDBRPMappingServiceV2(ctrl *gomock.Controller) *MockDBRPMappingServiceV2 {
mock := &MockDBRPMappingServiceV2{ctrl: ctrl}
mock.recorder = &MockDBRPMappingServiceV2MockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockDBRPMappingServiceV2) EXPECT() *MockDBRPMappingServiceV2MockRecorder {
return m.recorder
}
// Create mocks base method
func (m *MockDBRPMappingServiceV2) Create(arg0 context.Context, arg1 *influxdb.DBRPMappingV2) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Create", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Create indicates an expected call of Create
func (mr *MockDBRPMappingServiceV2MockRecorder) Create(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).Create), arg0, arg1)
}
// Delete mocks base method
func (m *MockDBRPMappingServiceV2) Delete(arg0 context.Context, arg1, arg2 influxdb.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete
func (mr *MockDBRPMappingServiceV2MockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).Delete), arg0, arg1, arg2)
}
// FindByID mocks base method
func (m *MockDBRPMappingServiceV2) FindByID(arg0 context.Context, arg1, arg2 influxdb.ID) (*influxdb.DBRPMappingV2, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindByID", arg0, arg1, arg2)
ret0, _ := ret[0].(*influxdb.DBRPMappingV2)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindByID indicates an expected call of FindByID
func (mr *MockDBRPMappingServiceV2MockRecorder) FindByID(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindByID", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).FindByID), arg0, arg1, arg2)
}
// FindMany mocks base method
func (m *MockDBRPMappingServiceV2) FindMany(arg0 context.Context, arg1 influxdb.DBRPMappingFilterV2, arg2 ...influxdb.FindOptions) ([]*influxdb.DBRPMappingV2, int, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FindMany", varargs...)
ret0, _ := ret[0].([]*influxdb.DBRPMappingV2)
ret1, _ := ret[1].(int)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FindMany indicates an expected call of FindMany
func (mr *MockDBRPMappingServiceV2MockRecorder) FindMany(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMany", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).FindMany), varargs...)
}
// Update mocks base method
func (m *MockDBRPMappingServiceV2) Update(arg0 context.Context, arg1 *influxdb.DBRPMappingV2) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Update indicates an expected call of Update
func (mr *MockDBRPMappingServiceV2MockRecorder) Update(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).Update), arg0, arg1)
}

View File

@ -0,0 +1,47 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2/http/metric (interfaces: EventRecorder)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
gomock "github.com/golang/mock/gomock"
metric "github.com/influxdata/influxdb/v2/http/metric"
reflect "reflect"
)
// MockEventRecorder is a mock of EventRecorder interface
type MockEventRecorder struct {
ctrl *gomock.Controller
recorder *MockEventRecorderMockRecorder
}
// MockEventRecorderMockRecorder is the mock recorder for MockEventRecorder
type MockEventRecorderMockRecorder struct {
mock *MockEventRecorder
}
// NewMockEventRecorder creates a new mock instance
func NewMockEventRecorder(ctrl *gomock.Controller) *MockEventRecorder {
mock := &MockEventRecorder{ctrl: ctrl}
mock.recorder = &MockEventRecorderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockEventRecorder) EXPECT() *MockEventRecorderMockRecorder {
return m.recorder
}
// Record mocks base method
func (m *MockEventRecorder) Record(arg0 context.Context, arg1 metric.Event) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Record", arg0, arg1)
}
// Record indicates an expected call of Record
func (mr *MockEventRecorderMockRecorder) Record(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Record", reflect.TypeOf((*MockEventRecorder)(nil).Record), arg0, arg1)
}

View File

@ -0,0 +1,129 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2 (interfaces: OrganizationService)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
gomock "github.com/golang/mock/gomock"
influxdb "github.com/influxdata/influxdb/v2"
reflect "reflect"
)
// MockOrganizationService is a mock of OrganizationService interface
type MockOrganizationService struct {
ctrl *gomock.Controller
recorder *MockOrganizationServiceMockRecorder
}
// MockOrganizationServiceMockRecorder is the mock recorder for MockOrganizationService
type MockOrganizationServiceMockRecorder struct {
mock *MockOrganizationService
}
// NewMockOrganizationService creates a new mock instance
func NewMockOrganizationService(ctrl *gomock.Controller) *MockOrganizationService {
mock := &MockOrganizationService{ctrl: ctrl}
mock.recorder = &MockOrganizationServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockOrganizationService) EXPECT() *MockOrganizationServiceMockRecorder {
return m.recorder
}
// CreateOrganization mocks base method
func (m *MockOrganizationService) CreateOrganization(arg0 context.Context, arg1 *influxdb.Organization) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateOrganization", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateOrganization indicates an expected call of CreateOrganization
func (mr *MockOrganizationServiceMockRecorder) CreateOrganization(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrganization", reflect.TypeOf((*MockOrganizationService)(nil).CreateOrganization), arg0, arg1)
}
// DeleteOrganization mocks base method
func (m *MockOrganizationService) DeleteOrganization(arg0 context.Context, arg1 influxdb.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteOrganization", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteOrganization indicates an expected call of DeleteOrganization
func (mr *MockOrganizationServiceMockRecorder) DeleteOrganization(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOrganization", reflect.TypeOf((*MockOrganizationService)(nil).DeleteOrganization), arg0, arg1)
}
// FindOrganization mocks base method
func (m *MockOrganizationService) FindOrganization(arg0 context.Context, arg1 influxdb.OrganizationFilter) (*influxdb.Organization, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOrganization", arg0, arg1)
ret0, _ := ret[0].(*influxdb.Organization)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOrganization indicates an expected call of FindOrganization
func (mr *MockOrganizationServiceMockRecorder) FindOrganization(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganization", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganization), arg0, arg1)
}
// FindOrganizationByID mocks base method
func (m *MockOrganizationService) FindOrganizationByID(arg0 context.Context, arg1 influxdb.ID) (*influxdb.Organization, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOrganizationByID", arg0, arg1)
ret0, _ := ret[0].(*influxdb.Organization)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOrganizationByID indicates an expected call of FindOrganizationByID
func (mr *MockOrganizationServiceMockRecorder) FindOrganizationByID(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganizationByID", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganizationByID), arg0, arg1)
}
// FindOrganizations mocks base method
func (m *MockOrganizationService) FindOrganizations(arg0 context.Context, arg1 influxdb.OrganizationFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FindOrganizations", varargs...)
ret0, _ := ret[0].([]*influxdb.Organization)
ret1, _ := ret[1].(int)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FindOrganizations indicates an expected call of FindOrganizations
func (mr *MockOrganizationServiceMockRecorder) FindOrganizations(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganizations", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganizations), varargs...)
}
// UpdateOrganization mocks base method
func (m *MockOrganizationService) UpdateOrganization(arg0 context.Context, arg1 influxdb.ID, arg2 influxdb.OrganizationUpdate) (*influxdb.Organization, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateOrganization", arg0, arg1, arg2)
ret0, _ := ret[0].(*influxdb.Organization)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateOrganization indicates an expected call of UpdateOrganization
func (mr *MockOrganizationServiceMockRecorder) UpdateOrganization(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOrganization", reflect.TypeOf((*MockOrganizationService)(nil).UpdateOrganization), arg0, arg1, arg2)
}

View File

@ -0,0 +1,49 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influxdb/v2/storage (interfaces: PointsWriter)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
gomock "github.com/golang/mock/gomock"
models "github.com/influxdata/influxdb/v2/models"
reflect "reflect"
)
// MockPointsWriter is a mock of PointsWriter interface
type MockPointsWriter struct {
ctrl *gomock.Controller
recorder *MockPointsWriterMockRecorder
}
// MockPointsWriterMockRecorder is the mock recorder for MockPointsWriter
type MockPointsWriterMockRecorder struct {
mock *MockPointsWriter
}
// NewMockPointsWriter creates a new mock instance
func NewMockPointsWriter(ctrl *gomock.Controller) *MockPointsWriter {
mock := &MockPointsWriter{ctrl: ctrl}
mock.recorder = &MockPointsWriterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockPointsWriter) EXPECT() *MockPointsWriterMockRecorder {
return m.recorder
}
// WritePoints mocks base method
func (m *MockPointsWriter) WritePoints(arg0 context.Context, arg1 []models.Point) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WritePoints", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// WritePoints indicates an expected call of WritePoints
func (mr *MockPointsWriterMockRecorder) WritePoints(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePoints", reflect.TypeOf((*MockPointsWriter)(nil).WritePoints), arg0, arg1)
}

30
http/ping_handler.go Normal file
View File

@ -0,0 +1,30 @@
package http
import (
"net/http"
"github.com/influxdata/httprouter"
)
type PingHandler struct {
*httprouter.Router
InfluxDBVersion string
}
func NewPingHandler(version string) *PingHandler {
h := &PingHandler{
Router: httprouter.New(),
InfluxDBVersion: version,
}
h.HandlerFunc("GET", "/ping", h.pingHandler)
h.HandlerFunc("HEAD", "/ping", h.pingHandler)
return h
}
// handlePostLegacyWrite is the HTTP handler for the POST /write route.
func (h *PingHandler) pingHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-Influxdb-Build", "cloud2")
w.Header().Add("X-Influxdb-Version", h.InfluxDBVersion)
w.WriteHeader(http.StatusNoContent)
}

View File

@ -10,9 +10,10 @@ import (
// PlatformHandler is a collection of all the service handlers.
type PlatformHandler struct {
AssetHandler *AssetHandler
DocsHandler http.HandlerFunc
APIHandler http.Handler
AssetHandler *AssetHandler
DocsHandler http.HandlerFunc
APIHandler http.Handler
LegacyHandler http.Handler
}
// NewPlatformHandler returns a platform handler that serves the API and associated assets.
@ -37,15 +38,28 @@ func NewPlatformHandler(b *APIBackend, opts ...APIHandlerOptFn) *PlatformHandler
wrappedHandler := kithttp.SetCORS(h)
wrappedHandler = kithttp.SkipOptions(wrappedHandler)
legacyBackend := NewLegacyBackend(b)
lh := NewLegacyHandler(legacyBackend, LegacyHandlerConfig{})
return &PlatformHandler{
AssetHandler: assetHandler,
DocsHandler: Redoc("/api/v2/swagger.json"),
APIHandler: wrappedHandler,
LegacyHandler: NewInflux1xAuthenticationHandler(lh, b.AuthorizationService, b.UserService, b.HTTPErrorHandler),
}
}
// ServeHTTP delegates a request to the appropriate subhandler.
func (h *PlatformHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// TODO(affo): change this to be mounted prefixes: https://github.com/influxdata/idpe/issues/6689.
if r.URL.Path == "/v1/write" ||
r.URL.Path == "/write" ||
r.URL.Path == "/query" ||
r.URL.Path == "/ping" {
h.LegacyHandler.ServeHTTP(w, r)
return
}
if strings.HasPrefix(r.URL.Path, "/docs") {
h.DocsHandler.ServeHTTP(w, r)
return

View File

@ -0,0 +1,70 @@
package control
import (
"github.com/prometheus/client_golang/prometheus"
)
// controllerMetrics holds metrics related to the query controller.
type ControllerMetrics struct {
Requests *prometheus.CounterVec
NotImplemented *prometheus.CounterVec
RequestsLatency *prometheus.HistogramVec
ExecutingDuration *prometheus.HistogramVec
}
const (
LabelSuccess = "success"
LabelGenericError = "generic_err"
LabelParseErr = "parse_err"
LabelInterruptedErr = "interrupt_err"
LabelRuntimeError = "runtime_error"
LabelNotImplError = "not_implemented"
LabelNotExecuted = "not_executed"
)
func NewControllerMetrics(labels []string) *ControllerMetrics {
const (
namespace = "influxql"
subsystem = "service"
)
return &ControllerMetrics{
Requests: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "requests_total",
Help: "Count of the query requests",
}, append(labels, "result")),
NotImplemented: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "not_implemented_total",
Help: "Count of the query requests executing unimplemented operations",
}, []string{"operation"}),
RequestsLatency: prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "requests_latency_seconds",
Help: "Histogram of times spent for end-to-end latency (from issuing query request, to receiving the first byte of the response)",
Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7),
}, append(labels, "result")),
ExecutingDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "executing_duration_seconds",
Help: "Histogram of times spent executing queries",
Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7),
}, append(labels, "result")),
}
}
func (cm *ControllerMetrics) PrometheusCollectors() []prometheus.Collector {
return []prometheus.Collector{
cm.Requests,
cm.NotImplemented,
cm.ExecutingDuration,
}
}

15
influxql/errors.go Normal file
View File

@ -0,0 +1,15 @@
package influxql
// NotImplementedError is returned when a specific operation is unavailable.
type NotImplementedError struct {
Op string // Op is the name of the unimplemented operation
}
func (e *NotImplementedError) Error() string {
return "not implemented: " + e.Op
}
// ErrNotImplemented creates a NotImplementedError specifying op is unavailable.
func ErrNotImplemented(op string) error {
return &NotImplementedError{Op: op}
}

View File

@ -0,0 +1,24 @@
package mock
import (
"context"
"io"
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/check"
)
var _ influxql.ProxyQueryService = (*ProxyQueryService)(nil)
// ProxyQueryService mocks the InfluxQL QueryService for testing.
type ProxyQueryService struct {
QueryF func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error)
}
func (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) {
return s.QueryF(ctx, w, req)
}
func (s *ProxyQueryService) Check(ctx context.Context) check.Response {
return check.Response{Name: "Mock InfluxQL Proxy Query Service", Status: check.StatusPass}
}

View File

@ -1,6 +1,7 @@
package query
import (
"context"
"errors"
"fmt"
"strings"
@ -19,7 +20,7 @@ type CompileOptions struct {
type Statement interface {
// Prepare prepares the statement by mapping shards and finishing the creation
// of the query plan.
Prepare(shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error)
Prepare(ctx context.Context, shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error)
}
// compiledStatement represents a select statement that has undergone some initial processing to
@ -1087,7 +1088,7 @@ func (c *compiledStatement) subquery(stmt *influxql.SelectStatement) error {
return subquery.compile(stmt)
}
func (c *compiledStatement) Prepare(shardMapper ShardMapper, sopt SelectOptions) (PreparedStatement, error) {
func (c *compiledStatement) Prepare(ctx context.Context, shardMapper ShardMapper, sopt SelectOptions) (PreparedStatement, error) {
// If this is a query with a grouping, there is a bucket limit, and the minimum time has not been specified,
// we need to limit the possible time range that can be used when mapping shards but not when actually executing
// the select statement. Determine the shard time range here.
@ -1144,13 +1145,13 @@ func (c *compiledStatement) Prepare(shardMapper ShardMapper, sopt SelectOptions)
}
// Create an iterator creator based on the shards in the cluster.
shards, err := shardMapper.MapShards(c.stmt.Sources, timeRange, sopt)
shards, err := shardMapper.MapShards(ctx, c.stmt.Sources, timeRange, sopt)
if err != nil {
return nil, err
}
// Rewrite wildcards, if any exist.
mapper := FieldMapper{FieldMapper: shards}
mapper := queryFieldMapper{FieldMapper: newFieldMapperAdapter(shards, ctx)}
stmt, err := c.stmt.RewriteFields(mapper)
if err != nil {
shards.Close()

View File

@ -1,6 +1,7 @@
package query_test
import (
"context"
"testing"
"github.com/influxdata/influxdb/v2/influxql/query"
@ -419,7 +420,7 @@ func TestPrepare_MapShardsTimeRange(t *testing.T) {
}
shardMapper := ShardMapper{
MapShardsFn: func(_ influxql.Sources, tr influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, _ influxql.Sources, tr influxql.TimeRange) query.ShardGroup {
if got, want := tr.Min, mustParseTime(tt.start); !got.Equal(want) {
t.Errorf("unexpected start time: got=%s want=%s", got, want)
}
@ -430,7 +431,7 @@ func TestPrepare_MapShardsTimeRange(t *testing.T) {
},
}
if _, err := c.Prepare(&shardMapper, query.SelectOptions{}); err != nil {
if _, err := c.Prepare(context.Background(), &shardMapper, query.SelectOptions{}); err != nil {
t.Fatalf("unexpected error: %s", err)
}
})

View File

@ -2,112 +2,33 @@ package query
import (
"context"
"sync"
iql "github.com/influxdata/influxdb/v2/influxql"
)
// ExecutionContext contains state that the query is currently executing with.
type ExecutionContext struct {
context.Context
// The statement ID of the executing query.
statementID int
// The query ID of the executing query.
QueryID uint64
// The query task information available to the StatementExecutor.
task *Task
// Output channel where results and errors should be sent.
Results chan *Result
// StatisticsGatherer gathers metrics about the execution of a query.
StatisticsGatherer *iql.StatisticsGatherer
// Options used to start this query.
ExecutionOptions
mu sync.RWMutex
done chan struct{}
err error
}
func (ctx *ExecutionContext) watch() {
ctx.done = make(chan struct{})
if ctx.err != nil {
close(ctx.done)
return
}
go func() {
defer close(ctx.done)
var taskCtx <-chan struct{}
if ctx.task != nil {
taskCtx = ctx.task.closing
}
select {
case <-taskCtx:
ctx.err = ctx.task.Error()
if ctx.err == nil {
ctx.err = ErrQueryInterrupted
}
case <-ctx.AbortCh:
ctx.err = ErrQueryAborted
case <-ctx.Context.Done():
ctx.err = ctx.Context.Err()
}
}()
}
func (ctx *ExecutionContext) Done() <-chan struct{} {
ctx.mu.RLock()
if ctx.done != nil {
defer ctx.mu.RUnlock()
return ctx.done
}
ctx.mu.RUnlock()
ctx.mu.Lock()
defer ctx.mu.Unlock()
if ctx.done == nil {
ctx.watch()
}
return ctx.done
}
func (ctx *ExecutionContext) Err() error {
ctx.mu.RLock()
defer ctx.mu.RUnlock()
return ctx.err
}
func (ctx *ExecutionContext) Value(key interface{}) interface{} {
switch key {
case monitorContextKey{}:
return ctx.task
}
return ctx.Context.Value(key)
}
// send sends a Result to the Results channel and will exit if the query has
// been aborted.
func (ctx *ExecutionContext) send(result *Result) error {
result.StatementID = ctx.statementID
select {
case <-ctx.AbortCh:
return ErrQueryAborted
case ctx.Results <- result:
}
return nil
}
// Send sends a Result to the Results channel and will exit if the query has
// been interrupted or aborted.
func (ctx *ExecutionContext) Send(result *Result) error {
result.StatementID = ctx.statementID
func (ectx *ExecutionContext) Send(ctx context.Context, result *Result) error {
result.StatementID = ectx.statementID
select {
case <-ctx.Done():
return ctx.Err()
case ctx.Results <- result:
case ectx.Results <- result:
}
return nil
}

View File

@ -7,12 +7,15 @@ import (
"os"
"runtime/debug"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/influxdata/influxdb/v2"
iql "github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/influxql/control"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxql"
"github.com/opentracing/opentracing-go/log"
"go.uber.org/zap"
)
@ -26,29 +29,9 @@ var (
// ErrQueryInterrupted is an error returned when the query is interrupted.
ErrQueryInterrupted = errors.New("query interrupted")
// ErrQueryAborted is an error returned when the query is aborted.
ErrQueryAborted = errors.New("query aborted")
// ErrQueryEngineShutdown is an error sent when the query cannot be
// created because the query engine was shutdown.
ErrQueryEngineShutdown = errors.New("query engine shutdown")
// ErrQueryTimeoutLimitExceeded is an error when a query hits the max time allowed to run.
ErrQueryTimeoutLimitExceeded = errors.New("query-timeout limit exceeded")
// ErrAlreadyKilled is returned when attempting to kill a query that has already been killed.
ErrAlreadyKilled = errors.New("already killed")
)
// Statistics for the Executor
const (
statQueriesActive = "queriesActive" // Number of queries currently being executed.
statQueriesExecuted = "queriesExecuted" // Number of queries that have been executed (started).
statQueriesFinished = "queriesFinished" // Number of queries that have finished.
statQueryExecutionDuration = "queryDurationNs" // Total (wall) time spent executing queries.
statRecoveredPanics = "recoveredPanics" // Number of panics recovered by Query Executor.
// PanicCrashEnv is the environment variable that, when set, will prevent
// the handler from recovering any panics.
PanicCrashEnv = "INFLUXDB_PANIC_CRASH"
@ -119,6 +102,9 @@ func AuthorizerIsOpen(a Authorizer) bool {
// ExecutionOptions contains the options for executing a query.
type ExecutionOptions struct {
// OrgID is the organization for which this query is being executed.
OrgID influxdb.ID
// The database the query is running against.
Database string
@ -140,14 +126,10 @@ type ExecutionOptions struct {
// Quiet suppresses non-essential output from the query executor.
Quiet bool
// AbortCh is a channel that signals when results are no longer desired by the caller.
AbortCh <-chan struct{}
}
type (
iteratorsContextKey struct{}
monitorContextKey struct{}
)
// NewContextWithIterators returns a new context.Context with the *Iterators slice added.
@ -160,14 +142,24 @@ func NewContextWithIterators(ctx context.Context, itr *Iterators) context.Contex
type StatementExecutor interface {
// ExecuteStatement executes a statement. Results should be sent to the
// results channel in the ExecutionContext.
ExecuteStatement(stmt influxql.Statement, ctx *ExecutionContext) error
ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *ExecutionContext) error
}
// StatementNormalizer normalizes a statement before it is executed.
type StatementNormalizer interface {
// NormalizeStatement adds a default database and policy to the
// measurements in the statement.
NormalizeStatement(stmt influxql.Statement, database, retentionPolicy string) error
NormalizeStatement(ctx context.Context, stmt influxql.Statement, database, retentionPolicy string, ectx *ExecutionContext) error
}
var (
nullNormalizer StatementNormalizer = &nullNormalizerImpl{}
)
type nullNormalizerImpl struct{}
func (n *nullNormalizerImpl) NormalizeStatement(ctx context.Context, stmt influxql.Statement, database, retentionPolicy string, ectx *ExecutionContext) error {
return nil
}
// Executor executes every statement in an Query.
@ -175,98 +167,62 @@ type Executor struct {
// Used for executing a statement in the query.
StatementExecutor StatementExecutor
// Used for tracking running queries.
TaskManager *TaskManager
// StatementNormalizer normalizes a statement before it is executed.
StatementNormalizer StatementNormalizer
// Logger to use for all logging.
// Defaults to discarding all log output.
Logger *zap.Logger
Metrics *control.ControllerMetrics
// expvar-based stats.
stats *Statistics
log *zap.Logger
}
// NewExecutor returns a new instance of Executor.
func NewExecutor() *Executor {
func NewExecutor(logger *zap.Logger, cm *control.ControllerMetrics) *Executor {
return &Executor{
TaskManager: NewTaskManager(),
Logger: zap.NewNop(),
stats: &Statistics{},
StatementNormalizer: nullNormalizer,
Metrics: cm,
log: logger.With(zap.String("service", "query")),
}
}
// Statistics keeps statistics related to the Executor.
type Statistics struct {
ActiveQueries int64
ExecutedQueries int64
FinishedQueries int64
QueryExecutionDuration int64
RecoveredPanics int64
}
// Statistics returns statistics for periodic monitoring.
func (e *Executor) Statistics(tags map[string]string) []models.Statistic {
return []models.Statistic{{
Name: "queryExecutor",
Tags: tags,
Values: map[string]interface{}{
statQueriesActive: atomic.LoadInt64(&e.stats.ActiveQueries),
statQueriesExecuted: atomic.LoadInt64(&e.stats.ExecutedQueries),
statQueriesFinished: atomic.LoadInt64(&e.stats.FinishedQueries),
statQueryExecutionDuration: atomic.LoadInt64(&e.stats.QueryExecutionDuration),
statRecoveredPanics: atomic.LoadInt64(&e.stats.RecoveredPanics),
},
}}
}
// Close kills all running queries and prevents new queries from being attached.
func (e *Executor) Close() error {
return e.TaskManager.Close()
}
// SetLogOutput sets the writer to which all logs are written. It must not be
// called after Open is called.
func (e *Executor) WithLogger(log *zap.Logger) {
e.Logger = log.With(zap.String("service", "query"))
e.TaskManager.Logger = e.Logger
return nil
}
// ExecuteQuery executes each statement within a query.
func (e *Executor) ExecuteQuery(query *influxql.Query, opt ExecutionOptions, closing chan struct{}) <-chan *Result {
func (e *Executor) ExecuteQuery(ctx context.Context, query *influxql.Query, opt ExecutionOptions) (<-chan *Result, *iql.Statistics) {
results := make(chan *Result)
go e.executeQuery(query, opt, closing, results)
return results
statistics := new(iql.Statistics)
go e.executeQuery(ctx, query, opt, results, statistics)
return results, statistics
}
func (e *Executor) executeQuery(query *influxql.Query, opt ExecutionOptions, closing <-chan struct{}, results chan *Result) {
defer close(results)
func (e *Executor) executeQuery(ctx context.Context, query *influxql.Query, opt ExecutionOptions, results chan *Result, statistics *iql.Statistics) {
span, ctx := tracing.StartSpanFromContext(ctx)
defer func() {
close(results)
span.Finish()
}()
defer e.recover(query, results)
atomic.AddInt64(&e.stats.ActiveQueries, 1)
atomic.AddInt64(&e.stats.ExecutedQueries, 1)
gatherer := new(iql.StatisticsGatherer)
statusLabel := control.LabelSuccess
defer func(start time.Time) {
atomic.AddInt64(&e.stats.ActiveQueries, -1)
atomic.AddInt64(&e.stats.FinishedQueries, 1)
atomic.AddInt64(&e.stats.QueryExecutionDuration, time.Since(start).Nanoseconds())
dur := time.Since(start)
e.Metrics.ExecutingDuration.WithLabelValues(statusLabel).Observe(dur.Seconds())
}(time.Now())
ctx, detach, err := e.TaskManager.AttachQuery(query, opt, closing)
if err != nil {
select {
case results <- &Result{Err: err}:
case <-opt.AbortCh:
}
return
}
defer detach()
ectx := &ExecutionContext{StatisticsGatherer: gatherer, ExecutionOptions: opt}
// Setup the execution context that will be used when executing statements.
ctx.Results = results
ectx.Results = results
var i int
LOOP:
for ; i < len(query.Statements); i++ {
ctx.statementID = i
ectx.statementID = i
stmt := query.Statements[i]
// If a default database wasn't passed in by the caller, check the statement.
@ -298,9 +254,9 @@ LOOP:
case "_tags":
command = "SHOW TAG VALUES"
}
results <- &Result{
_ = ectx.Send(ctx, &Result{
Err: fmt.Errorf("unable to use system source '%s': use %s instead", s.Name, command),
}
})
break LOOP
}
}
@ -311,48 +267,49 @@ LOOP:
// This can occur on meta read statements which convert to SELECT statements.
newStmt, err := RewriteStatement(stmt)
if err != nil {
results <- &Result{Err: err}
_ = ectx.Send(ctx, &Result{Err: err})
break
}
stmt = newStmt
// Normalize each statement if possible.
if normalizer, ok := e.StatementExecutor.(StatementNormalizer); ok {
if err := normalizer.NormalizeStatement(stmt, defaultDB, opt.RetentionPolicy); err != nil {
if err := ctx.send(&Result{Err: err}); err == ErrQueryAborted {
if err := e.StatementNormalizer.NormalizeStatement(ctx, stmt, defaultDB, opt.RetentionPolicy, ectx); err != nil {
if err := ectx.Send(ctx, &Result{Err: err}); err != nil {
return
}
break
}
}
statistics.StatementCount += 1
// Log each normalized statement.
if !ctx.Quiet {
e.Logger.Info("Executing query", zap.Stringer("query", stmt))
if !ectx.Quiet {
e.log.Info("Executing query", zap.Stringer("query", stmt))
span.LogFields(log.String("normalized_query", stmt.String()))
}
gatherer.Reset()
stmtStart := time.Now()
// Send any other statements to the underlying statement executor.
err = e.StatementExecutor.ExecuteStatement(stmt, ctx)
if err == ErrQueryInterrupted {
// Query was interrupted so retrieve the real interrupt error from
// the query task if there is one.
if qerr := ctx.Err(); qerr != nil {
err = qerr
}
}
err = tracing.LogError(span, e.StatementExecutor.ExecuteStatement(ctx, stmt, ectx))
stmtDur := time.Since(stmtStart)
stmtStats := gatherer.Statistics()
stmtStats.ExecuteDuration = stmtDur - stmtStats.PlanDuration
statistics.Add(stmtStats)
// Send an error for this result if it failed for some reason.
if err != nil {
if err := ctx.send(&Result{
statusLabel = control.LabelNotExecuted
e.Metrics.Requests.WithLabelValues(statusLabel).Inc()
_ = ectx.Send(ctx, &Result{
StatementID: i,
Err: err,
}); err == ErrQueryAborted {
return
}
})
// Stop after the first error.
break
}
e.Metrics.Requests.WithLabelValues(statusLabel).Inc()
// Check if the query was interrupted during an uninterruptible statement.
interrupted := false
select {
@ -363,17 +320,19 @@ LOOP:
}
if interrupted {
statusLabel = control.LabelInterruptedErr
e.Metrics.Requests.WithLabelValues(statusLabel).Inc()
break
}
}
// Send error results for any statements which were not executed.
for ; i < len(query.Statements)-1; i++ {
if err := ctx.send(&Result{
if err := ectx.Send(ctx, &Result{
StatementID: i,
Err: ErrNotExecuted,
}); err == ErrQueryAborted {
return
}); err != nil {
break
}
}
}
@ -391,85 +350,17 @@ func init() {
func (e *Executor) recover(query *influxql.Query, results chan *Result) {
if err := recover(); err != nil {
atomic.AddInt64(&e.stats.RecoveredPanics, 1) // Capture the panic in _internal stats.
e.Logger.Error(fmt.Sprintf("%s [panic:%s] %s", query.String(), err, debug.Stack()))
e.log.Error(fmt.Sprintf("%s [panic:%s] %s", query.String(), err, debug.Stack()))
results <- &Result{
StatementID: -1,
Err: fmt.Errorf("%s [panic:%s]", query.String(), err),
}
if willCrash {
e.Logger.Error("\n\n=====\nAll goroutines now follow:")
e.log.Error("\n\n=====\nAll goroutines now follow:")
buf := debug.Stack()
e.Logger.Error(fmt.Sprintf("%s", buf))
e.log.Error(fmt.Sprintf("%s", buf))
os.Exit(1)
}
}
}
// Task is the internal data structure for managing queries.
// For the public use data structure that gets returned, see Task.
type Task struct {
query string
database string
status TaskStatus
startTime time.Time
closing chan struct{}
monitorCh chan error
err error
mu sync.Mutex
}
// Monitor starts a new goroutine that will monitor a query. The function
// will be passed in a channel to signal when the query has been finished
// normally. If the function returns with an error and the query is still
// running, the query will be terminated.
func (q *Task) Monitor(fn MonitorFunc) {
go q.monitor(fn)
}
// Error returns any asynchronous error that may have occurred while executing
// the query.
func (q *Task) Error() error {
q.mu.Lock()
defer q.mu.Unlock()
return q.err
}
func (q *Task) setError(err error) {
q.mu.Lock()
q.err = err
q.mu.Unlock()
}
func (q *Task) monitor(fn MonitorFunc) {
if err := fn(q.closing); err != nil {
select {
case <-q.closing:
case q.monitorCh <- err:
}
}
}
// close closes the query task closing channel if the query hasn't been previously killed.
func (q *Task) close() {
q.mu.Lock()
if q.status != KilledTask {
// Set the status to killed to prevent closing the channel twice.
q.status = KilledTask
close(q.closing)
}
q.mu.Unlock()
}
func (q *Task) kill() error {
q.mu.Lock()
if q.status == KilledTask {
q.mu.Unlock()
return ErrAlreadyKilled
}
q.status = KilledTask
close(q.closing)
q.mu.Unlock()
return nil
}

View File

@ -1,252 +1,33 @@
package query_test
import (
"context"
"errors"
"fmt"
"strings"
"testing"
"time"
"github.com/golang/mock/gomock"
iql "github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/influxql/control"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/influxql/query/mocks"
"github.com/influxdata/influxql"
"github.com/stretchr/testify/assert"
"go.uber.org/zap/zaptest"
)
var errUnexpected = errors.New("unexpected error")
type StatementExecutor struct {
ExecuteStatementFn func(stmt influxql.Statement, ctx *query.ExecutionContext) error
ExecuteStatementFn func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error
}
func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query.ExecutionContext) error {
return e.ExecuteStatementFn(stmt, ctx)
func (e *StatementExecutor) ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
return e.ExecuteStatementFn(ctx, stmt, ectx)
}
func NewQueryExecutor() *query.Executor {
return query.NewExecutor()
}
func TestQueryExecutor_AttachQuery(t *testing.T) {
q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
if err != nil {
t.Fatal(err)
}
e := NewQueryExecutor()
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
if ctx.QueryID != 1 {
t.Errorf("incorrect query id: exp=1 got=%d", ctx.QueryID)
}
return nil
},
}
discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil))
}
func TestQueryExecutor_KillQuery(t *testing.T) {
q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
if err != nil {
t.Fatal(err)
}
qid := make(chan uint64)
e := NewQueryExecutor()
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
switch stmt.(type) {
case *influxql.KillQueryStatement:
return e.TaskManager.ExecuteStatement(stmt, ctx)
}
qid <- ctx.QueryID
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(100 * time.Millisecond):
t.Error("killing the query did not close the channel after 100 milliseconds")
return errUnexpected
}
},
}
results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid))
if err != nil {
t.Fatal(err)
}
discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil))
result := <-results
if result.Err != query.ErrQueryInterrupted {
t.Errorf("unexpected error: %s", result.Err)
}
}
func TestQueryExecutor_KillQuery_Zombie(t *testing.T) {
q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
if err != nil {
t.Fatal(err)
}
qid := make(chan uint64)
done := make(chan struct{})
e := NewQueryExecutor()
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
switch stmt.(type) {
case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement:
return e.TaskManager.ExecuteStatement(stmt, ctx)
}
qid <- ctx.QueryID
select {
case <-ctx.Done():
select {
case <-done:
// Keep the query running until we run SHOW QUERIES.
case <-time.After(100 * time.Millisecond):
// Ensure that we don't have a lingering goroutine.
}
return query.ErrQueryInterrupted
case <-time.After(100 * time.Millisecond):
t.Error("killing the query did not close the channel after 100 milliseconds")
return errUnexpected
}
},
}
results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid))
if err != nil {
t.Fatal(err)
}
discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil))
// Display the queries and ensure that the original is still in there.
q, err = influxql.ParseQuery("SHOW QUERIES")
if err != nil {
t.Fatal(err)
}
tasks := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
// The killed query should still be there.
task := <-tasks
if len(task.Series) != 1 {
t.Errorf("expected %d series, got %d", 1, len(task.Series))
} else if len(task.Series[0].Values) != 2 {
t.Errorf("expected %d rows, got %d", 2, len(task.Series[0].Values))
}
close(done)
// The original query should return.
result := <-results
if result.Err != query.ErrQueryInterrupted {
t.Errorf("unexpected error: %s", result.Err)
}
}
func TestQueryExecutor_KillQuery_CloseTaskManager(t *testing.T) {
q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
if err != nil {
t.Fatal(err)
}
qid := make(chan uint64)
// Open a channel to stall the statement executor forever. This keeps the statement executor
// running even after we kill the query which can happen with some queries. We only close it once
// the test has finished running.
done := make(chan struct{})
defer close(done)
e := NewQueryExecutor()
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
switch stmt.(type) {
case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement:
return e.TaskManager.ExecuteStatement(stmt, ctx)
}
qid <- ctx.QueryID
<-done
return nil
},
}
// Kill the query. This should switch it into a zombie state.
go discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil))
q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid))
if err != nil {
t.Fatal(err)
}
discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil))
// Display the queries and ensure that the original is still in there.
q, err = influxql.ParseQuery("SHOW QUERIES")
if err != nil {
t.Fatal(err)
}
tasks := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
// The killed query should still be there.
task := <-tasks
if len(task.Series) != 1 {
t.Errorf("expected %d series, got %d", 1, len(task.Series))
} else if len(task.Series[0].Values) != 2 {
t.Errorf("expected %d rows, got %d", 2, len(task.Series[0].Values))
}
// Close the task manager to ensure it doesn't cause a panic.
if err := e.TaskManager.Close(); err != nil {
t.Errorf("unexpected error: %s", err)
}
}
func TestQueryExecutor_KillQuery_AlreadyKilled(t *testing.T) {
q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
if err != nil {
t.Fatal(err)
}
qid := make(chan uint64)
// Open a channel to stall the statement executor forever. This keeps the statement executor
// running even after we kill the query which can happen with some queries. We only close it once
// the test has finished running.
done := make(chan struct{})
defer close(done)
e := NewQueryExecutor()
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
switch stmt.(type) {
case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement:
return e.TaskManager.ExecuteStatement(stmt, ctx)
}
qid <- ctx.QueryID
<-done
return nil
},
}
// Kill the query. This should switch it into a zombie state.
go discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil))
q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid))
if err != nil {
t.Fatal(err)
}
discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil))
// Now attempt to kill it again. We should get an error.
results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
result := <-results
if got, want := result.Err, query.ErrAlreadyKilled; got != want {
t.Errorf("unexpected error: got=%v want=%v", got, want)
}
func NewQueryExecutor(t *testing.T) *query.Executor {
return query.NewExecutor(zaptest.NewLogger(t), control.NewControllerMetrics([]string{}))
}
func TestQueryExecutor_Interrupt(t *testing.T) {
@ -255,12 +36,12 @@ func TestQueryExecutor_Interrupt(t *testing.T) {
t.Fatal(err)
}
e := NewQueryExecutor()
e := NewQueryExecutor(t)
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
select {
case <-ctx.Done():
return ctx.Err()
return nil
case <-time.After(100 * time.Millisecond):
t.Error("killing the query did not close the channel after 100 milliseconds")
return errUnexpected
@ -268,11 +49,12 @@ func TestQueryExecutor_Interrupt(t *testing.T) {
},
}
closing := make(chan struct{})
results := e.ExecuteQuery(q, query.ExecutionOptions{}, closing)
close(closing)
ctx, cancel := context.WithCancel(context.Background())
results, _ := e.ExecuteQuery(ctx, q, query.ExecutionOptions{})
cancel()
result := <-results
if result.Err != query.ErrQueryInterrupted {
if result != nil && result.Err != query.ErrQueryInterrupted {
t.Errorf("unexpected error: %s", result.Err)
}
}
@ -286,189 +68,42 @@ func TestQueryExecutor_Abort(t *testing.T) {
ch1 := make(chan struct{})
ch2 := make(chan struct{})
e := NewQueryExecutor()
e := NewQueryExecutor(t)
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
<-ch1
if err := ctx.Send(&query.Result{Err: errUnexpected}); err != query.ErrQueryAborted {
t.Errorf("unexpected error: %v", err)
if err := ectx.Send(ctx, &query.Result{Err: errUnexpected}); err == nil {
t.Errorf("expected error")
}
close(ch2)
return nil
},
}
done := make(chan struct{})
close(done)
ctx, cancel := context.WithCancel(context.Background())
cancel()
results := e.ExecuteQuery(q, query.ExecutionOptions{AbortCh: done}, nil)
results, _ := e.ExecuteQuery(ctx, q, query.ExecutionOptions{})
close(ch1)
<-ch2
discardOutput(results)
}
func TestQueryExecutor_ShowQueries(t *testing.T) {
e := NewQueryExecutor()
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
switch stmt.(type) {
case *influxql.ShowQueriesStatement:
return e.TaskManager.ExecuteStatement(stmt, ctx)
}
t.Errorf("unexpected statement: %s", stmt)
return errUnexpected
},
}
q, err := influxql.ParseQuery(`SHOW QUERIES`)
if err != nil {
t.Fatal(err)
}
results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
result := <-results
if len(result.Series) != 1 {
t.Errorf("expected %d series, got %d", 1, len(result.Series))
} else if len(result.Series[0].Values) != 1 {
t.Errorf("expected %d row, got %d", 1, len(result.Series[0].Values))
}
if result.Err != nil {
t.Errorf("unexpected error: %s", result.Err)
}
}
func TestQueryExecutor_Limit_Timeout(t *testing.T) {
q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
if err != nil {
t.Fatal(err)
}
e := NewQueryExecutor()
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Second):
t.Errorf("timeout has not killed the query")
return errUnexpected
}
},
}
e.TaskManager.QueryTimeout = time.Nanosecond
results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
result := <-results
if result.Err == nil || !strings.Contains(result.Err.Error(), "query-timeout") {
t.Errorf("unexpected error: %s", result.Err)
}
}
func TestQueryExecutor_Limit_ConcurrentQueries(t *testing.T) {
q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
if err != nil {
t.Fatal(err)
}
qid := make(chan uint64)
e := NewQueryExecutor()
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
qid <- ctx.QueryID
<-ctx.Done()
return ctx.Err()
},
}
e.TaskManager.MaxConcurrentQueries = 1
defer e.Close()
// Start first query and wait for it to be executing.
go discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil))
<-qid
// Start second query and expect for it to fail.
results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
select {
case result := <-results:
if len(result.Series) != 0 {
t.Errorf("expected %d rows, got %d", 0, len(result.Series))
}
if result.Err == nil || !strings.Contains(result.Err.Error(), "max-concurrent-queries") {
t.Errorf("unexpected error: %s", result.Err)
}
case <-qid:
t.Errorf("unexpected statement execution for the second query")
}
}
func TestQueryExecutor_Close(t *testing.T) {
q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
if err != nil {
t.Fatal(err)
}
ch1 := make(chan struct{})
ch2 := make(chan struct{})
e := NewQueryExecutor()
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
close(ch1)
<-ctx.Done()
return ctx.Err()
},
}
results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
go func(results <-chan *query.Result) {
result := <-results
if result.Err != query.ErrQueryEngineShutdown {
t.Errorf("unexpected error: %s", result.Err)
}
close(ch2)
}(results)
// Wait for the statement to start executing.
<-ch1
// Close the query executor.
e.Close()
// Check that the statement gets interrupted and finishes.
select {
case <-ch2:
case <-time.After(100 * time.Millisecond):
t.Fatal("closing the query manager did not kill the query after 100 milliseconds")
}
results = e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
result := <-results
if len(result.Series) != 0 {
t.Errorf("expected %d rows, got %d", 0, len(result.Series))
}
if result.Err != query.ErrQueryEngineShutdown {
t.Errorf("unexpected error: %s", result.Err)
}
}
func TestQueryExecutor_Panic(t *testing.T) {
q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)
if err != nil {
t.Fatal(err)
}
e := NewQueryExecutor()
e := NewQueryExecutor(t)
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
panic("test error")
},
}
results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
results, _ := e.ExecuteQuery(context.Background(), q, query.ExecutionOptions{})
result := <-results
if len(result.Series) != 0 {
t.Errorf("expected %d rows, got %d", 0, len(result.Series))
@ -479,9 +114,9 @@ func TestQueryExecutor_Panic(t *testing.T) {
}
func TestQueryExecutor_InvalidSource(t *testing.T) {
e := NewQueryExecutor()
e := NewQueryExecutor(t)
e.StatementExecutor = &StatementExecutor{
ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error {
ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error {
return errors.New("statement executed unexpectedly")
},
}
@ -517,7 +152,7 @@ func TestQueryExecutor_InvalidSource(t *testing.T) {
continue
}
results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil)
results, _ := e.ExecuteQuery(context.Background(), q, query.ExecutionOptions{})
result := <-results
if len(result.Series) != 0 {
t.Errorf("%d. expected %d rows, got %d", 0, i, len(result.Series))
@ -528,6 +163,35 @@ func TestQueryExecutor_InvalidSource(t *testing.T) {
}
}
// This test verifies Statistics are gathered
// and that ExecuteDuration accounts for PlanDuration
func TestExecutor_ExecuteQuery_Statistics(t *testing.T) {
ctl := gomock.NewController(t)
defer ctl.Finish()
stmt := influxql.MustParseStatement("SELECT f0 FROM m0")
q := &influxql.Query{Statements: influxql.Statements{stmt, stmt}}
se := mocks.NewMockStatementExecutor(ctl)
se.EXPECT().ExecuteStatement(gomock.Any(), stmt, gomock.Any()).
Times(2).
DoAndReturn(func(ctx context.Context, statement influxql.Statement, ectx *query.ExecutionContext) error {
time.Sleep(10 * time.Millisecond)
ectx.StatisticsGatherer.Append(iql.NewImmutableCollector(iql.Statistics{PlanDuration: 5 * time.Millisecond}))
return nil
})
e := NewQueryExecutor(t)
e.StatementExecutor = se
ctx := context.Background()
results, stats := e.ExecuteQuery(ctx, q, query.ExecutionOptions{Quiet: true})
<-results
assert.GreaterOrEqual(t, int64(stats.ExecuteDuration), int64(10*time.Millisecond))
assert.Equal(t, 10*time.Millisecond, stats.PlanDuration)
assert.Equal(t, 2, stats.StatementCount)
}
func discardOutput(results <-chan *query.Result) {
for range results {
// Read all results and discard.

View File

@ -10,11 +10,11 @@ import (
"github.com/influxdata/influxql"
)
func (p *preparedStatement) Explain() (string, error) {
func (p *preparedStatement) Explain(ctx context.Context) (string, error) {
// Determine the cost of all iterators created as part of this plan.
ic := &explainIteratorCreator{ic: p.ic}
p.ic = ic
cur, err := p.Select(context.Background())
cur, err := p.Select(ctx)
p.ic = ic.ic
if err != nil {
@ -65,7 +65,7 @@ type explainIteratorCreator struct {
}
func (e *explainIteratorCreator) CreateIterator(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (Iterator, error) {
cost, err := e.ic.IteratorCost(m, opt)
cost, err := e.ic.IteratorCost(ctx, m, opt)
if err != nil {
return nil, err
}
@ -77,8 +77,8 @@ func (e *explainIteratorCreator) CreateIterator(ctx context.Context, m *influxql
return &nilFloatIterator{}, nil
}
func (e *explainIteratorCreator) IteratorCost(m *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) {
return e.ic.IteratorCost(m, opt)
func (e *explainIteratorCreator) IteratorCost(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) {
return e.ic.IteratorCost(ctx, m, opt)
}
func (e *explainIteratorCreator) Close() error {

View File

@ -11,13 +11,13 @@ import (
"github.com/influxdata/influxql"
)
// FieldMapper is a FieldMapper that wraps another FieldMapper and exposes
// queryFieldMapper is a FieldMapper that wraps another FieldMapper and exposes
// the functions implemented by the query engine.
type FieldMapper struct {
type queryFieldMapper struct {
influxql.FieldMapper
}
func (m FieldMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) {
func (m queryFieldMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) {
if mapper, ok := m.FieldMapper.(influxql.CallTypeMapper); ok {
typ, err := mapper.CallType(name, args)
if err != nil {

View File

@ -10,8 +10,8 @@ import (
"time"
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/v2"
internal "github.com/influxdata/influxdb/v2/influxql/query/internal"
"github.com/influxdata/influxdb/v2/pkg/tracing"
"github.com/influxdata/influxql"
)
@ -407,25 +407,6 @@ func NewInterruptIterator(input Iterator, closing <-chan struct{}) Iterator {
}
}
// NewCloseInterruptIterator returns an iterator that will invoke the Close() method on an
// iterator when the passed-in channel has been closed.
func NewCloseInterruptIterator(input Iterator, closing <-chan struct{}) Iterator {
switch input := input.(type) {
case FloatIterator:
return newFloatCloseInterruptIterator(input, closing)
case IntegerIterator:
return newIntegerCloseInterruptIterator(input, closing)
case UnsignedIterator:
return newUnsignedCloseInterruptIterator(input, closing)
case StringIterator:
return newStringCloseInterruptIterator(input, closing)
case BooleanIterator:
return newBooleanCloseInterruptIterator(input, closing)
default:
panic(fmt.Sprintf("unsupported close iterator iterator type: %T", input))
}
}
// IteratorScanner is used to scan the results of an iterator into a map.
type IteratorScanner interface {
// Peek retrieves information about the next point. It returns a timestamp, the name, and the tags.
@ -554,11 +535,14 @@ type IteratorCreator interface {
CreateIterator(ctx context.Context, source *influxql.Measurement, opt IteratorOptions) (Iterator, error)
// Determines the potential cost for creating an iterator.
IteratorCost(source *influxql.Measurement, opt IteratorOptions) (IteratorCost, error)
IteratorCost(ctx context.Context, source *influxql.Measurement, opt IteratorOptions) (IteratorCost, error)
}
// IteratorOptions is an object passed to CreateIterator to specify creation options.
type IteratorOptions struct {
// OrgID is the organization for which this query is being executed.
OrgID influxdb.ID
// Expression to iterate for.
// This can be VarRef or a Call.
Expr influxql.Expr
@ -681,14 +665,14 @@ func newIteratorOptionsStmt(stmt *influxql.SelectStatement, sopt SelectOptions)
opt.Limit, opt.Offset = stmt.Limit, stmt.Offset
opt.SLimit, opt.SOffset = stmt.SLimit, stmt.SOffset
opt.MaxSeriesN = sopt.MaxSeriesN
opt.Authorizer = sopt.Authorizer
opt.OrgID = sopt.OrgID
return opt, nil
}
func newIteratorOptionsSubstatement(ctx context.Context, stmt *influxql.SelectStatement, opt IteratorOptions) (IteratorOptions, error) {
subOpt, err := newIteratorOptionsStmt(stmt, SelectOptions{
Authorizer: opt.Authorizer,
OrgID: opt.OrgID,
MaxSeriesN: opt.MaxSeriesN,
})
if err != nil {
@ -702,7 +686,7 @@ func newIteratorOptionsSubstatement(ctx context.Context, stmt *influxql.SelectSt
subOpt.EndTime = opt.EndTime
}
if !subOpt.Interval.IsZero() && subOpt.EndTime == influxql.MaxTime {
if now := ctx.Value("now"); now != nil {
if now := ctx.Value(nowKey); now != nil {
subOpt.EndTime = now.(time.Time).UnixNano()
}
}
@ -1219,22 +1203,6 @@ func decodeIteratorStats(pb *internal.IteratorStats) IteratorStats {
}
}
func decodeIteratorTrace(ctx context.Context, data []byte) error {
pt := tracing.TraceFromContext(ctx)
if pt == nil {
return nil
}
var ct tracing.Trace
if err := ct.UnmarshalBinary(data); err != nil {
return err
}
pt.Merge(&ct)
return nil
}
// IteratorCost contains statistics retrieved for explaining what potential
// cost may be incurred by instantiating an iterator.
type IteratorCost struct {
@ -1327,12 +1295,6 @@ type fastDedupeKey struct {
values [2]interface{}
}
type reverseStringSlice []string
func (p reverseStringSlice) Len() int { return len(p) }
func (p reverseStringSlice) Less(i, j int) bool { return p[i] > p[j] }
func (p reverseStringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func abs(v int64) int64 {
sign := v >> 63
return (v ^ sign) - sign
@ -1371,33 +1333,6 @@ func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error {
}
}
func (enc *IteratorEncoder) EncodeTrace(trace *tracing.Trace) error {
data, err := trace.MarshalBinary()
if err != nil {
return err
}
buf, err := proto.Marshal(&internal.Point{
Name: proto.String(""),
Tags: proto.String(""),
Time: proto.Int64(0),
Nil: proto.Bool(false),
Trace: data,
})
if err != nil {
return err
}
if err = binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
return err
}
if _, err = enc.w.Write(buf); err != nil {
return err
}
return nil
}
// encode a stats object in the point stream.
func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error {
buf, err := proto.Marshal(&internal.Point{

View File

@ -0,0 +1,111 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/idpe/influxql/query (interfaces: ShardGroup)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
query "github.com/influxdata/influxdb/v2/influxql/query"
influxql "github.com/influxdata/influxql"
)
// MockShardGroup is a mock of ShardGroup interface
type MockShardGroup struct {
ctrl *gomock.Controller
recorder *MockShardGroupMockRecorder
}
// MockShardGroupMockRecorder is the mock recorder for MockShardGroup
type MockShardGroupMockRecorder struct {
mock *MockShardGroup
}
// NewMockShardGroup creates a new mock instance
func NewMockShardGroup(ctrl *gomock.Controller) *MockShardGroup {
mock := &MockShardGroup{ctrl: ctrl}
mock.recorder = &MockShardGroupMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockShardGroup) EXPECT() *MockShardGroupMockRecorder {
return m.recorder
}
// Close mocks base method
func (m *MockShardGroup) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close
func (mr *MockShardGroupMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockShardGroup)(nil).Close))
}
// CreateIterator mocks base method
func (m *MockShardGroup) CreateIterator(arg0 context.Context, arg1 *influxql.Measurement, arg2 query.IteratorOptions) (query.Iterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateIterator", arg0, arg1, arg2)
ret0, _ := ret[0].(query.Iterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateIterator indicates an expected call of CreateIterator
func (mr *MockShardGroupMockRecorder) CreateIterator(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateIterator", reflect.TypeOf((*MockShardGroup)(nil).CreateIterator), arg0, arg1, arg2)
}
// FieldDimensions mocks base method
func (m *MockShardGroup) FieldDimensions(arg0 context.Context, arg1 *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FieldDimensions", arg0, arg1)
ret0, _ := ret[0].(map[string]influxql.DataType)
ret1, _ := ret[1].(map[string]struct{})
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// FieldDimensions indicates an expected call of FieldDimensions
func (mr *MockShardGroupMockRecorder) FieldDimensions(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldDimensions", reflect.TypeOf((*MockShardGroup)(nil).FieldDimensions), arg0, arg1)
}
// IteratorCost mocks base method
func (m *MockShardGroup) IteratorCost(arg0 context.Context, arg1 *influxql.Measurement, arg2 query.IteratorOptions) (query.IteratorCost, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IteratorCost", arg0, arg1, arg2)
ret0, _ := ret[0].(query.IteratorCost)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// IteratorCost indicates an expected call of IteratorCost
func (mr *MockShardGroupMockRecorder) IteratorCost(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IteratorCost", reflect.TypeOf((*MockShardGroup)(nil).IteratorCost), arg0, arg1, arg2)
}
// MapType mocks base method
func (m *MockShardGroup) MapType(arg0 context.Context, arg1 *influxql.Measurement, arg2 string) influxql.DataType {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MapType", arg0, arg1, arg2)
ret0, _ := ret[0].(influxql.DataType)
return ret0
}
// MapType indicates an expected call of MapType
func (mr *MockShardGroupMockRecorder) MapType(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MapType", reflect.TypeOf((*MockShardGroup)(nil).MapType), arg0, arg1, arg2)
}

View File

@ -0,0 +1,52 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/idpe/influxql/query (interfaces: ShardMapper)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
query "github.com/influxdata/influxdb/v2/influxql/query"
influxql "github.com/influxdata/influxql"
)
// MockShardMapper is a mock of ShardMapper interface
type MockShardMapper struct {
ctrl *gomock.Controller
recorder *MockShardMapperMockRecorder
}
// MockShardMapperMockRecorder is the mock recorder for MockShardMapper
type MockShardMapperMockRecorder struct {
mock *MockShardMapper
}
// NewMockShardMapper creates a new mock instance
func NewMockShardMapper(ctrl *gomock.Controller) *MockShardMapper {
mock := &MockShardMapper{ctrl: ctrl}
mock.recorder = &MockShardMapperMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockShardMapper) EXPECT() *MockShardMapperMockRecorder {
return m.recorder
}
// MapShards mocks base method
func (m *MockShardMapper) MapShards(arg0 context.Context, arg1 influxql.Sources, arg2 influxql.TimeRange, arg3 query.SelectOptions) (query.ShardGroup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MapShards", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(query.ShardGroup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// MapShards indicates an expected call of MapShards
func (mr *MockShardMapperMockRecorder) MapShards(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MapShards", reflect.TypeOf((*MockShardMapper)(nil).MapShards), arg0, arg1, arg2, arg3)
}

View File

@ -0,0 +1,51 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/idpe/influxql/query (interfaces: StatementExecutor)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
query "github.com/influxdata/influxdb/v2/influxql/query"
influxql "github.com/influxdata/influxql"
)
// MockStatementExecutor is a mock of StatementExecutor interface
type MockStatementExecutor struct {
ctrl *gomock.Controller
recorder *MockStatementExecutorMockRecorder
}
// MockStatementExecutorMockRecorder is the mock recorder for MockStatementExecutor
type MockStatementExecutorMockRecorder struct {
mock *MockStatementExecutor
}
// NewMockStatementExecutor creates a new mock instance
func NewMockStatementExecutor(ctrl *gomock.Controller) *MockStatementExecutor {
mock := &MockStatementExecutor{ctrl: ctrl}
mock.recorder = &MockStatementExecutorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockStatementExecutor) EXPECT() *MockStatementExecutorMockRecorder {
return m.recorder
}
// ExecuteStatement mocks base method
func (m *MockStatementExecutor) ExecuteStatement(arg0 context.Context, arg1 influxql.Statement, arg2 *query.ExecutionContext) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExecuteStatement", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ExecuteStatement indicates an expected call of ExecuteStatement
func (mr *MockStatementExecutorMockRecorder) ExecuteStatement(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteStatement", reflect.TypeOf((*MockStatementExecutor)(nil).ExecuteStatement), arg0, arg1, arg2)
}

View File

@ -1,48 +0,0 @@
package query
import (
"context"
"time"
)
// MonitorFunc is a function that will be called to check if a query
// is currently healthy. If the query needs to be interrupted for some reason,
// the error should be returned by this function.
type MonitorFunc func(<-chan struct{}) error
// Monitor monitors the status of a query and returns whether the query should
// be aborted with an error.
type Monitor interface {
// Monitor starts a new goroutine that will monitor a query. The function
// will be passed in a channel to signal when the query has been finished
// normally. If the function returns with an error and the query is still
// running, the query will be terminated.
Monitor(fn MonitorFunc)
}
// MonitorFromContext returns a Monitor embedded within the Context
// if one exists.
func MonitorFromContext(ctx context.Context) Monitor {
v, _ := ctx.Value(monitorContextKey{}).(Monitor)
return v
}
// PointLimitMonitor is a query monitor that exits when the number of points
// emitted exceeds a threshold.
func PointLimitMonitor(cur Cursor, interval time.Duration, limit int) MonitorFunc {
return func(closing <-chan struct{}) error {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
stats := cur.Stats()
if stats.PointN >= limit {
return ErrMaxSelectPointsLimitExceeded(stats.PointN, limit)
}
case <-closing:
return nil
}
}
}
}

View File

@ -1,61 +0,0 @@
package query_test
import (
"context"
"testing"
"time"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxql"
)
func TestPointLimitMonitor(t *testing.T) {
t.Parallel()
stmt := MustParseSelectStatement(`SELECT mean(value) FROM cpu`)
// Create a new task manager so we can use the query task as a monitor.
taskManager := query.NewTaskManager()
ctx, detach, err := taskManager.AttachQuery(&influxql.Query{
Statements: []influxql.Statement{stmt},
}, query.ExecutionOptions{}, nil)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
defer detach()
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
return &FloatIterator{
Points: []query.FloatPoint{
{Name: "cpu", Value: 35},
},
Context: ctx,
Delay: 2 * time.Second,
stats: query.IteratorStats{
PointN: 10,
},
}, nil
},
Fields: map[string]influxql.DataType{
"value": influxql.Float,
},
}
},
}
cur, err := query.Select(ctx, stmt, &shardMapper, query.SelectOptions{
MaxPointN: 1,
})
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := query.DrainCursor(cur); err == nil {
t.Fatalf("expected an error")
} else if got, want := err.Error(), "max-select-point limit exceeed: (10/1)"; got != want {
t.Fatalf("unexpected error: got=%v want=%v", got, want)
}
}

View File

@ -6,6 +6,8 @@
package query
//lint:file-ignore U1000 Ignore all unused code, it's generated
import (
"context"
"encoding/binary"
@ -224,13 +226,6 @@ func (dec *FloatPointDecoder) DecodeFloatPoint(p *FloatPoint) error {
continue
}
if len(pb.Trace) > 0 {
if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil {
return err
}
continue
}
// Decode into point object.
*p = *decodeFloatPoint(&pb)
@ -447,13 +442,6 @@ func (dec *IntegerPointDecoder) DecodeIntegerPoint(p *IntegerPoint) error {
continue
}
if len(pb.Trace) > 0 {
if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil {
return err
}
continue
}
// Decode into point object.
*p = *decodeIntegerPoint(&pb)
@ -668,13 +656,6 @@ func (dec *UnsignedPointDecoder) DecodeUnsignedPoint(p *UnsignedPoint) error {
continue
}
if len(pb.Trace) > 0 {
if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil {
return err
}
continue
}
// Decode into point object.
*p = *decodeUnsignedPoint(&pb)
@ -891,13 +872,6 @@ func (dec *StringPointDecoder) DecodeStringPoint(p *StringPoint) error {
continue
}
if len(pb.Trace) > 0 {
if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil {
return err
}
continue
}
// Decode into point object.
*p = *decodeStringPoint(&pb)
@ -1114,13 +1088,6 @@ func (dec *BooleanPointDecoder) DecodeBooleanPoint(p *BooleanPoint) error {
continue
}
if len(pb.Trace) > 0 {
if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil {
return err
}
continue
}
// Decode into point object.
*p = *decodeBooleanPoint(&pb)

View File

@ -1,5 +1,7 @@
package query
//lint:file-ignore U1000 Ignore all unused code, it's generated
import (
"context"
"encoding/binary"
@ -231,13 +233,6 @@ func (dec *{{.Name}}PointDecoder) Decode{{.Name}}Point(p *{{.Name}}Point) error
continue
}
if len(pb.Trace) > 0 {
if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil {
return err
}
continue
}
// Decode into point object.
*p = *decode{{.Name}}Point(&pb)

View File

@ -0,0 +1,168 @@
package query
import (
"context"
"io"
"strings"
"time"
"github.com/influxdata/influxdb/v2"
iql "github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/check"
"github.com/influxdata/influxdb/v2/kit/tracing"
influxlogger "github.com/influxdata/influxdb/v2/logger"
"github.com/influxdata/influxql"
"github.com/opentracing/opentracing-go/log"
"go.uber.org/zap"
)
type ProxyExecutor struct {
log *zap.Logger
executor *Executor
}
func NewProxyExecutor(log *zap.Logger, executor *Executor) *ProxyExecutor {
return &ProxyExecutor{log: log, executor: executor}
}
func (s *ProxyExecutor) Check(ctx context.Context) check.Response {
return check.Response{Name: "Query Service", Status: check.StatusPass}
}
func (s *ProxyExecutor) Query(ctx context.Context, w io.Writer, req *iql.QueryRequest) (iql.Statistics, error) {
span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()
logger := s.log.With(influxlogger.TraceFields(ctx)...)
logger.Info("executing new query", zap.String("query", req.Query))
p := influxql.NewParser(strings.NewReader(req.Query))
p.SetParams(req.Params)
q, err := p.ParseQuery()
if err != nil {
return iql.Statistics{}, &influxdb.Error{
Code: influxdb.EInvalid,
Msg: "failed to parse query",
Err: err,
}
}
span.LogFields(log.String("query", q.String()))
opts := ExecutionOptions{
OrgID: req.OrganizationID,
Database: req.DB,
RetentionPolicy: req.RP,
ChunkSize: req.ChunkSize,
ReadOnly: true,
Authorizer: OpenAuthorizer,
}
epoch := req.Epoch
rw := NewResponseWriter(req.EncodingFormat)
results, stats := s.executor.ExecuteQuery(ctx, q, opts)
if req.Chunked {
for r := range results {
// Ignore nil results.
if r == nil {
continue
}
// if requested, convert result timestamps to epoch
if epoch != "" {
convertToEpoch(r, epoch)
}
err = rw.WriteResponse(ctx, w, Response{Results: []*Result{r}})
if err != nil {
break
}
}
} else {
resp := Response{Results: GatherResults(results, epoch)}
err = rw.WriteResponse(ctx, w, resp)
}
return *stats, err
}
// GatherResults consumes the results from the given channel and organizes them correctly.
// Results for various statements need to be combined together.
func GatherResults(ch <-chan *Result, epoch string) []*Result {
var results []*Result
for r := range ch {
// Ignore nil results.
if r == nil {
continue
}
// if requested, convert result timestamps to epoch
if epoch != "" {
convertToEpoch(r, epoch)
}
// It's not chunked so buffer results in memory.
// Results for statements need to be combined together.
// We need to check if this new result is for the same statement as
// the last result, or for the next statement.
if l := len(results); l > 0 && results[l-1].StatementID == r.StatementID {
if r.Err != nil {
results[l-1] = r
continue
}
cr := results[l-1]
rowsMerged := 0
if len(cr.Series) > 0 {
lastSeries := cr.Series[len(cr.Series)-1]
for _, row := range r.Series {
if !lastSeries.SameSeries(row) {
// Next row is for a different series than last.
break
}
// Values are for the same series, so append them.
lastSeries.Values = append(lastSeries.Values, row.Values...)
lastSeries.Partial = row.Partial
rowsMerged++
}
}
// Append remaining rows as new rows.
r.Series = r.Series[rowsMerged:]
cr.Series = append(cr.Series, r.Series...)
cr.Messages = append(cr.Messages, r.Messages...)
cr.Partial = r.Partial
} else {
results = append(results, r)
}
}
return results
}
// convertToEpoch converts result timestamps from time.Time to the specified epoch.
func convertToEpoch(r *Result, epoch string) {
divisor := int64(1)
switch epoch {
case "u":
divisor = int64(time.Microsecond)
case "ms":
divisor = int64(time.Millisecond)
case "s":
divisor = int64(time.Second)
case "m":
divisor = int64(time.Minute)
case "h":
divisor = int64(time.Hour)
}
for _, s := range r.Series {
for _, v := range s.Values {
if ts, ok := v[0].(time.Time); ok {
v[0] = ts.UnixNano() / divisor
}
}
}
}

View File

@ -0,0 +1,61 @@
package query
import (
"encoding/json"
"errors"
)
// Response represents a list of statement results.
type Response struct {
Results []*Result
Err error
}
// MarshalJSON encodes a Response struct into JSON.
func (r Response) MarshalJSON() ([]byte, error) {
// Define a struct that outputs "error" as a string.
var o struct {
Results []*Result `json:"results,omitempty"`
Err string `json:"error,omitempty"`
}
// Copy fields to output struct.
o.Results = r.Results
if r.Err != nil {
o.Err = r.Err.Error()
}
return json.Marshal(&o)
}
// UnmarshalJSON decodes the data into the Response struct.
func (r *Response) UnmarshalJSON(b []byte) error {
var o struct {
Results []*Result `json:"results,omitempty"`
Err string `json:"error,omitempty"`
}
err := json.Unmarshal(b, &o)
if err != nil {
return err
}
r.Results = o.Results
if o.Err != "" {
r.Err = errors.New(o.Err)
}
return nil
}
// Error returns the first error from any statement.
// Returns nil if no errors occurred on any statements.
func (r *Response) Error() error {
if r.Err != nil {
return r.Err
}
for _, rr := range r.Results {
if rr.Err != nil {
return rr.Err
}
}
return nil
}

View File

@ -0,0 +1,439 @@
package query
//lint:file-ignore SA1019 Ignore for now
import (
"context"
"encoding/csv"
"encoding/json"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/kit/tracing"
"github.com/influxdata/influxdb/v2/models"
"github.com/tinylib/msgp/msgp"
)
// ResponseWriter is an interface for writing a response.
type ResponseWriter interface {
// WriteResponse writes a response.
WriteResponse(ctx context.Context, w io.Writer, resp Response) error
}
// NewResponseWriter creates a new ResponseWriter based on the Accept header
// in the request that wraps the ResponseWriter.
func NewResponseWriter(encoding influxql.EncodingFormat) ResponseWriter {
switch encoding {
case influxql.EncodingFormatCSV:
return &csvFormatter{statementID: -1}
case influxql.EncodingFormatTable:
return &textFormatter{}
case influxql.EncodingFormatMessagePack:
return &msgpFormatter{}
case influxql.EncodingFormatJSON:
fallthrough
default:
// TODO(sgc): Add EncodingFormatJSONPretty
return &jsonFormatter{Pretty: false}
}
}
type jsonFormatter struct {
Pretty bool
}
func (f *jsonFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) {
span, _ := tracing.StartSpanFromContext(ctx)
defer span.Finish()
var b []byte
if f.Pretty {
b, err = json.MarshalIndent(resp, "", " ")
} else {
b, err = json.Marshal(resp)
}
if err != nil {
_, err = io.WriteString(w, err.Error())
} else {
_, err = w.Write(b)
}
w.Write([]byte("\n"))
return err
}
type csvFormatter struct {
statementID int
columns []string
}
func (f *csvFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) {
span, _ := tracing.StartSpanFromContext(ctx)
defer span.Finish()
wr := csv.NewWriter(w)
if resp.Err != nil {
wr.Write([]string{"error"})
wr.Write([]string{resp.Err.Error()})
wr.Flush()
return wr.Error()
}
for _, result := range resp.Results {
if result.StatementID != f.statementID {
// If there are no series in the result, skip past this result.
if len(result.Series) == 0 {
continue
}
// Set the statement id and print out a newline if this is not the first statement.
if f.statementID >= 0 {
// Flush the csv writer and write a newline.
wr.Flush()
if err := wr.Error(); err != nil {
return err
}
if _, err := io.WriteString(w, "\n"); err != nil {
return err
}
}
f.statementID = result.StatementID
// Print out the column headers from the first series.
f.columns = make([]string, 2+len(result.Series[0].Columns))
f.columns[0] = "name"
f.columns[1] = "tags"
copy(f.columns[2:], result.Series[0].Columns)
if err := wr.Write(f.columns); err != nil {
return err
}
}
for i, row := range result.Series {
if i > 0 && !stringsEqual(result.Series[i-1].Columns, row.Columns) {
// The columns have changed. Print a newline and reprint the header.
wr.Flush()
if err := wr.Error(); err != nil {
return err
}
if _, err := io.WriteString(w, "\n"); err != nil {
return err
}
f.columns = make([]string, 2+len(row.Columns))
f.columns[0] = "name"
f.columns[1] = "tags"
copy(f.columns[2:], row.Columns)
if err := wr.Write(f.columns); err != nil {
return err
}
}
f.columns[0] = row.Name
f.columns[1] = ""
if len(row.Tags) > 0 {
hashKey := models.NewTags(row.Tags).HashKey()
if len(hashKey) > 0 {
f.columns[1] = string(hashKey[1:])
}
}
for _, values := range row.Values {
for i, value := range values {
if value == nil {
f.columns[i+2] = ""
continue
}
switch v := value.(type) {
case float64:
f.columns[i+2] = strconv.FormatFloat(v, 'f', -1, 64)
case int64:
f.columns[i+2] = strconv.FormatInt(v, 10)
case uint64:
f.columns[i+2] = strconv.FormatUint(v, 10)
case string:
f.columns[i+2] = v
case bool:
if v {
f.columns[i+2] = "true"
} else {
f.columns[i+2] = "false"
}
case time.Time:
f.columns[i+2] = strconv.FormatInt(v.UnixNano(), 10)
case *float64, *int64, *string, *bool:
f.columns[i+2] = ""
}
}
wr.Write(f.columns)
}
}
}
wr.Flush()
return wr.Error()
}
type msgpFormatter struct{}
func (f *msgpFormatter) ContentType() string {
return "application/x-msgpack"
}
func (f *msgpFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) {
span, _ := tracing.StartSpanFromContext(ctx)
defer span.Finish()
enc := msgp.NewWriter(w)
defer enc.Flush()
enc.WriteMapHeader(1)
if resp.Err != nil {
enc.WriteString("error")
enc.WriteString(resp.Err.Error())
return nil
} else {
enc.WriteString("results")
enc.WriteArrayHeader(uint32(len(resp.Results)))
for _, result := range resp.Results {
if result.Err != nil {
enc.WriteMapHeader(1)
enc.WriteString("error")
enc.WriteString(result.Err.Error())
continue
}
sz := 2
if len(result.Messages) > 0 {
sz++
}
if result.Partial {
sz++
}
enc.WriteMapHeader(uint32(sz))
enc.WriteString("statement_id")
enc.WriteInt(result.StatementID)
if len(result.Messages) > 0 {
enc.WriteString("messages")
enc.WriteArrayHeader(uint32(len(result.Messages)))
for _, msg := range result.Messages {
enc.WriteMapHeader(2)
enc.WriteString("level")
enc.WriteString(msg.Level)
enc.WriteString("text")
enc.WriteString(msg.Text)
}
}
enc.WriteString("series")
enc.WriteArrayHeader(uint32(len(result.Series)))
for _, series := range result.Series {
sz := 2
if series.Name != "" {
sz++
}
if len(series.Tags) > 0 {
sz++
}
if series.Partial {
sz++
}
enc.WriteMapHeader(uint32(sz))
if series.Name != "" {
enc.WriteString("name")
enc.WriteString(series.Name)
}
if len(series.Tags) > 0 {
enc.WriteString("tags")
enc.WriteMapHeader(uint32(len(series.Tags)))
for k, v := range series.Tags {
enc.WriteString(k)
enc.WriteString(v)
}
}
enc.WriteString("columns")
enc.WriteArrayHeader(uint32(len(series.Columns)))
for _, col := range series.Columns {
enc.WriteString(col)
}
enc.WriteString("values")
enc.WriteArrayHeader(uint32(len(series.Values)))
for _, values := range series.Values {
enc.WriteArrayHeader(uint32(len(values)))
for _, v := range values {
enc.WriteIntf(v)
}
}
if series.Partial {
enc.WriteString("partial")
enc.WriteBool(series.Partial)
}
}
if result.Partial {
enc.WriteString("partial")
enc.WriteBool(true)
}
}
}
return nil
}
func stringsEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
func tagsEqual(prev, current map[string]string) bool {
return reflect.DeepEqual(prev, current)
}
func columnsEqual(prev, current []string) bool {
return reflect.DeepEqual(prev, current)
}
func headersEqual(prev, current models.Row) bool {
if prev.Name != current.Name {
return false
}
return tagsEqual(prev.Tags, current.Tags) && columnsEqual(prev.Columns, current.Columns)
}
type textFormatter struct{}
func (f *textFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) {
span, _ := tracing.StartSpanFromContext(ctx)
defer span.Finish()
if err := resp.Error(); err != nil {
fmt.Fprintln(w, err.Error())
return nil
}
// Create a tabbed writer for each result as they won't always line up
writer := new(tabwriter.Writer)
writer.Init(w, 0, 8, 1, ' ', 0)
var previousHeaders models.Row
for i, result := range resp.Results {
// Print out all messages first
for _, m := range result.Messages {
fmt.Fprintf(w, "%s: %s.\n", m.Level, m.Text)
}
// Check to see if the headers are the same as the previous row. If so, suppress them in the output
suppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, *result.Series[0])
if !suppressHeaders && len(result.Series) > 0 {
previousHeaders = models.Row{
Name: result.Series[0].Name,
Tags: result.Series[0].Tags,
Columns: result.Series[0].Columns,
}
}
// If we are suppressing headers, don't output the extra line return. If we
// aren't suppressing headers, then we put out line returns between results
// (not before the first result, and not after the last result).
if !suppressHeaders && i > 0 {
fmt.Fprintln(writer, "")
}
rows := f.formatResults(result.Series, "\t", suppressHeaders)
for _, r := range rows {
fmt.Fprintln(writer, r)
}
}
_ = writer.Flush()
return nil
}
func (f *textFormatter) formatResults(result models.Rows, separator string, suppressHeaders bool) []string {
var rows []string
// Create a tabbed writer for each result as they won't always line up
for i, row := range result {
// gather tags
var tags []string
for k, v := range row.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
sort.Strings(tags)
}
var columnNames []string
columnNames = append(columnNames, row.Columns...)
// Output a line separator if we have more than one set or results and format is column
if i > 0 && !suppressHeaders {
rows = append(rows, "")
}
// If we are column format, we break out the name/tag to separate lines
if !suppressHeaders {
if row.Name != "" {
n := fmt.Sprintf("name: %s", row.Name)
rows = append(rows, n)
}
if len(tags) > 0 {
t := fmt.Sprintf("tags: %s", strings.Join(tags, ", "))
rows = append(rows, t)
}
}
if !suppressHeaders {
rows = append(rows, strings.Join(columnNames, separator))
}
// if format is column, write dashes under each column
if !suppressHeaders {
var lines []string
for _, columnName := range columnNames {
lines = append(lines, strings.Repeat("-", len(columnName)))
}
rows = append(rows, strings.Join(lines, separator))
}
for _, v := range row.Values {
var values []string
for _, vv := range v {
values = append(values, interfaceToString(vv))
}
rows = append(rows, strings.Join(values, separator))
}
}
return rows
}
func interfaceToString(v interface{}) string {
switch t := v.(type) {
case nil:
return ""
case bool:
return fmt.Sprintf("%v", v)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
return fmt.Sprintf("%d", t)
case float32:
// Default for floats via `fmt.Sprintf("%v", t)` is to represent them in scientific notation.
// We want to represent them as they are, with the least digits as possible (prec: -1).
return strconv.FormatFloat(float64(t), 'f', -1, 32)
case float64:
// Default for floats via `fmt.Sprintf("%v", t)` is to represent them in scientific notation.
// We want to represent them as they are, with the least digits as possible (prec: -1).
return strconv.FormatFloat(t, 'f', -1, 64)
default:
return fmt.Sprintf("%v", t)
}
}

View File

@ -5,12 +5,12 @@ import (
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
"github.com/influxdata/influxdb/v2"
iql "github.com/influxdata/influxdb/v2/influxql"
"github.com/influxdata/influxdb/v2/influxql/query/internal/gota"
"github.com/influxdata/influxdb/v2/pkg/tracing"
"github.com/influxdata/influxql"
"golang.org/x/sync/errgroup"
)
@ -22,8 +22,8 @@ var DefaultTypeMapper = influxql.MultiTypeMapper(
// SelectOptions are options that customize the select call.
type SelectOptions struct {
// Authorizer is used to limit access to data
Authorizer Authorizer
// OrgID is the organization for which this query is being executed.
OrgID influxdb.ID
// Node to exclusively read from.
// If zero, all nodes are used.
@ -39,12 +39,45 @@ type SelectOptions struct {
// Maximum number of buckets for a statement.
MaxBucketsN int
// StatisticsGatherer gathers metrics about the execution of the query.
StatisticsGatherer *iql.StatisticsGatherer
}
// ShardMapper retrieves and maps shards into an IteratorCreator that can later be
// used for executing queries.
type ShardMapper interface {
MapShards(sources influxql.Sources, t influxql.TimeRange, opt SelectOptions) (ShardGroup, error)
MapShards(ctx context.Context, sources influxql.Sources, t influxql.TimeRange, opt SelectOptions) (ShardGroup, error)
}
// TypeMapper maps a data type to the measurement and field.
type TypeMapper interface {
MapType(ctx context.Context, m *influxql.Measurement, field string) influxql.DataType
}
// FieldMapper returns the data type for the field inside of the measurement.
type FieldMapper interface {
TypeMapper
FieldDimensions(ctx context.Context, m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
}
// contextFieldMapper adapts a FieldMapper to an influxql.FieldMapper as
// FieldMapper requires a context.Context and orgID
type fieldMapperAdapter struct {
fm FieldMapper
ctx context.Context
}
func newFieldMapperAdapter(fm FieldMapper, ctx context.Context) *fieldMapperAdapter {
return &fieldMapperAdapter{fm: fm, ctx: ctx}
}
func (c *fieldMapperAdapter) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
return c.fm.FieldDimensions(c.ctx, m)
}
func (c *fieldMapperAdapter) MapType(measurement *influxql.Measurement, field string) influxql.DataType {
return c.fm.MapType(c.ctx, measurement, field)
}
// ShardGroup represents a shard or a collection of shards that can be accessed
@ -58,7 +91,7 @@ type ShardMapper interface {
// after creating the iterators, but before the iterators are actually read.
type ShardGroup interface {
IteratorCreator
influxql.FieldMapper
FieldMapper
io.Closer
}
@ -68,7 +101,7 @@ type PreparedStatement interface {
Select(ctx context.Context) (Cursor, error)
// Explain outputs the explain plan for this statement.
Explain() (string, error)
Explain(ctx context.Context) (string, error)
// Close closes the resources associated with this prepared statement.
// This must be called as the mapped shards may hold open resources such
@ -78,18 +111,18 @@ type PreparedStatement interface {
// Prepare will compile the statement with the default compile options and
// then prepare the query.
func Prepare(stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) {
func Prepare(ctx context.Context, stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) {
c, err := Compile(stmt, CompileOptions{})
if err != nil {
return nil, err
}
return c.Prepare(shardMapper, opt)
return c.Prepare(ctx, shardMapper, opt)
}
// Select compiles, prepares, and then initiates execution of the query using the
// default compile options.
func Select(ctx context.Context, stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (Cursor, error) {
s, err := Prepare(stmt, shardMapper, opt)
s, err := Prepare(ctx, stmt, shardMapper, opt)
if err != nil {
return nil, err
}
@ -110,11 +143,15 @@ type preparedStatement struct {
now time.Time
}
type contextKey string
const nowKey contextKey = "now"
func (p *preparedStatement) Select(ctx context.Context) (Cursor, error) {
// TODO(jsternberg): Remove this hacky method of propagating now.
// Each level of the query should use a time range discovered during
// compilation, but that requires too large of a refactor at the moment.
ctx = context.WithValue(ctx, "now", p.now)
ctx = context.WithValue(ctx, nowKey, p.now)
opt := p.opt
opt.InterruptCh = ctx.Done()
@ -123,14 +160,6 @@ func (p *preparedStatement) Select(ctx context.Context) (Cursor, error) {
return nil, err
}
// If a monitor exists and we are told there is a maximum number of points,
// register the monitor function.
if m := MonitorFromContext(ctx); m != nil {
if p.maxPointN > 0 {
monitor := PointLimitMonitor(cur, DefaultStatsInterval, p.maxPointN)
m.Monitor(monitor)
}
}
return cur, nil
}
@ -246,7 +275,7 @@ func (b *exprIteratorBuilder) buildCallIterator(ctx context.Context, expr *influ
h := expr.Args[1].(*influxql.IntegerLiteral)
m := expr.Args[2].(*influxql.IntegerLiteral)
includeFitData := "holt_winters_with_fit" == expr.Name
includeFitData := expr.Name == "holt_winters_with_fit"
interval := opt.Interval.Duration
// Redefine interval to be unbounded to capture all aggregate results
@ -621,15 +650,6 @@ func (b *exprIteratorBuilder) callIterator(ctx context.Context, expr *influxql.C
}
func buildCursor(ctx context.Context, stmt *influxql.SelectStatement, ic IteratorCreator, opt IteratorOptions) (Cursor, error) {
span := tracing.SpanFromContext(ctx)
if span != nil {
span = span.StartSpan("build_cursor")
defer span.Finish()
span.SetLabels("statement", stmt.String())
ctx = tracing.NewContextWithSpan(ctx, span)
}
switch opt.Fill {
case influxql.NumberFill:
if v, ok := opt.FillValue.(int); ok {
@ -777,19 +797,6 @@ func buildCursor(ctx context.Context, stmt *influxql.SelectStatement, ic Iterato
}
func buildAuxIterator(ctx context.Context, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions) (Iterator, error) {
span := tracing.SpanFromContext(ctx)
if span != nil {
span = span.StartSpan("iterator_scanner")
defer span.Finish()
auxFieldNames := make([]string, len(opt.Aux))
for i, ref := range opt.Aux {
auxFieldNames[i] = ref.String()
}
span.SetLabels("auxiliary_fields", strings.Join(auxFieldNames, ", "))
ctx = tracing.NewContextWithSpan(ctx, span)
}
inputs := make([]Iterator, 0, len(sources))
if err := func() error {
for _, source := range sources {
@ -850,23 +857,6 @@ func buildAuxIterator(ctx context.Context, ic IteratorCreator, sources influxql.
}
func buildFieldIterator(ctx context.Context, expr influxql.Expr, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) (Iterator, error) {
span := tracing.SpanFromContext(ctx)
if span != nil {
span = span.StartSpan("iterator_scanner")
defer span.Finish()
labels := []string{"expr", expr.String()}
if len(opt.Aux) > 0 {
auxFieldNames := make([]string, len(opt.Aux))
for i, ref := range opt.Aux {
auxFieldNames[i] = ref.String()
}
labels = append(labels, "auxiliary_fields", strings.Join(auxFieldNames, ", "))
}
span.SetLabels(labels...)
ctx = tracing.NewContextWithSpan(ctx, span)
}
input, err := buildExprIterator(ctx, expr, ic, sources, opt, selector, writeMode)
if err != nil {
return nil, err

View File

@ -2837,7 +2837,7 @@ func TestSelect(t *testing.T) {
} {
t.Run(tt.name, func(t *testing.T) {
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
var fields map[string]influxql.DataType
if tt.typ != influxql.Unknown {
fields = map[string]influxql.DataType{"value": tt.typ}
@ -2881,7 +2881,7 @@ func TestSelect(t *testing.T) {
return nil, err
}
p, err := c.Prepare(&shardMapper, query.SelectOptions{})
p, err := c.Prepare(context.Background(), &shardMapper, query.SelectOptions{})
if err != nil {
return nil, err
}
@ -2907,7 +2907,7 @@ func TestSelect(t *testing.T) {
// Ensure a SELECT with raw fields works for all types.
func TestSelect_Raw(t *testing.T) {
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
Fields: map[string]influxql.DataType{
"f": influxql.Float,
@ -2979,7 +2979,7 @@ func TestSelect_Raw(t *testing.T) {
// Ensure a SELECT binary expr queries can be executed as floats.
func TestSelect_BinaryExpr(t *testing.T) {
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
Fields: map[string]influxql.DataType{
"f": influxql.Float,
@ -3870,7 +3870,7 @@ func TestSelect_BinaryExpr(t *testing.T) {
// Ensure a SELECT binary expr queries can be executed as booleans.
func TestSelect_BinaryExpr_Boolean(t *testing.T) {
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
Fields: map[string]influxql.DataType{
"one": influxql.Boolean,
@ -3950,7 +3950,7 @@ func TestSelect_BinaryExpr_Boolean(t *testing.T) {
// but not the other.
func TestSelect_BinaryExpr_NilValues(t *testing.T) {
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
Fields: map[string]influxql.DataType{
"total": influxql.Float,
@ -4028,11 +4028,11 @@ func TestSelect_BinaryExpr_NilValues(t *testing.T) {
}
type ShardMapper struct {
MapShardsFn func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup
MapShardsFn func(ctx context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup
}
func (m *ShardMapper) MapShards(sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) {
shards := m.MapShardsFn(sources, t)
func (m *ShardMapper) MapShards(ctx context.Context, sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) {
shards := m.MapShardsFn(ctx, sources, t)
return shards, nil
}
@ -4046,11 +4046,11 @@ func (sh *ShardGroup) CreateIterator(ctx context.Context, m *influxql.Measuremen
return sh.CreateIteratorFn(ctx, m, opt)
}
func (sh *ShardGroup) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) {
func (sh *ShardGroup) IteratorCost(ctx context.Context, source *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) {
return query.IteratorCost{}, nil
}
func (sh *ShardGroup) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
func (sh *ShardGroup) FieldDimensions(ctx context.Context, m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
fields = make(map[string]influxql.DataType)
dimensions = make(map[string]struct{})
@ -4063,7 +4063,7 @@ func (sh *ShardGroup) FieldDimensions(m *influxql.Measurement) (fields map[strin
return fields, dimensions, nil
}
func (sh *ShardGroup) MapType(m *influxql.Measurement, field string) influxql.DataType {
func (sh *ShardGroup) MapType(ctx context.Context, measurement *influxql.Measurement, field string) influxql.DataType {
if typ, ok := sh.Fields[field]; ok {
return typ
}
@ -4101,7 +4101,7 @@ func benchmarkSelect(b *testing.B, stmt *influxql.SelectStatement, shardMapper q
// NewRawBenchmarkIteratorCreator returns a new mock iterator creator with generated fields.
func NewRawBenchmarkIteratorCreator(pointN int) query.ShardMapper {
return &ShardMapper{
MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
Fields: map[string]influxql.DataType{
"fval": influxql.Float,
@ -4140,7 +4140,7 @@ func benchmarkSelectDedupe(b *testing.B, seriesN, pointsPerSeries int) {
stmt.Dedupe = true
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
Fields: map[string]influxql.DataType{
"sval": influxql.String,
@ -4174,7 +4174,7 @@ func benchmarkSelectTop(b *testing.B, seriesN, pointsPerSeries int) {
stmt := MustParseSelectStatement(`SELECT top(sval, 10) FROM cpu`)
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
Fields: map[string]influxql.DataType{
"sval": influxql.Float,

View File

@ -7,7 +7,6 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxql"
)
@ -367,7 +366,7 @@ func TestSubquery(t *testing.T) {
} {
t.Run(test.Name, func(t *testing.T) {
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup {
fn := test.MapShardsFn(t, tr)
return &ShardGroup{
Fields: test.Fields,
@ -392,50 +391,10 @@ func TestSubquery(t *testing.T) {
}
}
type openAuthorizer struct{}
func (*openAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool { return true }
func (*openAuthorizer) AuthorizeQuery(database string, query *influxql.Query) error { return nil }
func (*openAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool {
return true
}
func (*openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool {
return true
}
// Ensure that the subquery gets passed the query authorizer.
func TestSubquery_Authorizer(t *testing.T) {
auth := &openAuthorizer{}
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
Fields: map[string]influxql.DataType{
"value": influxql.Float,
},
CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) {
if opt.Authorizer != auth {
t.Errorf("query authorizer has not been set")
}
return nil, nil
},
}
},
}
stmt := MustParseSelectStatement(`SELECT max(value) FROM (SELECT value FROM cpu)`)
cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{
Authorizer: auth,
})
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
cur.Close()
}
// Ensure that the subquery gets passed the max series limit.
func TestSubquery_MaxSeriesN(t *testing.T) {
shardMapper := ShardMapper{
MapShardsFn: func(sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup {
MapShardsFn: func(_ context.Context, sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup {
return &ShardGroup{
Fields: map[string]influxql.DataType{
"value": influxql.Float,

View File

@ -1,319 +0,0 @@
package query
import (
"bytes"
"context"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxql"
"go.uber.org/zap"
)
const (
// DefaultQueryTimeout is the default timeout for executing a query.
// A value of zero will have no query timeout.
DefaultQueryTimeout = time.Duration(0)
)
type TaskStatus int
const (
// RunningTask is set when the task is running.
RunningTask TaskStatus = iota + 1
// KilledTask is set when the task is killed, but resources are still
// being used.
KilledTask
)
func (t TaskStatus) String() string {
switch t {
case RunningTask:
return "running"
case KilledTask:
return "killed"
default:
return "unknown"
}
}
func (t TaskStatus) MarshalJSON() ([]byte, error) {
s := t.String()
return json.Marshal(s)
}
func (t *TaskStatus) UnmarshalJSON(data []byte) error {
if bytes.Equal(data, []byte("running")) {
*t = RunningTask
} else if bytes.Equal(data, []byte("killed")) {
*t = KilledTask
} else if bytes.Equal(data, []byte("unknown")) {
*t = TaskStatus(0)
} else {
return fmt.Errorf("unknown task status: %s", string(data))
}
return nil
}
// TaskManager takes care of all aspects related to managing running queries.
type TaskManager struct {
// Query execution timeout.
QueryTimeout time.Duration
// Log queries if they are slower than this time.
// If zero, slow queries will never be logged.
LogQueriesAfter time.Duration
// Maximum number of concurrent queries.
MaxConcurrentQueries int
// Logger to use for all logging.
// Defaults to discarding all log output.
Logger *zap.Logger
// Used for managing and tracking running queries.
queries map[uint64]*Task
nextID uint64
mu sync.RWMutex
shutdown bool
}
// NewTaskManager creates a new TaskManager.
func NewTaskManager() *TaskManager {
return &TaskManager{
QueryTimeout: DefaultQueryTimeout,
Logger: zap.NewNop(),
queries: make(map[uint64]*Task),
nextID: 1,
}
}
// ExecuteStatement executes a statement containing one of the task management queries.
func (t *TaskManager) ExecuteStatement(stmt influxql.Statement, ctx *ExecutionContext) error {
switch stmt := stmt.(type) {
case *influxql.ShowQueriesStatement:
rows, err := t.executeShowQueriesStatement(stmt)
if err != nil {
return err
}
ctx.Send(&Result{
Series: rows,
})
case *influxql.KillQueryStatement:
var messages []*Message
if ctx.ReadOnly {
messages = append(messages, ReadOnlyWarning(stmt.String()))
}
if err := t.executeKillQueryStatement(stmt); err != nil {
return err
}
ctx.Send(&Result{
Messages: messages,
})
default:
return ErrInvalidQuery
}
return nil
}
func (t *TaskManager) executeKillQueryStatement(stmt *influxql.KillQueryStatement) error {
return t.KillQuery(stmt.QueryID)
}
func (t *TaskManager) executeShowQueriesStatement(q *influxql.ShowQueriesStatement) (models.Rows, error) {
t.mu.RLock()
defer t.mu.RUnlock()
now := time.Now()
values := make([][]interface{}, 0, len(t.queries))
for id, qi := range t.queries {
d := now.Sub(qi.startTime)
switch {
case d >= time.Second:
d = d - (d % time.Second)
case d >= time.Millisecond:
d = d - (d % time.Millisecond)
case d >= time.Microsecond:
d = d - (d % time.Microsecond)
}
values = append(values, []interface{}{id, qi.query, qi.database, d.String(), qi.status.String()})
}
return []*models.Row{{
Columns: []string{"qid", "query", "database", "duration", "status"},
Values: values,
}}, nil
}
func (t *TaskManager) queryError(qid uint64, err error) {
t.mu.RLock()
query := t.queries[qid]
t.mu.RUnlock()
if query != nil {
query.setError(err)
}
}
// AttachQuery attaches a running query to be managed by the TaskManager.
// Returns the query id of the newly attached query or an error if it was
// unable to assign a query id or attach the query to the TaskManager.
// This function also returns a channel that will be closed when this
// query finishes running.
//
// After a query finishes running, the system is free to reuse a query id.
func (t *TaskManager) AttachQuery(q *influxql.Query, opt ExecutionOptions, interrupt <-chan struct{}) (*ExecutionContext, func(), error) {
t.mu.Lock()
defer t.mu.Unlock()
if t.shutdown {
return nil, nil, ErrQueryEngineShutdown
}
if t.MaxConcurrentQueries > 0 && len(t.queries) >= t.MaxConcurrentQueries {
return nil, nil, ErrMaxConcurrentQueriesLimitExceeded(len(t.queries), t.MaxConcurrentQueries)
}
qid := t.nextID
query := &Task{
query: q.String(),
database: opt.Database,
status: RunningTask,
startTime: time.Now(),
closing: make(chan struct{}),
monitorCh: make(chan error),
}
t.queries[qid] = query
go t.waitForQuery(qid, query.closing, interrupt, query.monitorCh)
if t.LogQueriesAfter != 0 {
go query.monitor(func(closing <-chan struct{}) error {
timer := time.NewTimer(t.LogQueriesAfter)
defer timer.Stop()
select {
case <-timer.C:
t.Logger.Warn(fmt.Sprintf("Detected slow query: %s (qid: %d, database: %s, threshold: %s)",
query.query, qid, query.database, t.LogQueriesAfter))
case <-closing:
}
return nil
})
}
t.nextID++
ctx := &ExecutionContext{
Context: context.Background(),
QueryID: qid,
task: query,
ExecutionOptions: opt,
}
ctx.watch()
return ctx, func() { t.DetachQuery(qid) }, nil
}
// KillQuery enters a query into the killed state and closes the channel
// from the TaskManager. This method can be used to forcefully terminate a
// running query.
func (t *TaskManager) KillQuery(qid uint64) error {
t.mu.Lock()
query := t.queries[qid]
t.mu.Unlock()
if query == nil {
return fmt.Errorf("no such query id: %d", qid)
}
return query.kill()
}
// DetachQuery removes a query from the query table. If the query is not in the
// killed state, this will also close the related channel.
func (t *TaskManager) DetachQuery(qid uint64) error {
t.mu.Lock()
defer t.mu.Unlock()
query := t.queries[qid]
if query == nil {
return fmt.Errorf("no such query id: %d", qid)
}
query.close()
delete(t.queries, qid)
return nil
}
// QueryInfo represents the information for a query.
type QueryInfo struct {
ID uint64 `json:"id"`
Query string `json:"query"`
Database string `json:"database"`
Duration time.Duration `json:"duration"`
Status TaskStatus `json:"status"`
}
// Queries returns a list of all running queries with information about them.
func (t *TaskManager) Queries() []QueryInfo {
t.mu.RLock()
defer t.mu.RUnlock()
now := time.Now()
queries := make([]QueryInfo, 0, len(t.queries))
for id, qi := range t.queries {
queries = append(queries, QueryInfo{
ID: id,
Query: qi.query,
Database: qi.database,
Duration: now.Sub(qi.startTime),
Status: qi.status,
})
}
return queries
}
func (t *TaskManager) waitForQuery(qid uint64, interrupt <-chan struct{}, closing <-chan struct{}, monitorCh <-chan error) {
var timerCh <-chan time.Time
if t.QueryTimeout != 0 {
timer := time.NewTimer(t.QueryTimeout)
timerCh = timer.C
defer timer.Stop()
}
select {
case <-closing:
t.queryError(qid, ErrQueryInterrupted)
case err := <-monitorCh:
if err == nil {
break
}
t.queryError(qid, err)
case <-timerCh:
t.queryError(qid, ErrQueryTimeoutLimitExceeded)
case <-interrupt:
// Query was manually closed so exit the select.
return
}
t.KillQuery(qid)
}
// Close kills all running queries and prevents new queries from being attached.
func (t *TaskManager) Close() error {
t.mu.Lock()
defer t.mu.Unlock()
t.shutdown = true
for _, query := range t.queries {
query.setError(ErrQueryEngineShutdown)
query.close()
}
t.queries = nil
return nil
}

88
influxql/query_request.go Normal file
View File

@ -0,0 +1,88 @@
package influxql
import (
"encoding/json"
"github.com/influxdata/influxdb/v2"
)
type EncodingFormat int
func (f *EncodingFormat) UnmarshalJSON(bytes []byte) error {
var s string
if err := json.Unmarshal(bytes, &s); err != nil {
return err
}
*f = EncodingFormatFromMimeType(s)
return nil
}
func (f EncodingFormat) MarshalJSON() ([]byte, error) {
return json.Marshal(f.ContentType())
}
const (
EncodingFormatJSON EncodingFormat = iota
EncodingFormatCSV
EncodingFormatMessagePack
EncodingFormatTable
)
// Returns closed encoding format from the specified mime type.
// The default is JSON if no exact match is found.
func EncodingFormatFromMimeType(s string) EncodingFormat {
switch s {
case "application/csv", "text/csv":
return EncodingFormatCSV
case "text/plain":
return EncodingFormatTable
case "application/x-msgpack":
return EncodingFormatMessagePack
case "application/json":
fallthrough
default:
return EncodingFormatJSON
}
}
func (f EncodingFormat) ContentType() string {
switch f {
case EncodingFormatCSV:
return "text/csv"
case EncodingFormatTable:
return "text/plain"
case EncodingFormatMessagePack:
return "application/x-msgpack"
case EncodingFormatJSON:
fallthrough
default:
return "application/json"
}
}
type QueryRequest struct {
Authorization *influxdb.Authorization `json:"authorization,omitempty"`
OrganizationID influxdb.ID `json:"organization_id"`
DB string `json:"db"`
RP string `json:"rp"`
Epoch string `json:"epoch"`
EncodingFormat EncodingFormat `json:"encoding_format"`
ContentType string `json:"content_type"` // Content type is the desired response format.
Chunked bool `json:"chunked"` // Chunked indicates responses should be chunked using ChunkSize
ChunkSize int `json:"chunk_size"` // ChunkSize is the number of points to be encoded per batch. 0 indicates no chunking.
Query string `json:"query"` // Query contains the InfluxQL.
Params map[string]interface{} `json:"params,omitempty"`
Source string `json:"source"` // Source represents the ultimate source of the request.
}
// The HTTP query requests represented the body expected by the QueryHandler
func (r *QueryRequest) Valid() error {
if !r.OrganizationID.Valid() {
return &influxdb.Error{
Msg: "organization_id is not valid",
Code: influxdb.EInvalid,
}
}
return r.Authorization.Valid()
}

96
influxql/service.go Normal file
View File

@ -0,0 +1,96 @@
package influxql
import (
"context"
"fmt"
"io"
"github.com/influxdata/influxdb/v2/kit/check"
)
// ProxyQueryService performs InfluxQL queries and encodes the result into a writer.
// The results are opaque to a ProxyQueryService.
type ProxyQueryService interface {
check.Checker
Query(ctx context.Context, w io.Writer, req *QueryRequest) (Statistics, error)
}
// ProxyMode enumerates the possible ProxyQueryService operating modes used by a downstream client.
type ProxyMode byte
const (
// ProxyModeHTTP specifies a ProxyQueryService that forwards InfluxQL requests via HTTP to influxqld.
ProxyModeHTTP ProxyMode = iota
// ProxyModeQueue specifies a ProxyQueryService that pushes InfluxQL requests to a queue and influxqld issues a callback request to the initiating service.
ProxyModeQueue
)
var proxyModeString = [...]string{
ProxyModeHTTP: "http",
ProxyModeQueue: "queue",
}
func (i ProxyMode) String() string {
if int(i) > len(proxyModeString) {
return "invalid"
}
return proxyModeString[i]
}
func (i *ProxyMode) Set(v string) (err error) {
switch v {
case "http":
*i = ProxyModeHTTP
case "queue":
*i = ProxyModeQueue
default:
err = fmt.Errorf("unexpected %s type: %s", i.Type(), v)
}
return err
}
func (i *ProxyMode) Type() string { return "proxy-mode" }
// RequestMode is enumerates the possible influxqld operating modes for receiving InfluxQL requests.
type RequestMode byte
const (
// RequestModeHTTP specifies the HTTP listener should be active.
RequestModeHTTP RequestMode = iota
// RequestModeQueue specifies the queue dispatcher should be active.
RequestModeQueue
// RequestModeAll specifies both the HTTP listener and queue dispatcher should be active.
RequestModeAll
)
var requestModeString = [...]string{
RequestModeHTTP: "http",
RequestModeQueue: "queue",
RequestModeAll: "all",
}
func (i RequestMode) String() string {
if int(i) > len(requestModeString) {
return "invalid"
}
return proxyModeString[i]
}
func (i *RequestMode) Set(v string) (err error) {
switch v {
case "http":
*i = RequestModeHTTP
case "queue":
*i = RequestModeQueue
case "all":
*i = RequestModeAll
default:
err = fmt.Errorf("unexpected %s type: %s", i.Type(), v)
}
return err
}
func (i *RequestMode) Type() string { return "request-mode" }

123
influxql/statistics.go Normal file
View File

@ -0,0 +1,123 @@
package influxql
import (
"sync"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
)
// Statistics is a collection of statistics about the processing of a query.
type Statistics struct {
PlanDuration time.Duration `json:"plan_duration"` // PlanDuration is the duration spent planning the query.
ExecuteDuration time.Duration `json:"execute_duration"` // ExecuteDuration is the duration spent executing the query.
StatementCount int `json:"statement_count"` // StatementCount is the number of InfluxQL statements executed
ScannedValues int `json:"scanned_values"` // ScannedValues is the number of values scanned from storage
ScannedBytes int `json:"scanned_bytes"` // ScannedBytes is the number of bytes scanned from storage
}
// Adding returns the sum of s and other.
func (s Statistics) Adding(other Statistics) Statistics {
return Statistics{
PlanDuration: s.PlanDuration + other.PlanDuration,
ExecuteDuration: s.ExecuteDuration + other.ExecuteDuration,
StatementCount: s.StatementCount + other.StatementCount,
ScannedValues: s.ScannedValues + other.ScannedValues,
ScannedBytes: s.ScannedBytes + other.ScannedBytes,
}
}
// Add adds other to s.
func (s *Statistics) Add(other Statistics) {
s.PlanDuration += other.PlanDuration
s.ExecuteDuration += other.ExecuteDuration
s.StatementCount += other.StatementCount
s.ScannedValues += other.ScannedValues
s.ScannedBytes += other.ScannedBytes
}
func (s *Statistics) LogToSpan(span opentracing.Span) {
if span == nil {
return
}
span.LogFields(
log.Float64("stats_plan_duration_seconds", s.PlanDuration.Seconds()),
log.Float64("stats_execute_duration_seconds", s.ExecuteDuration.Seconds()),
log.Int("stats_statement_count", s.StatementCount),
log.Int("stats_scanned_values", s.ScannedValues),
log.Int("stats_scanned_bytes", s.ScannedBytes),
)
}
// TotalDuration returns the sum of all durations for s.
func (s *Statistics) TotalDuration() time.Duration {
return s.PlanDuration + s.ExecuteDuration
}
type CollectorFn func() Statistics
func (fn CollectorFn) Statistics() Statistics {
return fn()
}
type MutableCollector struct {
s *Statistics
}
func NewMutableCollector(s *Statistics) *MutableCollector {
return &MutableCollector{s: s}
}
func (c *MutableCollector) Statistics() Statistics {
return *c.s
}
type ImmutableCollector struct {
s Statistics
}
func NewImmutableCollector(s Statistics) *ImmutableCollector {
return &ImmutableCollector{s: s}
}
func (c *ImmutableCollector) Statistics() Statistics {
return c.s
}
type StatisticsCollector interface {
Statistics() Statistics
}
type StatisticsGatherer struct {
mu sync.Mutex
collectors []StatisticsCollector
}
func (sg *StatisticsGatherer) Append(sc StatisticsCollector) {
sg.mu.Lock()
defer sg.mu.Unlock()
sg.collectors = append(sg.collectors, sc)
}
func (sg *StatisticsGatherer) Statistics() Statistics {
sg.mu.Lock()
defer sg.mu.Unlock()
res := Statistics{}
for i := range sg.collectors {
res = res.Adding(sg.collectors[i].Statistics())
}
return res
}
func (sg *StatisticsGatherer) Reset() {
sg.mu.Lock()
defer sg.mu.Unlock()
coll := sg.collectors
sg.collectors = sg.collectors[:0]
for i := range coll {
coll[i] = nil
}
}

View File

@ -5,7 +5,6 @@ package coordinator
import (
"time"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/toml"
"github.com/influxdata/influxdb/v2/v1/monitor/diagnostics"
)
@ -31,7 +30,6 @@ const (
type Config struct {
WriteTimeout toml.Duration `toml:"write-timeout"`
MaxConcurrentQueries int `toml:"max-concurrent-queries"`
QueryTimeout toml.Duration `toml:"query-timeout"`
LogQueriesAfter toml.Duration `toml:"log-queries-after"`
MaxSelectPointN int `toml:"max-select-point"`
MaxSelectSeriesN int `toml:"max-select-series"`
@ -42,7 +40,6 @@ type Config struct {
func NewConfig() Config {
return Config{
WriteTimeout: toml.Duration(DefaultWriteTimeout),
QueryTimeout: toml.Duration(query.DefaultQueryTimeout),
MaxConcurrentQueries: DefaultMaxConcurrentQueries,
MaxSelectPointN: DefaultMaxSelectPointN,
MaxSelectSeriesN: DefaultMaxSelectSeriesN,
@ -54,7 +51,6 @@ func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {
return diagnostics.RowFromMap(map[string]interface{}{
"write-timeout": c.WriteTimeout,
"max-concurrent-queries": c.MaxConcurrentQueries,
"query-timeout": c.QueryTimeout,
"log-queries-after": c.LogQueriesAfter,
"max-select-point": c.MaxSelectPointN,
"max-select-series": c.MaxSelectSeriesN,

View File

@ -276,12 +276,6 @@ func (l sgList) Append(sgi meta.ShardGroupInfo) sgList {
return next
}
// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of
// a cluster structure for information. This is to avoid a circular dependency.
func (w *PointsWriter) WritePointsInto(p *IntoWriteRequest) error {
return w.WritePointsPrivileged(p.Database, p.RetentionPolicy, models.ConsistencyLevelOne, p.Points)
}
// WritePoints writes the data to the underlying storage. consitencyLevel and user are only used for clustered scenarios
func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error {
return w.WritePointsPrivileged(database, retentionPolicy, consistencyLevel, points)

View File

@ -2,9 +2,11 @@ package coordinator
import (
"context"
"fmt"
"io"
"time"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/v1/services/meta"
"github.com/influxdata/influxdb/v2/v1/tsdb"
@ -27,24 +29,26 @@ type LocalShardMapper struct {
TSDBStore interface {
ShardGroup(ids []uint64) tsdb.ShardGroup
}
DBRP influxdb.DBRPMappingServiceV2
}
// MapShards maps the sources to the appropriate shards into an IteratorCreator.
func (e *LocalShardMapper) MapShards(sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) {
func (e *LocalShardMapper) MapShards(ctx context.Context, sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) {
a := &LocalShardMapping{
ShardMap: make(map[Source]tsdb.ShardGroup),
}
tmin := time.Unix(0, t.MinTimeNano())
tmax := time.Unix(0, t.MaxTimeNano())
if err := e.mapShards(a, sources, tmin, tmax); err != nil {
if err := e.mapShards(ctx, a, sources, tmin, tmax, opt.OrgID); err != nil {
return nil, err
}
a.MinTime, a.MaxTime = tmin, tmax
return a, nil
}
func (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sources, tmin, tmax time.Time) error {
func (e *LocalShardMapper) mapShards(ctx context.Context, a *LocalShardMapping, sources influxql.Sources, tmin, tmax time.Time, orgID influxdb.ID) error {
for _, s := range sources {
switch s := s.(type) {
case *influxql.Measurement:
@ -56,7 +60,22 @@ func (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sour
// shards is always the same regardless of which measurement we are
// using.
if _, ok := a.ShardMap[source]; !ok {
groups, err := e.MetaClient.ShardGroupsByTimeRange(s.Database, s.RetentionPolicy, tmin, tmax)
// lookup bucket and create info
mappings, n, err := e.DBRP.FindMany(ctx, influxdb.DBRPMappingFilterV2{
OrgID: &orgID,
Database: &s.Database,
RetentionPolicy: &s.RetentionPolicy,
})
if err != nil {
return fmt.Errorf("finding DBRP mappings: %v", err)
} else if n == 0 {
return fmt.Errorf("retention policy not found: %s", s.RetentionPolicy)
} else if n != 1 {
return fmt.Errorf("finding DBRP mappings: expected 1, found %d", n)
}
mapping := mappings[0]
groups, err := e.MetaClient.ShardGroupsByTimeRange(mapping.BucketID.String(), meta.DefaultRetentionPolicyName, tmin, tmax)
if err != nil {
return err
}
@ -75,7 +94,7 @@ func (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sour
a.ShardMap[source] = e.TSDBStore.ShardGroup(shardIDs)
}
case *influxql.SubQuery:
if err := e.mapShards(a, s.Statement.Sources, tmin, tmax); err != nil {
if err := e.mapShards(ctx, a, s.Statement.Sources, tmin, tmax, orgID); err != nil {
return err
}
}
@ -98,7 +117,7 @@ type LocalShardMapping struct {
MaxTime time.Time
}
func (a *LocalShardMapping) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
func (a *LocalShardMapping) FieldDimensions(ctx context.Context, m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
source := Source{
Database: m.Database,
RetentionPolicy: m.RetentionPolicy,
@ -132,7 +151,7 @@ func (a *LocalShardMapping) FieldDimensions(m *influxql.Measurement) (fields map
return
}
func (a *LocalShardMapping) MapType(m *influxql.Measurement, field string) influxql.DataType {
func (a *LocalShardMapping) MapType(ctx context.Context, m *influxql.Measurement, field string) influxql.DataType {
source := Source{
Database: m.Database,
RetentionPolicy: m.RetentionPolicy,
@ -208,7 +227,7 @@ func (a *LocalShardMapping) CreateIterator(ctx context.Context, m *influxql.Meas
return sg.CreateIterator(ctx, m, opt)
}
func (a *LocalShardMapping) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) {
func (a *LocalShardMapping) IteratorCost(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) {
source := Source{
Database: m.Database,
RetentionPolicy: m.RetentionPolicy,

File diff suppressed because it is too large Load Diff