diff --git a/cmd/influxd/launcher/launcher.go b/cmd/influxd/launcher/launcher.go index b7b9d596ee..6cc0e3f800 100644 --- a/cmd/influxd/launcher/launcher.go +++ b/cmd/influxd/launcher/launcher.go @@ -27,6 +27,8 @@ import ( "github.com/influxdata/influxdb/v2/endpoints" "github.com/influxdata/influxdb/v2/gather" "github.com/influxdata/influxdb/v2/http" + iqlcontrol "github.com/influxdata/influxdb/v2/influxql/control" + iqlquery "github.com/influxdata/influxdb/v2/influxql/query" "github.com/influxdata/influxdb/v2/inmem" "github.com/influxdata/influxdb/v2/internal/fs" "github.com/influxdata/influxdb/v2/kit/cli" @@ -64,6 +66,7 @@ import ( "github.com/influxdata/influxdb/v2/task/backend/scheduler" "github.com/influxdata/influxdb/v2/telemetry" "github.com/influxdata/influxdb/v2/tenant" + iqlcoordinator "github.com/influxdata/influxdb/v2/v1/coordinator" "github.com/influxdata/influxdb/v2/v1/services/meta" storage2 "github.com/influxdata/influxdb/v2/v1/services/storage" _ "github.com/influxdata/influxdb/v2/v1/tsdb/engine/tsm1" // needed for tsm1 @@ -845,6 +848,25 @@ func (m *Launcher) run(ctx context.Context) (err error) { dbrpSvc := dbrp.NewService(ctx, authorizer.NewBucketService(ts.BucketService), m.kvStore) dbrpSvc = dbrp.NewAuthorizedService(dbrpSvc) + cm := iqlcontrol.NewControllerMetrics([]string{}) + m.reg.MustRegister(cm.PrometheusCollectors()...) 
+ + mapper := &iqlcoordinator.LocalShardMapper{ + MetaClient: metaClient, + TSDBStore: m.engine.TSDBStore, + DBRP: dbrpSvc, + } + + qe := iqlquery.NewExecutor(m.log, cm) + se := &iqlcoordinator.StatementExecutor{ + MetaClient: metaClient, + TSDBStore: m.engine.TSDBStore, + ShardMapper: mapper, + DBRP: dbrpSvc, + } + qe.StatementExecutor = se + qe.StatementNormalizer = se + var checkSvc platform.CheckService { coordinator := coordinator.NewCoordinator(m.log, m.scheduler, m.executor) @@ -1022,6 +1044,7 @@ func (m *Launcher) run(ctx context.Context) (err error) { VariableService: variableSvc, PasswordsService: ts.PasswordsService, InfluxQLService: storageQueryService, + InfluxqldService: iqlquery.NewProxyExecutor(m.log, qe), FluxService: storageQueryService, FluxLanguageService: fluxlang.DefaultService, TaskService: taskSvc, diff --git a/go.mod b/go.mod index 461a08ecc0..3a6bc8d349 100644 --- a/go.mod +++ b/go.mod @@ -88,7 +88,7 @@ require ( github.com/stretchr/testify v1.5.1 github.com/tcnksm/go-input v0.0.0-20180404061846-548a7d7a8ee8 github.com/testcontainers/testcontainers-go v0.0.0-20190108154635-47c0da630f72 - github.com/tinylib/msgp v1.1.0 // indirect + github.com/tinylib/msgp v1.1.0 github.com/tylerb/graceful v1.2.15 github.com/uber-go/atomic v1.3.2 // indirect github.com/uber/jaeger-client-go v2.16.0+incompatible diff --git a/http/api_handler.go b/http/api_handler.go index 9f3737e80f..474f7eddd5 100644 --- a/http/api_handler.go +++ b/http/api_handler.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/influxdb/v2/chronograf/server" "github.com/influxdata/influxdb/v2/dbrp" "github.com/influxdata/influxdb/v2/http/metric" + "github.com/influxdata/influxdb/v2/influxql" "github.com/influxdata/influxdb/v2/kit/feature" "github.com/influxdata/influxdb/v2/kit/prom" kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" @@ -76,6 +77,7 @@ type APIBackend struct { VariableService influxdb.VariableService PasswordsService influxdb.PasswordsService InfluxQLService 
query.ProxyQueryService + InfluxqldService influxql.ProxyQueryService FluxService query.ProxyQueryService FluxLanguageService influxdb.FluxLanguageService TaskService influxdb.TaskService diff --git a/http/influx1x_authentication_handler.go b/http/influx1x_authentication_handler.go new file mode 100644 index 0000000000..78abd7436c --- /dev/null +++ b/http/influx1x_authentication_handler.go @@ -0,0 +1,158 @@ +package http + +import ( + "fmt" + "net/http" + "strings" + + "github.com/influxdata/influxdb/v2" + platcontext "github.com/influxdata/influxdb/v2/context" + "github.com/opentracing/opentracing-go" +) + +type Influx1xAuthenticationHandler struct { + influxdb.HTTPErrorHandler + next http.Handler + auth influxdb.AuthorizationService + user influxdb.UserService +} + +// NewInflux1xAuthenticationHandler creates an authentication handler to process +// InfluxDB 1.x authentication requests. +func NewInflux1xAuthenticationHandler(next http.Handler, auth influxdb.AuthorizationService, user influxdb.UserService, h influxdb.HTTPErrorHandler) *Influx1xAuthenticationHandler { + return &Influx1xAuthenticationHandler{ + HTTPErrorHandler: h, + next: next, + auth: auth, + user: user, + } +} + +// ServeHTTP extracts the session or token from the http request and places the resulting authorizer on the request context. 
+func (h *Influx1xAuthenticationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // The ping endpoint does not need authorization + if r.URL.Path == "/ping" { + h.next.ServeHTTP(w, r) + return + } + ctx := r.Context() + + creds, err := h.parseCredentials(r) + if err != nil { + UnauthorizedError(ctx, h, w) + return + } + + auth, err := h.auth.FindAuthorizationByToken(ctx, creds.Token) + if err != nil { + UnauthorizedError(ctx, h, w) + return + } + + var user *influxdb.User + if creds.Username != "" { + user, err = h.user.FindUser(ctx, influxdb.UserFilter{Name: &creds.Username}) + if err != nil { + UnauthorizedError(ctx, h, w) + return + } + + if user.ID != auth.UserID { + h.HandleHTTPError(ctx, &influxdb.Error{ + Code: influxdb.EForbidden, + Msg: "Username and Token do not match", + }, w) + return + } + } else { + user, err = h.user.FindUserByID(ctx, auth.UserID) + if err != nil { + UnauthorizedError(ctx, h, w) + return + } + } + + if err = h.isUserActive(user); err != nil { + InactiveUserError(ctx, h, w) + return + } + + ctx = platcontext.SetAuthorizer(ctx, auth) + + if span := opentracing.SpanFromContext(ctx); span != nil { + span.SetTag("user_id", auth.GetUserID().String()) + } + + h.next.ServeHTTP(w, r.WithContext(ctx)) +} + +func (h *Influx1xAuthenticationHandler) isUserActive(u *influxdb.User) error { + if u.Status != "inactive" { + return nil + } + + return &influxdb.Error{Code: influxdb.EForbidden, Msg: "User is inactive"} +} + +type credentials struct { + Username string + Token string +} + +func parseToken(token string) (user, pass string, ok bool) { + s := strings.IndexByte(token, ':') + if s < 0 { + // Token + return "", token, true + } + + // Token : + return token[:s], token[s+1:], true +} + +// parseCredentials parses a request and returns the authentication credentials. +// The credentials may be present as URL query params, or as a Basic +// Authentication header. 
+// As params: http://127.0.0.1/query?u=username&p=token +// As basic auth: http://username:token@127.0.0.1 +// As Token in Authorization header: Token +func (h *Influx1xAuthenticationHandler) parseCredentials(r *http.Request) (*credentials, error) { + q := r.URL.Query() + + // Check for username and password in URL params. + if u, p := q.Get("u"), q.Get("p"); u != "" && p != "" { + return &credentials{ + Username: u, + Token: p, + }, nil + } + + // Check for the HTTP Authorization header. + if s := r.Header.Get("Authorization"); s != "" { + // Check for Bearer token. + strs := strings.Split(s, " ") + if len(strs) == 2 { + switch strs[0] { + case "Token": + if u, p, ok := parseToken(strs[1]); ok { + return &credentials{ + Username: u, + Token: p, + }, nil + } + + // fallback to only a token + } + } + + // Check for basic auth. + if u, p, ok := r.BasicAuth(); ok { + return &credentials{ + Username: u, + Token: p, + }, nil + } + } + + return nil, fmt.Errorf("unable to parse authentication credentials") +} diff --git a/http/influx1x_authentication_handler_test.go b/http/influx1x_authentication_handler_test.go new file mode 100644 index 0000000000..ccf6f7ee33 --- /dev/null +++ b/http/influx1x_authentication_handler_test.go @@ -0,0 +1,192 @@ +package http + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/influxdb/v2" + kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" + "github.com/influxdata/influxdb/v2/mock" +) + +func TestInflux1xAuthenticationHandler(t *testing.T) { + var one = influxdb.ID(1) + + type fields struct { + FindAuthorizationByTokenFn func(context.Context, string) (*influxdb.Authorization, error) + FindUserFn func(context.Context, influxdb.UserFilter) (*influxdb.User, error) + FindUserByIDFn func(context.Context, influxdb.ID) (*influxdb.User, error) + } + + type exp struct { + code int + } + + basic := func(u, p string) func(r *http.Request) { + return func(r *http.Request) { + 
r.SetBasicAuth(u, p) + } + } + + token := func(u, p string) func(r *http.Request) { + return func(r *http.Request) { + if u == "" { + SetToken(p, r) + } else { + SetToken(u+":"+p, r) + } + } + } + + query := func(u, p string) func(r *http.Request) { + return func(r *http.Request) { + v := r.URL.Query() + v.Add("u", u) + v.Add("p", p) + r.URL.RawQuery = v.Encode() + } + } + + const ( + User = "sydney" + Token = "my-token" + ) + + tests := []struct { + name string + fields fields + auth func(r *http.Request) + exp exp + }{ + // successful requests + { + name: "basic auth", + fields: fields{}, + auth: basic(User, Token), + exp: exp{ + code: http.StatusOK, + }, + }, + { + name: "query string", + fields: fields{}, + auth: query(User, Token), + exp: exp{ + code: http.StatusOK, + }, + }, + { + name: "Token as user:token", + fields: fields{}, + auth: token(User, Token), + exp: exp{ + code: http.StatusOK, + }, + }, + { + name: "Token as token", + fields: fields{}, + auth: token("", Token), + exp: exp{ + code: http.StatusOK, + }, + }, + { + name: "token does not exist", + fields: fields{ + FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { + return nil, fmt.Errorf("authorization not found") + }, + }, + exp: exp{ + code: http.StatusUnauthorized, + }, + }, + { + name: "user is inactive", + fields: fields{ + FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { + return &influxdb.Authorization{UserID: one}, nil + }, + FindUserFn: func(ctx context.Context, f influxdb.UserFilter) (*influxdb.User, error) { + return &influxdb.User{ID: one, Status: "inactive"}, nil + }, + }, + auth: basic(User, Token), + exp: exp{ + code: http.StatusForbidden, + }, + }, + { + name: "username and token mismatch", + fields: fields{ + FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { + return &influxdb.Authorization{UserID: one}, nil + }, + 
FindUserFn: func(ctx context.Context, f influxdb.UserFilter) (*influxdb.User, error) { + return &influxdb.User{ID: influxdb.ID(2)}, nil + }, + }, + auth: basic(User, Token), + exp: exp{ + code: http.StatusForbidden, + }, + }, + { + name: "no auth provided", + fields: fields{ + FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { + return &influxdb.Authorization{}, nil + }, + }, + exp: exp{ + code: http.StatusUnauthorized, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var h *Influx1xAuthenticationHandler + { + auth := &mock.AuthorizationService{FindAuthorizationByTokenFn: tt.fields.FindAuthorizationByTokenFn} + if auth.FindAuthorizationByTokenFn == nil { + auth.FindAuthorizationByTokenFn = func(ctx context.Context, token string) (*influxdb.Authorization, error) { + return &influxdb.Authorization{UserID: one}, nil + } + } + + user := &mock.UserService{FindUserFn: tt.fields.FindUserFn, FindUserByIDFn: tt.fields.FindUserByIDFn} + if user.FindUserFn == nil { + user.FindUserFn = func(context.Context, influxdb.UserFilter) (*influxdb.User, error) { + return &influxdb.User{ID: one}, nil + } + } + if user.FindUserByIDFn == nil { + user.FindUserByIDFn = func(_ context.Context, id influxdb.ID) (*influxdb.User, error) { + return &influxdb.User{ID: id}, nil + } + } + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + h = NewInflux1xAuthenticationHandler(next, auth, user, kithttp.ErrorHandler(0)) + } + + w := httptest.NewRecorder() + r := httptest.NewRequest("POST", "http://any.url", nil) + if tt.auth != nil { + tt.auth(r) + } + h.ServeHTTP(w, r) + + if got, want := w.Code, tt.exp.code; got != want { + t.Errorf("expected status code to be %d got %d", want, got) + } + }) + } +} diff --git a/http/influxql_handler.go b/http/influxql_handler.go new file mode 100644 index 0000000000..0be731338c --- /dev/null +++ 
b/http/influxql_handler.go @@ -0,0 +1,56 @@ +package http + +import ( + "net/http" + + platform "github.com/influxdata/influxdb/v2" + influxqld "github.com/influxdata/influxdb/v2/influxql" + "github.com/influxdata/influxdb/v2/influxql/control" + "github.com/influxdata/influxdb/v2/query" + "go.uber.org/zap" +) + +// InfluxqlHandler mimics the /query handler from influxdb, but, enriches +// with org and forwards requests to the transpiler service. +type InfluxqlHandler struct { + *InfluxQLBackend + LegacyHandlerConfig + Metrics *control.ControllerMetrics +} + +type InfluxQLBackend struct { + platform.HTTPErrorHandler + Logger *zap.Logger + AuthorizationService platform.AuthorizationService + OrganizationService platform.OrganizationService + ProxyQueryService query.ProxyQueryService + InfluxqldQueryService influxqld.ProxyQueryService +} + +// NewInfluxQLBackend constructs an InfluxQLBackend from a LegacyBackend. +func NewInfluxQLBackend(b *LegacyBackend) *InfluxQLBackend { + return &InfluxQLBackend{ + HTTPErrorHandler: b.HTTPErrorHandler, + Logger: b.Logger.With(zap.String("handler", "influxql")), + AuthorizationService: b.AuthorizationService, + OrganizationService: b.OrganizationService, + InfluxqldQueryService: b.InfluxqldQueryService, + } +} + +// NewInfluxQLHandler returns a new instance of InfluxqlHandler to handle influxql v1 queries +func NewInfluxQLHandler(b *InfluxQLBackend, config LegacyHandlerConfig) *InfluxqlHandler { + return &InfluxqlHandler{ + InfluxQLBackend: b, + LegacyHandlerConfig: config, + Metrics: control.NewControllerMetrics([]string{}), + } +} + +func (h *InfluxqlHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + h.handleInfluxqldQuery(w, req) +} + +// DefaultChunkSize is the default number of points to write in +// one chunk. 
+const DefaultChunkSize = 10000
diff --git a/http/influxqld_handler.go b/http/influxqld_handler.go
new file mode 100644
index 0000000000..adf02a8586
--- /dev/null
+++ b/http/influxqld_handler.go
@@ -0,0 +1,202 @@
+package http
+
+import (
+	"context"
+	"encoding/json"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/influxdata/flux/iocounter"
+	"github.com/influxdata/influxdb/v2"
+	pcontext "github.com/influxdata/influxdb/v2/context"
+	"github.com/influxdata/influxdb/v2/influxql"
+	"github.com/influxdata/influxdb/v2/kit/tracing"
+	"github.com/prometheus/client_golang/prometheus"
+	"go.uber.org/zap"
+)
+
+func (h *InfluxqlHandler) PrometheusCollectors() []prometheus.Collector {
+	return []prometheus.Collector{
+		h.Metrics.Requests,
+		h.Metrics.RequestsLatency,
+	}
+}
+
+// HandleQuery mimics the influxdb 1.0 /query
+func (h *InfluxqlHandler) handleInfluxqldQuery(w http.ResponseWriter, r *http.Request) {
+	span, r := tracing.ExtractFromHTTPRequest(r, "handleInfluxqldQuery")
+	defer span.Finish()
+
+	if id, _, found := tracing.InfoFromSpan(span); found {
+		w.Header().Set(traceIDHeader, id)
+	}
+
+	ctx := r.Context()
+	defer r.Body.Close()
+
+	auth, err := getAuthorization(ctx)
+	if err != nil {
+		h.HandleHTTPError(ctx, err, w)
+		return
+	}
+
+	if !auth.IsActive() {
+		h.HandleHTTPError(ctx, &influxdb.Error{
+			Code: influxdb.EForbidden,
+			Msg:  "insufficient permissions",
+		}, w)
+		return
+	}
+
+	o, err := h.OrganizationService.FindOrganization(ctx, influxdb.OrganizationFilter{
+		ID: &auth.OrgID,
+	})
+	if err != nil {
+		h.HandleHTTPError(ctx, err, w)
+		return
+	}
+
+	var query string
+	// Attempt to read the form value from the "q" form value.
+	if qp := strings.TrimSpace(r.FormValue("q")); qp != "" {
+		query = qp
+	} else if r.MultipartForm != nil && r.MultipartForm.File != nil {
+		// If we have a multipart/form-data, try to retrieve a file from 'q'.
+		if fhs := r.MultipartForm.File["q"]; len(fhs) > 0 {
+			// Read the query from the uploaded part itself; fhs[0].Filename is a
+			// client-supplied name and must not be used as a server-side path.
+			f, err := fhs[0].Open()
+			if err != nil {
+				h.HandleHTTPError(ctx, err, w)
+				return
+			}
+			d, err := ioutil.ReadAll(f)
+			f.Close()
+			if err != nil {
+				h.HandleHTTPError(ctx, err, w)
+				return
+			}
+			query = string(d)
+		}
+	} else {
+		ct := r.Header.Get("Content-Type")
+		mt, _, err := mime.ParseMediaType(ct)
+		if err != nil {
+			h.HandleHTTPError(ctx, &influxdb.Error{
+				Code: influxdb.EInvalid,
+				Err:  err,
+			}, w)
+			return
+		}
+
+		if mt == "application/vnd.influxql" {
+			if d, err := ioutil.ReadAll(r.Body); err != nil {
+				h.HandleHTTPError(ctx, err, w)
+				return
+			} else {
+				query = string(d)
+			}
+		}
+	}
+
+	// parse the parameters
+	rawParams := r.FormValue("params")
+	var params map[string]interface{}
+	if rawParams != "" {
+		decoder := json.NewDecoder(strings.NewReader(rawParams))
+		decoder.UseNumber()
+		if err := decoder.Decode(&params); err != nil {
+			h.HandleHTTPError(ctx, &influxdb.Error{
+				Code: influxdb.EInvalid,
+				Msg:  "error parsing query parameters",
+				Err:  err,
+			}, w)
+			return
+		}
+
+		// Convert json.Number into int64 and float64 values
+		for k, v := range params {
+			if v, ok := v.(json.Number); ok {
+				var err error
+				if strings.Contains(string(v), ".") {
+					params[k], err = v.Float64()
+				} else {
+					params[k], err = v.Int64()
+				}
+
+				if err != nil {
+					h.HandleHTTPError(ctx, &influxdb.Error{
+						Code: influxdb.EInvalid,
+						Msg:  "error parsing json value",
+						Err:  err,
+					}, w)
+					return
+				}
+			}
+		}
+	}
+
+	// Parse chunk size.
Use default if not provided or cannot be parsed + chunked := r.FormValue("chunked") == "true" + chunkSize := DefaultChunkSize + if chunked { + if n, err := strconv.ParseInt(r.FormValue("chunk_size"), 10, 64); err == nil && int(n) > 0 { + chunkSize = int(n) + } + } + + req := &influxql.QueryRequest{ + DB: r.FormValue("db"), + RP: r.FormValue("rp"), + Epoch: r.FormValue("epoch"), + EncodingFormat: influxql.EncodingFormatFromMimeType(r.Header.Get("Accept")), + OrganizationID: o.ID, + Query: query, + Params: params, + Source: r.Header.Get("User-Agent"), + Authorization: auth, + Chunked: chunked, + ChunkSize: chunkSize, + } + + var respSize int64 + cw := iocounter.Writer{Writer: w} + _, err = h.InfluxqldQueryService.Query(ctx, &cw, req) + respSize = cw.Count() + + if err != nil { + if respSize == 0 { + // Only record the error headers IFF nothing has been written to w. + h.HandleHTTPError(ctx, err, w) + return + } + h.Logger.Info("error writing response to client", + zap.String("org", o.Name), + zap.String("handler", "influxql"), + zap.Error(err), + ) + } +} + + +// getAuthorization extracts authorization information from a context.Context. +// It guards against non influxdb.Authorization values for authorization and +// InfluxQL feature flag not enabled. 
+func getAuthorization(ctx context.Context) (*influxdb.Authorization, error) { + authorizer, err := pcontext.GetAuthorizer(ctx) + if err != nil { + return nil, err + } + + a, ok := authorizer.(*influxdb.Authorization) + if !ok { + return nil, &influxdb.Error{ + Code: influxdb.EForbidden, + Msg: "insufficient permissions; session not supported", + } + } + return a, nil +} diff --git a/http/influxqld_handler_test.go b/http/influxqld_handler_test.go new file mode 100644 index 0000000000..36aebd09f4 --- /dev/null +++ b/http/influxqld_handler_test.go @@ -0,0 +1,265 @@ +package http + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/google/go-cmp/cmp" + platform "github.com/influxdata/influxdb/v2" + pcontext "github.com/influxdata/influxdb/v2/context" + "github.com/influxdata/influxdb/v2/influxql" + imock "github.com/influxdata/influxdb/v2/influxql/mock" + kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" + "github.com/influxdata/influxdb/v2/mock" +) + +var cmpOpts = []cmp.Option{ + // Ignore request ID when comparing headers. 
+ cmp.Comparer(func(h1, h2 http.Header) bool { + for k, v1 := range h1 { + if k == "X-Request-Id" || k == "Request-Id" { + continue + } + if v2, ok := h2[k]; !ok || !cmp.Equal(v1, v2) { + return false + } + } + for k, v2 := range h2 { + if k == "X-Request-Id" || k == "Request-Id" { + continue + } + if v1, ok := h1[k]; !ok || !cmp.Equal(v2, v1) { + return false + } + } + return true + }), +} + +func TestInfluxQLdHandler_HandleQuery(t *testing.T) { + t.Skip("almost good to go, only unexpected content types") + + ctx := context.Background() + + type fields struct { + OrganizationService platform.OrganizationService + ProxyQueryService influxql.ProxyQueryService + } + type args struct { + w *httptest.ResponseRecorder + r *http.Request + } + tests := []struct { + name string + fields fields + args args + context context.Context + wantCode int + wantHeader http.Header + wantBody []byte + wantLogs []string + }{ + { + name: "no token causes http error", + args: args{ + r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), + w: httptest.NewRecorder(), + }, + wantCode: http.StatusInternalServerError, + wantHeader: http.Header{ + "X-Platform-Error-Code": {"internal error"}, + "Content-Type": {"application/json; charset=utf-8"}, + }, + wantBody: []byte(`{"code":"internal error","message":"authorizer not found on context"}`), + }, + { + name: "inactive authorizer", + context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Inactive}), + args: args{ + r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), + w: httptest.NewRecorder(), + }, + wantCode: http.StatusForbidden, + wantHeader: http.Header{ + "Content-Type": {"application/json; charset=utf-8"}, + "X-Platform-Error-Code": {"forbidden"}, + }, + wantBody: []byte(`{"code":"forbidden","message":"insufficient permissions"}`), + }, + { + name: "unknown organization", + context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), + fields: fields{ + 
OrganizationService: &mock.OrganizationService{ + FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { + return nil, &platform.Error{ + Code: platform.EForbidden, + Msg: "nope", + } + }, + }, + }, + args: args{ + r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), + w: httptest.NewRecorder(), + }, + wantCode: http.StatusForbidden, + wantHeader: http.Header{ + "Content-Type": {"application/json; charset=utf-8"}, + "X-Platform-Error-Code": {"forbidden"}, + }, + wantBody: []byte(`{"code":"forbidden","message":"nope"}`), + }, + { + name: "bad query", + context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), + fields: fields{ + OrganizationService: &mock.OrganizationService{ + FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { + return &platform.Organization{}, nil + }, + }, + ProxyQueryService: &imock.ProxyQueryService{ + QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { + return influxql.Statistics{}, &platform.Error{ + Code: platform.EUnprocessableEntity, + Msg: "bad query", + } + }, + }, + }, + args: args{ + r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), + w: httptest.NewRecorder(), + }, + wantCode: http.StatusUnprocessableEntity, + wantHeader: http.Header{ + "X-Platform-Error-Code": {"unprocessable entity"}, + "Content-Type": {"application/json; charset=utf-8"}, + }, + wantBody: []byte(`{"code":"unprocessable entity","message":"bad query"}`), + }, + { + name: "query fails during write", + context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), + fields: fields{ + OrganizationService: &mock.OrganizationService{ + FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { + return &platform.Organization{}, nil + }, + }, + ProxyQueryService: 
&imock.ProxyQueryService{ + QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { + _, _ = io.WriteString(w, "fail") + return influxql.Statistics{}, &platform.Error{ + Code: platform.EInternal, + Msg: "during query", + } + }, + }, + }, + args: args{ + r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), + w: httptest.NewRecorder(), + }, + wantBody: []byte("fail"), + wantCode: http.StatusOK, + wantHeader: http.Header{ + "Content-Type": {"application/json"}, + }, + }, + { + name: "good query unknown accept header", + context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), + fields: fields{ + OrganizationService: &mock.OrganizationService{ + FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { + return &platform.Organization{}, nil + }, + }, + ProxyQueryService: &imock.ProxyQueryService{ + QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { + _, err := io.WriteString(w, "good") + return influxql.Statistics{}, err + }, + }, + }, + args: args{ + r: WithHeader(httptest.NewRequest("POST", "/query", nil).WithContext(ctx), "Accept", "text/csv"), + w: httptest.NewRecorder(), + }, + wantBody: []byte("good"), + wantCode: http.StatusOK, + wantHeader: http.Header{ + "Content-Type": {"text/csv"}, + }, + wantLogs: []string{"text/csv"}, + }, + { + name: "good query", + context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), + fields: fields{ + OrganizationService: &mock.OrganizationService{ + FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { + return &platform.Organization{}, nil + }, + }, + ProxyQueryService: &imock.ProxyQueryService{ + QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { + _, err := io.WriteString(w, "good") 
+ return influxql.Statistics{}, err + }, + }, + }, + args: args{ + r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), + w: httptest.NewRecorder(), + }, + wantBody: []byte("good"), + wantCode: http.StatusOK, + wantHeader: http.Header{ + "Content-Type": {"application/json"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &InfluxQLBackend{ + HTTPErrorHandler: kithttp.ErrorHandler(0), + OrganizationService: tt.fields.OrganizationService, + InfluxqldQueryService: tt.fields.ProxyQueryService, + } + + h := NewInfluxQLHandler(b, LegacyHandlerConfig{}) + + if tt.context != nil { + tt.args.r = tt.args.r.WithContext(tt.context) + } + + tt.args.r.Header.Add("Content-Type", "application/vnd.influxql") + + h.handleInfluxqldQuery(tt.args.w, tt.args.r) + + if got, want := tt.args.w.Code, tt.wantCode; got != want { + t.Errorf("HandleQuery() status code = got %d / want %d", got, want) + } + + if got, want := tt.args.w.Result().Header, tt.wantHeader; !cmp.Equal(got, want, cmpOpts...) 
{ + t.Errorf("HandleQuery() headers = got(-)/want(+) %s", cmp.Diff(got, want)) + } + + if got, want := tt.args.w.Body.Bytes(), tt.wantBody; !cmp.Equal(got, want) { + t.Errorf("HandleQuery() body = got(-)/want(+) %s", cmp.Diff(string(got), string(want))) + } + + }) + } +} + +func WithHeader(r *http.Request, key, value string) *http.Request { + r.Header.Set(key, value) + return r +} diff --git a/http/legacy.go b/http/legacy.go new file mode 100644 index 0000000000..b431e30817 --- /dev/null +++ b/http/legacy.go @@ -0,0 +1,119 @@ +package http + +import ( + "net/http" + + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/http/metric" + "github.com/influxdata/influxdb/v2/influxql" + "github.com/influxdata/influxdb/v2/kit/cli" + "github.com/influxdata/influxdb/v2/query" + "github.com/influxdata/influxdb/v2/storage" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" +) + +// LegacyHandler is a collection of all the service handlers. +type LegacyHandler struct { + influxdb.HTTPErrorHandler + PointsWriterHandler *WriteHandler + PingHandler *PingHandler + InfluxQLHandler *InfluxqlHandler +} + +type LegacyBackend struct { + influxdb.HTTPErrorHandler + Logger *zap.Logger + MaxBatchSizeBytes int64 + + WriteEventRecorder metric.EventRecorder + AuthorizationService influxdb.AuthorizationService + OrganizationService influxdb.OrganizationService + BucketService influxdb.BucketService + PointsWriter storage.PointsWriter + DBRPMappingServiceV2 influxdb.DBRPMappingServiceV2 + ProxyQueryService query.ProxyQueryService + InfluxqldQueryService influxql.ProxyQueryService +} + +// NewLegacyBackend constructs a legacy backend from an api backend. 
+func NewLegacyBackend(b *APIBackend) *LegacyBackend { + return &LegacyBackend{ + HTTPErrorHandler: b.HTTPErrorHandler, + Logger: b.Logger, + // TODO(sgc): /write support + //MaxBatchSizeBytes: b.APIBackend.MaxBatchSizeBytes, + AuthorizationService: b.AuthorizationService, + OrganizationService: b.OrganizationService, + BucketService: b.BucketService, + PointsWriter: b.PointsWriter, + DBRPMappingServiceV2: b.DBRPService, + ProxyQueryService: b.InfluxQLService, + InfluxqldQueryService: b.InfluxqldService, + WriteEventRecorder: b.WriteEventRecorder, + } +} + +// LegacyHandlerConfig provides configuration for the legacy handler. +type LegacyHandlerConfig struct { + Version string + DefaultRoutingKey string +} + +func NewLegacyHandlerConfig() *LegacyHandlerConfig { + return &LegacyHandlerConfig{} +} + +// Opts returns the CLI options for use with kit/cli. +// Currently set values on c are provided as the defaults. +func (c *LegacyHandlerConfig) Opts() []cli.Opt { + return []cli.Opt{ + { + DestP: &c.DefaultRoutingKey, + Flag: "influxql-default-routing-key", + Default: "defaultQueue", + Desc: "Default routing key for publishing new query requests", + }, + } +} + +// NewLegacyHandler constructs a legacy handler from a backend. +func NewLegacyHandler(b *LegacyBackend, config LegacyHandlerConfig) *LegacyHandler { + h := &LegacyHandler{ + HTTPErrorHandler: b.HTTPErrorHandler, + } + + //pointsWriterBackend := NewPointsWriterBackend(b) + //h.PointsWriterHandler = NewWriterHandler(pointsWriterBackend, + // WithMaxBatchSizeBytes(b.MaxBatchSizeBytes)) + + influxqlBackend := NewInfluxQLBackend(b) + // TODO(desa): what to do for auth here? 
+ h.InfluxQLHandler = NewInfluxQLHandler(influxqlBackend, config) + + h.PingHandler = NewPingHandler(config.Version) + return h +} + +func (h *LegacyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/write" { + h.PointsWriterHandler.ServeHTTP(w, r) + return + } + + if r.URL.Path == "/ping" { + h.PingHandler.ServeHTTP(w, r) + return + } + + if r.URL.Path == "/query" { + h.InfluxQLHandler.ServeHTTP(w, r) + return + } + + w.WriteHeader(http.StatusNotFound) +} + +func (h *LegacyHandler) PrometheusCollectors() []prometheus.Collector { + return h.InfluxQLHandler.PrometheusCollectors() +} diff --git a/http/mocks/bucket_service.go b/http/mocks/bucket_service.go new file mode 100644 index 0000000000..0b07f351c1 --- /dev/null +++ b/http/mocks/bucket_service.go @@ -0,0 +1,144 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/influxdata/influxdb/v2 (interfaces: BucketService) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + influxdb "github.com/influxdata/influxdb/v2" + reflect "reflect" +) + +// MockBucketService is a mock of BucketService interface +type MockBucketService struct { + ctrl *gomock.Controller + recorder *MockBucketServiceMockRecorder +} + +// MockBucketServiceMockRecorder is the mock recorder for MockBucketService +type MockBucketServiceMockRecorder struct { + mock *MockBucketService +} + +// NewMockBucketService creates a new mock instance +func NewMockBucketService(ctrl *gomock.Controller) *MockBucketService { + mock := &MockBucketService{ctrl: ctrl} + mock.recorder = &MockBucketServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockBucketService) EXPECT() *MockBucketServiceMockRecorder { + return m.recorder +} + +// CreateBucket mocks base method +func (m *MockBucketService) CreateBucket(arg0 context.Context, arg1 *influxdb.Bucket) error 
{ + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateBucket", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateBucket indicates an expected call of CreateBucket +func (mr *MockBucketServiceMockRecorder) CreateBucket(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockBucketService)(nil).CreateBucket), arg0, arg1) +} + +// DeleteBucket mocks base method +func (m *MockBucketService) DeleteBucket(arg0 context.Context, arg1 influxdb.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucket", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteBucket indicates an expected call of DeleteBucket +func (mr *MockBucketServiceMockRecorder) DeleteBucket(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockBucketService)(nil).DeleteBucket), arg0, arg1) +} + +// FindBucket mocks base method +func (m *MockBucketService) FindBucket(arg0 context.Context, arg1 influxdb.BucketFilter) (*influxdb.Bucket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindBucket", arg0, arg1) + ret0, _ := ret[0].(*influxdb.Bucket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindBucket indicates an expected call of FindBucket +func (mr *MockBucketServiceMockRecorder) FindBucket(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucket", reflect.TypeOf((*MockBucketService)(nil).FindBucket), arg0, arg1) +} + +// FindBucketByID mocks base method +func (m *MockBucketService) FindBucketByID(arg0 context.Context, arg1 influxdb.ID) (*influxdb.Bucket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindBucketByID", arg0, arg1) + ret0, _ := ret[0].(*influxdb.Bucket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindBucketByID indicates an expected 
call of FindBucketByID +func (mr *MockBucketServiceMockRecorder) FindBucketByID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByID", reflect.TypeOf((*MockBucketService)(nil).FindBucketByID), arg0, arg1) +} + +// FindBucketByName mocks base method +func (m *MockBucketService) FindBucketByName(arg0 context.Context, arg1 influxdb.ID, arg2 string) (*influxdb.Bucket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindBucketByName", arg0, arg1, arg2) + ret0, _ := ret[0].(*influxdb.Bucket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindBucketByName indicates an expected call of FindBucketByName +func (mr *MockBucketServiceMockRecorder) FindBucketByName(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByName", reflect.TypeOf((*MockBucketService)(nil).FindBucketByName), arg0, arg1, arg2) +} + +// FindBuckets mocks base method +func (m *MockBucketService) FindBuckets(arg0 context.Context, arg1 influxdb.BucketFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "FindBuckets", varargs...) + ret0, _ := ret[0].([]*influxdb.Bucket) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// FindBuckets indicates an expected call of FindBuckets +func (mr *MockBucketServiceMockRecorder) FindBuckets(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBuckets", reflect.TypeOf((*MockBucketService)(nil).FindBuckets), varargs...) 
+} + +// UpdateBucket mocks base method +func (m *MockBucketService) UpdateBucket(arg0 context.Context, arg1 influxdb.ID, arg2 influxdb.BucketUpdate) (*influxdb.Bucket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateBucket", arg0, arg1, arg2) + ret0, _ := ret[0].(*influxdb.Bucket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateBucket indicates an expected call of UpdateBucket +func (mr *MockBucketServiceMockRecorder) UpdateBucket(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBucket", reflect.TypeOf((*MockBucketService)(nil).UpdateBucket), arg0, arg1, arg2) +} diff --git a/http/mocks/dbrp_mapping_service.go b/http/mocks/dbrp_mapping_service.go new file mode 100644 index 0000000000..111c8248af --- /dev/null +++ b/http/mocks/dbrp_mapping_service.go @@ -0,0 +1,114 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/influxdata/influxdb/v2 (interfaces: DBRPMappingService) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + influxdb "github.com/influxdata/influxdb/v2" + reflect "reflect" +) + +// MockDBRPMappingService is a mock of DBRPMappingService interface +type MockDBRPMappingService struct { + ctrl *gomock.Controller + recorder *MockDBRPMappingServiceMockRecorder +} + +// MockDBRPMappingServiceMockRecorder is the mock recorder for MockDBRPMappingService +type MockDBRPMappingServiceMockRecorder struct { + mock *MockDBRPMappingService +} + +// NewMockDBRPMappingService creates a new mock instance +func NewMockDBRPMappingService(ctrl *gomock.Controller) *MockDBRPMappingService { + mock := &MockDBRPMappingService{ctrl: ctrl} + mock.recorder = &MockDBRPMappingServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockDBRPMappingService) EXPECT() *MockDBRPMappingServiceMockRecorder { + return m.recorder +} + +// Create mocks base method +func (m *MockDBRPMappingService) Create(arg0 context.Context, arg1 *influxdb.DBRPMapping) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create +func (mr *MockDBRPMappingServiceMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDBRPMappingService)(nil).Create), arg0, arg1) +} + +// Delete mocks base method +func (m *MockDBRPMappingService) Delete(arg0 context.Context, arg1, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete +func (mr *MockDBRPMappingServiceMockRecorder) Delete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"Delete", reflect.TypeOf((*MockDBRPMappingService)(nil).Delete), arg0, arg1, arg2, arg3) +} + +// Find mocks base method +func (m *MockDBRPMappingService) Find(arg0 context.Context, arg1 influxdb.DBRPMappingFilter) (*influxdb.DBRPMapping, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Find", arg0, arg1) + ret0, _ := ret[0].(*influxdb.DBRPMapping) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Find indicates an expected call of Find +func (mr *MockDBRPMappingServiceMockRecorder) Find(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Find", reflect.TypeOf((*MockDBRPMappingService)(nil).Find), arg0, arg1) +} + +// FindBy mocks base method +func (m *MockDBRPMappingService) FindBy(arg0 context.Context, arg1, arg2, arg3 string) (*influxdb.DBRPMapping, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindBy", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*influxdb.DBRPMapping) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindBy indicates an expected call of FindBy +func (mr *MockDBRPMappingServiceMockRecorder) FindBy(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBy", reflect.TypeOf((*MockDBRPMappingService)(nil).FindBy), arg0, arg1, arg2, arg3) +} + +// FindMany mocks base method +func (m *MockDBRPMappingService) FindMany(arg0 context.Context, arg1 influxdb.DBRPMappingFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "FindMany", varargs...) 
+ ret0, _ := ret[0].([]*influxdb.DBRPMapping) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// FindMany indicates an expected call of FindMany +func (mr *MockDBRPMappingServiceMockRecorder) FindMany(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMany", reflect.TypeOf((*MockDBRPMappingService)(nil).FindMany), varargs...) +} diff --git a/http/mocks/dbrp_mapping_service_v2.go b/http/mocks/dbrp_mapping_service_v2.go new file mode 100644 index 0000000000..b6c40577d4 --- /dev/null +++ b/http/mocks/dbrp_mapping_service_v2.go @@ -0,0 +1,113 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/influxdata/influxdb/v2 (interfaces: DBRPMappingServiceV2) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + influxdb "github.com/influxdata/influxdb/v2" + reflect "reflect" +) + +// MockDBRPMappingServiceV2 is a mock of DBRPMappingServiceV2 interface +type MockDBRPMappingServiceV2 struct { + ctrl *gomock.Controller + recorder *MockDBRPMappingServiceV2MockRecorder +} + +// MockDBRPMappingServiceV2MockRecorder is the mock recorder for MockDBRPMappingServiceV2 +type MockDBRPMappingServiceV2MockRecorder struct { + mock *MockDBRPMappingServiceV2 +} + +// NewMockDBRPMappingServiceV2 creates a new mock instance +func NewMockDBRPMappingServiceV2(ctrl *gomock.Controller) *MockDBRPMappingServiceV2 { + mock := &MockDBRPMappingServiceV2{ctrl: ctrl} + mock.recorder = &MockDBRPMappingServiceV2MockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockDBRPMappingServiceV2) EXPECT() *MockDBRPMappingServiceV2MockRecorder { + return m.recorder +} + +// Create mocks base method +func (m *MockDBRPMappingServiceV2) Create(arg0 
context.Context, arg1 *influxdb.DBRPMappingV2) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create +func (mr *MockDBRPMappingServiceV2MockRecorder) Create(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).Create), arg0, arg1) +} + +// Delete mocks base method +func (m *MockDBRPMappingServiceV2) Delete(arg0 context.Context, arg1, arg2 influxdb.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete +func (mr *MockDBRPMappingServiceV2MockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).Delete), arg0, arg1, arg2) +} + +// FindByID mocks base method +func (m *MockDBRPMappingServiceV2) FindByID(arg0 context.Context, arg1, arg2 influxdb.ID) (*influxdb.DBRPMappingV2, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindByID", arg0, arg1, arg2) + ret0, _ := ret[0].(*influxdb.DBRPMappingV2) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindByID indicates an expected call of FindByID +func (mr *MockDBRPMappingServiceV2MockRecorder) FindByID(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindByID", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).FindByID), arg0, arg1, arg2) +} + +// FindMany mocks base method +func (m *MockDBRPMappingServiceV2) FindMany(arg0 context.Context, arg1 influxdb.DBRPMappingFilterV2, arg2 ...influxdb.FindOptions) ([]*influxdb.DBRPMappingV2, int, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + 
varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "FindMany", varargs...) + ret0, _ := ret[0].([]*influxdb.DBRPMappingV2) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// FindMany indicates an expected call of FindMany +func (mr *MockDBRPMappingServiceV2MockRecorder) FindMany(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMany", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).FindMany), varargs...) +} + +// Update mocks base method +func (m *MockDBRPMappingServiceV2) Update(arg0 context.Context, arg1 *influxdb.DBRPMappingV2) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update +func (mr *MockDBRPMappingServiceV2MockRecorder) Update(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockDBRPMappingServiceV2)(nil).Update), arg0, arg1) +} diff --git a/http/mocks/event_recorder.go b/http/mocks/event_recorder.go new file mode 100644 index 0000000000..3fe26bff16 --- /dev/null +++ b/http/mocks/event_recorder.go @@ -0,0 +1,47 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/influxdata/influxdb/v2/http/metric (interfaces: EventRecorder) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + metric "github.com/influxdata/influxdb/v2/http/metric" + reflect "reflect" +) + +// MockEventRecorder is a mock of EventRecorder interface +type MockEventRecorder struct { + ctrl *gomock.Controller + recorder *MockEventRecorderMockRecorder +} + +// MockEventRecorderMockRecorder is the mock recorder for MockEventRecorder +type MockEventRecorderMockRecorder struct { + mock *MockEventRecorder +} + +// NewMockEventRecorder creates a new mock instance +func NewMockEventRecorder(ctrl *gomock.Controller) *MockEventRecorder { + mock := &MockEventRecorder{ctrl: ctrl} + mock.recorder = &MockEventRecorderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockEventRecorder) EXPECT() *MockEventRecorderMockRecorder { + return m.recorder +} + +// Record mocks base method +func (m *MockEventRecorder) Record(arg0 context.Context, arg1 metric.Event) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Record", arg0, arg1) +} + +// Record indicates an expected call of Record +func (mr *MockEventRecorderMockRecorder) Record(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Record", reflect.TypeOf((*MockEventRecorder)(nil).Record), arg0, arg1) +} diff --git a/http/mocks/organization_service.go b/http/mocks/organization_service.go new file mode 100644 index 0000000000..32ca6db90b --- /dev/null +++ b/http/mocks/organization_service.go @@ -0,0 +1,129 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/influxdata/influxdb/v2 (interfaces: OrganizationService) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + influxdb "github.com/influxdata/influxdb/v2" + reflect "reflect" +) + +// MockOrganizationService is a mock of OrganizationService interface +type MockOrganizationService struct { + ctrl *gomock.Controller + recorder *MockOrganizationServiceMockRecorder +} + +// MockOrganizationServiceMockRecorder is the mock recorder for MockOrganizationService +type MockOrganizationServiceMockRecorder struct { + mock *MockOrganizationService +} + +// NewMockOrganizationService creates a new mock instance +func NewMockOrganizationService(ctrl *gomock.Controller) *MockOrganizationService { + mock := &MockOrganizationService{ctrl: ctrl} + mock.recorder = &MockOrganizationServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOrganizationService) EXPECT() *MockOrganizationServiceMockRecorder { + return m.recorder +} + +// CreateOrganization mocks base method +func (m *MockOrganizationService) CreateOrganization(arg0 context.Context, arg1 *influxdb.Organization) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrganization", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateOrganization indicates an expected call of CreateOrganization +func (mr *MockOrganizationServiceMockRecorder) CreateOrganization(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrganization", reflect.TypeOf((*MockOrganizationService)(nil).CreateOrganization), arg0, arg1) +} + +// DeleteOrganization mocks base method +func (m *MockOrganizationService) DeleteOrganization(arg0 context.Context, arg1 influxdb.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteOrganization", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteOrganization indicates an expected call of DeleteOrganization +func (mr 
*MockOrganizationServiceMockRecorder) DeleteOrganization(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOrganization", reflect.TypeOf((*MockOrganizationService)(nil).DeleteOrganization), arg0, arg1) +} + +// FindOrganization mocks base method +func (m *MockOrganizationService) FindOrganization(arg0 context.Context, arg1 influxdb.OrganizationFilter) (*influxdb.Organization, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindOrganization", arg0, arg1) + ret0, _ := ret[0].(*influxdb.Organization) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindOrganization indicates an expected call of FindOrganization +func (mr *MockOrganizationServiceMockRecorder) FindOrganization(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganization", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganization), arg0, arg1) +} + +// FindOrganizationByID mocks base method +func (m *MockOrganizationService) FindOrganizationByID(arg0 context.Context, arg1 influxdb.ID) (*influxdb.Organization, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindOrganizationByID", arg0, arg1) + ret0, _ := ret[0].(*influxdb.Organization) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindOrganizationByID indicates an expected call of FindOrganizationByID +func (mr *MockOrganizationServiceMockRecorder) FindOrganizationByID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganizationByID", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganizationByID), arg0, arg1) +} + +// FindOrganizations mocks base method +func (m *MockOrganizationService) FindOrganizations(arg0 context.Context, arg1 influxdb.OrganizationFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, 
arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "FindOrganizations", varargs...) + ret0, _ := ret[0].([]*influxdb.Organization) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// FindOrganizations indicates an expected call of FindOrganizations +func (mr *MockOrganizationServiceMockRecorder) FindOrganizations(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganizations", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganizations), varargs...) +} + +// UpdateOrganization mocks base method +func (m *MockOrganizationService) UpdateOrganization(arg0 context.Context, arg1 influxdb.ID, arg2 influxdb.OrganizationUpdate) (*influxdb.Organization, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateOrganization", arg0, arg1, arg2) + ret0, _ := ret[0].(*influxdb.Organization) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateOrganization indicates an expected call of UpdateOrganization +func (mr *MockOrganizationServiceMockRecorder) UpdateOrganization(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOrganization", reflect.TypeOf((*MockOrganizationService)(nil).UpdateOrganization), arg0, arg1, arg2) +} diff --git a/http/mocks/points_writer.go b/http/mocks/points_writer.go new file mode 100644 index 0000000000..b2729a9c95 --- /dev/null +++ b/http/mocks/points_writer.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/influxdata/influxdb/v2/storage (interfaces: PointsWriter) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + models "github.com/influxdata/influxdb/v2/models" + reflect "reflect" +) + +// MockPointsWriter is a mock of PointsWriter interface +type MockPointsWriter struct { + ctrl *gomock.Controller + recorder *MockPointsWriterMockRecorder +} + +// MockPointsWriterMockRecorder is the mock recorder for MockPointsWriter +type MockPointsWriterMockRecorder struct { + mock *MockPointsWriter +} + +// NewMockPointsWriter creates a new mock instance +func NewMockPointsWriter(ctrl *gomock.Controller) *MockPointsWriter { + mock := &MockPointsWriter{ctrl: ctrl} + mock.recorder = &MockPointsWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockPointsWriter) EXPECT() *MockPointsWriterMockRecorder { + return m.recorder +} + +// WritePoints mocks base method +func (m *MockPointsWriter) WritePoints(arg0 context.Context, arg1 []models.Point) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WritePoints", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WritePoints indicates an expected call of WritePoints +func (mr *MockPointsWriterMockRecorder) WritePoints(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePoints", reflect.TypeOf((*MockPointsWriter)(nil).WritePoints), arg0, arg1) +} diff --git a/http/ping_handler.go b/http/ping_handler.go new file mode 100644 index 0000000000..2fc82d64f9 --- /dev/null +++ b/http/ping_handler.go @@ -0,0 +1,30 @@ +package http + +import ( + "net/http" + + "github.com/influxdata/httprouter" +) + +type PingHandler struct { + *httprouter.Router + InfluxDBVersion string +} + +func NewPingHandler(version string) *PingHandler { + h := &PingHandler{ + Router: httprouter.New(), + InfluxDBVersion: version, + } + + h.HandlerFunc("GET", "/ping", h.pingHandler) + h.HandlerFunc("HEAD", "/ping", h.pingHandler) + 
return h +} + +// pingHandler is the HTTP handler for the GET and HEAD /ping routes. +func (h *PingHandler) pingHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Influxdb-Build", "cloud2") + w.Header().Add("X-Influxdb-Version", h.InfluxDBVersion) + w.WriteHeader(http.StatusNoContent) +} diff --git a/http/platform_handler.go b/http/platform_handler.go index 8a28918b2f..64030214af 100644 --- a/http/platform_handler.go +++ b/http/platform_handler.go @@ -10,9 +10,10 @@ import ( // PlatformHandler is a collection of all the service handlers. type PlatformHandler struct { - AssetHandler *AssetHandler - DocsHandler http.HandlerFunc - APIHandler http.Handler + AssetHandler *AssetHandler + DocsHandler http.HandlerFunc + APIHandler http.Handler + LegacyHandler http.Handler } // NewPlatformHandler returns a platform handler that serves the API and associated assets. @@ -37,15 +38,28 @@ func NewPlatformHandler(b *APIBackend, opts ...APIHandlerOptFn) *PlatformHandler wrappedHandler := kithttp.SetCORS(h) wrappedHandler = kithttp.SkipOptions(wrappedHandler) + legacyBackend := NewLegacyBackend(b) + lh := NewLegacyHandler(legacyBackend, LegacyHandlerConfig{}) + return &PlatformHandler{ AssetHandler: assetHandler, DocsHandler: Redoc("/api/v2/swagger.json"), APIHandler: wrappedHandler, + LegacyHandler: NewInflux1xAuthenticationHandler(lh, b.AuthorizationService, b.UserService, b.HTTPErrorHandler), } } // ServeHTTP delegates a request to the appropriate subhandler. func (h *PlatformHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // TODO(affo): change this to be mounted prefixes: https://github.com/influxdata/idpe/issues/6689.
+ if r.URL.Path == "/v1/write" || + r.URL.Path == "/write" || + r.URL.Path == "/query" || + r.URL.Path == "/ping" { + h.LegacyHandler.ServeHTTP(w, r) + return + } + if strings.HasPrefix(r.URL.Path, "/docs") { h.DocsHandler.ServeHTTP(w, r) return diff --git a/influxql/control/prometheus.go b/influxql/control/prometheus.go new file mode 100644 index 0000000000..f970f5dedd --- /dev/null +++ b/influxql/control/prometheus.go @@ -0,0 +1,70 @@ +package control + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// ControllerMetrics holds metrics related to the query controller. +type ControllerMetrics struct { + Requests *prometheus.CounterVec + NotImplemented *prometheus.CounterVec + RequestsLatency *prometheus.HistogramVec + ExecutingDuration *prometheus.HistogramVec +} + +const ( + LabelSuccess = "success" + LabelGenericError = "generic_err" + LabelParseErr = "parse_err" + LabelInterruptedErr = "interrupt_err" + LabelRuntimeError = "runtime_error" + LabelNotImplError = "not_implemented" + LabelNotExecuted = "not_executed" +) + +func NewControllerMetrics(labels []string) *ControllerMetrics { + const ( + namespace = "influxql" + subsystem = "service" + ) + + return &ControllerMetrics{ + Requests: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "requests_total", + Help: "Count of the query requests", + }, append(labels, "result")), + + NotImplemented: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "not_implemented_total", + Help: "Count of the query requests executing unimplemented operations", + }, []string{"operation"}), + + RequestsLatency: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "requests_latency_seconds", + Help: "Histogram of times spent for end-to-end latency (from issuing query request, to receiving the first byte of the response)", + Buckets: 
prometheus.ExponentialBuckets(1e-3, 5, 7), + }, append(labels, "result")), + + ExecutingDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "executing_duration_seconds", + Help: "Histogram of times spent executing queries", + Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7), + }, append(labels, "result")), + } +} + +func (cm *ControllerMetrics) PrometheusCollectors() []prometheus.Collector { + return []prometheus.Collector{ + cm.Requests, + cm.NotImplemented, + cm.ExecutingDuration, + } +} diff --git a/influxql/errors.go b/influxql/errors.go new file mode 100644 index 0000000000..2362ce71d0 --- /dev/null +++ b/influxql/errors.go @@ -0,0 +1,15 @@ +package influxql + +// NotImplementedError is returned when a specific operation is unavailable. +type NotImplementedError struct { + Op string // Op is the name of the unimplemented operation +} + +func (e *NotImplementedError) Error() string { + return "not implemented: " + e.Op +} + +// ErrNotImplemented creates a NotImplementedError specifying op is unavailable. +func ErrNotImplemented(op string) error { + return &NotImplementedError{Op: op} +} diff --git a/influxql/mock/proxy_query_service.go b/influxql/mock/proxy_query_service.go new file mode 100644 index 0000000000..ca24027955 --- /dev/null +++ b/influxql/mock/proxy_query_service.go @@ -0,0 +1,24 @@ +package mock + +import ( + "context" + "io" + + "github.com/influxdata/influxdb/v2/influxql" + "github.com/influxdata/influxdb/v2/kit/check" +) + +var _ influxql.ProxyQueryService = (*ProxyQueryService)(nil) + +// ProxyQueryService mocks the InfluxQL QueryService for testing. 
+type ProxyQueryService struct { + QueryF func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) +} + +func (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { + return s.QueryF(ctx, w, req) +} + +func (s *ProxyQueryService) Check(ctx context.Context) check.Response { + return check.Response{Name: "Mock InfluxQL Proxy Query Service", Status: check.StatusPass} +} diff --git a/influxql/query/compile.go b/influxql/query/compile.go index f2462f1cb4..88d37ec198 100644 --- a/influxql/query/compile.go +++ b/influxql/query/compile.go @@ -1,6 +1,7 @@ package query import ( + "context" "errors" "fmt" "strings" @@ -19,7 +20,7 @@ type CompileOptions struct { type Statement interface { // Prepare prepares the statement by mapping shards and finishing the creation // of the query plan. - Prepare(shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) + Prepare(ctx context.Context, shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) } // compiledStatement represents a select statement that has undergone some initial processing to @@ -1087,7 +1088,7 @@ func (c *compiledStatement) subquery(stmt *influxql.SelectStatement) error { return subquery.compile(stmt) } -func (c *compiledStatement) Prepare(shardMapper ShardMapper, sopt SelectOptions) (PreparedStatement, error) { +func (c *compiledStatement) Prepare(ctx context.Context, shardMapper ShardMapper, sopt SelectOptions) (PreparedStatement, error) { // If this is a query with a grouping, there is a bucket limit, and the minimum time has not been specified, // we need to limit the possible time range that can be used when mapping shards but not when actually executing // the select statement. Determine the shard time range here. 
@@ -1144,13 +1145,13 @@ func (c *compiledStatement) Prepare(shardMapper ShardMapper, sopt SelectOptions) } // Create an iterator creator based on the shards in the cluster. - shards, err := shardMapper.MapShards(c.stmt.Sources, timeRange, sopt) + shards, err := shardMapper.MapShards(ctx, c.stmt.Sources, timeRange, sopt) if err != nil { return nil, err } // Rewrite wildcards, if any exist. - mapper := FieldMapper{FieldMapper: shards} + mapper := queryFieldMapper{FieldMapper: newFieldMapperAdapter(shards, ctx)} stmt, err := c.stmt.RewriteFields(mapper) if err != nil { shards.Close() diff --git a/influxql/query/compile_test.go b/influxql/query/compile_test.go index 2f874f7c45..22ebf23504 100644 --- a/influxql/query/compile_test.go +++ b/influxql/query/compile_test.go @@ -1,6 +1,7 @@ package query_test import ( + "context" "testing" "github.com/influxdata/influxdb/v2/influxql/query" @@ -419,7 +420,7 @@ func TestPrepare_MapShardsTimeRange(t *testing.T) { } shardMapper := ShardMapper{ - MapShardsFn: func(_ influxql.Sources, tr influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, _ influxql.Sources, tr influxql.TimeRange) query.ShardGroup { if got, want := tr.Min, mustParseTime(tt.start); !got.Equal(want) { t.Errorf("unexpected start time: got=%s want=%s", got, want) } @@ -430,7 +431,7 @@ func TestPrepare_MapShardsTimeRange(t *testing.T) { }, } - if _, err := c.Prepare(&shardMapper, query.SelectOptions{}); err != nil { + if _, err := c.Prepare(context.Background(), &shardMapper, query.SelectOptions{}); err != nil { t.Fatalf("unexpected error: %s", err) } }) diff --git a/influxql/query/execution_context.go b/influxql/query/execution_context.go index 5479226f56..9359ebf648 100644 --- a/influxql/query/execution_context.go +++ b/influxql/query/execution_context.go @@ -2,112 +2,33 @@ package query import ( "context" - "sync" + + iql "github.com/influxdata/influxdb/v2/influxql" ) // ExecutionContext contains state that the query is currently executing 
with. type ExecutionContext struct { - context.Context - // The statement ID of the executing query. statementID int - // The query ID of the executing query. - QueryID uint64 - - // The query task information available to the StatementExecutor. - task *Task - // Output channel where results and errors should be sent. Results chan *Result + // StatisticsGatherer gathers metrics about the execution of a query. + StatisticsGatherer *iql.StatisticsGatherer + // Options used to start this query. ExecutionOptions - - mu sync.RWMutex - done chan struct{} - err error -} - -func (ctx *ExecutionContext) watch() { - ctx.done = make(chan struct{}) - if ctx.err != nil { - close(ctx.done) - return - } - - go func() { - defer close(ctx.done) - - var taskCtx <-chan struct{} - if ctx.task != nil { - taskCtx = ctx.task.closing - } - - select { - case <-taskCtx: - ctx.err = ctx.task.Error() - if ctx.err == nil { - ctx.err = ErrQueryInterrupted - } - case <-ctx.AbortCh: - ctx.err = ErrQueryAborted - case <-ctx.Context.Done(): - ctx.err = ctx.Context.Err() - } - }() -} - -func (ctx *ExecutionContext) Done() <-chan struct{} { - ctx.mu.RLock() - if ctx.done != nil { - defer ctx.mu.RUnlock() - return ctx.done - } - ctx.mu.RUnlock() - - ctx.mu.Lock() - defer ctx.mu.Unlock() - if ctx.done == nil { - ctx.watch() - } - return ctx.done -} - -func (ctx *ExecutionContext) Err() error { - ctx.mu.RLock() - defer ctx.mu.RUnlock() - return ctx.err -} - -func (ctx *ExecutionContext) Value(key interface{}) interface{} { - switch key { - case monitorContextKey{}: - return ctx.task - } - return ctx.Context.Value(key) -} - -// send sends a Result to the Results channel and will exit if the query has -// been aborted. 
-func (ctx *ExecutionContext) send(result *Result) error { - result.StatementID = ctx.statementID - select { - case <-ctx.AbortCh: - return ErrQueryAborted - case ctx.Results <- result: - } - return nil } // Send sends a Result to the Results channel and will exit if the query has // been interrupted or aborted. -func (ctx *ExecutionContext) Send(result *Result) error { - result.StatementID = ctx.statementID +func (ectx *ExecutionContext) Send(ctx context.Context, result *Result) error { + result.StatementID = ectx.statementID select { case <-ctx.Done(): return ctx.Err() - case ctx.Results <- result: + case ectx.Results <- result: } return nil } diff --git a/influxql/query/executor.go b/influxql/query/executor.go index 37fd6f891c..ac95e40fd9 100644 --- a/influxql/query/executor.go +++ b/influxql/query/executor.go @@ -7,12 +7,15 @@ import ( "os" "runtime/debug" "strconv" - "sync" - "sync/atomic" "time" + "github.com/influxdata/influxdb/v2" + iql "github.com/influxdata/influxdb/v2/influxql" + "github.com/influxdata/influxdb/v2/influxql/control" + "github.com/influxdata/influxdb/v2/kit/tracing" "github.com/influxdata/influxdb/v2/models" "github.com/influxdata/influxql" + "github.com/opentracing/opentracing-go/log" "go.uber.org/zap" ) @@ -26,29 +29,9 @@ var ( // ErrQueryInterrupted is an error returned when the query is interrupted. ErrQueryInterrupted = errors.New("query interrupted") - - // ErrQueryAborted is an error returned when the query is aborted. - ErrQueryAborted = errors.New("query aborted") - - // ErrQueryEngineShutdown is an error sent when the query cannot be - // created because the query engine was shutdown. - ErrQueryEngineShutdown = errors.New("query engine shutdown") - - // ErrQueryTimeoutLimitExceeded is an error when a query hits the max time allowed to run. - ErrQueryTimeoutLimitExceeded = errors.New("query-timeout limit exceeded") - - // ErrAlreadyKilled is returned when attempting to kill a query that has already been killed. 
- ErrAlreadyKilled = errors.New("already killed") ) -// Statistics for the Executor const ( - statQueriesActive = "queriesActive" // Number of queries currently being executed. - statQueriesExecuted = "queriesExecuted" // Number of queries that have been executed (started). - statQueriesFinished = "queriesFinished" // Number of queries that have finished. - statQueryExecutionDuration = "queryDurationNs" // Total (wall) time spent executing queries. - statRecoveredPanics = "recoveredPanics" // Number of panics recovered by Query Executor. - // PanicCrashEnv is the environment variable that, when set, will prevent // the handler from recovering any panics. PanicCrashEnv = "INFLUXDB_PANIC_CRASH" @@ -119,6 +102,9 @@ func AuthorizerIsOpen(a Authorizer) bool { // ExecutionOptions contains the options for executing a query. type ExecutionOptions struct { + // OrgID is the organization for which this query is being executed. + OrgID influxdb.ID + // The database the query is running against. Database string @@ -140,14 +126,10 @@ type ExecutionOptions struct { // Quiet suppresses non-essential output from the query executor. Quiet bool - - // AbortCh is a channel that signals when results are no longer desired by the caller. - AbortCh <-chan struct{} } type ( iteratorsContextKey struct{} - monitorContextKey struct{} ) // NewContextWithIterators returns a new context.Context with the *Iterators slice added. @@ -160,14 +142,24 @@ func NewContextWithIterators(ctx context.Context, itr *Iterators) context.Contex type StatementExecutor interface { // ExecuteStatement executes a statement. Results should be sent to the // results channel in the ExecutionContext. - ExecuteStatement(stmt influxql.Statement, ctx *ExecutionContext) error + ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *ExecutionContext) error } // StatementNormalizer normalizes a statement before it is executed. 
type StatementNormalizer interface { // NormalizeStatement adds a default database and policy to the // measurements in the statement. - NormalizeStatement(stmt influxql.Statement, database, retentionPolicy string) error + NormalizeStatement(ctx context.Context, stmt influxql.Statement, database, retentionPolicy string, ectx *ExecutionContext) error +} + +var ( + nullNormalizer StatementNormalizer = &nullNormalizerImpl{} +) + +type nullNormalizerImpl struct{} + +func (n *nullNormalizerImpl) NormalizeStatement(ctx context.Context, stmt influxql.Statement, database, retentionPolicy string, ectx *ExecutionContext) error { + return nil } // Executor executes every statement in an Query. @@ -175,98 +167,62 @@ type Executor struct { // Used for executing a statement in the query. StatementExecutor StatementExecutor - // Used for tracking running queries. - TaskManager *TaskManager + // StatementNormalizer normalizes a statement before it is executed. + StatementNormalizer StatementNormalizer - // Logger to use for all logging. - // Defaults to discarding all log output. - Logger *zap.Logger + Metrics *control.ControllerMetrics - // expvar-based stats. - stats *Statistics + log *zap.Logger } // NewExecutor returns a new instance of Executor. -func NewExecutor() *Executor { +func NewExecutor(logger *zap.Logger, cm *control.ControllerMetrics) *Executor { return &Executor{ - TaskManager: NewTaskManager(), - Logger: zap.NewNop(), - stats: &Statistics{}, + StatementNormalizer: nullNormalizer, + Metrics: cm, + log: logger.With(zap.String("service", "query")), } } -// Statistics keeps statistics related to the Executor. -type Statistics struct { - ActiveQueries int64 - ExecutedQueries int64 - FinishedQueries int64 - QueryExecutionDuration int64 - RecoveredPanics int64 -} - -// Statistics returns statistics for periodic monitoring. 
-func (e *Executor) Statistics(tags map[string]string) []models.Statistic { - return []models.Statistic{{ - Name: "queryExecutor", - Tags: tags, - Values: map[string]interface{}{ - statQueriesActive: atomic.LoadInt64(&e.stats.ActiveQueries), - statQueriesExecuted: atomic.LoadInt64(&e.stats.ExecutedQueries), - statQueriesFinished: atomic.LoadInt64(&e.stats.FinishedQueries), - statQueryExecutionDuration: atomic.LoadInt64(&e.stats.QueryExecutionDuration), - statRecoveredPanics: atomic.LoadInt64(&e.stats.RecoveredPanics), - }, - }} -} - // Close kills all running queries and prevents new queries from being attached. func (e *Executor) Close() error { - return e.TaskManager.Close() -} - -// SetLogOutput sets the writer to which all logs are written. It must not be -// called after Open is called. -func (e *Executor) WithLogger(log *zap.Logger) { - e.Logger = log.With(zap.String("service", "query")) - e.TaskManager.Logger = e.Logger + return nil } // ExecuteQuery executes each statement within a query. 
-func (e *Executor) ExecuteQuery(query *influxql.Query, opt ExecutionOptions, closing chan struct{}) <-chan *Result { +func (e *Executor) ExecuteQuery(ctx context.Context, query *influxql.Query, opt ExecutionOptions) (<-chan *Result, *iql.Statistics) { results := make(chan *Result) - go e.executeQuery(query, opt, closing, results) - return results + statistics := new(iql.Statistics) + go e.executeQuery(ctx, query, opt, results, statistics) + return results, statistics } -func (e *Executor) executeQuery(query *influxql.Query, opt ExecutionOptions, closing <-chan struct{}, results chan *Result) { - defer close(results) +func (e *Executor) executeQuery(ctx context.Context, query *influxql.Query, opt ExecutionOptions, results chan *Result, statistics *iql.Statistics) { + span, ctx := tracing.StartSpanFromContext(ctx) + defer func() { + close(results) + span.Finish() + }() + defer e.recover(query, results) - atomic.AddInt64(&e.stats.ActiveQueries, 1) - atomic.AddInt64(&e.stats.ExecutedQueries, 1) + gatherer := new(iql.StatisticsGatherer) + + statusLabel := control.LabelSuccess defer func(start time.Time) { - atomic.AddInt64(&e.stats.ActiveQueries, -1) - atomic.AddInt64(&e.stats.FinishedQueries, 1) - atomic.AddInt64(&e.stats.QueryExecutionDuration, time.Since(start).Nanoseconds()) + dur := time.Since(start) + e.Metrics.ExecutingDuration.WithLabelValues(statusLabel).Observe(dur.Seconds()) }(time.Now()) - ctx, detach, err := e.TaskManager.AttachQuery(query, opt, closing) - if err != nil { - select { - case results <- &Result{Err: err}: - case <-opt.AbortCh: - } - return - } - defer detach() + ectx := &ExecutionContext{StatisticsGatherer: gatherer, ExecutionOptions: opt} // Setup the execution context that will be used when executing statements. 
- ctx.Results = results + ectx.Results = results var i int LOOP: for ; i < len(query.Statements); i++ { - ctx.statementID = i + ectx.statementID = i stmt := query.Statements[i] // If a default database wasn't passed in by the caller, check the statement. @@ -298,9 +254,9 @@ LOOP: case "_tags": command = "SHOW TAG VALUES" } - results <- &Result{ + _ = ectx.Send(ctx, &Result{ Err: fmt.Errorf("unable to use system source '%s': use %s instead", s.Name, command), - } + }) break LOOP } } @@ -311,48 +267,49 @@ LOOP: // This can occur on meta read statements which convert to SELECT statements. newStmt, err := RewriteStatement(stmt) if err != nil { - results <- &Result{Err: err} + _ = ectx.Send(ctx, &Result{Err: err}) break } stmt = newStmt - // Normalize each statement if possible. - if normalizer, ok := e.StatementExecutor.(StatementNormalizer); ok { - if err := normalizer.NormalizeStatement(stmt, defaultDB, opt.RetentionPolicy); err != nil { - if err := ctx.send(&Result{Err: err}); err == ErrQueryAborted { + if err := e.StatementNormalizer.NormalizeStatement(ctx, stmt, defaultDB, opt.RetentionPolicy, ectx); err != nil { + if err := ectx.Send(ctx, &Result{Err: err}); err != nil { return } break } - } + + statistics.StatementCount += 1 // Log each normalized statement. - if !ctx.Quiet { - e.Logger.Info("Executing query", zap.Stringer("query", stmt)) + if !ectx.Quiet { + e.log.Info("Executing query", zap.Stringer("query", stmt)) + span.LogFields(log.String("normalized_query", stmt.String())) } + gatherer.Reset() + stmtStart := time.Now() // Send any other statements to the underlying statement executor. - err = e.StatementExecutor.ExecuteStatement(stmt, ctx) - if err == ErrQueryInterrupted { - // Query was interrupted so retrieve the real interrupt error from - // the query task if there is one. 
- if qerr := ctx.Err(); qerr != nil { - err = qerr - } - } + err = tracing.LogError(span, e.StatementExecutor.ExecuteStatement(ctx, stmt, ectx)) + stmtDur := time.Since(stmtStart) + stmtStats := gatherer.Statistics() + stmtStats.ExecuteDuration = stmtDur - stmtStats.PlanDuration + statistics.Add(stmtStats) // Send an error for this result if it failed for some reason. if err != nil { - if err := ctx.send(&Result{ + statusLabel = control.LabelNotExecuted + e.Metrics.Requests.WithLabelValues(statusLabel).Inc() + _ = ectx.Send(ctx, &Result{ StatementID: i, Err: err, - }); err == ErrQueryAborted { - return - } + }) // Stop after the first error. break } + e.Metrics.Requests.WithLabelValues(statusLabel).Inc() + // Check if the query was interrupted during an uninterruptible statement. interrupted := false select { @@ -363,17 +320,19 @@ LOOP: } if interrupted { + statusLabel = control.LabelInterruptedErr + e.Metrics.Requests.WithLabelValues(statusLabel).Inc() break } } // Send error results for any statements which were not executed. for ; i < len(query.Statements)-1; i++ { - if err := ctx.send(&Result{ + if err := ectx.Send(ctx, &Result{ StatementID: i, Err: ErrNotExecuted, - }); err == ErrQueryAborted { - return + }); err != nil { + break } } } @@ -391,85 +350,17 @@ func init() { func (e *Executor) recover(query *influxql.Query, results chan *Result) { if err := recover(); err != nil { - atomic.AddInt64(&e.stats.RecoveredPanics, 1) // Capture the panic in _internal stats. 
- e.Logger.Error(fmt.Sprintf("%s [panic:%s] %s", query.String(), err, debug.Stack())) + e.log.Error(fmt.Sprintf("%s [panic:%s] %s", query.String(), err, debug.Stack())) results <- &Result{ StatementID: -1, Err: fmt.Errorf("%s [panic:%s]", query.String(), err), } if willCrash { - e.Logger.Error("\n\n=====\nAll goroutines now follow:") + e.log.Error("\n\n=====\nAll goroutines now follow:") buf := debug.Stack() - e.Logger.Error(fmt.Sprintf("%s", buf)) + e.log.Error(fmt.Sprintf("%s", buf)) os.Exit(1) } } } - -// Task is the internal data structure for managing queries. -// For the public use data structure that gets returned, see Task. -type Task struct { - query string - database string - status TaskStatus - startTime time.Time - closing chan struct{} - monitorCh chan error - err error - mu sync.Mutex -} - -// Monitor starts a new goroutine that will monitor a query. The function -// will be passed in a channel to signal when the query has been finished -// normally. If the function returns with an error and the query is still -// running, the query will be terminated. -func (q *Task) Monitor(fn MonitorFunc) { - go q.monitor(fn) -} - -// Error returns any asynchronous error that may have occurred while executing -// the query. -func (q *Task) Error() error { - q.mu.Lock() - defer q.mu.Unlock() - return q.err -} - -func (q *Task) setError(err error) { - q.mu.Lock() - q.err = err - q.mu.Unlock() -} - -func (q *Task) monitor(fn MonitorFunc) { - if err := fn(q.closing); err != nil { - select { - case <-q.closing: - case q.monitorCh <- err: - } - } -} - -// close closes the query task closing channel if the query hasn't been previously killed. -func (q *Task) close() { - q.mu.Lock() - if q.status != KilledTask { - // Set the status to killed to prevent closing the channel twice. 
- q.status = KilledTask - close(q.closing) - } - q.mu.Unlock() -} - -func (q *Task) kill() error { - q.mu.Lock() - if q.status == KilledTask { - q.mu.Unlock() - return ErrAlreadyKilled - } - q.status = KilledTask - close(q.closing) - q.mu.Unlock() - return nil -} diff --git a/influxql/query/executor_test.go b/influxql/query/executor_test.go index 8ab43ec1de..c8b91d2d21 100644 --- a/influxql/query/executor_test.go +++ b/influxql/query/executor_test.go @@ -1,252 +1,33 @@ package query_test import ( + "context" "errors" - "fmt" - "strings" "testing" "time" + "github.com/golang/mock/gomock" + iql "github.com/influxdata/influxdb/v2/influxql" + "github.com/influxdata/influxdb/v2/influxql/control" "github.com/influxdata/influxdb/v2/influxql/query" + "github.com/influxdata/influxdb/v2/influxql/query/mocks" "github.com/influxdata/influxql" + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" ) var errUnexpected = errors.New("unexpected error") type StatementExecutor struct { - ExecuteStatementFn func(stmt influxql.Statement, ctx *query.ExecutionContext) error + ExecuteStatementFn func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error } -func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query.ExecutionContext) error { - return e.ExecuteStatementFn(stmt, ctx) +func (e *StatementExecutor) ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { + return e.ExecuteStatementFn(ctx, stmt, ectx) } -func NewQueryExecutor() *query.Executor { - return query.NewExecutor() -} - -func TestQueryExecutor_AttachQuery(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - e := NewQueryExecutor() - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { - if ctx.QueryID != 1 { - t.Errorf("incorrect query id: exp=1 got=%d", ctx.QueryID) - } - 
return nil - }, - } - - discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) -} - -func TestQueryExecutor_KillQuery(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - qid := make(chan uint64) - - e := NewQueryExecutor() - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { - switch stmt.(type) { - case *influxql.KillQueryStatement: - return e.TaskManager.ExecuteStatement(stmt, ctx) - } - - qid <- ctx.QueryID - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(100 * time.Millisecond): - t.Error("killing the query did not close the channel after 100 milliseconds") - return errUnexpected - } - }, - } - - results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid)) - if err != nil { - t.Fatal(err) - } - discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) - - result := <-results - if result.Err != query.ErrQueryInterrupted { - t.Errorf("unexpected error: %s", result.Err) - } -} - -func TestQueryExecutor_KillQuery_Zombie(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - qid := make(chan uint64) - done := make(chan struct{}) - - e := NewQueryExecutor() - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { - switch stmt.(type) { - case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement: - return e.TaskManager.ExecuteStatement(stmt, ctx) - } - - qid <- ctx.QueryID - select { - case <-ctx.Done(): - select { - case <-done: - // Keep the query running until we run SHOW QUERIES. - case <-time.After(100 * time.Millisecond): - // Ensure that we don't have a lingering goroutine. 
- } - return query.ErrQueryInterrupted - case <-time.After(100 * time.Millisecond): - t.Error("killing the query did not close the channel after 100 milliseconds") - return errUnexpected - } - }, - } - - results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid)) - if err != nil { - t.Fatal(err) - } - discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) - - // Display the queries and ensure that the original is still in there. - q, err = influxql.ParseQuery("SHOW QUERIES") - if err != nil { - t.Fatal(err) - } - tasks := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - - // The killed query should still be there. - task := <-tasks - if len(task.Series) != 1 { - t.Errorf("expected %d series, got %d", 1, len(task.Series)) - } else if len(task.Series[0].Values) != 2 { - t.Errorf("expected %d rows, got %d", 2, len(task.Series[0].Values)) - } - close(done) - - // The original query should return. - result := <-results - if result.Err != query.ErrQueryInterrupted { - t.Errorf("unexpected error: %s", result.Err) - } -} - -func TestQueryExecutor_KillQuery_CloseTaskManager(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - qid := make(chan uint64) - - // Open a channel to stall the statement executor forever. This keeps the statement executor - // running even after we kill the query which can happen with some queries. We only close it once - // the test has finished running. - done := make(chan struct{}) - defer close(done) - - e := NewQueryExecutor() - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { - switch stmt.(type) { - case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement: - return e.TaskManager.ExecuteStatement(stmt, ctx) - } - - qid <- ctx.QueryID - <-done - return nil - }, - } - - // Kill the query. 
This should switch it into a zombie state. - go discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) - q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid)) - if err != nil { - t.Fatal(err) - } - discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) - - // Display the queries and ensure that the original is still in there. - q, err = influxql.ParseQuery("SHOW QUERIES") - if err != nil { - t.Fatal(err) - } - tasks := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - - // The killed query should still be there. - task := <-tasks - if len(task.Series) != 1 { - t.Errorf("expected %d series, got %d", 1, len(task.Series)) - } else if len(task.Series[0].Values) != 2 { - t.Errorf("expected %d rows, got %d", 2, len(task.Series[0].Values)) - } - - // Close the task manager to ensure it doesn't cause a panic. - if err := e.TaskManager.Close(); err != nil { - t.Errorf("unexpected error: %s", err) - } -} - -func TestQueryExecutor_KillQuery_AlreadyKilled(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - qid := make(chan uint64) - - // Open a channel to stall the statement executor forever. This keeps the statement executor - // running even after we kill the query which can happen with some queries. We only close it once - // the test has finished running. - done := make(chan struct{}) - defer close(done) - - e := NewQueryExecutor() - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { - switch stmt.(type) { - case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement: - return e.TaskManager.ExecuteStatement(stmt, ctx) - } - - qid <- ctx.QueryID - <-done - return nil - }, - } - - // Kill the query. This should switch it into a zombie state. 
- go discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) - q, err = influxql.ParseQuery(fmt.Sprintf("KILL QUERY %d", <-qid)) - if err != nil { - t.Fatal(err) - } - discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) - - // Now attempt to kill it again. We should get an error. - results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - result := <-results - if got, want := result.Err, query.ErrAlreadyKilled; got != want { - t.Errorf("unexpected error: got=%v want=%v", got, want) - } +func NewQueryExecutor(t *testing.T) *query.Executor { + return query.NewExecutor(zaptest.NewLogger(t), control.NewControllerMetrics([]string{})) } func TestQueryExecutor_Interrupt(t *testing.T) { @@ -255,12 +36,12 @@ func TestQueryExecutor_Interrupt(t *testing.T) { t.Fatal(err) } - e := NewQueryExecutor() + e := NewQueryExecutor(t) e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { select { case <-ctx.Done(): - return ctx.Err() + return nil case <-time.After(100 * time.Millisecond): t.Error("killing the query did not close the channel after 100 milliseconds") return errUnexpected @@ -268,11 +49,12 @@ func TestQueryExecutor_Interrupt(t *testing.T) { }, } - closing := make(chan struct{}) - results := e.ExecuteQuery(q, query.ExecutionOptions{}, closing) - close(closing) + ctx, cancel := context.WithCancel(context.Background()) + results, _ := e.ExecuteQuery(ctx, q, query.ExecutionOptions{}) + cancel() + result := <-results - if result.Err != query.ErrQueryInterrupted { + if result != nil && result.Err != query.ErrQueryInterrupted { t.Errorf("unexpected error: %s", result.Err) } } @@ -286,189 +68,42 @@ func TestQueryExecutor_Abort(t *testing.T) { ch1 := make(chan struct{}) ch2 := make(chan struct{}) - e := NewQueryExecutor() + e := NewQueryExecutor(t) e.StatementExecutor = 
&StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { <-ch1 - if err := ctx.Send(&query.Result{Err: errUnexpected}); err != query.ErrQueryAborted { - t.Errorf("unexpected error: %v", err) + if err := ectx.Send(ctx, &query.Result{Err: errUnexpected}); err == nil { + t.Errorf("expected error") } close(ch2) return nil }, } - done := make(chan struct{}) - close(done) + ctx, cancel := context.WithCancel(context.Background()) + cancel() - results := e.ExecuteQuery(q, query.ExecutionOptions{AbortCh: done}, nil) + results, _ := e.ExecuteQuery(ctx, q, query.ExecutionOptions{}) close(ch1) <-ch2 discardOutput(results) } -func TestQueryExecutor_ShowQueries(t *testing.T) { - e := NewQueryExecutor() - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { - switch stmt.(type) { - case *influxql.ShowQueriesStatement: - return e.TaskManager.ExecuteStatement(stmt, ctx) - } - - t.Errorf("unexpected statement: %s", stmt) - return errUnexpected - }, - } - - q, err := influxql.ParseQuery(`SHOW QUERIES`) - if err != nil { - t.Fatal(err) - } - - results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - result := <-results - if len(result.Series) != 1 { - t.Errorf("expected %d series, got %d", 1, len(result.Series)) - } else if len(result.Series[0].Values) != 1 { - t.Errorf("expected %d row, got %d", 1, len(result.Series[0].Values)) - } - if result.Err != nil { - t.Errorf("unexpected error: %s", result.Err) - } -} - -func TestQueryExecutor_Limit_Timeout(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - e := NewQueryExecutor() - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { - select { - case 
<-ctx.Done(): - return ctx.Err() - case <-time.After(time.Second): - t.Errorf("timeout has not killed the query") - return errUnexpected - } - }, - } - e.TaskManager.QueryTimeout = time.Nanosecond - - results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - result := <-results - if result.Err == nil || !strings.Contains(result.Err.Error(), "query-timeout") { - t.Errorf("unexpected error: %s", result.Err) - } -} - -func TestQueryExecutor_Limit_ConcurrentQueries(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - qid := make(chan uint64) - - e := NewQueryExecutor() - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { - qid <- ctx.QueryID - <-ctx.Done() - return ctx.Err() - }, - } - e.TaskManager.MaxConcurrentQueries = 1 - defer e.Close() - - // Start first query and wait for it to be executing. - go discardOutput(e.ExecuteQuery(q, query.ExecutionOptions{}, nil)) - <-qid - - // Start second query and expect for it to fail. 
- results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - - select { - case result := <-results: - if len(result.Series) != 0 { - t.Errorf("expected %d rows, got %d", 0, len(result.Series)) - } - if result.Err == nil || !strings.Contains(result.Err.Error(), "max-concurrent-queries") { - t.Errorf("unexpected error: %s", result.Err) - } - case <-qid: - t.Errorf("unexpected statement execution for the second query") - } -} - -func TestQueryExecutor_Close(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - ch1 := make(chan struct{}) - ch2 := make(chan struct{}) - - e := NewQueryExecutor() - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { - close(ch1) - <-ctx.Done() - return ctx.Err() - }, - } - - results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - go func(results <-chan *query.Result) { - result := <-results - if result.Err != query.ErrQueryEngineShutdown { - t.Errorf("unexpected error: %s", result.Err) - } - close(ch2) - }(results) - - // Wait for the statement to start executing. - <-ch1 - - // Close the query executor. - e.Close() - - // Check that the statement gets interrupted and finishes. 
- select { - case <-ch2: - case <-time.After(100 * time.Millisecond): - t.Fatal("closing the query manager did not kill the query after 100 milliseconds") - } - - results = e.ExecuteQuery(q, query.ExecutionOptions{}, nil) - result := <-results - if len(result.Series) != 0 { - t.Errorf("expected %d rows, got %d", 0, len(result.Series)) - } - if result.Err != query.ErrQueryEngineShutdown { - t.Errorf("unexpected error: %s", result.Err) - } -} - func TestQueryExecutor_Panic(t *testing.T) { q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) if err != nil { t.Fatal(err) } - e := NewQueryExecutor() + e := NewQueryExecutor(t) e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { panic("test error") }, } - results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + results, _ := e.ExecuteQuery(context.Background(), q, query.ExecutionOptions{}) result := <-results if len(result.Series) != 0 { t.Errorf("expected %d rows, got %d", 0, len(result.Series)) @@ -479,9 +114,9 @@ func TestQueryExecutor_Panic(t *testing.T) { } func TestQueryExecutor_InvalidSource(t *testing.T) { - e := NewQueryExecutor() + e := NewQueryExecutor(t) e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { + ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { return errors.New("statement executed unexpectedly") }, } @@ -517,7 +152,7 @@ func TestQueryExecutor_InvalidSource(t *testing.T) { continue } - results := e.ExecuteQuery(q, query.ExecutionOptions{}, nil) + results, _ := e.ExecuteQuery(context.Background(), q, query.ExecutionOptions{}) result := <-results if len(result.Series) != 0 { t.Errorf("%d. 
expected %d rows, got %d", 0, i, len(result.Series)) @@ -528,6 +163,35 @@ func TestQueryExecutor_InvalidSource(t *testing.T) { } } +// This test verifies Statistics are gathered +// and that ExecuteDuration accounts for PlanDuration +func TestExecutor_ExecuteQuery_Statistics(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + + stmt := influxql.MustParseStatement("SELECT f0 FROM m0") + q := &influxql.Query{Statements: influxql.Statements{stmt, stmt}} + + se := mocks.NewMockStatementExecutor(ctl) + se.EXPECT().ExecuteStatement(gomock.Any(), stmt, gomock.Any()). + Times(2). + DoAndReturn(func(ctx context.Context, statement influxql.Statement, ectx *query.ExecutionContext) error { + time.Sleep(10 * time.Millisecond) + ectx.StatisticsGatherer.Append(iql.NewImmutableCollector(iql.Statistics{PlanDuration: 5 * time.Millisecond})) + return nil + }) + + e := NewQueryExecutor(t) + e.StatementExecutor = se + + ctx := context.Background() + results, stats := e.ExecuteQuery(ctx, q, query.ExecutionOptions{Quiet: true}) + <-results + assert.GreaterOrEqual(t, int64(stats.ExecuteDuration), int64(10*time.Millisecond)) + assert.Equal(t, 10*time.Millisecond, stats.PlanDuration) + assert.Equal(t, 2, stats.StatementCount) +} + func discardOutput(results <-chan *query.Result) { for range results { // Read all results and discard. diff --git a/influxql/query/explain.go b/influxql/query/explain.go index b599af1c26..a486237d20 100644 --- a/influxql/query/explain.go +++ b/influxql/query/explain.go @@ -10,11 +10,11 @@ import ( "github.com/influxdata/influxql" ) -func (p *preparedStatement) Explain() (string, error) { +func (p *preparedStatement) Explain(ctx context.Context) (string, error) { // Determine the cost of all iterators created as part of this plan. 
ic := &explainIteratorCreator{ic: p.ic} p.ic = ic - cur, err := p.Select(context.Background()) + cur, err := p.Select(ctx) p.ic = ic.ic if err != nil { @@ -65,7 +65,7 @@ type explainIteratorCreator struct { } func (e *explainIteratorCreator) CreateIterator(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (Iterator, error) { - cost, err := e.ic.IteratorCost(m, opt) + cost, err := e.ic.IteratorCost(ctx, m, opt) if err != nil { return nil, err } @@ -77,8 +77,8 @@ func (e *explainIteratorCreator) CreateIterator(ctx context.Context, m *influxql return &nilFloatIterator{}, nil } -func (e *explainIteratorCreator) IteratorCost(m *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) { - return e.ic.IteratorCost(m, opt) +func (e *explainIteratorCreator) IteratorCost(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) { + return e.ic.IteratorCost(ctx, m, opt) } func (e *explainIteratorCreator) Close() error { diff --git a/influxql/query/functions.go b/influxql/query/functions.go index a1351c7d97..cb7b47a215 100644 --- a/influxql/query/functions.go +++ b/influxql/query/functions.go @@ -11,13 +11,13 @@ import ( "github.com/influxdata/influxql" ) -// FieldMapper is a FieldMapper that wraps another FieldMapper and exposes +// queryFieldMapper is a FieldMapper that wraps another FieldMapper and exposes // the functions implemented by the query engine. 
-type FieldMapper struct { +type queryFieldMapper struct { influxql.FieldMapper } -func (m FieldMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { +func (m queryFieldMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { if mapper, ok := m.FieldMapper.(influxql.CallTypeMapper); ok { typ, err := mapper.CallType(name, args) if err != nil { diff --git a/influxql/query/iterator.go b/influxql/query/iterator.go index 4ed0fbc959..8e11bc207c 100644 --- a/influxql/query/iterator.go +++ b/influxql/query/iterator.go @@ -10,8 +10,8 @@ import ( "time" "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/v2" internal "github.com/influxdata/influxdb/v2/influxql/query/internal" - "github.com/influxdata/influxdb/v2/pkg/tracing" "github.com/influxdata/influxql" ) @@ -407,25 +407,6 @@ func NewInterruptIterator(input Iterator, closing <-chan struct{}) Iterator { } } -// NewCloseInterruptIterator returns an iterator that will invoke the Close() method on an -// iterator when the passed-in channel has been closed. -func NewCloseInterruptIterator(input Iterator, closing <-chan struct{}) Iterator { - switch input := input.(type) { - case FloatIterator: - return newFloatCloseInterruptIterator(input, closing) - case IntegerIterator: - return newIntegerCloseInterruptIterator(input, closing) - case UnsignedIterator: - return newUnsignedCloseInterruptIterator(input, closing) - case StringIterator: - return newStringCloseInterruptIterator(input, closing) - case BooleanIterator: - return newBooleanCloseInterruptIterator(input, closing) - default: - panic(fmt.Sprintf("unsupported close iterator iterator type: %T", input)) - } -} - // IteratorScanner is used to scan the results of an iterator into a map. type IteratorScanner interface { // Peek retrieves information about the next point. It returns a timestamp, the name, and the tags. 
@@ -554,11 +535,14 @@ type IteratorCreator interface { CreateIterator(ctx context.Context, source *influxql.Measurement, opt IteratorOptions) (Iterator, error) // Determines the potential cost for creating an iterator. - IteratorCost(source *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) + IteratorCost(ctx context.Context, source *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) } // IteratorOptions is an object passed to CreateIterator to specify creation options. type IteratorOptions struct { + // OrgID is the organization for which this query is being executed. + OrgID influxdb.ID + // Expression to iterate for. // This can be VarRef or a Call. Expr influxql.Expr @@ -681,14 +665,14 @@ func newIteratorOptionsStmt(stmt *influxql.SelectStatement, sopt SelectOptions) opt.Limit, opt.Offset = stmt.Limit, stmt.Offset opt.SLimit, opt.SOffset = stmt.SLimit, stmt.SOffset opt.MaxSeriesN = sopt.MaxSeriesN - opt.Authorizer = sopt.Authorizer + opt.OrgID = sopt.OrgID return opt, nil } func newIteratorOptionsSubstatement(ctx context.Context, stmt *influxql.SelectStatement, opt IteratorOptions) (IteratorOptions, error) { subOpt, err := newIteratorOptionsStmt(stmt, SelectOptions{ - Authorizer: opt.Authorizer, + OrgID: opt.OrgID, MaxSeriesN: opt.MaxSeriesN, }) if err != nil { @@ -702,7 +686,7 @@ func newIteratorOptionsSubstatement(ctx context.Context, stmt *influxql.SelectSt subOpt.EndTime = opt.EndTime } if !subOpt.Interval.IsZero() && subOpt.EndTime == influxql.MaxTime { - if now := ctx.Value("now"); now != nil { + if now := ctx.Value(nowKey); now != nil { subOpt.EndTime = now.(time.Time).UnixNano() } } @@ -1219,22 +1203,6 @@ func decodeIteratorStats(pb *internal.IteratorStats) IteratorStats { } } -func decodeIteratorTrace(ctx context.Context, data []byte) error { - pt := tracing.TraceFromContext(ctx) - if pt == nil { - return nil - } - - var ct tracing.Trace - if err := ct.UnmarshalBinary(data); err != nil { - return err - } - - pt.Merge(&ct) - 
- return nil -} - // IteratorCost contains statistics retrieved for explaining what potential // cost may be incurred by instantiating an iterator. type IteratorCost struct { @@ -1327,12 +1295,6 @@ type fastDedupeKey struct { values [2]interface{} } -type reverseStringSlice []string - -func (p reverseStringSlice) Len() int { return len(p) } -func (p reverseStringSlice) Less(i, j int) bool { return p[i] > p[j] } -func (p reverseStringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - func abs(v int64) int64 { sign := v >> 63 return (v ^ sign) - sign @@ -1371,33 +1333,6 @@ func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error { } } -func (enc *IteratorEncoder) EncodeTrace(trace *tracing.Trace) error { - data, err := trace.MarshalBinary() - if err != nil { - return err - } - - buf, err := proto.Marshal(&internal.Point{ - Name: proto.String(""), - Tags: proto.String(""), - Time: proto.Int64(0), - Nil: proto.Bool(false), - - Trace: data, - }) - if err != nil { - return err - } - - if err = binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { - return err - } - if _, err = enc.w.Write(buf); err != nil { - return err - } - return nil -} - // encode a stats object in the point stream. func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error { buf, err := proto.Marshal(&internal.Point{ diff --git a/influxql/query/mocks/ShardGroup.go b/influxql/query/mocks/ShardGroup.go new file mode 100644 index 0000000000..05bb7b7db7 --- /dev/null +++ b/influxql/query/mocks/ShardGroup.go @@ -0,0 +1,111 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/influxdata/idpe/influxql/query (interfaces: ShardGroup) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + query "github.com/influxdata/influxdb/v2/influxql/query" + influxql "github.com/influxdata/influxql" +) + +// MockShardGroup is a mock of ShardGroup interface +type MockShardGroup struct { + ctrl *gomock.Controller + recorder *MockShardGroupMockRecorder +} + +// MockShardGroupMockRecorder is the mock recorder for MockShardGroup +type MockShardGroupMockRecorder struct { + mock *MockShardGroup +} + +// NewMockShardGroup creates a new mock instance +func NewMockShardGroup(ctrl *gomock.Controller) *MockShardGroup { + mock := &MockShardGroup{ctrl: ctrl} + mock.recorder = &MockShardGroupMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockShardGroup) EXPECT() *MockShardGroupMockRecorder { + return m.recorder +} + +// Close mocks base method +func (m *MockShardGroup) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close +func (mr *MockShardGroupMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockShardGroup)(nil).Close)) +} + +// CreateIterator mocks base method +func (m *MockShardGroup) CreateIterator(arg0 context.Context, arg1 *influxql.Measurement, arg2 query.IteratorOptions) (query.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateIterator", arg0, arg1, arg2) + ret0, _ := ret[0].(query.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateIterator indicates an expected call of CreateIterator +func (mr *MockShardGroupMockRecorder) CreateIterator(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateIterator", reflect.TypeOf((*MockShardGroup)(nil).CreateIterator), 
arg0, arg1, arg2) +} + +// FieldDimensions mocks base method +func (m *MockShardGroup) FieldDimensions(arg0 context.Context, arg1 *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FieldDimensions", arg0, arg1) + ret0, _ := ret[0].(map[string]influxql.DataType) + ret1, _ := ret[1].(map[string]struct{}) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// FieldDimensions indicates an expected call of FieldDimensions +func (mr *MockShardGroupMockRecorder) FieldDimensions(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldDimensions", reflect.TypeOf((*MockShardGroup)(nil).FieldDimensions), arg0, arg1) +} + +// IteratorCost mocks base method +func (m *MockShardGroup) IteratorCost(arg0 context.Context, arg1 *influxql.Measurement, arg2 query.IteratorOptions) (query.IteratorCost, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IteratorCost", arg0, arg1, arg2) + ret0, _ := ret[0].(query.IteratorCost) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IteratorCost indicates an expected call of IteratorCost +func (mr *MockShardGroupMockRecorder) IteratorCost(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IteratorCost", reflect.TypeOf((*MockShardGroup)(nil).IteratorCost), arg0, arg1, arg2) +} + +// MapType mocks base method +func (m *MockShardGroup) MapType(arg0 context.Context, arg1 *influxql.Measurement, arg2 string) influxql.DataType { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MapType", arg0, arg1, arg2) + ret0, _ := ret[0].(influxql.DataType) + return ret0 +} + +// MapType indicates an expected call of MapType +func (mr *MockShardGroupMockRecorder) MapType(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MapType", 
reflect.TypeOf((*MockShardGroup)(nil).MapType), arg0, arg1, arg2) +} diff --git a/influxql/query/mocks/ShardMapper.go b/influxql/query/mocks/ShardMapper.go new file mode 100644 index 0000000000..d05757eae8 --- /dev/null +++ b/influxql/query/mocks/ShardMapper.go @@ -0,0 +1,52 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/influxdata/idpe/influxql/query (interfaces: ShardMapper) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + query "github.com/influxdata/influxdb/v2/influxql/query" + influxql "github.com/influxdata/influxql" +) + +// MockShardMapper is a mock of ShardMapper interface +type MockShardMapper struct { + ctrl *gomock.Controller + recorder *MockShardMapperMockRecorder +} + +// MockShardMapperMockRecorder is the mock recorder for MockShardMapper +type MockShardMapperMockRecorder struct { + mock *MockShardMapper +} + +// NewMockShardMapper creates a new mock instance +func NewMockShardMapper(ctrl *gomock.Controller) *MockShardMapper { + mock := &MockShardMapper{ctrl: ctrl} + mock.recorder = &MockShardMapperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockShardMapper) EXPECT() *MockShardMapperMockRecorder { + return m.recorder +} + +// MapShards mocks base method +func (m *MockShardMapper) MapShards(arg0 context.Context, arg1 influxql.Sources, arg2 influxql.TimeRange, arg3 query.SelectOptions) (query.ShardGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MapShards", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(query.ShardGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MapShards indicates an expected call of MapShards +func (mr *MockShardMapperMockRecorder) MapShards(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MapShards", 
reflect.TypeOf((*MockShardMapper)(nil).MapShards), arg0, arg1, arg2, arg3) +} diff --git a/influxql/query/mocks/StatementExecutor.go b/influxql/query/mocks/StatementExecutor.go new file mode 100644 index 0000000000..ad6b77bcc4 --- /dev/null +++ b/influxql/query/mocks/StatementExecutor.go @@ -0,0 +1,51 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/influxdata/idpe/influxql/query (interfaces: StatementExecutor) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + query "github.com/influxdata/influxdb/v2/influxql/query" + influxql "github.com/influxdata/influxql" +) + +// MockStatementExecutor is a mock of StatementExecutor interface +type MockStatementExecutor struct { + ctrl *gomock.Controller + recorder *MockStatementExecutorMockRecorder +} + +// MockStatementExecutorMockRecorder is the mock recorder for MockStatementExecutor +type MockStatementExecutorMockRecorder struct { + mock *MockStatementExecutor +} + +// NewMockStatementExecutor creates a new mock instance +func NewMockStatementExecutor(ctrl *gomock.Controller) *MockStatementExecutor { + mock := &MockStatementExecutor{ctrl: ctrl} + mock.recorder = &MockStatementExecutorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockStatementExecutor) EXPECT() *MockStatementExecutorMockRecorder { + return m.recorder +} + +// ExecuteStatement mocks base method +func (m *MockStatementExecutor) ExecuteStatement(arg0 context.Context, arg1 influxql.Statement, arg2 *query.ExecutionContext) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteStatement", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ExecuteStatement indicates an expected call of ExecuteStatement +func (mr *MockStatementExecutorMockRecorder) ExecuteStatement(arg0, arg1, arg2 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteStatement", reflect.TypeOf((*MockStatementExecutor)(nil).ExecuteStatement), arg0, arg1, arg2) +} diff --git a/influxql/query/monitor.go b/influxql/query/monitor.go deleted file mode 100644 index ea4aa27295..0000000000 --- a/influxql/query/monitor.go +++ /dev/null @@ -1,48 +0,0 @@ -package query - -import ( - "context" - "time" -) - -// MonitorFunc is a function that will be called to check if a query -// is currently healthy. If the query needs to be interrupted for some reason, -// the error should be returned by this function. -type MonitorFunc func(<-chan struct{}) error - -// Monitor monitors the status of a query and returns whether the query should -// be aborted with an error. -type Monitor interface { - // Monitor starts a new goroutine that will monitor a query. The function - // will be passed in a channel to signal when the query has been finished - // normally. If the function returns with an error and the query is still - // running, the query will be terminated. - Monitor(fn MonitorFunc) -} - -// MonitorFromContext returns a Monitor embedded within the Context -// if one exists. -func MonitorFromContext(ctx context.Context) Monitor { - v, _ := ctx.Value(monitorContextKey{}).(Monitor) - return v -} - -// PointLimitMonitor is a query monitor that exits when the number of points -// emitted exceeds a threshold. 
-func PointLimitMonitor(cur Cursor, interval time.Duration, limit int) MonitorFunc { - return func(closing <-chan struct{}) error { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - stats := cur.Stats() - if stats.PointN >= limit { - return ErrMaxSelectPointsLimitExceeded(stats.PointN, limit) - } - case <-closing: - return nil - } - } - } -} diff --git a/influxql/query/monitor_test.go b/influxql/query/monitor_test.go deleted file mode 100644 index 31b2a70daa..0000000000 --- a/influxql/query/monitor_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package query_test - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxql" -) - -func TestPointLimitMonitor(t *testing.T) { - t.Parallel() - - stmt := MustParseSelectStatement(`SELECT mean(value) FROM cpu`) - - // Create a new task manager so we can use the query task as a monitor. - taskManager := query.NewTaskManager() - ctx, detach, err := taskManager.AttachQuery(&influxql.Query{ - Statements: []influxql.Statement{stmt}, - }, query.ExecutionOptions{}, nil) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - defer detach() - - shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - return &FloatIterator{ - Points: []query.FloatPoint{ - {Name: "cpu", Value: 35}, - }, - Context: ctx, - Delay: 2 * time.Second, - stats: query.IteratorStats{ - PointN: 10, - }, - }, nil - }, - Fields: map[string]influxql.DataType{ - "value": influxql.Float, - }, - } - }, - } - - cur, err := query.Select(ctx, stmt, &shardMapper, query.SelectOptions{ - MaxPointN: 1, - }) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if err := query.DrainCursor(cur); err == nil { - t.Fatalf("expected an error") 
- } else if got, want := err.Error(), "max-select-point limit exceeed: (10/1)"; got != want { - t.Fatalf("unexpected error: got=%v want=%v", got, want) - } -} diff --git a/influxql/query/point.gen.go b/influxql/query/point.gen.go index 7c3159c3c9..25ef4a93e0 100644 --- a/influxql/query/point.gen.go +++ b/influxql/query/point.gen.go @@ -6,6 +6,8 @@ package query +//lint:file-ignore U1000 Ignore all unused code, it's generated + import ( "context" "encoding/binary" @@ -224,13 +226,6 @@ func (dec *FloatPointDecoder) DecodeFloatPoint(p *FloatPoint) error { continue } - if len(pb.Trace) > 0 { - if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil { - return err - } - continue - } - // Decode into point object. *p = *decodeFloatPoint(&pb) @@ -447,13 +442,6 @@ func (dec *IntegerPointDecoder) DecodeIntegerPoint(p *IntegerPoint) error { continue } - if len(pb.Trace) > 0 { - if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil { - return err - } - continue - } - // Decode into point object. *p = *decodeIntegerPoint(&pb) @@ -668,13 +656,6 @@ func (dec *UnsignedPointDecoder) DecodeUnsignedPoint(p *UnsignedPoint) error { continue } - if len(pb.Trace) > 0 { - if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil { - return err - } - continue - } - // Decode into point object. *p = *decodeUnsignedPoint(&pb) @@ -891,13 +872,6 @@ func (dec *StringPointDecoder) DecodeStringPoint(p *StringPoint) error { continue } - if len(pb.Trace) > 0 { - if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil { - return err - } - continue - } - // Decode into point object. *p = *decodeStringPoint(&pb) @@ -1114,13 +1088,6 @@ func (dec *BooleanPointDecoder) DecodeBooleanPoint(p *BooleanPoint) error { continue } - if len(pb.Trace) > 0 { - if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil { - return err - } - continue - } - // Decode into point object. 
*p = *decodeBooleanPoint(&pb) diff --git a/influxql/query/point.gen.go.tmpl b/influxql/query/point.gen.go.tmpl index d0a581bf71..91d6818dbf 100644 --- a/influxql/query/point.gen.go.tmpl +++ b/influxql/query/point.gen.go.tmpl @@ -1,5 +1,7 @@ package query +//lint:file-ignore U1000 Ignore all unused code, it's generated + import ( "context" "encoding/binary" @@ -231,13 +233,6 @@ func (dec *{{.Name}}PointDecoder) Decode{{.Name}}Point(p *{{.Name}}Point) error continue } - if len(pb.Trace) > 0 { - if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil { - return err - } - continue - } - // Decode into point object. *p = *decode{{.Name}}Point(&pb) diff --git a/influxql/query/proxy_executor.go b/influxql/query/proxy_executor.go new file mode 100644 index 0000000000..25c20db0ed --- /dev/null +++ b/influxql/query/proxy_executor.go @@ -0,0 +1,168 @@ +package query + +import ( + "context" + "io" + "strings" + "time" + + "github.com/influxdata/influxdb/v2" + iql "github.com/influxdata/influxdb/v2/influxql" + "github.com/influxdata/influxdb/v2/kit/check" + "github.com/influxdata/influxdb/v2/kit/tracing" + influxlogger "github.com/influxdata/influxdb/v2/logger" + "github.com/influxdata/influxql" + "github.com/opentracing/opentracing-go/log" + "go.uber.org/zap" +) + +type ProxyExecutor struct { + log *zap.Logger + executor *Executor +} + +func NewProxyExecutor(log *zap.Logger, executor *Executor) *ProxyExecutor { + return &ProxyExecutor{log: log, executor: executor} +} + +func (s *ProxyExecutor) Check(ctx context.Context) check.Response { + return check.Response{Name: "Query Service", Status: check.StatusPass} +} + +func (s *ProxyExecutor) Query(ctx context.Context, w io.Writer, req *iql.QueryRequest) (iql.Statistics, error) { + span, ctx := tracing.StartSpanFromContext(ctx) + defer span.Finish() + + logger := s.log.With(influxlogger.TraceFields(ctx)...) 
+ logger.Info("executing new query", zap.String("query", req.Query)) + + p := influxql.NewParser(strings.NewReader(req.Query)) + p.SetParams(req.Params) + q, err := p.ParseQuery() + if err != nil { + return iql.Statistics{}, &influxdb.Error{ + Code: influxdb.EInvalid, + Msg: "failed to parse query", + Err: err, + } + } + + span.LogFields(log.String("query", q.String())) + + opts := ExecutionOptions{ + OrgID: req.OrganizationID, + Database: req.DB, + RetentionPolicy: req.RP, + ChunkSize: req.ChunkSize, + ReadOnly: true, + Authorizer: OpenAuthorizer, + } + + epoch := req.Epoch + rw := NewResponseWriter(req.EncodingFormat) + + results, stats := s.executor.ExecuteQuery(ctx, q, opts) + if req.Chunked { + for r := range results { + // Ignore nil results. + if r == nil { + continue + } + + // if requested, convert result timestamps to epoch + if epoch != "" { + convertToEpoch(r, epoch) + } + + err = rw.WriteResponse(ctx, w, Response{Results: []*Result{r}}) + if err != nil { + break + } + } + } else { + resp := Response{Results: GatherResults(results, epoch)} + err = rw.WriteResponse(ctx, w, resp) + } + + return *stats, err +} + +// GatherResults consumes the results from the given channel and organizes them correctly. +// Results for various statements need to be combined together. +func GatherResults(ch <-chan *Result, epoch string) []*Result { + var results []*Result + for r := range ch { + // Ignore nil results. + if r == nil { + continue + } + + // if requested, convert result timestamps to epoch + if epoch != "" { + convertToEpoch(r, epoch) + } + + // It's not chunked so buffer results in memory. + // Results for statements need to be combined together. + // We need to check if this new result is for the same statement as + // the last result, or for the next statement. 
+ if l := len(results); l > 0 && results[l-1].StatementID == r.StatementID { + if r.Err != nil { + results[l-1] = r + continue + } + + cr := results[l-1] + rowsMerged := 0 + if len(cr.Series) > 0 { + lastSeries := cr.Series[len(cr.Series)-1] + + for _, row := range r.Series { + if !lastSeries.SameSeries(row) { + // Next row is for a different series than last. + break + } + // Values are for the same series, so append them. + lastSeries.Values = append(lastSeries.Values, row.Values...) + lastSeries.Partial = row.Partial + rowsMerged++ + } + } + + // Append remaining rows as new rows. + r.Series = r.Series[rowsMerged:] + cr.Series = append(cr.Series, r.Series...) + cr.Messages = append(cr.Messages, r.Messages...) + cr.Partial = r.Partial + } else { + results = append(results, r) + } + } + return results +} + +// convertToEpoch converts result timestamps from time.Time to the specified epoch. +func convertToEpoch(r *Result, epoch string) { + divisor := int64(1) + + switch epoch { + case "u": + divisor = int64(time.Microsecond) + case "ms": + divisor = int64(time.Millisecond) + case "s": + divisor = int64(time.Second) + case "m": + divisor = int64(time.Minute) + case "h": + divisor = int64(time.Hour) + } + + for _, s := range r.Series { + for _, v := range s.Values { + if ts, ok := v[0].(time.Time); ok { + v[0] = ts.UnixNano() / divisor + } + } + } +} diff --git a/influxql/query/response.go b/influxql/query/response.go new file mode 100644 index 0000000000..80ffa785c2 --- /dev/null +++ b/influxql/query/response.go @@ -0,0 +1,61 @@ +package query + +import ( + "encoding/json" + "errors" +) + +// Response represents a list of statement results. +type Response struct { + Results []*Result + Err error +} + +// MarshalJSON encodes a Response struct into JSON. +func (r Response) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. 
+ var o struct { + Results []*Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Results = r.Results + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Response struct. +func (r *Response) UnmarshalJSON(b []byte) error { + var o struct { + Results []*Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + err := json.Unmarshal(b, &o) + if err != nil { + return err + } + r.Results = o.Results + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != nil { + return r.Err + } + for _, rr := range r.Results { + if rr.Err != nil { + return rr.Err + } + } + return nil +} diff --git a/influxql/query/response_writer.go b/influxql/query/response_writer.go new file mode 100644 index 0000000000..b62fa92e62 --- /dev/null +++ b/influxql/query/response_writer.go @@ -0,0 +1,439 @@ +package query + +//lint:file-ignore SA1019 Ignore for now + +import ( + "context" + "encoding/csv" + "encoding/json" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/influxdata/influxdb/v2/influxql" + "github.com/influxdata/influxdb/v2/kit/tracing" + "github.com/influxdata/influxdb/v2/models" + "github.com/tinylib/msgp/msgp" +) + +// ResponseWriter is an interface for writing a response. +type ResponseWriter interface { + // WriteResponse writes a response. + WriteResponse(ctx context.Context, w io.Writer, resp Response) error +} + +// NewResponseWriter creates a new ResponseWriter based on the Accept header +// in the request that wraps the ResponseWriter. 
+func NewResponseWriter(encoding influxql.EncodingFormat) ResponseWriter { + switch encoding { + case influxql.EncodingFormatCSV: + return &csvFormatter{statementID: -1} + case influxql.EncodingFormatTable: + return &textFormatter{} + case influxql.EncodingFormatMessagePack: + return &msgpFormatter{} + case influxql.EncodingFormatJSON: + fallthrough + default: + // TODO(sgc): Add EncodingFormatJSONPretty + return &jsonFormatter{Pretty: false} + } +} + +type jsonFormatter struct { + Pretty bool +} + +func (f *jsonFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) { + span, _ := tracing.StartSpanFromContext(ctx) + defer span.Finish() + + var b []byte + if f.Pretty { + b, err = json.MarshalIndent(resp, "", " ") + } else { + b, err = json.Marshal(resp) + } + + if err != nil { + _, err = io.WriteString(w, err.Error()) + } else { + _, err = w.Write(b) + } + + w.Write([]byte("\n")) + return err +} + +type csvFormatter struct { + statementID int + columns []string +} + +func (f *csvFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) { + span, _ := tracing.StartSpanFromContext(ctx) + defer span.Finish() + + wr := csv.NewWriter(w) + if resp.Err != nil { + wr.Write([]string{"error"}) + wr.Write([]string{resp.Err.Error()}) + wr.Flush() + return wr.Error() + } + + for _, result := range resp.Results { + if result.StatementID != f.statementID { + // If there are no series in the result, skip past this result. + if len(result.Series) == 0 { + continue + } + + // Set the statement id and print out a newline if this is not the first statement. + if f.statementID >= 0 { + // Flush the csv writer and write a newline. + wr.Flush() + if err := wr.Error(); err != nil { + return err + } + + if _, err := io.WriteString(w, "\n"); err != nil { + return err + } + } + f.statementID = result.StatementID + + // Print out the column headers from the first series. 
+ f.columns = make([]string, 2+len(result.Series[0].Columns)) + f.columns[0] = "name" + f.columns[1] = "tags" + copy(f.columns[2:], result.Series[0].Columns) + if err := wr.Write(f.columns); err != nil { + return err + } + } + + for i, row := range result.Series { + if i > 0 && !stringsEqual(result.Series[i-1].Columns, row.Columns) { + // The columns have changed. Print a newline and reprint the header. + wr.Flush() + if err := wr.Error(); err != nil { + return err + } + + if _, err := io.WriteString(w, "\n"); err != nil { + return err + } + + f.columns = make([]string, 2+len(row.Columns)) + f.columns[0] = "name" + f.columns[1] = "tags" + copy(f.columns[2:], row.Columns) + if err := wr.Write(f.columns); err != nil { + return err + } + } + + f.columns[0] = row.Name + f.columns[1] = "" + if len(row.Tags) > 0 { + hashKey := models.NewTags(row.Tags).HashKey() + if len(hashKey) > 0 { + f.columns[1] = string(hashKey[1:]) + } + } + for _, values := range row.Values { + for i, value := range values { + if value == nil { + f.columns[i+2] = "" + continue + } + + switch v := value.(type) { + case float64: + f.columns[i+2] = strconv.FormatFloat(v, 'f', -1, 64) + case int64: + f.columns[i+2] = strconv.FormatInt(v, 10) + case uint64: + f.columns[i+2] = strconv.FormatUint(v, 10) + case string: + f.columns[i+2] = v + case bool: + if v { + f.columns[i+2] = "true" + } else { + f.columns[i+2] = "false" + } + case time.Time: + f.columns[i+2] = strconv.FormatInt(v.UnixNano(), 10) + case *float64, *int64, *string, *bool: + f.columns[i+2] = "" + } + } + wr.Write(f.columns) + } + } + } + wr.Flush() + return wr.Error() +} + +type msgpFormatter struct{} + +func (f *msgpFormatter) ContentType() string { + return "application/x-msgpack" +} + +func (f *msgpFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) { + span, _ := tracing.StartSpanFromContext(ctx) + defer span.Finish() + + enc := msgp.NewWriter(w) + defer enc.Flush() + + enc.WriteMapHeader(1) + if 
resp.Err != nil { + enc.WriteString("error") + enc.WriteString(resp.Err.Error()) + return nil + } else { + enc.WriteString("results") + enc.WriteArrayHeader(uint32(len(resp.Results))) + for _, result := range resp.Results { + if result.Err != nil { + enc.WriteMapHeader(1) + enc.WriteString("error") + enc.WriteString(result.Err.Error()) + continue + } + + sz := 2 + if len(result.Messages) > 0 { + sz++ + } + if result.Partial { + sz++ + } + enc.WriteMapHeader(uint32(sz)) + enc.WriteString("statement_id") + enc.WriteInt(result.StatementID) + if len(result.Messages) > 0 { + enc.WriteString("messages") + enc.WriteArrayHeader(uint32(len(result.Messages))) + for _, msg := range result.Messages { + enc.WriteMapHeader(2) + enc.WriteString("level") + enc.WriteString(msg.Level) + enc.WriteString("text") + enc.WriteString(msg.Text) + } + } + enc.WriteString("series") + enc.WriteArrayHeader(uint32(len(result.Series))) + for _, series := range result.Series { + sz := 2 + if series.Name != "" { + sz++ + } + if len(series.Tags) > 0 { + sz++ + } + if series.Partial { + sz++ + } + enc.WriteMapHeader(uint32(sz)) + if series.Name != "" { + enc.WriteString("name") + enc.WriteString(series.Name) + } + if len(series.Tags) > 0 { + enc.WriteString("tags") + enc.WriteMapHeader(uint32(len(series.Tags))) + for k, v := range series.Tags { + enc.WriteString(k) + enc.WriteString(v) + } + } + enc.WriteString("columns") + enc.WriteArrayHeader(uint32(len(series.Columns))) + for _, col := range series.Columns { + enc.WriteString(col) + } + enc.WriteString("values") + enc.WriteArrayHeader(uint32(len(series.Values))) + for _, values := range series.Values { + enc.WriteArrayHeader(uint32(len(values))) + for _, v := range values { + enc.WriteIntf(v) + } + } + if series.Partial { + enc.WriteString("partial") + enc.WriteBool(series.Partial) + } + } + if result.Partial { + enc.WriteString("partial") + enc.WriteBool(true) + } + } + } + return nil +} + +func stringsEqual(a, b []string) bool { + if len(a) != 
len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func tagsEqual(prev, current map[string]string) bool { + return reflect.DeepEqual(prev, current) +} + +func columnsEqual(prev, current []string) bool { + return reflect.DeepEqual(prev, current) +} + +func headersEqual(prev, current models.Row) bool { + if prev.Name != current.Name { + return false + } + return tagsEqual(prev.Tags, current.Tags) && columnsEqual(prev.Columns, current.Columns) +} + +type textFormatter struct{} + +func (f *textFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) { + span, _ := tracing.StartSpanFromContext(ctx) + defer span.Finish() + + if err := resp.Error(); err != nil { + fmt.Fprintln(w, err.Error()) + return nil + } + // Create a tabbed writer for each result as they won't always line up + writer := new(tabwriter.Writer) + writer.Init(w, 0, 8, 1, ' ', 0) + + var previousHeaders models.Row + for i, result := range resp.Results { + // Print out all messages first + for _, m := range result.Messages { + fmt.Fprintf(w, "%s: %s.\n", m.Level, m.Text) + } + // Check to see if the headers are the same as the previous row. If so, suppress them in the output + suppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, *result.Series[0]) + if !suppressHeaders && len(result.Series) > 0 { + previousHeaders = models.Row{ + Name: result.Series[0].Name, + Tags: result.Series[0].Tags, + Columns: result.Series[0].Columns, + } + } + + // If we are suppressing headers, don't output the extra line return. If we + // aren't suppressing headers, then we put out line returns between results + // (not before the first result, and not after the last result). 
+ if !suppressHeaders && i > 0 { + fmt.Fprintln(writer, "") + } + + rows := f.formatResults(result.Series, "\t", suppressHeaders) + for _, r := range rows { + fmt.Fprintln(writer, r) + } + + } + _ = writer.Flush() + return nil +} + +func (f *textFormatter) formatResults(result models.Rows, separator string, suppressHeaders bool) []string { + var rows []string + // Create a tabbed writer for each result as they won't always line up + for i, row := range result { + // gather tags + var tags []string + for k, v := range row.Tags { + tags = append(tags, fmt.Sprintf("%s=%s", k, v)) + sort.Strings(tags) + } + + var columnNames []string + + columnNames = append(columnNames, row.Columns...) + + // Output a line separator if we have more than one set or results and format is column + if i > 0 && !suppressHeaders { + rows = append(rows, "") + } + + // If we are column format, we break out the name/tag to separate lines + if !suppressHeaders { + if row.Name != "" { + n := fmt.Sprintf("name: %s", row.Name) + rows = append(rows, n) + } + if len(tags) > 0 { + t := fmt.Sprintf("tags: %s", strings.Join(tags, ", ")) + rows = append(rows, t) + } + } + + if !suppressHeaders { + rows = append(rows, strings.Join(columnNames, separator)) + } + + // if format is column, write dashes under each column + if !suppressHeaders { + var lines []string + for _, columnName := range columnNames { + lines = append(lines, strings.Repeat("-", len(columnName))) + } + rows = append(rows, strings.Join(lines, separator)) + } + + for _, v := range row.Values { + var values []string + + for _, vv := range v { + values = append(values, interfaceToString(vv)) + } + rows = append(rows, strings.Join(values, separator)) + } + } + return rows +} + +func interfaceToString(v interface{}) string { + switch t := v.(type) { + case nil: + return "" + case bool: + return fmt.Sprintf("%v", v) + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr: + return fmt.Sprintf("%d", t) + case 
float32: + // Default for floats via `fmt.Sprintf("%v", t)` is to represent them in scientific notation. + // We want to represent them as they are, with the least digits as possible (prec: -1). + return strconv.FormatFloat(float64(t), 'f', -1, 32) + case float64: + // Default for floats via `fmt.Sprintf("%v", t)` is to represent them in scientific notation. + // We want to represent them as they are, with the least digits as possible (prec: -1). + return strconv.FormatFloat(t, 'f', -1, 64) + default: + return fmt.Sprintf("%v", t) + } +} diff --git a/influxql/query/select.go b/influxql/query/select.go index ec1de53362..86b2f30f72 100644 --- a/influxql/query/select.go +++ b/influxql/query/select.go @@ -5,12 +5,12 @@ import ( "fmt" "io" "sort" - "strings" "sync" "time" + "github.com/influxdata/influxdb/v2" + iql "github.com/influxdata/influxdb/v2/influxql" "github.com/influxdata/influxdb/v2/influxql/query/internal/gota" - "github.com/influxdata/influxdb/v2/pkg/tracing" "github.com/influxdata/influxql" "golang.org/x/sync/errgroup" ) @@ -22,8 +22,8 @@ var DefaultTypeMapper = influxql.MultiTypeMapper( // SelectOptions are options that customize the select call. type SelectOptions struct { - // Authorizer is used to limit access to data - Authorizer Authorizer + // OrgID is the organization for which this query is being executed. + OrgID influxdb.ID // Node to exclusively read from. // If zero, all nodes are used. @@ -39,12 +39,45 @@ type SelectOptions struct { // Maximum number of buckets for a statement. MaxBucketsN int + + // StatisticsGatherer gathers metrics about the execution of the query. + StatisticsGatherer *iql.StatisticsGatherer } // ShardMapper retrieves and maps shards into an IteratorCreator that can later be // used for executing queries. 
type ShardMapper interface { - MapShards(sources influxql.Sources, t influxql.TimeRange, opt SelectOptions) (ShardGroup, error) + MapShards(ctx context.Context, sources influxql.Sources, t influxql.TimeRange, opt SelectOptions) (ShardGroup, error) +} + +// TypeMapper maps a data type to the measurement and field. +type TypeMapper interface { + MapType(ctx context.Context, m *influxql.Measurement, field string) influxql.DataType +} + +// FieldMapper returns the data type for the field inside of the measurement. +type FieldMapper interface { + TypeMapper + FieldDimensions(ctx context.Context, m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) +} + +// contextFieldMapper adapts a FieldMapper to an influxql.FieldMapper as +// FieldMapper requires a context.Context and orgID +type fieldMapperAdapter struct { + fm FieldMapper + ctx context.Context +} + +func newFieldMapperAdapter(fm FieldMapper, ctx context.Context) *fieldMapperAdapter { + return &fieldMapperAdapter{fm: fm, ctx: ctx} +} + +func (c *fieldMapperAdapter) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { + return c.fm.FieldDimensions(c.ctx, m) +} + +func (c *fieldMapperAdapter) MapType(measurement *influxql.Measurement, field string) influxql.DataType { + return c.fm.MapType(c.ctx, measurement, field) } // ShardGroup represents a shard or a collection of shards that can be accessed @@ -58,7 +91,7 @@ type ShardMapper interface { // after creating the iterators, but before the iterators are actually read. type ShardGroup interface { IteratorCreator - influxql.FieldMapper + FieldMapper io.Closer } @@ -68,7 +101,7 @@ type PreparedStatement interface { Select(ctx context.Context) (Cursor, error) // Explain outputs the explain plan for this statement. 
- Explain() (string, error) + Explain(ctx context.Context) (string, error) // Close closes the resources associated with this prepared statement. // This must be called as the mapped shards may hold open resources such @@ -78,18 +111,18 @@ type PreparedStatement interface { // Prepare will compile the statement with the default compile options and // then prepare the query. -func Prepare(stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) { +func Prepare(ctx context.Context, stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) { c, err := Compile(stmt, CompileOptions{}) if err != nil { return nil, err } - return c.Prepare(shardMapper, opt) + return c.Prepare(ctx, shardMapper, opt) } // Select compiles, prepares, and then initiates execution of the query using the // default compile options. func Select(ctx context.Context, stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (Cursor, error) { - s, err := Prepare(stmt, shardMapper, opt) + s, err := Prepare(ctx, stmt, shardMapper, opt) if err != nil { return nil, err } @@ -110,11 +143,15 @@ type preparedStatement struct { now time.Time } +type contextKey string + +const nowKey contextKey = "now" + func (p *preparedStatement) Select(ctx context.Context) (Cursor, error) { // TODO(jsternberg): Remove this hacky method of propagating now. // Each level of the query should use a time range discovered during // compilation, but that requires too large of a refactor at the moment. - ctx = context.WithValue(ctx, "now", p.now) + ctx = context.WithValue(ctx, nowKey, p.now) opt := p.opt opt.InterruptCh = ctx.Done() @@ -123,14 +160,6 @@ func (p *preparedStatement) Select(ctx context.Context) (Cursor, error) { return nil, err } - // If a monitor exists and we are told there is a maximum number of points, - // register the monitor function. 
- if m := MonitorFromContext(ctx); m != nil { - if p.maxPointN > 0 { - monitor := PointLimitMonitor(cur, DefaultStatsInterval, p.maxPointN) - m.Monitor(monitor) - } - } return cur, nil } @@ -246,7 +275,7 @@ func (b *exprIteratorBuilder) buildCallIterator(ctx context.Context, expr *influ h := expr.Args[1].(*influxql.IntegerLiteral) m := expr.Args[2].(*influxql.IntegerLiteral) - includeFitData := "holt_winters_with_fit" == expr.Name + includeFitData := expr.Name == "holt_winters_with_fit" interval := opt.Interval.Duration // Redefine interval to be unbounded to capture all aggregate results @@ -621,15 +650,6 @@ func (b *exprIteratorBuilder) callIterator(ctx context.Context, expr *influxql.C } func buildCursor(ctx context.Context, stmt *influxql.SelectStatement, ic IteratorCreator, opt IteratorOptions) (Cursor, error) { - span := tracing.SpanFromContext(ctx) - if span != nil { - span = span.StartSpan("build_cursor") - defer span.Finish() - - span.SetLabels("statement", stmt.String()) - ctx = tracing.NewContextWithSpan(ctx, span) - } - switch opt.Fill { case influxql.NumberFill: if v, ok := opt.FillValue.(int); ok { @@ -777,19 +797,6 @@ func buildCursor(ctx context.Context, stmt *influxql.SelectStatement, ic Iterato } func buildAuxIterator(ctx context.Context, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions) (Iterator, error) { - span := tracing.SpanFromContext(ctx) - if span != nil { - span = span.StartSpan("iterator_scanner") - defer span.Finish() - - auxFieldNames := make([]string, len(opt.Aux)) - for i, ref := range opt.Aux { - auxFieldNames[i] = ref.String() - } - span.SetLabels("auxiliary_fields", strings.Join(auxFieldNames, ", ")) - ctx = tracing.NewContextWithSpan(ctx, span) - } - inputs := make([]Iterator, 0, len(sources)) if err := func() error { for _, source := range sources { @@ -850,23 +857,6 @@ func buildAuxIterator(ctx context.Context, ic IteratorCreator, sources influxql. 
} func buildFieldIterator(ctx context.Context, expr influxql.Expr, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) (Iterator, error) { - span := tracing.SpanFromContext(ctx) - if span != nil { - span = span.StartSpan("iterator_scanner") - defer span.Finish() - - labels := []string{"expr", expr.String()} - if len(opt.Aux) > 0 { - auxFieldNames := make([]string, len(opt.Aux)) - for i, ref := range opt.Aux { - auxFieldNames[i] = ref.String() - } - labels = append(labels, "auxiliary_fields", strings.Join(auxFieldNames, ", ")) - } - span.SetLabels(labels...) - ctx = tracing.NewContextWithSpan(ctx, span) - } - input, err := buildExprIterator(ctx, expr, ic, sources, opt, selector, writeMode) if err != nil { return nil, err diff --git a/influxql/query/select_test.go b/influxql/query/select_test.go index 2032caa830..5bdb428df1 100644 --- a/influxql/query/select_test.go +++ b/influxql/query/select_test.go @@ -2837,7 +2837,7 @@ func TestSelect(t *testing.T) { } { t.Run(tt.name, func(t *testing.T) { shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { var fields map[string]influxql.DataType if tt.typ != influxql.Unknown { fields = map[string]influxql.DataType{"value": tt.typ} @@ -2881,7 +2881,7 @@ func TestSelect(t *testing.T) { return nil, err } - p, err := c.Prepare(&shardMapper, query.SelectOptions{}) + p, err := c.Prepare(context.Background(), &shardMapper, query.SelectOptions{}) if err != nil { return nil, err } @@ -2907,7 +2907,7 @@ func TestSelect(t *testing.T) { // Ensure a SELECT with raw fields works for all types. 
func TestSelect_Raw(t *testing.T) { shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { return &ShardGroup{ Fields: map[string]influxql.DataType{ "f": influxql.Float, @@ -2979,7 +2979,7 @@ func TestSelect_Raw(t *testing.T) { // Ensure a SELECT binary expr queries can be executed as floats. func TestSelect_BinaryExpr(t *testing.T) { shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { return &ShardGroup{ Fields: map[string]influxql.DataType{ "f": influxql.Float, @@ -3870,7 +3870,7 @@ func TestSelect_BinaryExpr(t *testing.T) { // Ensure a SELECT binary expr queries can be executed as booleans. func TestSelect_BinaryExpr_Boolean(t *testing.T) { shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { return &ShardGroup{ Fields: map[string]influxql.DataType{ "one": influxql.Boolean, @@ -3950,7 +3950,7 @@ func TestSelect_BinaryExpr_Boolean(t *testing.T) { // but not the other. 
func TestSelect_BinaryExpr_NilValues(t *testing.T) { shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { return &ShardGroup{ Fields: map[string]influxql.DataType{ "total": influxql.Float, @@ -4028,11 +4028,11 @@ func TestSelect_BinaryExpr_NilValues(t *testing.T) { } type ShardMapper struct { - MapShardsFn func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup + MapShardsFn func(ctx context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup } -func (m *ShardMapper) MapShards(sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) { - shards := m.MapShardsFn(sources, t) +func (m *ShardMapper) MapShards(ctx context.Context, sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) { + shards := m.MapShardsFn(ctx, sources, t) return shards, nil } @@ -4046,11 +4046,11 @@ func (sh *ShardGroup) CreateIterator(ctx context.Context, m *influxql.Measuremen return sh.CreateIteratorFn(ctx, m, opt) } -func (sh *ShardGroup) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { +func (sh *ShardGroup) IteratorCost(ctx context.Context, source *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { return query.IteratorCost{}, nil } -func (sh *ShardGroup) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { +func (sh *ShardGroup) FieldDimensions(ctx context.Context, m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { fields = make(map[string]influxql.DataType) dimensions = make(map[string]struct{}) @@ -4063,7 +4063,7 @@ func (sh *ShardGroup) FieldDimensions(m *influxql.Measurement) (fields map[strin return 
fields, dimensions, nil } -func (sh *ShardGroup) MapType(m *influxql.Measurement, field string) influxql.DataType { +func (sh *ShardGroup) MapType(ctx context.Context, measurement *influxql.Measurement, field string) influxql.DataType { if typ, ok := sh.Fields[field]; ok { return typ } @@ -4101,7 +4101,7 @@ func benchmarkSelect(b *testing.B, stmt *influxql.SelectStatement, shardMapper q // NewRawBenchmarkIteratorCreator returns a new mock iterator creator with generated fields. func NewRawBenchmarkIteratorCreator(pointN int) query.ShardMapper { return &ShardMapper{ - MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { return &ShardGroup{ Fields: map[string]influxql.DataType{ "fval": influxql.Float, @@ -4140,7 +4140,7 @@ func benchmarkSelectDedupe(b *testing.B, seriesN, pointsPerSeries int) { stmt.Dedupe = true shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { return &ShardGroup{ Fields: map[string]influxql.DataType{ "sval": influxql.String, @@ -4174,7 +4174,7 @@ func benchmarkSelectTop(b *testing.B, seriesN, pointsPerSeries int) { stmt := MustParseSelectStatement(`SELECT top(sval, 10) FROM cpu`) shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { return &ShardGroup{ Fields: map[string]influxql.DataType{ "sval": influxql.Float, diff --git a/influxql/query/subquery_test.go b/influxql/query/subquery_test.go index 93fb7f9bf7..ac572ec5f3 100644 --- a/influxql/query/subquery_test.go +++ b/influxql/query/subquery_test.go @@ -7,7 +7,6 @@ import ( "github.com/google/go-cmp/cmp" 
"github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/models" "github.com/influxdata/influxql" ) @@ -367,7 +366,7 @@ func TestSubquery(t *testing.T) { } { t.Run(test.Name, func(t *testing.T) { shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { fn := test.MapShardsFn(t, tr) return &ShardGroup{ Fields: test.Fields, @@ -392,50 +391,10 @@ func TestSubquery(t *testing.T) { } } -type openAuthorizer struct{} - -func (*openAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool { return true } -func (*openAuthorizer) AuthorizeQuery(database string, query *influxql.Query) error { return nil } -func (*openAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { - return true -} -func (*openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { - return true -} - -// Ensure that the subquery gets passed the query authorizer. -func TestSubquery_Authorizer(t *testing.T) { - auth := &openAuthorizer{} - shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - Fields: map[string]influxql.DataType{ - "value": influxql.Float, - }, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if opt.Authorizer != auth { - t.Errorf("query authorizer has not been set") - } - return nil, nil - }, - } - }, - } - - stmt := MustParseSelectStatement(`SELECT max(value) FROM (SELECT value FROM cpu)`) - cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{ - Authorizer: auth, - }) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - cur.Close() -} - // Ensure that the subquery gets passed the max series limit. 
func TestSubquery_MaxSeriesN(t *testing.T) { shardMapper := ShardMapper{ - MapShardsFn: func(sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { + MapShardsFn: func(_ context.Context, sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { return &ShardGroup{ Fields: map[string]influxql.DataType{ "value": influxql.Float, diff --git a/influxql/query/task_manager.go b/influxql/query/task_manager.go deleted file mode 100644 index e860c32ea3..0000000000 --- a/influxql/query/task_manager.go +++ /dev/null @@ -1,319 +0,0 @@ -package query - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" - "go.uber.org/zap" -) - -const ( - // DefaultQueryTimeout is the default timeout for executing a query. - // A value of zero will have no query timeout. - DefaultQueryTimeout = time.Duration(0) -) - -type TaskStatus int - -const ( - // RunningTask is set when the task is running. - RunningTask TaskStatus = iota + 1 - - // KilledTask is set when the task is killed, but resources are still - // being used. - KilledTask -) - -func (t TaskStatus) String() string { - switch t { - case RunningTask: - return "running" - case KilledTask: - return "killed" - default: - return "unknown" - } -} - -func (t TaskStatus) MarshalJSON() ([]byte, error) { - s := t.String() - return json.Marshal(s) -} - -func (t *TaskStatus) UnmarshalJSON(data []byte) error { - if bytes.Equal(data, []byte("running")) { - *t = RunningTask - } else if bytes.Equal(data, []byte("killed")) { - *t = KilledTask - } else if bytes.Equal(data, []byte("unknown")) { - *t = TaskStatus(0) - } else { - return fmt.Errorf("unknown task status: %s", string(data)) - } - return nil -} - -// TaskManager takes care of all aspects related to managing running queries. -type TaskManager struct { - // Query execution timeout. - QueryTimeout time.Duration - - // Log queries if they are slower than this time. 
- // If zero, slow queries will never be logged. - LogQueriesAfter time.Duration - - // Maximum number of concurrent queries. - MaxConcurrentQueries int - - // Logger to use for all logging. - // Defaults to discarding all log output. - Logger *zap.Logger - - // Used for managing and tracking running queries. - queries map[uint64]*Task - nextID uint64 - mu sync.RWMutex - shutdown bool -} - -// NewTaskManager creates a new TaskManager. -func NewTaskManager() *TaskManager { - return &TaskManager{ - QueryTimeout: DefaultQueryTimeout, - Logger: zap.NewNop(), - queries: make(map[uint64]*Task), - nextID: 1, - } -} - -// ExecuteStatement executes a statement containing one of the task management queries. -func (t *TaskManager) ExecuteStatement(stmt influxql.Statement, ctx *ExecutionContext) error { - switch stmt := stmt.(type) { - case *influxql.ShowQueriesStatement: - rows, err := t.executeShowQueriesStatement(stmt) - if err != nil { - return err - } - - ctx.Send(&Result{ - Series: rows, - }) - case *influxql.KillQueryStatement: - var messages []*Message - if ctx.ReadOnly { - messages = append(messages, ReadOnlyWarning(stmt.String())) - } - - if err := t.executeKillQueryStatement(stmt); err != nil { - return err - } - ctx.Send(&Result{ - Messages: messages, - }) - default: - return ErrInvalidQuery - } - return nil -} - -func (t *TaskManager) executeKillQueryStatement(stmt *influxql.KillQueryStatement) error { - return t.KillQuery(stmt.QueryID) -} - -func (t *TaskManager) executeShowQueriesStatement(q *influxql.ShowQueriesStatement) (models.Rows, error) { - t.mu.RLock() - defer t.mu.RUnlock() - - now := time.Now() - - values := make([][]interface{}, 0, len(t.queries)) - for id, qi := range t.queries { - d := now.Sub(qi.startTime) - - switch { - case d >= time.Second: - d = d - (d % time.Second) - case d >= time.Millisecond: - d = d - (d % time.Millisecond) - case d >= time.Microsecond: - d = d - (d % time.Microsecond) - } - - values = append(values, []interface{}{id, 
qi.query, qi.database, d.String(), qi.status.String()}) - } - - return []*models.Row{{ - Columns: []string{"qid", "query", "database", "duration", "status"}, - Values: values, - }}, nil -} - -func (t *TaskManager) queryError(qid uint64, err error) { - t.mu.RLock() - query := t.queries[qid] - t.mu.RUnlock() - if query != nil { - query.setError(err) - } -} - -// AttachQuery attaches a running query to be managed by the TaskManager. -// Returns the query id of the newly attached query or an error if it was -// unable to assign a query id or attach the query to the TaskManager. -// This function also returns a channel that will be closed when this -// query finishes running. -// -// After a query finishes running, the system is free to reuse a query id. -func (t *TaskManager) AttachQuery(q *influxql.Query, opt ExecutionOptions, interrupt <-chan struct{}) (*ExecutionContext, func(), error) { - t.mu.Lock() - defer t.mu.Unlock() - - if t.shutdown { - return nil, nil, ErrQueryEngineShutdown - } - - if t.MaxConcurrentQueries > 0 && len(t.queries) >= t.MaxConcurrentQueries { - return nil, nil, ErrMaxConcurrentQueriesLimitExceeded(len(t.queries), t.MaxConcurrentQueries) - } - - qid := t.nextID - query := &Task{ - query: q.String(), - database: opt.Database, - status: RunningTask, - startTime: time.Now(), - closing: make(chan struct{}), - monitorCh: make(chan error), - } - t.queries[qid] = query - - go t.waitForQuery(qid, query.closing, interrupt, query.monitorCh) - if t.LogQueriesAfter != 0 { - go query.monitor(func(closing <-chan struct{}) error { - timer := time.NewTimer(t.LogQueriesAfter) - defer timer.Stop() - - select { - case <-timer.C: - t.Logger.Warn(fmt.Sprintf("Detected slow query: %s (qid: %d, database: %s, threshold: %s)", - query.query, qid, query.database, t.LogQueriesAfter)) - case <-closing: - } - return nil - }) - } - t.nextID++ - - ctx := &ExecutionContext{ - Context: context.Background(), - QueryID: qid, - task: query, - ExecutionOptions: opt, - } - 
ctx.watch() - return ctx, func() { t.DetachQuery(qid) }, nil -} - -// KillQuery enters a query into the killed state and closes the channel -// from the TaskManager. This method can be used to forcefully terminate a -// running query. -func (t *TaskManager) KillQuery(qid uint64) error { - t.mu.Lock() - query := t.queries[qid] - t.mu.Unlock() - - if query == nil { - return fmt.Errorf("no such query id: %d", qid) - } - return query.kill() -} - -// DetachQuery removes a query from the query table. If the query is not in the -// killed state, this will also close the related channel. -func (t *TaskManager) DetachQuery(qid uint64) error { - t.mu.Lock() - defer t.mu.Unlock() - - query := t.queries[qid] - if query == nil { - return fmt.Errorf("no such query id: %d", qid) - } - - query.close() - delete(t.queries, qid) - return nil -} - -// QueryInfo represents the information for a query. -type QueryInfo struct { - ID uint64 `json:"id"` - Query string `json:"query"` - Database string `json:"database"` - Duration time.Duration `json:"duration"` - Status TaskStatus `json:"status"` -} - -// Queries returns a list of all running queries with information about them. 
-func (t *TaskManager) Queries() []QueryInfo { - t.mu.RLock() - defer t.mu.RUnlock() - - now := time.Now() - queries := make([]QueryInfo, 0, len(t.queries)) - for id, qi := range t.queries { - queries = append(queries, QueryInfo{ - ID: id, - Query: qi.query, - Database: qi.database, - Duration: now.Sub(qi.startTime), - Status: qi.status, - }) - } - return queries -} - -func (t *TaskManager) waitForQuery(qid uint64, interrupt <-chan struct{}, closing <-chan struct{}, monitorCh <-chan error) { - var timerCh <-chan time.Time - if t.QueryTimeout != 0 { - timer := time.NewTimer(t.QueryTimeout) - timerCh = timer.C - defer timer.Stop() - } - - select { - case <-closing: - t.queryError(qid, ErrQueryInterrupted) - case err := <-monitorCh: - if err == nil { - break - } - - t.queryError(qid, err) - case <-timerCh: - t.queryError(qid, ErrQueryTimeoutLimitExceeded) - case <-interrupt: - // Query was manually closed so exit the select. - return - } - t.KillQuery(qid) -} - -// Close kills all running queries and prevents new queries from being attached. 
-func (t *TaskManager) Close() error { - t.mu.Lock() - defer t.mu.Unlock() - - t.shutdown = true - for _, query := range t.queries { - query.setError(ErrQueryEngineShutdown) - query.close() - } - t.queries = nil - return nil -} diff --git a/influxql/query_request.go b/influxql/query_request.go new file mode 100644 index 0000000000..80b9ac2c21 --- /dev/null +++ b/influxql/query_request.go @@ -0,0 +1,88 @@ +package influxql + +import ( + "encoding/json" + + "github.com/influxdata/influxdb/v2" +) + +type EncodingFormat int + +func (f *EncodingFormat) UnmarshalJSON(bytes []byte) error { + var s string + if err := json.Unmarshal(bytes, &s); err != nil { + return err + } + + *f = EncodingFormatFromMimeType(s) + return nil +} + +func (f EncodingFormat) MarshalJSON() ([]byte, error) { + return json.Marshal(f.ContentType()) +} + +const ( + EncodingFormatJSON EncodingFormat = iota + EncodingFormatCSV + EncodingFormatMessagePack + EncodingFormatTable +) + +// Returns closed encoding format from the specified mime type. +// The default is JSON if no exact match is found. 
+func EncodingFormatFromMimeType(s string) EncodingFormat { + switch s { + case "application/csv", "text/csv": + return EncodingFormatCSV + case "text/plain": + return EncodingFormatTable + case "application/x-msgpack": + return EncodingFormatMessagePack + case "application/json": + fallthrough + default: + return EncodingFormatJSON + } +} + +func (f EncodingFormat) ContentType() string { + switch f { + case EncodingFormatCSV: + return "text/csv" + case EncodingFormatTable: + return "text/plain" + case EncodingFormatMessagePack: + return "application/x-msgpack" + case EncodingFormatJSON: + fallthrough + default: + return "application/json" + } +} + +type QueryRequest struct { + Authorization *influxdb.Authorization `json:"authorization,omitempty"` + OrganizationID influxdb.ID `json:"organization_id"` + DB string `json:"db"` + RP string `json:"rp"` + Epoch string `json:"epoch"` + EncodingFormat EncodingFormat `json:"encoding_format"` + ContentType string `json:"content_type"` // Content type is the desired response format. + Chunked bool `json:"chunked"` // Chunked indicates responses should be chunked using ChunkSize + ChunkSize int `json:"chunk_size"` // ChunkSize is the number of points to be encoded per batch. 0 indicates no chunking. + Query string `json:"query"` // Query contains the InfluxQL. + Params map[string]interface{} `json:"params,omitempty"` + Source string `json:"source"` // Source represents the ultimate source of the request. 
+}
+
+// Valid returns an error if the query request is malformed.
+func (r *QueryRequest) Valid() error {
+	if !r.OrganizationID.Valid() {
+		return &influxdb.Error{
+			Msg:  "organization_id is not valid",
+			Code: influxdb.EInvalid,
+		}
+	}
+	return r.Authorization.Valid()
+}
diff --git a/influxql/service.go b/influxql/service.go
new file mode 100644
index 0000000000..4430b61334
--- /dev/null
+++ b/influxql/service.go
@@ -0,0 +1,96 @@
+package influxql
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/influxdata/influxdb/v2/kit/check"
+)
+
+// ProxyQueryService performs InfluxQL queries and encodes the result into a writer.
+// The results are opaque to a ProxyQueryService.
+type ProxyQueryService interface {
+	check.Checker
+	Query(ctx context.Context, w io.Writer, req *QueryRequest) (Statistics, error)
+}
+
+// ProxyMode enumerates the possible ProxyQueryService operating modes used by a downstream client.
+type ProxyMode byte
+
+const (
+	// ProxyModeHTTP specifies a ProxyQueryService that forwards InfluxQL requests via HTTP to influxqld.
+	ProxyModeHTTP ProxyMode = iota
+
+	// ProxyModeQueue specifies a ProxyQueryService that pushes InfluxQL requests to a queue and influxqld issues a callback request to the initiating service.
+	ProxyModeQueue
+)
+
+var proxyModeString = [...]string{
+	ProxyModeHTTP:  "http",
+	ProxyModeQueue: "queue",
+}
+
+func (i ProxyMode) String() string {
+	if int(i) >= len(proxyModeString) {
+		return "invalid"
+	}
+	return proxyModeString[i]
+}
+
+func (i *ProxyMode) Set(v string) (err error) {
+	switch v {
+	case "http":
+		*i = ProxyModeHTTP
+	case "queue":
+		*i = ProxyModeQueue
+	default:
+		err = fmt.Errorf("unexpected %s type: %s", i.Type(), v)
+	}
+	return err
+}
+
+func (i *ProxyMode) Type() string { return "proxy-mode" }
+
+// RequestMode enumerates the possible influxqld operating modes for receiving InfluxQL requests.
+type RequestMode byte
+
+const (
+	// RequestModeHTTP specifies the HTTP listener should be active.
+	RequestModeHTTP RequestMode = iota
+
+	// RequestModeQueue specifies the queue dispatcher should be active.
+	RequestModeQueue
+
+	// RequestModeAll specifies both the HTTP listener and queue dispatcher should be active.
+	RequestModeAll
+)
+
+var requestModeString = [...]string{
+	RequestModeHTTP:  "http",
+	RequestModeQueue: "queue",
+	RequestModeAll:   "all",
+}
+
+func (i RequestMode) String() string {
+	if int(i) >= len(requestModeString) {
+		return "invalid"
+	}
+	return requestModeString[i]
+}
+
+func (i *RequestMode) Set(v string) (err error) {
+	switch v {
+	case "http":
+		*i = RequestModeHTTP
+	case "queue":
+		*i = RequestModeQueue
+	case "all":
+		*i = RequestModeAll
+	default:
+		err = fmt.Errorf("unexpected %s type: %s", i.Type(), v)
+	}
+	return err
+}
+
+func (i *RequestMode) Type() string { return "request-mode" }
diff --git a/influxql/statistics.go b/influxql/statistics.go
new file mode 100644
index 0000000000..3ff3bf1bf2
--- /dev/null
+++ b/influxql/statistics.go
@@ -0,0 +1,123 @@
+package influxql
+
+import (
+	"sync"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// Statistics is a collection of statistics about the processing of a query.
+type Statistics struct {
+	PlanDuration    time.Duration `json:"plan_duration"`    // PlanDuration is the duration spent planning the query.
+	ExecuteDuration time.Duration `json:"execute_duration"` // ExecuteDuration is the duration spent executing the query.
+	StatementCount  int           `json:"statement_count"`  // StatementCount is the number of InfluxQL statements executed
+	ScannedValues   int           `json:"scanned_values"`   // ScannedValues is the number of values scanned from storage
+	ScannedBytes    int           `json:"scanned_bytes"`    // ScannedBytes is the number of bytes scanned from storage
+}
+
+// Adding returns the sum of s and other.
+func (s Statistics) Adding(other Statistics) Statistics { + return Statistics{ + PlanDuration: s.PlanDuration + other.PlanDuration, + ExecuteDuration: s.ExecuteDuration + other.ExecuteDuration, + StatementCount: s.StatementCount + other.StatementCount, + ScannedValues: s.ScannedValues + other.ScannedValues, + ScannedBytes: s.ScannedBytes + other.ScannedBytes, + } +} + +// Add adds other to s. +func (s *Statistics) Add(other Statistics) { + s.PlanDuration += other.PlanDuration + s.ExecuteDuration += other.ExecuteDuration + s.StatementCount += other.StatementCount + s.ScannedValues += other.ScannedValues + s.ScannedBytes += other.ScannedBytes +} + +func (s *Statistics) LogToSpan(span opentracing.Span) { + if span == nil { + return + } + span.LogFields( + log.Float64("stats_plan_duration_seconds", s.PlanDuration.Seconds()), + log.Float64("stats_execute_duration_seconds", s.ExecuteDuration.Seconds()), + log.Int("stats_statement_count", s.StatementCount), + log.Int("stats_scanned_values", s.ScannedValues), + log.Int("stats_scanned_bytes", s.ScannedBytes), + ) +} + +// TotalDuration returns the sum of all durations for s. 
+func (s *Statistics) TotalDuration() time.Duration { + return s.PlanDuration + s.ExecuteDuration +} + +type CollectorFn func() Statistics + +func (fn CollectorFn) Statistics() Statistics { + return fn() +} + +type MutableCollector struct { + s *Statistics +} + +func NewMutableCollector(s *Statistics) *MutableCollector { + return &MutableCollector{s: s} +} + +func (c *MutableCollector) Statistics() Statistics { + return *c.s +} + +type ImmutableCollector struct { + s Statistics +} + +func NewImmutableCollector(s Statistics) *ImmutableCollector { + return &ImmutableCollector{s: s} +} + +func (c *ImmutableCollector) Statistics() Statistics { + return c.s +} + +type StatisticsCollector interface { + Statistics() Statistics +} + +type StatisticsGatherer struct { + mu sync.Mutex + collectors []StatisticsCollector +} + +func (sg *StatisticsGatherer) Append(sc StatisticsCollector) { + sg.mu.Lock() + defer sg.mu.Unlock() + sg.collectors = append(sg.collectors, sc) +} + +func (sg *StatisticsGatherer) Statistics() Statistics { + sg.mu.Lock() + defer sg.mu.Unlock() + + res := Statistics{} + for i := range sg.collectors { + res = res.Adding(sg.collectors[i].Statistics()) + } + return res +} + +func (sg *StatisticsGatherer) Reset() { + sg.mu.Lock() + defer sg.mu.Unlock() + + coll := sg.collectors + sg.collectors = sg.collectors[:0] + for i := range coll { + coll[i] = nil + } +} diff --git a/v1/coordinator/config.go b/v1/coordinator/config.go index 6fa89ad5bd..88f2a5d343 100644 --- a/v1/coordinator/config.go +++ b/v1/coordinator/config.go @@ -5,7 +5,6 @@ package coordinator import ( "time" - "github.com/influxdata/influxdb/v2/influxql/query" "github.com/influxdata/influxdb/v2/toml" "github.com/influxdata/influxdb/v2/v1/monitor/diagnostics" ) @@ -31,7 +30,6 @@ const ( type Config struct { WriteTimeout toml.Duration `toml:"write-timeout"` MaxConcurrentQueries int `toml:"max-concurrent-queries"` - QueryTimeout toml.Duration `toml:"query-timeout"` LogQueriesAfter toml.Duration 
`toml:"log-queries-after"` MaxSelectPointN int `toml:"max-select-point"` MaxSelectSeriesN int `toml:"max-select-series"` @@ -42,7 +40,6 @@ type Config struct { func NewConfig() Config { return Config{ WriteTimeout: toml.Duration(DefaultWriteTimeout), - QueryTimeout: toml.Duration(query.DefaultQueryTimeout), MaxConcurrentQueries: DefaultMaxConcurrentQueries, MaxSelectPointN: DefaultMaxSelectPointN, MaxSelectSeriesN: DefaultMaxSelectSeriesN, @@ -54,7 +51,6 @@ func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) { return diagnostics.RowFromMap(map[string]interface{}{ "write-timeout": c.WriteTimeout, "max-concurrent-queries": c.MaxConcurrentQueries, - "query-timeout": c.QueryTimeout, "log-queries-after": c.LogQueriesAfter, "max-select-point": c.MaxSelectPointN, "max-select-series": c.MaxSelectSeriesN, diff --git a/v1/coordinator/points_writer.go b/v1/coordinator/points_writer.go index a8dfedddb9..8fa96d2320 100644 --- a/v1/coordinator/points_writer.go +++ b/v1/coordinator/points_writer.go @@ -276,12 +276,6 @@ func (l sgList) Append(sgi meta.ShardGroupInfo) sgList { return next } -// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of -// a cluster structure for information. This is to avoid a circular dependency. -func (w *PointsWriter) WritePointsInto(p *IntoWriteRequest) error { - return w.WritePointsPrivileged(p.Database, p.RetentionPolicy, models.ConsistencyLevelOne, p.Points) -} - // WritePoints writes the data to the underlying storage. 
consitencyLevel and user are only used for clustered scenarios func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error { return w.WritePointsPrivileged(database, retentionPolicy, consistencyLevel, points) diff --git a/v1/coordinator/shard_mapper.go b/v1/coordinator/shard_mapper.go index 5c3e4a05d1..9a0752753b 100644 --- a/v1/coordinator/shard_mapper.go +++ b/v1/coordinator/shard_mapper.go @@ -2,9 +2,11 @@ package coordinator import ( "context" + "fmt" "io" "time" + "github.com/influxdata/influxdb/v2" "github.com/influxdata/influxdb/v2/influxql/query" "github.com/influxdata/influxdb/v2/v1/services/meta" "github.com/influxdata/influxdb/v2/v1/tsdb" @@ -27,24 +29,26 @@ type LocalShardMapper struct { TSDBStore interface { ShardGroup(ids []uint64) tsdb.ShardGroup } + + DBRP influxdb.DBRPMappingServiceV2 } // MapShards maps the sources to the appropriate shards into an IteratorCreator. -func (e *LocalShardMapper) MapShards(sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) { +func (e *LocalShardMapper) MapShards(ctx context.Context, sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) { a := &LocalShardMapping{ ShardMap: make(map[Source]tsdb.ShardGroup), } tmin := time.Unix(0, t.MinTimeNano()) tmax := time.Unix(0, t.MaxTimeNano()) - if err := e.mapShards(a, sources, tmin, tmax); err != nil { + if err := e.mapShards(ctx, a, sources, tmin, tmax, opt.OrgID); err != nil { return nil, err } a.MinTime, a.MaxTime = tmin, tmax return a, nil } -func (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sources, tmin, tmax time.Time) error { +func (e *LocalShardMapper) mapShards(ctx context.Context, a *LocalShardMapping, sources influxql.Sources, tmin, tmax time.Time, orgID influxdb.ID) error { for _, s := range sources { switch s := s.(type) { case *influxql.Measurement: @@ 
-56,7 +60,22 @@ func (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sour // shards is always the same regardless of which measurement we are // using. if _, ok := a.ShardMap[source]; !ok { - groups, err := e.MetaClient.ShardGroupsByTimeRange(s.Database, s.RetentionPolicy, tmin, tmax) + // lookup bucket and create info + mappings, n, err := e.DBRP.FindMany(ctx, influxdb.DBRPMappingFilterV2{ + OrgID: &orgID, + Database: &s.Database, + RetentionPolicy: &s.RetentionPolicy, + }) + if err != nil { + return fmt.Errorf("finding DBRP mappings: %v", err) + } else if n == 0 { + return fmt.Errorf("retention policy not found: %s", s.RetentionPolicy) + } else if n != 1 { + return fmt.Errorf("finding DBRP mappings: expected 1, found %d", n) + } + + mapping := mappings[0] + groups, err := e.MetaClient.ShardGroupsByTimeRange(mapping.BucketID.String(), meta.DefaultRetentionPolicyName, tmin, tmax) if err != nil { return err } @@ -75,7 +94,7 @@ func (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sour a.ShardMap[source] = e.TSDBStore.ShardGroup(shardIDs) } case *influxql.SubQuery: - if err := e.mapShards(a, s.Statement.Sources, tmin, tmax); err != nil { + if err := e.mapShards(ctx, a, s.Statement.Sources, tmin, tmax, orgID); err != nil { return err } } @@ -98,7 +117,7 @@ type LocalShardMapping struct { MaxTime time.Time } -func (a *LocalShardMapping) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { +func (a *LocalShardMapping) FieldDimensions(ctx context.Context, m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { source := Source{ Database: m.Database, RetentionPolicy: m.RetentionPolicy, @@ -132,7 +151,7 @@ func (a *LocalShardMapping) FieldDimensions(m *influxql.Measurement) (fields map return } -func (a *LocalShardMapping) MapType(m *influxql.Measurement, field string) influxql.DataType { +func (a 
*LocalShardMapping) MapType(ctx context.Context, m *influxql.Measurement, field string) influxql.DataType { source := Source{ Database: m.Database, RetentionPolicy: m.RetentionPolicy, @@ -208,7 +227,7 @@ func (a *LocalShardMapping) CreateIterator(ctx context.Context, m *influxql.Meas return sg.CreateIterator(ctx, m, opt) } -func (a *LocalShardMapping) IteratorCost(m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { +func (a *LocalShardMapping) IteratorCost(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { source := Source{ Database: m.Database, RetentionPolicy: m.RetentionPolicy, diff --git a/v1/coordinator/statement_executor.go b/v1/coordinator/statement_executor.go index 661ee9a2fb..523d2dbcae 100644 --- a/v1/coordinator/statement_executor.go +++ b/v1/coordinator/statement_executor.go @@ -1,22 +1,19 @@ package coordinator import ( - "bytes" "context" "errors" "fmt" - "io" - "sort" - "strconv" "strings" "time" + "github.com/influxdata/influxdb/v2" + "github.com/influxdata/influxdb/v2/authorizer" + iql "github.com/influxdata/influxdb/v2/influxql" "github.com/influxdata/influxdb/v2/influxql/query" "github.com/influxdata/influxdb/v2/models" "github.com/influxdata/influxdb/v2/pkg/tracing" "github.com/influxdata/influxdb/v2/pkg/tracing/fields" - influxdb "github.com/influxdata/influxdb/v2/v1" - "github.com/influxdata/influxdb/v2/v1/monitor" "github.com/influxdata/influxdb/v2/v1/services/meta" "github.com/influxdata/influxdb/v2/v1/tsdb" "github.com/influxdata/influxql" @@ -26,30 +23,17 @@ import ( // when a database has not been provided. var ErrDatabaseNameRequired = errors.New("database name required") -type pointsWriter interface { - WritePointsInto(*IntoWriteRequest) error -} - // StatementExecutor executes a statement in the query. type StatementExecutor struct { MetaClient MetaClient - // TaskManager holds the StatementExecutor that handles task-related commands. 
- TaskManager query.StatementExecutor - // TSDB storage for local node. TSDBStore TSDBStore // ShardMapper for mapping shards when executing a SELECT statement. ShardMapper query.ShardMapper - // Holds monitoring data for SHOW STATS and SHOW DIAGNOSTICS. - Monitor *monitor.Monitor - - // Used for rewriting points back into system for SELECT INTO statements. - PointsWriter interface { - WritePointsInto(*IntoWriteRequest) error - } + DBRP influxdb.DBRPMappingServiceV2 // Select statement limits MaxSelectPointN int @@ -58,10 +42,10 @@ type StatementExecutor struct { } // ExecuteStatement executes the given statement with the given execution context. -func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { // Select statements are handled separately so that they can be streamed. if stmt, ok := stmt.(*influxql.SelectStatement); ok { - return e.executeSelectStatement(stmt, ctx) + return e.executeSelectStatement(ctx, stmt, ectx) } var rows models.Rows @@ -69,141 +53,83 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query var err error switch stmt := stmt.(type) { case *influxql.AlterRetentionPolicyStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeAlterRetentionPolicyStatement(stmt) + err = iql.ErrNotImplemented("ALTER RETENTION POLICY") case *influxql.CreateContinuousQueryStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeCreateContinuousQueryStatement(stmt) + err = iql.ErrNotImplemented("CREATE CONTINUOUS QUERY") case *influxql.CreateDatabaseStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeCreateDatabaseStatement(stmt) + err = iql.ErrNotImplemented("CREATE 
DATABASE") case *influxql.CreateRetentionPolicyStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeCreateRetentionPolicyStatement(stmt) + err = iql.ErrNotImplemented("CREATE RETENTION POLICY") case *influxql.CreateSubscriptionStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeCreateSubscriptionStatement(stmt) + err = iql.ErrNotImplemented("CREATE SUBSCRIPTION") case *influxql.CreateUserStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeCreateUserStatement(stmt) + err = iql.ErrNotImplemented("CREATE USER") case *influxql.DeleteSeriesStatement: - err = e.executeDeleteSeriesStatement(stmt, ctx.Database) + err = iql.ErrNotImplemented("DROP SERIES") case *influxql.DropContinuousQueryStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeDropContinuousQueryStatement(stmt) + err = iql.ErrNotImplemented("DROP CONTINUOUS QUERY") case *influxql.DropDatabaseStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeDropDatabaseStatement(stmt) + err = iql.ErrNotImplemented("DROP DATABASE") case *influxql.DropMeasurementStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeDropMeasurementStatement(stmt, ctx.Database) + err = iql.ErrNotImplemented("DROP MEASUREMENT") case *influxql.DropSeriesStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeDropSeriesStatement(stmt, ctx.Database) + err = iql.ErrNotImplemented("DROP SERIES") case *influxql.DropRetentionPolicyStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeDropRetentionPolicyStatement(stmt) + err = 
iql.ErrNotImplemented("DROP RETENTION POLICY") case *influxql.DropShardStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeDropShardStatement(stmt) + err = iql.ErrNotImplemented("DROP SHARD") case *influxql.DropSubscriptionStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeDropSubscriptionStatement(stmt) + err = iql.ErrNotImplemented("DROP SUBSCRIPTION") case *influxql.DropUserStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeDropUserStatement(stmt) + err = iql.ErrNotImplemented("DROP USER") case *influxql.ExplainStatement: if stmt.Analyze { - rows, err = e.executeExplainAnalyzeStatement(stmt, ctx) + rows, err = e.executeExplainAnalyzeStatement(ctx, stmt, ectx) } else { - rows, err = e.executeExplainStatement(stmt, ctx) + rows, err = e.executeExplainStatement(ctx, stmt, ectx) } case *influxql.GrantStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeGrantStatement(stmt) + err = iql.ErrNotImplemented("GRANT") case *influxql.GrantAdminStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeGrantAdminStatement(stmt) + err = iql.ErrNotImplemented("GRANT ALL") case *influxql.RevokeStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeRevokeStatement(stmt) + err = iql.ErrNotImplemented("REVOKE") case *influxql.RevokeAdminStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeRevokeAdminStatement(stmt) + err = iql.ErrNotImplemented("REVOKE ALL") case *influxql.ShowContinuousQueriesStatement: - rows, err = e.executeShowContinuousQueriesStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW CONTINUOUS 
QUERIES") case *influxql.ShowDatabasesStatement: - rows, err = e.executeShowDatabasesStatement(stmt, ctx) + rows, err = e.executeShowDatabasesStatement(ctx, stmt, ectx) case *influxql.ShowDiagnosticsStatement: - rows, err = e.executeShowDiagnosticsStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW DIAGNOSTICS") case *influxql.ShowGrantsForUserStatement: - rows, err = e.executeShowGrantsForUserStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW GRANTS") case *influxql.ShowMeasurementsStatement: - return e.executeShowMeasurementsStatement(stmt, ctx) + return e.executeShowMeasurementsStatement(ctx, stmt, ectx) case *influxql.ShowMeasurementCardinalityStatement: - rows, err = e.executeShowMeasurementCardinalityStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW MEASUREMENT CARDINALITY") case *influxql.ShowRetentionPoliciesStatement: - rows, err = e.executeShowRetentionPoliciesStatement(stmt) + rows, err = e.executeShowRetentionPoliciesStatement(ctx, stmt, ectx) case *influxql.ShowSeriesCardinalityStatement: - rows, err = e.executeShowSeriesCardinalityStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW SERIES CARDINALITY") case *influxql.ShowShardsStatement: - rows, err = e.executeShowShardsStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW SHARDS") case *influxql.ShowShardGroupsStatement: - rows, err = e.executeShowShardGroupsStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW SHARD GROUPS") case *influxql.ShowStatsStatement: - rows, err = e.executeShowStatsStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW STATS") case *influxql.ShowSubscriptionsStatement: - rows, err = e.executeShowSubscriptionsStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW SUBSCRIPTIONS") case *influxql.ShowTagKeysStatement: - return e.executeShowTagKeys(stmt, ctx) + return e.executeShowTagKeys(ctx, stmt, ectx) case *influxql.ShowTagValuesStatement: - return e.executeShowTagValues(stmt, ctx) + return 
e.executeShowTagValues(ctx, stmt, ectx) case *influxql.ShowUsersStatement: - rows, err = e.executeShowUsersStatement(stmt) + rows, err = nil, iql.ErrNotImplemented("SHOW USERS") case *influxql.SetPasswordUserStatement: - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - err = e.executeSetPasswordUserStatement(stmt) + err = iql.ErrNotImplemented("SET PASSWORD") case *influxql.ShowQueriesStatement, *influxql.KillQueryStatement: - // Send query related statements to the task manager. - return e.TaskManager.ExecuteStatement(stmt, ctx) + err = iql.ErrNotImplemented("SHOW QUERIES") default: return query.ErrInvalidQuery } @@ -212,219 +138,29 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query return err } - return ctx.Send(&query.Result{ + return ectx.Send(ctx, &query.Result{ Series: rows, Messages: messages, }) } -func (e *StatementExecutor) executeAlterRetentionPolicyStatement(stmt *influxql.AlterRetentionPolicyStatement) error { - rpu := &meta.RetentionPolicyUpdate{ - Duration: stmt.Duration, - ReplicaN: stmt.Replication, - ShardGroupDuration: stmt.ShardGroupDuration, - } - - // Update the retention policy. - return e.MetaClient.UpdateRetentionPolicy(stmt.Database, stmt.Name, rpu, stmt.Default) -} - -func (e *StatementExecutor) executeCreateContinuousQueryStatement(q *influxql.CreateContinuousQueryStatement) error { - // Verify that retention policies exist. 
- var err error - verifyRPFn := func(n influxql.Node) { - if err != nil { - return - } - switch m := n.(type) { - case *influxql.Measurement: - var rp *meta.RetentionPolicyInfo - if rp, err = e.MetaClient.RetentionPolicy(m.Database, m.RetentionPolicy); err != nil { - return - } else if rp == nil { - err = fmt.Errorf("%s: %s.%s", meta.ErrRetentionPolicyNotFound, m.Database, m.RetentionPolicy) - } - default: - return - } - } - - influxql.WalkFunc(q, verifyRPFn) - - if err != nil { - return err - } - - return e.MetaClient.CreateContinuousQuery(q.Database, q.Name, q.String()) -} - -func (e *StatementExecutor) executeCreateDatabaseStatement(stmt *influxql.CreateDatabaseStatement) error { - if !meta.ValidName(stmt.Name) { - // TODO This should probably be in `(*meta.Data).CreateDatabase` - // but can't go there until 1.1 is used everywhere - return meta.ErrInvalidName - } - - if !stmt.RetentionPolicyCreate { - _, err := e.MetaClient.CreateDatabase(stmt.Name) - return err - } - - // If we're doing, for example, CREATE DATABASE "db" WITH DURATION 1d then - // the name will not yet be set. 
We only need to validate non-empty - // retention policy names, such as in the statement: - // CREATE DATABASE "db" WITH DURATION 1d NAME "xyz" - if stmt.RetentionPolicyName != "" && !meta.ValidName(stmt.RetentionPolicyName) { - return meta.ErrInvalidName - } - - spec := meta.RetentionPolicySpec{ - Name: stmt.RetentionPolicyName, - Duration: stmt.RetentionPolicyDuration, - ReplicaN: stmt.RetentionPolicyReplication, - ShardGroupDuration: stmt.RetentionPolicyShardGroupDuration, - } - _, err := e.MetaClient.CreateDatabaseWithRetentionPolicy(stmt.Name, &spec) - return err -} - -func (e *StatementExecutor) executeCreateRetentionPolicyStatement(stmt *influxql.CreateRetentionPolicyStatement) error { - if !meta.ValidName(stmt.Name) { - // TODO This should probably be in `(*meta.Data).CreateRetentionPolicy` - // but can't go there until 1.1 is used everywhere - return meta.ErrInvalidName - } - - spec := meta.RetentionPolicySpec{ - Name: stmt.Name, - Duration: &stmt.Duration, - ReplicaN: &stmt.Replication, - ShardGroupDuration: stmt.ShardGroupDuration, - } - - // Create new retention policy. - _, err := e.MetaClient.CreateRetentionPolicy(stmt.Database, &spec, stmt.Default) - return err -} - -func (e *StatementExecutor) executeCreateSubscriptionStatement(q *influxql.CreateSubscriptionStatement) error { - return e.MetaClient.CreateSubscription(q.Database, q.RetentionPolicy, q.Name, q.Mode, q.Destinations) -} - -func (e *StatementExecutor) executeCreateUserStatement(q *influxql.CreateUserStatement) error { - _, err := e.MetaClient.CreateUser(q.Name, q.Password, q.Admin) - return err -} - -func (e *StatementExecutor) executeDeleteSeriesStatement(stmt *influxql.DeleteSeriesStatement, database string) error { - if dbi := e.MetaClient.Database(database); dbi == nil { - return query.ErrDatabaseNotFound(database) - } - - // Convert "now()" to current time. - stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: time.Now().UTC()}) - - // Locally delete the series. 
- return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition) -} - -func (e *StatementExecutor) executeDropContinuousQueryStatement(q *influxql.DropContinuousQueryStatement) error { - return e.MetaClient.DropContinuousQuery(q.Database, q.Name) -} - -// executeDropDatabaseStatement drops a database from the cluster. -// It does not return an error if the database was not found on any of -// the nodes, or in the Meta store. -func (e *StatementExecutor) executeDropDatabaseStatement(stmt *influxql.DropDatabaseStatement) error { - if e.MetaClient.Database(stmt.Name) == nil { - return nil - } - - // Locally delete the datababse. - if err := e.TSDBStore.DeleteDatabase(stmt.Name); err != nil { - return err - } - - // Remove the database from the Meta Store. - return e.MetaClient.DropDatabase(stmt.Name) -} - -func (e *StatementExecutor) executeDropMeasurementStatement(stmt *influxql.DropMeasurementStatement, database string) error { - if dbi := e.MetaClient.Database(database); dbi == nil { - return query.ErrDatabaseNotFound(database) - } - - // Locally drop the measurement - return e.TSDBStore.DeleteMeasurement(database, stmt.Name) -} - -func (e *StatementExecutor) executeDropSeriesStatement(stmt *influxql.DropSeriesStatement, database string) error { - if dbi := e.MetaClient.Database(database); dbi == nil { - return query.ErrDatabaseNotFound(database) - } - - // Check for time in WHERE clause (not supported). - if influxql.HasTimeExpr(stmt.Condition) { - return errors.New("DROP SERIES doesn't support time in WHERE clause") - } - - // Locally drop the series. - return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition) -} - -func (e *StatementExecutor) executeDropShardStatement(stmt *influxql.DropShardStatement) error { - // Locally delete the shard. - if err := e.TSDBStore.DeleteShard(stmt.ID); err != nil { - return err - } - - // Remove the shard reference from the Meta Store. 
- return e.MetaClient.DropShard(stmt.ID) -} - -func (e *StatementExecutor) executeDropRetentionPolicyStatement(stmt *influxql.DropRetentionPolicyStatement) error { - dbi := e.MetaClient.Database(stmt.Database) - if dbi == nil { - return nil - } - - if dbi.RetentionPolicy(stmt.Name) == nil { - return nil - } - - // Locally drop the retention policy. - if err := e.TSDBStore.DeleteRetentionPolicy(stmt.Database, stmt.Name); err != nil { - return err - } - - return e.MetaClient.DropRetentionPolicy(stmt.Database, stmt.Name) -} - -func (e *StatementExecutor) executeDropSubscriptionStatement(q *influxql.DropSubscriptionStatement) error { - return e.MetaClient.DropSubscription(q.Database, q.RetentionPolicy, q.Name) -} - -func (e *StatementExecutor) executeDropUserStatement(q *influxql.DropUserStatement) error { - return e.MetaClient.DropUser(q.Name) -} - -func (e *StatementExecutor) executeExplainStatement(q *influxql.ExplainStatement, ctx *query.ExecutionContext) (models.Rows, error) { +func (e *StatementExecutor) executeExplainStatement(ctx context.Context, q *influxql.ExplainStatement, ectx *query.ExecutionContext) (models.Rows, error) { opt := query.SelectOptions{ - NodeID: ctx.ExecutionOptions.NodeID, + OrgID: ectx.OrgID, + NodeID: ectx.ExecutionOptions.NodeID, MaxSeriesN: e.MaxSelectSeriesN, MaxBucketsN: e.MaxSelectBucketsN, - Authorizer: ctx.Authorizer, } // Prepare the query for execution, but do not actually execute it. // This should perform any needed substitutions. 
- p, err := query.Prepare(q.Statement, e.ShardMapper, opt) + p, err := query.Prepare(ctx, q.Statement, e.ShardMapper, opt) if err != nil { return nil, err } defer p.Close() - plan, err := p.Explain() + plan, err := p.Explain(ctx) if err != nil { return nil, err } @@ -439,16 +175,16 @@ func (e *StatementExecutor) executeExplainStatement(q *influxql.ExplainStatement return models.Rows{row}, nil } -func (e *StatementExecutor) executeExplainAnalyzeStatement(q *influxql.ExplainStatement, ectx *query.ExecutionContext) (models.Rows, error) { +func (e *StatementExecutor) executeExplainAnalyzeStatement(ctx context.Context, q *influxql.ExplainStatement, ectx *query.ExecutionContext) (models.Rows, error) { stmt := q.Statement t, span := tracing.NewTrace("select") - ctx := tracing.NewContextWithTrace(ectx, t) + ctx = tracing.NewContextWithTrace(ctx, t) ctx = tracing.NewContextWithSpan(ctx, span) var aux query.Iterators ctx = query.NewContextWithIterators(ctx, &aux) start := time.Now() - cur, err := e.createIterators(ctx, stmt, ectx.ExecutionOptions) + cur, err := e.createIterators(ctx, stmt, ectx.ExecutionOptions, ectx.StatisticsGatherer) if err != nil { return nil, err } @@ -468,8 +204,8 @@ func (e *StatementExecutor) executeExplainAnalyzeStatement(q *influxql.ExplainSt } else if row == nil { // Check if the query was interrupted while emitting. 
select { - case <-ectx.Done(): - err = ectx.Err() + case <-ctx.Done(): + err = ctx.Err() goto CLEANUP default: } @@ -506,55 +242,22 @@ CLEANUP: return models.Rows{row}, nil } -func (e *StatementExecutor) executeGrantStatement(stmt *influxql.GrantStatement) error { - return e.MetaClient.SetPrivilege(stmt.User, stmt.On, stmt.Privilege) -} - -func (e *StatementExecutor) executeGrantAdminStatement(stmt *influxql.GrantAdminStatement) error { - return e.MetaClient.SetAdminPrivilege(stmt.User, true) -} - -func (e *StatementExecutor) executeRevokeStatement(stmt *influxql.RevokeStatement) error { - priv := influxql.NoPrivileges - - // Revoking all privileges means there's no need to look at existing user privileges. - if stmt.Privilege != influxql.AllPrivileges { - p, err := e.MetaClient.UserPrivilege(stmt.User, stmt.On) - if err != nil { - return err - } - // Bit clear (AND NOT) the user's privilege with the revoked privilege. - priv = *p &^ stmt.Privilege - } - - return e.MetaClient.SetPrivilege(stmt.User, stmt.On, priv) -} - -func (e *StatementExecutor) executeRevokeAdminStatement(stmt *influxql.RevokeAdminStatement) error { - return e.MetaClient.SetAdminPrivilege(stmt.User, false) -} - -func (e *StatementExecutor) executeSetPasswordUserStatement(q *influxql.SetPasswordUserStatement) error { - return e.MetaClient.UpdateUser(q.Name, q.Password) -} - -func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *query.ExecutionContext) error { - cur, err := e.createIterators(ctx, stmt, ctx.ExecutionOptions) +func (e *StatementExecutor) executeSelectStatement(ctx context.Context, stmt *influxql.SelectStatement, ectx *query.ExecutionContext) error { + cur, err := e.createIterators(ctx, stmt, ectx.ExecutionOptions, ectx.StatisticsGatherer) if err != nil { return err } // Generate a row emitter from the iterator set. 
- em := query.NewEmitter(cur, ctx.ChunkSize) + em := query.NewEmitter(cur, ectx.ChunkSize) defer em.Close() // Emit rows to the results channel. - var writeN int64 var emitted bool - var pointsWriter *BufferedPointsWriter if stmt.Target != nil { - pointsWriter = NewBufferedPointsWriter(e.PointsWriter, stmt.Target.Measurement.Database, stmt.Target.Measurement.RetentionPolicy, 10000) + // SELECT INTO is unsupported + return iql.ErrNotImplemented("SELECT INTO") } for { @@ -571,53 +274,22 @@ func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatemen break } - // Write points back into system for INTO statements. - if stmt.Target != nil { - n, err := e.writeInto(pointsWriter, stmt, row) - if err != nil { - return err - } - writeN += n - continue - } - result := &query.Result{ Series: []*models.Row{row}, Partial: partial, } // Send results or exit if closing. - if err := ctx.Send(result); err != nil { + if err := ectx.Send(ctx, result); err != nil { return err } emitted = true } - // Flush remaining points and emit write count if an INTO statement. - if stmt.Target != nil { - if err := pointsWriter.Flush(); err != nil { - return err - } - - var messages []*query.Message - if ctx.ReadOnly { - messages = append(messages, query.ReadOnlyWarning(stmt.String())) - } - - return ctx.Send(&query.Result{ - Messages: messages, - Series: []*models.Row{{ - Name: "result", - Columns: []string{"time", "written"}, - Values: [][]interface{}{{time.Unix(0, 0).UTC(), writeN}}, - }}, - }) - } - // Always emit at least one result. 
if !emitted { - return ctx.Send(&query.Result{ + return ectx.Send(ctx, &query.Result{ Series: make([]*models.Row, 0), }) } @@ -625,13 +297,19 @@ func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatemen return nil } -func (e *StatementExecutor) createIterators(ctx context.Context, stmt *influxql.SelectStatement, opt query.ExecutionOptions) (query.Cursor, error) { +func (e *StatementExecutor) createIterators(ctx context.Context, stmt *influxql.SelectStatement, opt query.ExecutionOptions, gatherer *iql.StatisticsGatherer) (query.Cursor, error) { + defer func(start time.Time) { + dur := time.Since(start) + gatherer.Append(iql.NewImmutableCollector(iql.Statistics{PlanDuration: dur})) + }(time.Now()) + sopt := query.SelectOptions{ - NodeID: opt.NodeID, - MaxSeriesN: e.MaxSelectSeriesN, - MaxPointN: e.MaxSelectPointN, - MaxBucketsN: e.MaxSelectBucketsN, - Authorizer: opt.Authorizer, + OrgID: opt.OrgID, + NodeID: opt.NodeID, + MaxSeriesN: e.MaxSelectSeriesN, + MaxPointN: e.MaxSelectPointN, + MaxBucketsN: e.MaxSelectBucketsN, + StatisticsGatherer: gatherer, } // Create a set of iterators from a selection. @@ -642,83 +320,62 @@ func (e *StatementExecutor) createIterators(ctx context.Context, stmt *influxql. 
return cur, nil } -func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql.ShowContinuousQueriesStatement) (models.Rows, error) { - dis := e.MetaClient.Databases() - - rows := []*models.Row{} - for _, di := range dis { - row := &models.Row{Columns: []string{"name", "query"}, Name: di.Name} - for _, cqi := range di.ContinuousQueries { - row.Values = append(row.Values, []interface{}{cqi.Name, cqi.Query}) - } - rows = append(rows, row) - } - return rows, nil -} - -func (e *StatementExecutor) executeShowDatabasesStatement(q *influxql.ShowDatabasesStatement, ctx *query.ExecutionContext) (models.Rows, error) { - dis := e.MetaClient.Databases() - a := ctx.ExecutionOptions.Authorizer - +func (e *StatementExecutor) executeShowDatabasesStatement(ctx context.Context, q *influxql.ShowDatabasesStatement, ectx *query.ExecutionContext) (models.Rows, error) { row := &models.Row{Name: "databases", Columns: []string{"name"}} - for _, di := range dis { - // Only include databases that the user is authorized to read or write. - if a.AuthorizeDatabase(influxql.ReadPrivilege, di.Name) || a.AuthorizeDatabase(influxql.WritePrivilege, di.Name) { - row.Values = append(row.Values, []interface{}{di.Name}) + // TODO(gianarb): How pagination works here? 
+ dbrps, _, err := e.DBRP.FindMany(ctx, influxdb.DBRPMappingFilterV2{ + OrgID: &ectx.OrgID, + }) + if err != nil { + return nil, err + } + for _, dbrp := range dbrps { + perm, err := influxdb.NewPermissionAtID(dbrp.BucketID, influxdb.ReadAction, influxdb.BucketsResourceType, dbrp.OrganizationID) + if err != nil { + return nil, err } + err = authorizer.IsAllowed(ctx, *perm) + if err != nil { + if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + continue + } + return nil, err + } + row.Values = append(row.Values, []interface{}{dbrp.Database}) } return []*models.Row{row}, nil } -func (e *StatementExecutor) executeShowDiagnosticsStatement(stmt *influxql.ShowDiagnosticsStatement) (models.Rows, error) { - diags, err := e.Monitor.Diagnostics() +func (e *StatementExecutor) getDefaultRP(ctx context.Context, database string, ectx *query.ExecutionContext) (*influxdb.DBRPMappingV2, error) { + defaultRP := true + mappings, n, err := e.DBRP.FindMany(ctx, influxdb.DBRPMappingFilterV2{ + OrgID: &ectx.OrgID, + Database: &database, + Default: &defaultRP, + }) if err != nil { - return nil, err + return nil, fmt.Errorf("finding DBRP mappings: %v", err) + } else if n == 0 { + return nil, fmt.Errorf("default retention policy not set for: %s", database) + } else if n != 1 { + return nil, fmt.Errorf("finding DBRP mappings: expected 1, found %d", n) } - - // Get a sorted list of diagnostics keys. 
- sortedKeys := make([]string, 0, len(diags)) - for k := range diags { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - rows := make([]*models.Row, 0, len(diags)) - for _, k := range sortedKeys { - if stmt.Module != "" && k != stmt.Module { - continue - } - - row := &models.Row{Name: k} - - row.Columns = diags[k].Columns - row.Values = diags[k].Rows - rows = append(rows, row) - } - return rows, nil + return mappings[0], nil } -func (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGrantsForUserStatement) (models.Rows, error) { - priv, err := e.MetaClient.UserPrivileges(q.Name) - if err != nil { - return nil, err - } - - row := &models.Row{Columns: []string{"database", "privilege"}} - for d, p := range priv { - row.Values = append(row.Values, []interface{}{d, p.String()}) - } - return []*models.Row{row}, nil -} - -func (e *StatementExecutor) executeShowMeasurementsStatement(q *influxql.ShowMeasurementsStatement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) executeShowMeasurementsStatement(ctx context.Context, q *influxql.ShowMeasurementsStatement, ectx *query.ExecutionContext) error { if q.Database == "" { return ErrDatabaseNameRequired } - names, err := e.TSDBStore.MeasurementNames(ctx.Authorizer, q.Database, q.Condition) + mapping, err := e.getDefaultRP(ctx, q.Database, ectx) + if err != nil { + return err + } + + names, err := e.TSDBStore.MeasurementNames(ectx.Authorizer, mapping.BucketID.String(), q.Condition) if err != nil || len(names) == 0 { - return ctx.Send(&query.Result{ + return ectx.Send(ctx, &query.Result{ Err: err, }) } @@ -743,10 +400,10 @@ func (e *StatementExecutor) executeShowMeasurementsStatement(q *influxql.ShowMea } if len(values) == 0 { - return ctx.Send(&query.Result{}) + return ectx.Send(ctx, &query.Result{}) } - return ctx.Send(&query.Result{ + return ectx.Send(ctx, &query.Result{ Series: []*models.Row{{ Name: "measurements", Columns: []string{"name"}, @@ -755,185 +412,52 @@ 
func (e *StatementExecutor) executeShowMeasurementsStatement(q *influxql.ShowMea }) } -func (e *StatementExecutor) executeShowMeasurementCardinalityStatement(stmt *influxql.ShowMeasurementCardinalityStatement) (models.Rows, error) { - if stmt.Database == "" { - return nil, ErrDatabaseNameRequired - } - - n, err := e.TSDBStore.MeasurementsCardinality(stmt.Database) - if err != nil { - return nil, err - } - - return []*models.Row{&models.Row{ - Columns: []string{"cardinality estimation"}, - Values: [][]interface{}{{n}}, - }}, nil -} - -func (e *StatementExecutor) executeShowRetentionPoliciesStatement(q *influxql.ShowRetentionPoliciesStatement) (models.Rows, error) { +func (e *StatementExecutor) executeShowRetentionPoliciesStatement(ctx context.Context, q *influxql.ShowRetentionPoliciesStatement, ectx *query.ExecutionContext) (models.Rows, error) { if q.Database == "" { return nil, ErrDatabaseNameRequired } - di := e.MetaClient.Database(q.Database) - if di == nil { - return nil, influxdb.ErrDatabaseNotFound(q.Database) - } + dbrps, _, err := e.DBRP.FindMany(ctx, influxdb.DBRPMappingFilterV2{ + OrgID: &ectx.OrgID, + Database: &q.Database, + }) - row := &models.Row{Columns: []string{"name", "duration", "shardGroupDuration", "replicaN", "default"}} - for _, rpi := range di.RetentionPolicies { - row.Values = append(row.Values, []interface{}{rpi.Name, rpi.Duration.String(), rpi.ShardGroupDuration.String(), rpi.ReplicaN, di.DefaultRetentionPolicy == rpi.Name}) - } - return []*models.Row{row}, nil -} - -func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) (models.Rows, error) { - dis := e.MetaClient.Databases() - - rows := []*models.Row{} - for _, di := range dis { - row := &models.Row{Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name} - for _, rpi := range di.RetentionPolicies { - for _, sgi := range rpi.ShardGroups { - // Shards associated with 
deleted shard groups are effectively deleted. - // Don't list them. - if sgi.Deleted() { - continue - } - - for _, si := range sgi.Shards { - ownerIDs := make([]uint64, len(si.Owners)) - for i, owner := range si.Owners { - ownerIDs[i] = owner.NodeID - } - - row.Values = append(row.Values, []interface{}{ - si.ID, - di.Name, - rpi.Name, - sgi.ID, - sgi.StartTime.UTC().Format(time.RFC3339), - sgi.EndTime.UTC().Format(time.RFC3339), - sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), - joinUint64(ownerIDs), - }) - } - } - } - rows = append(rows, row) - } - return rows, nil -} - -func (e *StatementExecutor) executeShowSeriesCardinalityStatement(stmt *influxql.ShowSeriesCardinalityStatement) (models.Rows, error) { - if stmt.Database == "" { - return nil, ErrDatabaseNameRequired - } - - n, err := e.TSDBStore.SeriesCardinality(stmt.Database) if err != nil { return nil, err } - return []*models.Row{&models.Row{ - Columns: []string{"cardinality estimation"}, - Values: [][]interface{}{{n}}, - }}, nil -} - -func (e *StatementExecutor) executeShowShardGroupsStatement(stmt *influxql.ShowShardGroupsStatement) (models.Rows, error) { - dis := e.MetaClient.Databases() - - row := &models.Row{Columns: []string{"id", "database", "retention_policy", "start_time", "end_time", "expiry_time"}, Name: "shard groups"} - for _, di := range dis { - for _, rpi := range di.RetentionPolicies { - for _, sgi := range rpi.ShardGroups { - // Shards associated with deleted shard groups are effectively deleted. - // Don't list them. 
- if sgi.Deleted() { - continue - } - - row.Values = append(row.Values, []interface{}{ - sgi.ID, - di.Name, - rpi.Name, - sgi.StartTime.UTC().Format(time.RFC3339), - sgi.EndTime.UTC().Format(time.RFC3339), - sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), - }) - } + row := &models.Row{Columns: []string{"name", "duration", "shardGroupDuration", "replicaN", "default"}} + for _, dbrp := range dbrps { + perm, err := influxdb.NewPermissionAtID(dbrp.BucketID, influxdb.ReadAction, influxdb.BucketsResourceType, dbrp.OrganizationID) + if err != nil { + return nil, err } + err = authorizer.IsAllowed(ctx, *perm) + if err != nil { + if influxdb.ErrorCode(err) == influxdb.EUnauthorized { + continue + } + return nil, err + } + row.Values = append(row.Values, []interface{}{dbrp.RetentionPolicy, "", "", "", dbrp.Default}) } return []*models.Row{row}, nil } -func (e *StatementExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) (models.Rows, error) { - var rows []*models.Row - - if _, ok := e.TSDBStore.(*tsdb.Store); stmt.Module == "indexes" && ok { - // The cost of collecting indexes metrics grows with the size of the indexes, so only collect this - // stat when explicitly requested. 
- b := e.TSDBStore.(*tsdb.Store).IndexBytes() - row := &models.Row{ - Name: "indexes", - Columns: []string{"memoryBytes"}, - Values: [][]interface{}{{b}}, - } - rows = append(rows, row) - - } else { - stats, err := e.Monitor.Statistics(nil) - if err != nil { - return nil, err - } - - for _, stat := range stats { - if stmt.Module != "" && stat.Name != stmt.Module { - continue - } - row := &models.Row{Name: stat.Name, Tags: stat.Tags} - - values := make([]interface{}, 0, len(stat.Values)) - for _, k := range stat.ValueNames() { - row.Columns = append(row.Columns, k) - values = append(values, stat.Values[k]) - } - row.Values = [][]interface{}{values} - rows = append(rows, row) - } - } - return rows, nil -} - -func (e *StatementExecutor) executeShowSubscriptionsStatement(stmt *influxql.ShowSubscriptionsStatement) (models.Rows, error) { - dis := e.MetaClient.Databases() - - rows := []*models.Row{} - for _, di := range dis { - row := &models.Row{Columns: []string{"retention_policy", "name", "mode", "destinations"}, Name: di.Name} - for _, rpi := range di.RetentionPolicies { - for _, si := range rpi.Subscriptions { - row.Values = append(row.Values, []interface{}{rpi.Name, si.Name, si.Mode, si.Destinations}) - } - } - if len(row.Values) > 0 { - rows = append(rows, row) - } - } - return rows, nil -} - -func (e *StatementExecutor) executeShowTagKeys(q *influxql.ShowTagKeysStatement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) executeShowTagKeys(ctx context.Context, q *influxql.ShowTagKeysStatement, ectx *query.ExecutionContext) error { if q.Database == "" { return ErrDatabaseNameRequired } + mapping, err := e.getDefaultRP(ctx, q.Database, ectx) + if err != nil { + return err + } + // Determine shard set based on database and time range. // SHOW TAG KEYS returns all tag keys for the default retention policy. 
- di := e.MetaClient.Database(q.Database) + di := e.MetaClient.Database(mapping.BucketID.String()) if di == nil { return fmt.Errorf("database not found: %s", q.Database) } @@ -949,7 +473,7 @@ func (e *StatementExecutor) executeShowTagKeys(q *influxql.ShowTagKeysStatement, // Get all shards for all retention policies. var allGroups []meta.ShardGroupInfo for _, rpi := range di.RetentionPolicies { - sgis, err := e.MetaClient.ShardGroupsByTimeRange(q.Database, rpi.Name, timeRange.MinTime(), timeRange.MaxTime()) + sgis, err := e.MetaClient.ShardGroupsByTimeRange(mapping.BucketID.String(), rpi.Name, timeRange.MinTime(), timeRange.MaxTime()) if err != nil { return err } @@ -963,9 +487,9 @@ func (e *StatementExecutor) executeShowTagKeys(q *influxql.ShowTagKeysStatement, } } - tagKeys, err := e.TSDBStore.TagKeys(ctx.Authorizer, shardIDs, cond) + tagKeys, err := e.TSDBStore.TagKeys(ectx.Authorizer, shardIDs, cond) if err != nil { - return ctx.Send(&query.Result{ + return ectx.Send(ctx, &query.Result{ Err: err, }) } @@ -998,7 +522,7 @@ func (e *StatementExecutor) executeShowTagKeys(q *influxql.ShowTagKeysStatement, row.Values[i] = []interface{}{key} } - if err := ctx.Send(&query.Result{ + if err := ectx.Send(ctx, &query.Result{ Series: []*models.Row{row}, }); err != nil { return err @@ -1008,19 +532,24 @@ func (e *StatementExecutor) executeShowTagKeys(q *influxql.ShowTagKeysStatement, // Ensure at least one result is emitted. 
if !emitted { - return ctx.Send(&query.Result{}) + return ectx.Send(ctx, &query.Result{}) } return nil } -func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) executeShowTagValues(ctx context.Context, q *influxql.ShowTagValuesStatement, ectx *query.ExecutionContext) error { if q.Database == "" { return ErrDatabaseNameRequired } + mapping, err := e.getDefaultRP(ctx, q.Database, ectx) + if err != nil { + return err + } + // Determine shard set based on database and time range. // SHOW TAG VALUES returns all tag values for the default retention policy. - di := e.MetaClient.Database(q.Database) + di := e.MetaClient.Database(mapping.BucketID.String()) if di == nil { return fmt.Errorf("database not found: %s", q.Database) } @@ -1036,7 +565,7 @@ func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatem // Get all shards for all retention policies. var allGroups []meta.ShardGroupInfo for _, rpi := range di.RetentionPolicies { - sgis, err := e.MetaClient.ShardGroupsByTimeRange(q.Database, rpi.Name, timeRange.MinTime(), timeRange.MaxTime()) + sgis, err := e.MetaClient.ShardGroupsByTimeRange(mapping.BucketID.String(), rpi.Name, timeRange.MinTime(), timeRange.MaxTime()) if err != nil { return err } @@ -1050,9 +579,9 @@ func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatem } } - tagValues, err := e.TSDBStore.TagValues(ctx.Authorizer, shardIDs, cond) + tagValues, err := e.TSDBStore.TagValues(ectx.Authorizer, shardIDs, cond) if err != nil { - return ctx.Send(&query.Result{Err: err}) + return ectx.Send(ctx, &query.Result{Err: err}) } emitted := false @@ -1086,7 +615,7 @@ func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatem row.Values[i] = []interface{}{v.Key, v.Value} } - if err := ctx.Send(&query.Result{ + if err := ectx.Send(ctx, &query.Result{ Series: []*models.Row{row}, }); err != nil { return err @@ 
-1096,179 +625,14 @@ func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatem // Ensure at least one result is emitted. if !emitted { - return ctx.Send(&query.Result{}) + return ectx.Send(ctx, &query.Result{}) } return nil } -func (e *StatementExecutor) executeShowUsersStatement(q *influxql.ShowUsersStatement) (models.Rows, error) { - row := &models.Row{Columns: []string{"user", "admin"}} - for _, ui := range e.MetaClient.Users() { - row.Values = append(row.Values, []interface{}{ui.Name, ui.Admin}) - } - return []*models.Row{row}, nil -} - -// BufferedPointsWriter adds buffering to a pointsWriter so that SELECT INTO queries -// write their points to the destination in batches. -type BufferedPointsWriter struct { - w pointsWriter - buf []models.Point - database string - retentionPolicy string -} - -// NewBufferedPointsWriter returns a new BufferedPointsWriter. -func NewBufferedPointsWriter(w pointsWriter, database, retentionPolicy string, capacity int) *BufferedPointsWriter { - return &BufferedPointsWriter{ - w: w, - buf: make([]models.Point, 0, capacity), - database: database, - retentionPolicy: retentionPolicy, - } -} - -// WritePointsInto implements pointsWriter for BufferedPointsWriter. -func (w *BufferedPointsWriter) WritePointsInto(req *IntoWriteRequest) error { - // Make sure we're buffering points only for the expected destination. - if req.Database != w.database || req.RetentionPolicy != w.retentionPolicy { - return fmt.Errorf("writer for %s.%s can't write into %s.%s", w.database, w.retentionPolicy, req.Database, req.RetentionPolicy) - } - - for i := 0; i < len(req.Points); { - // Get the available space in the buffer. - avail := cap(w.buf) - len(w.buf) - - // Calculate number of points to copy into the buffer. - n := len(req.Points[i:]) - if n > avail { - n = avail - } - - // Copy points into buffer. - w.buf = append(w.buf, req.Points[i:n+i]...) - - // Advance the index by number of points copied. 
- i += n - - // If buffer is full, flush points to underlying writer. - if len(w.buf) == cap(w.buf) { - if err := w.Flush(); err != nil { - return err - } - } - } - - return nil -} - -// Flush writes all buffered points to the underlying writer. -func (w *BufferedPointsWriter) Flush() error { - if len(w.buf) == 0 { - return nil - } - - if err := w.w.WritePointsInto(&IntoWriteRequest{ - Database: w.database, - RetentionPolicy: w.retentionPolicy, - Points: w.buf, - }); err != nil { - return err - } - - // Clear the buffer. - w.buf = w.buf[:0] - - return nil -} - -// Len returns the number of points buffered. -func (w *BufferedPointsWriter) Len() int { return len(w.buf) } - -// Cap returns the capacity (in points) of the buffer. -func (w *BufferedPointsWriter) Cap() int { return cap(w.buf) } - -func (e *StatementExecutor) writeInto(w pointsWriter, stmt *influxql.SelectStatement, row *models.Row) (n int64, err error) { - if stmt.Target.Measurement.Database == "" { - return 0, errNoDatabaseInTarget - } - - // It might seem a bit weird that this is where we do this, since we will have to - // convert rows back to points. The Executors (both aggregate and raw) are complex - // enough that changing them to write back to the DB is going to be clumsy - // - // it might seem weird to have the write be in the Executor, but the interweaving of - // limitedRowWriter and ExecuteAggregate/Raw makes it ridiculously hard to make sure that the - // results will be the same as when queried normally. 
- name := stmt.Target.Measurement.Name - if name == "" { - name = row.Name - } - - points, err := convertRowToPoints(name, row) - if err != nil { - return 0, err - } - - if err := w.WritePointsInto(&IntoWriteRequest{ - Database: stmt.Target.Measurement.Database, - RetentionPolicy: stmt.Target.Measurement.RetentionPolicy, - Points: points, - }); err != nil { - return 0, err - } - - return int64(len(points)), nil -} - -var errNoDatabaseInTarget = errors.New("no database in target") - -// convertRowToPoints will convert a query result Row into Points that can be written back in. -func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) { - // figure out which parts of the result are the time and which are the fields - timeIndex := -1 - fieldIndexes := make(map[string]int) - for i, c := range row.Columns { - if c == "time" { - timeIndex = i - } else { - fieldIndexes[c] = i - } - } - - if timeIndex == -1 { - return nil, errors.New("error finding time index in result") - } - - points := make([]models.Point, 0, len(row.Values)) - for _, v := range row.Values { - vals := make(map[string]interface{}) - for fieldName, fieldIndex := range fieldIndexes { - val := v[fieldIndex] - // Check specifically for nil or a NullFloat. This is because - // the NullFloat represents float numbers that don't have an internal representation - // (like NaN) that cannot be written back, but will not equal nil so there will be - // an attempt to write them if we do not check for it. - if val != nil && val != query.NullFloat { - vals[fieldName] = v[fieldIndex] - } - } - - p, err := models.NewPoint(measurementName, models.NewTags(row.Tags), vals, v[timeIndex].(time.Time)) - if err != nil { - // Drop points that can't be stored - continue - } - - points = append(points, p) - } - - return points, nil -} - // NormalizeStatement adds a default database and policy to the measurements in statement. // Parameter defaultRetentionPolicy can be "". 
-func (e *StatementExecutor) NormalizeStatement(stmt influxql.Statement, defaultDatabase, defaultRetentionPolicy string) (err error) { +func (e *StatementExecutor) NormalizeStatement(ctx context.Context, stmt influxql.Statement, defaultDatabase, defaultRetentionPolicy string, ectx *query.ExecutionContext) (err error) { influxql.WalkFunc(stmt, func(node influxql.Node) { if err != nil { return @@ -1304,14 +668,14 @@ func (e *StatementExecutor) NormalizeStatement(stmt influxql.Statement, defaultD // DB and RP not supported by these statements so don't rewrite into invalid // statements default: - err = e.normalizeMeasurement(node, defaultDatabase, defaultRetentionPolicy) + err = e.normalizeMeasurement(ctx, node, defaultDatabase, defaultRetentionPolicy, ectx) } } }) return } -func (e *StatementExecutor) normalizeMeasurement(m *influxql.Measurement, defaultDatabase, defaultRetentionPolicy string) error { +func (e *StatementExecutor) normalizeMeasurement(ctx context.Context, m *influxql.Measurement, defaultDatabase, defaultRetentionPolicy string, ectx *query.ExecutionContext) error { // Targets (measurements in an INTO clause) can have blank names, which means it will be // the same as the measurement name it came from in the FROM clause. if !m.IsTarget && m.Name == "" && m.SystemIterator == "" && m.Regex == nil { @@ -1328,52 +692,51 @@ func (e *StatementExecutor) normalizeMeasurement(m *influxql.Measurement, defaul return ErrDatabaseNameRequired } - // Find database. - di := e.MetaClient.Database(m.Database) - if di == nil { - return influxdb.ErrDatabaseNotFound(m.Database) + // TODO(sgc): Validate database; fetch default RP + filter := influxdb.DBRPMappingFilterV2{ + OrgID: &ectx.OrgID, + Database: &m.Database, + } + + res, _, err := e.DBRP.FindMany(ctx, filter) + if err != nil { + return err + } + + if len(res) == 0 { + return query.ErrDatabaseNotFound(m.Database) } // If no retention policy was specified, use the default. 
if m.RetentionPolicy == "" { if defaultRetentionPolicy != "" { m.RetentionPolicy = defaultRetentionPolicy - } else if di.DefaultRetentionPolicy != "" { - m.RetentionPolicy = di.DefaultRetentionPolicy + } else if rp := mappings(res).DefaultRetentionPolicy(m.Database); rp != "" { + m.RetentionPolicy = rp } else { - return fmt.Errorf("default retention policy not set for: %s", di.Name) + return fmt.Errorf("default retention policy not set for: %s", m.Database) } } + return nil } -// IntoWriteRequest is a partial copy of cluster.WriteRequest -type IntoWriteRequest struct { - Database string - RetentionPolicy string - Points []models.Point +type mappings []*influxdb.DBRPMappingV2 + +func (m mappings) DefaultRetentionPolicy(db string) string { + for _, v := range m { + if v.Database == db && v.Default { + return v.RetentionPolicy + } + } + return "" } // TSDBStore is an interface for accessing the time series data store. type TSDBStore interface { - CreateShard(database, policy string, shardID uint64, enabled bool) error - WriteToShard(shardID uint64, points []models.Point) error - - RestoreShard(id uint64, r io.Reader) error - BackupShard(id uint64, since time.Time, w io.Writer) error - - DeleteDatabase(name string) error - DeleteMeasurement(database, name string) error - DeleteRetentionPolicy(database, name string) error - DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error - DeleteShard(id uint64) error - MeasurementNames(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) - - SeriesCardinality(database string) (int64, error) - MeasurementsCardinality(database string) (int64, error) } var _ TSDBStore = LocalTSDBStore{} @@ -1383,20 +746,3 @@ var _ TSDBStore = LocalTSDBStore{} type LocalTSDBStore struct { *tsdb.Store } - 
-// ShardIteratorCreator is an interface for creating an IteratorCreator to access a specific shard. -type ShardIteratorCreator interface { - ShardIteratorCreator(id uint64) query.IteratorCreator -} - -// joinUint64 returns a comma-delimited string of uint64 numbers. -func joinUint64(a []uint64) string { - var buf bytes.Buffer - for i, x := range a { - buf.WriteString(strconv.FormatUint(x, 10)) - if i < len(a)-1 { - buf.WriteRune(',') - } - } - return buf.String() -}