Merge pull request #17920 from influxdata/chore/merge-master
chore: merge master into algo-w branchpull/17925/head
commit
1db0fcccd1
|
|
@ -275,6 +275,42 @@ jobs:
|
|||
destination: raw-test-output
|
||||
- store_test_results: # Upload test results for display in Test Summary: https://circleci.com/docs/2.0/collect-test-data/
|
||||
path: /tmp/test-results
|
||||
|
||||
lint-feature-flags:
|
||||
docker:
|
||||
- image: circleci/golang:1.13
|
||||
environment:
|
||||
GOCACHE: /tmp/go-cache
|
||||
GOFLAGS: "-mod=readonly -p=2" # Go on Circle thinks 32 CPUs are available, but there aren't.
|
||||
working_directory: /go/src/github.com/influxdata/influxdb
|
||||
steps:
|
||||
- checkout
|
||||
# Populate GOCACHE.
|
||||
- restore_cache:
|
||||
name: Restoring GOCACHE
|
||||
keys:
|
||||
- influxdb-gocache-{{ .Branch }}-{{ .Revision }} # Matches when retrying a single run.
|
||||
- influxdb-gocache-{{ .Branch }}- # Matches a new commit on an existing branch.
|
||||
- influxdb-gocache- # Matches a new branch.
|
||||
# Populate GOPATH/pkg.
|
||||
- restore_cache:
|
||||
name: Restoring GOPATH/pkg/mod
|
||||
keys:
|
||||
- influxdb-gomod-{{ checksum "go.sum" }} # Matches based on go.sum checksum.
|
||||
- run: ./scripts/ci/lint/flags.bash
|
||||
- skip_if_not_master
|
||||
- save_cache:
|
||||
name: Saving GOCACHE
|
||||
key: influxdb-gocache-{{ .Branch }}-{{ .Revision }}
|
||||
paths:
|
||||
- /tmp/go-cache
|
||||
when: always
|
||||
- save_cache:
|
||||
name: Saving GOPATH/pkg/mod
|
||||
key: influxdb-gomod-{{ checksum "go.sum" }}
|
||||
paths:
|
||||
- /go/pkg/mod
|
||||
when: always
|
||||
golint:
|
||||
docker:
|
||||
- image: circleci/golang:1.13
|
||||
|
|
|
|||
|
|
@ -4,6 +4,8 @@
|
|||
|
||||
### Bug Fixes
|
||||
|
||||
1. [17906](https://github.com/influxdata/influxdb/pull/17906): Ensure UpdateUser cleans up the index when updating names
|
||||
|
||||
### UI Improvements
|
||||
|
||||
1. [17860](https://github.com/influxdata/influxdb/pull/17860): Allow bucket creation from the Data Explorer and Cell Editor
|
||||
|
|
@ -12,6 +14,8 @@
|
|||
|
||||
### Features
|
||||
|
||||
1. [17851](https://github.com/influxdata/influxdb/pull/17851): Add feature flag package capability and flags endpoint
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
1. [17618](https://github.com/influxdata/influxdb/pull/17618): Add index for URM by user ID to improve lookup performance
|
||||
|
|
|
|||
6
Makefile
6
Makefile
|
|
@ -199,5 +199,9 @@ protoc:
|
|||
unzip -o -d /go /tmp/protoc.zip
|
||||
chmod +x /go/bin/protoc
|
||||
|
||||
# generate feature flags
|
||||
flags:
|
||||
$(GO_GENERATE) ./kit/feature
|
||||
|
||||
# .PHONY targets represent actions that do not create an actual file.
|
||||
.PHONY: all $(SUBDIRS) run fmt checkfmt tidy checktidy checkgenerate test test-go test-js test-go-race bench clean node_modules vet nightly chronogiraffe dist ping protoc e2e run-e2e influxd
|
||||
.PHONY: all $(SUBDIRS) run fmt checkfmt tidy checktidy checkgenerate test test-go test-js test-go-race bench clean node_modules vet nightly chronogiraffe dist ping protoc e2e run-e2e influxd libflux flags
|
||||
|
|
|
|||
2
auth.go
2
auth.go
|
|
@ -103,7 +103,7 @@ type AuthorizationService interface {
|
|||
CreateAuthorization(ctx context.Context, a *Authorization) error
|
||||
|
||||
// UpdateAuthorization updates the status and description if available.
|
||||
UpdateAuthorization(ctx context.Context, id ID, udp *AuthorizationUpdate) (*Authorization, error)
|
||||
UpdateAuthorization(ctx context.Context, id ID, upd *AuthorizationUpdate) (*Authorization, error)
|
||||
|
||||
// Removes a authorization by token.
|
||||
DeleteAuthorization(ctx context.Context, id ID) error
|
||||
|
|
|
|||
|
|
@ -0,0 +1,66 @@
|
|||
package authorization
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrInvalidAuthID is used when the Authorization's ID cannot be encoded
|
||||
ErrInvalidAuthID = &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: "authorization ID is invalid",
|
||||
}
|
||||
|
||||
// ErrAuthNotFound is used when the specified auth cannot be found
|
||||
ErrAuthNotFound = &influxdb.Error{
|
||||
Code: influxdb.ENotFound,
|
||||
Msg: "authorization not found",
|
||||
}
|
||||
|
||||
// NotUniqueIDError is used when ...
|
||||
NotUniqueIDError = &influxdb.Error{
|
||||
Code: influxdb.EConflict,
|
||||
Msg: "ID already exists",
|
||||
}
|
||||
|
||||
// ErrFailureGeneratingID occurs ony when the random number generator
|
||||
// cannot generate an ID in MaxIDGenerationN times.
|
||||
ErrFailureGeneratingID = &influxdb.Error{
|
||||
Code: influxdb.EInternal,
|
||||
Msg: "unable to generate valid id",
|
||||
}
|
||||
|
||||
// ErrTokenAlreadyExistsError is used when attempting to create an authorization
|
||||
// with a token that already exists
|
||||
ErrTokenAlreadyExistsError = &influxdb.Error{
|
||||
Code: influxdb.EConflict,
|
||||
Msg: fmt.Sprintf("token already exists"),
|
||||
}
|
||||
)
|
||||
|
||||
// ErrInvalidAuthIDError is used when a service was provided an invalid ID.
|
||||
func ErrInvalidAuthIDError(err error) *influxdb.Error {
|
||||
return &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: "auth id provided is invalid",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrInternalServiceError is used when the error comes from an internal system.
|
||||
func ErrInternalServiceError(err error) *influxdb.Error {
|
||||
return &influxdb.Error{
|
||||
Code: influxdb.EInternal,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
// UnexpectedAuthIndexError is used when the error comes from an internal system.
|
||||
func UnexpectedAuthIndexError(err error) *influxdb.Error {
|
||||
return &influxdb.Error{
|
||||
Code: influxdb.EInternal,
|
||||
Msg: fmt.Sprintf("unexpected error retrieving auth index; Err: %v", err),
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
package authorization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/pkg/httpc"
|
||||
)
|
||||
|
||||
var _ influxdb.AuthorizationService = (*AuthorizationClientService)(nil)
|
||||
|
||||
// AuthorizationClientService connects to Influx via HTTP using tokens to manage authorizations
|
||||
type AuthorizationClientService struct {
|
||||
Client *httpc.Client
|
||||
}
|
||||
|
||||
// CreateAuthorization creates a new authorization and sets b.ID with the new identifier.
|
||||
func (s *AuthorizationClientService) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error {
|
||||
newAuth, err := newPostAuthorizationRequest(a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.Client.
|
||||
PostJSON(newAuth, prefixAuthorization).
|
||||
DecodeJSON(a).
|
||||
Do(ctx)
|
||||
}
|
||||
|
||||
// FindAuthorizations returns a list of authorizations that match filter and the total count of matching authorizations.
|
||||
// Additional options provide pagination & sorting.
|
||||
func (s *AuthorizationClientService) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) {
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.ID != nil {
|
||||
params = append(params, [2]string{"id", filter.ID.String()})
|
||||
}
|
||||
if filter.UserID != nil {
|
||||
params = append(params, [2]string{"userID", filter.UserID.String()})
|
||||
}
|
||||
if filter.User != nil {
|
||||
params = append(params, [2]string{"user", *filter.User})
|
||||
}
|
||||
if filter.OrgID != nil {
|
||||
params = append(params, [2]string{"orgID", filter.OrgID.String()})
|
||||
}
|
||||
if filter.Org != nil {
|
||||
params = append(params, [2]string{"org", *filter.Org})
|
||||
}
|
||||
|
||||
var as authsResponse
|
||||
err := s.Client.
|
||||
Get(prefixAuthorization).
|
||||
QueryParams(params...).
|
||||
DecodeJSON(&as).
|
||||
Do(ctx)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
auths := make([]*influxdb.Authorization, 0, len(as.Auths))
|
||||
for _, a := range as.Auths {
|
||||
auths = append(auths, a.toInfluxdb())
|
||||
}
|
||||
|
||||
return auths, len(auths), nil
|
||||
}
|
||||
|
||||
// FindAuthorizationByToken is not supported by the HTTP authorization service.
|
||||
func (s *AuthorizationClientService) FindAuthorizationByToken(ctx context.Context, token string) (*influxdb.Authorization, error) {
|
||||
return nil, errors.New("not supported in HTTP authorization service")
|
||||
}
|
||||
|
||||
// FindAuthorizationByID finds a single Authorization by its ID against a remote influx server.
|
||||
func (s *AuthorizationClientService) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) {
|
||||
var b influxdb.Authorization
|
||||
err := s.Client.
|
||||
Get(prefixAuthorization, id.String()).
|
||||
DecodeJSON(&b).
|
||||
Do(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &b, nil
|
||||
}
|
||||
|
||||
// UpdateAuthorization updates the status and description if available.
|
||||
func (s *AuthorizationClientService) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) {
|
||||
var res authResponse
|
||||
err := s.Client.
|
||||
PatchJSON(upd, prefixAuthorization, id.String()).
|
||||
DecodeJSON(&res).
|
||||
Do(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res.toInfluxdb(), nil
|
||||
}
|
||||
|
||||
// DeleteAuthorization removes a authorization by id.
|
||||
func (s *AuthorizationClientService) DeleteAuthorization(ctx context.Context, id influxdb.ID) error {
|
||||
return s.Client.
|
||||
Delete(prefixAuthorization, id.String()).
|
||||
Do(ctx)
|
||||
}
|
||||
|
|
@ -0,0 +1,579 @@
|
|||
package authorization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi"
|
||||
"github.com/go-chi/chi/middleware"
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type AuthHandler struct {
|
||||
chi.Router
|
||||
api *kithttp.API
|
||||
log *zap.Logger
|
||||
authSvc influxdb.AuthorizationService
|
||||
lookupService influxdb.LookupService
|
||||
tenantService influxdb.TenantService
|
||||
}
|
||||
|
||||
// NewHTTPAuthHandler constructs a new http server.
|
||||
func NewHTTPAuthHandler(log *zap.Logger, authService influxdb.AuthorizationService, tenantService influxdb.TenantService, lookupService influxdb.LookupService) *AuthHandler {
|
||||
h := &AuthHandler{
|
||||
api: kithttp.NewAPI(kithttp.WithLog(log)),
|
||||
log: log,
|
||||
authSvc: authService,
|
||||
tenantService: tenantService,
|
||||
lookupService: lookupService,
|
||||
}
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Use(
|
||||
middleware.Recoverer,
|
||||
middleware.RequestID,
|
||||
middleware.RealIP,
|
||||
)
|
||||
|
||||
r.Route("/", func(r chi.Router) {
|
||||
r.Post("/", h.handlePostAuthorization)
|
||||
r.Get("/", h.handleGetAuthorizations)
|
||||
|
||||
r.Route("/{id}", func(r chi.Router) {
|
||||
r.Get("/", h.handleGetAuthorization)
|
||||
r.Patch("/", h.handleUpdateAuthorization)
|
||||
r.Delete("/", h.handleDeleteAuthorization)
|
||||
})
|
||||
})
|
||||
|
||||
h.Router = r
|
||||
return h
|
||||
}
|
||||
|
||||
const prefixAuthorization = "/api/v2/authorizations"
|
||||
|
||||
func (h *AuthHandler) Prefix() string {
|
||||
return prefixAuthorization
|
||||
}
|
||||
|
||||
// handlePostAuthorization is the HTTP handler for the POST /api/v2/authorizations route.
|
||||
func (h *AuthHandler) handlePostAuthorization(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
a, err := decodePostAuthorizationRequest(ctx, r)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
// We can assume we have a User ID because if the request does not provide one, then the authorizer
|
||||
// middleware gets it from the context
|
||||
auth := a.toInfluxdb(*a.UserID)
|
||||
|
||||
if err := h.authSvc.CreateAuthorization(ctx, auth); err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
perms, err := newPermissionsResponse(ctx, auth.Permissions, h.lookupService)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.log.Debug("Auth created ", zap.String("auth", fmt.Sprint(auth)))
|
||||
|
||||
resp, err := h.newAuthResponse(ctx, auth, perms)
|
||||
if err != nil {
|
||||
h.api.Err(w, influxdb.ErrUnableToCreateToken)
|
||||
}
|
||||
|
||||
h.api.Respond(w, http.StatusCreated, resp)
|
||||
}
|
||||
|
||||
type postAuthorizationRequest struct {
|
||||
Status influxdb.Status `json:"status"`
|
||||
OrgID influxdb.ID `json:"orgID"`
|
||||
UserID *influxdb.ID `json:"userID,omitempty"`
|
||||
Description string `json:"description"`
|
||||
Permissions []influxdb.Permission `json:"permissions"`
|
||||
}
|
||||
|
||||
type authResponse struct {
|
||||
ID influxdb.ID `json:"id"`
|
||||
Token string `json:"token"`
|
||||
Status influxdb.Status `json:"status"`
|
||||
Description string `json:"description"`
|
||||
OrgID influxdb.ID `json:"orgID"`
|
||||
Org string `json:"org"`
|
||||
UserID influxdb.ID `json:"userID"`
|
||||
User string `json:"user"`
|
||||
Permissions []permissionResponse `json:"permissions"`
|
||||
Links map[string]string `json:"links"`
|
||||
CreatedAt time.Time `json:"createdAt"`
|
||||
UpdatedAt time.Time `json:"updatedAt"`
|
||||
}
|
||||
|
||||
// In the future, we would like only the service layer to look up the user and org to see if they are valid
|
||||
// but for now we need to look up the User and Org here because the API expects the response
|
||||
// to have the names of the Org and User
|
||||
func (h *AuthHandler) newAuthResponse(ctx context.Context, a *influxdb.Authorization, ps []permissionResponse) (*authResponse, error) {
|
||||
org, err := h.tenantService.FindOrganizationByID(ctx, a.OrgID)
|
||||
if err != nil {
|
||||
h.log.Info("Failed to get org", zap.String("handler", "getAuthorizations"), zap.String("orgID", a.OrgID.String()), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
user, err := h.tenantService.FindUserByID(ctx, a.UserID)
|
||||
if err != nil {
|
||||
h.log.Info("Failed to get user", zap.String("userID", a.UserID.String()), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
res := &authResponse{
|
||||
ID: a.ID,
|
||||
Token: a.Token,
|
||||
Status: a.Status,
|
||||
Description: a.Description,
|
||||
OrgID: a.OrgID,
|
||||
UserID: a.UserID,
|
||||
User: user.Name,
|
||||
Org: org.Name,
|
||||
Permissions: ps,
|
||||
Links: map[string]string{
|
||||
"self": fmt.Sprintf("/api/v2/authorizations/%s", a.ID),
|
||||
"user": fmt.Sprintf("/api/v2/users/%s", a.UserID),
|
||||
},
|
||||
CreatedAt: a.CreatedAt,
|
||||
UpdatedAt: a.UpdatedAt,
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (p *postAuthorizationRequest) toInfluxdb(userID influxdb.ID) *influxdb.Authorization {
|
||||
return &influxdb.Authorization{
|
||||
OrgID: p.OrgID,
|
||||
Status: p.Status,
|
||||
Description: p.Description,
|
||||
Permissions: p.Permissions,
|
||||
UserID: userID,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *authResponse) toInfluxdb() *influxdb.Authorization {
|
||||
res := &influxdb.Authorization{
|
||||
ID: a.ID,
|
||||
Token: a.Token,
|
||||
Status: a.Status,
|
||||
Description: a.Description,
|
||||
OrgID: a.OrgID,
|
||||
UserID: a.UserID,
|
||||
CRUDLog: influxdb.CRUDLog{
|
||||
CreatedAt: a.CreatedAt,
|
||||
UpdatedAt: a.UpdatedAt,
|
||||
},
|
||||
}
|
||||
for _, p := range a.Permissions {
|
||||
res.Permissions = append(res.Permissions, influxdb.Permission{Action: p.Action, Resource: p.Resource.Resource})
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type authsResponse struct {
|
||||
Links map[string]string `json:"links"`
|
||||
Auths []*authResponse `json:"authorizations"`
|
||||
}
|
||||
|
||||
func newAuthsResponse(as []*authResponse) *authsResponse {
|
||||
return &authsResponse{
|
||||
// TODO(desa): update links to include paging and filter information
|
||||
Links: map[string]string{
|
||||
"self": "/api/v2/authorizations",
|
||||
},
|
||||
Auths: as,
|
||||
}
|
||||
}
|
||||
|
||||
func newPostAuthorizationRequest(a *influxdb.Authorization) (*postAuthorizationRequest, error) {
|
||||
res := &postAuthorizationRequest{
|
||||
OrgID: a.OrgID,
|
||||
Description: a.Description,
|
||||
Permissions: a.Permissions,
|
||||
Status: a.Status,
|
||||
}
|
||||
|
||||
if a.UserID.Valid() {
|
||||
res.UserID = &a.UserID
|
||||
}
|
||||
|
||||
res.SetDefaults()
|
||||
|
||||
return res, res.Validate()
|
||||
}
|
||||
|
||||
func (p *postAuthorizationRequest) SetDefaults() {
|
||||
if p.Status == "" {
|
||||
p.Status = influxdb.Active
|
||||
}
|
||||
}
|
||||
|
||||
func (p *postAuthorizationRequest) Validate() error {
|
||||
if len(p.Permissions) == 0 {
|
||||
return &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: "authorization must include permissions",
|
||||
}
|
||||
}
|
||||
|
||||
for _, perm := range p.Permissions {
|
||||
if err := perm.Valid(); err != nil {
|
||||
return &influxdb.Error{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !p.OrgID.Valid() {
|
||||
return &influxdb.Error{
|
||||
Err: influxdb.ErrInvalidID,
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: "org id required",
|
||||
}
|
||||
}
|
||||
|
||||
if p.Status == "" {
|
||||
p.Status = influxdb.Active
|
||||
}
|
||||
|
||||
err := p.Status.Valid()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type permissionResponse struct {
|
||||
Action influxdb.Action `json:"action"`
|
||||
Resource resourceResponse `json:"resource"`
|
||||
}
|
||||
|
||||
type resourceResponse struct {
|
||||
influxdb.Resource
|
||||
Name string `json:"name,omitempty"`
|
||||
Organization string `json:"org,omitempty"`
|
||||
}
|
||||
|
||||
func newPermissionsResponse(ctx context.Context, ps []influxdb.Permission, svc influxdb.LookupService) ([]permissionResponse, error) {
|
||||
res := make([]permissionResponse, len(ps))
|
||||
for i, p := range ps {
|
||||
res[i] = permissionResponse{
|
||||
Action: p.Action,
|
||||
Resource: resourceResponse{
|
||||
Resource: p.Resource,
|
||||
},
|
||||
}
|
||||
|
||||
if p.Resource.ID != nil {
|
||||
name, err := svc.Name(ctx, p.Resource.Type, *p.Resource.ID)
|
||||
if influxdb.ErrorCode(err) == influxdb.ENotFound {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res[i].Resource.Name = name
|
||||
}
|
||||
|
||||
if p.Resource.OrgID != nil {
|
||||
name, err := svc.Name(ctx, influxdb.OrgsResourceType, *p.Resource.OrgID)
|
||||
if influxdb.ErrorCode(err) == influxdb.ENotFound {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res[i].Resource.Organization = name
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func decodePostAuthorizationRequest(ctx context.Context, r *http.Request) (*postAuthorizationRequest, error) {
|
||||
a := &postAuthorizationRequest{}
|
||||
if err := json.NewDecoder(r.Body).Decode(a); err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: "invalid json structure",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
a.SetDefaults()
|
||||
|
||||
return a, a.Validate()
|
||||
}
|
||||
|
||||
// handleGetAuthorizations is the HTTP handler for the GET /api/v2/authorizations route.
|
||||
func (h *AuthHandler) handleGetAuthorizations(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
req, err := decodeGetAuthorizationsRequest(ctx, r)
|
||||
if err != nil {
|
||||
h.log.Info("Failed to decode request", zap.String("handler", "getAuthorizations"), zap.Error(err))
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
opts := influxdb.FindOptions{}
|
||||
as, _, err := h.authSvc.FindAuthorizations(ctx, req.filter, opts)
|
||||
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
f := req.filter
|
||||
// If the user or org name was provided, look up the ID first
|
||||
if f.User != nil {
|
||||
u, err := h.tenantService.FindUser(ctx, influxdb.UserFilter{Name: f.User})
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
f.UserID = &u.ID
|
||||
}
|
||||
|
||||
if f.Org != nil {
|
||||
o, err := h.tenantService.FindOrganization(ctx, influxdb.OrganizationFilter{Name: f.Org})
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
f.OrgID = &o.ID
|
||||
}
|
||||
|
||||
auths := make([]*authResponse, 0, len(as))
|
||||
for _, a := range as {
|
||||
ps, err := newPermissionsResponse(ctx, a.Permissions, h.lookupService)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := h.newAuthResponse(ctx, a, ps)
|
||||
if err != nil {
|
||||
h.log.Info("Failed to create auth response", zap.String("handler", "getAuthorizations"))
|
||||
continue
|
||||
}
|
||||
auths = append(auths, resp)
|
||||
}
|
||||
|
||||
h.log.Debug("Auths retrieved ", zap.String("auths", fmt.Sprint(auths)))
|
||||
|
||||
h.api.Respond(w, http.StatusOK, newAuthsResponse(auths))
|
||||
}
|
||||
|
||||
type getAuthorizationsRequest struct {
|
||||
filter influxdb.AuthorizationFilter
|
||||
}
|
||||
|
||||
func decodeGetAuthorizationsRequest(ctx context.Context, r *http.Request) (*getAuthorizationsRequest, error) {
|
||||
qp := r.URL.Query()
|
||||
|
||||
req := &getAuthorizationsRequest{}
|
||||
|
||||
userID := qp.Get("userID")
|
||||
if userID != "" {
|
||||
id, err := influxdb.IDFromString(userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.filter.UserID = id
|
||||
}
|
||||
|
||||
user := qp.Get("user")
|
||||
if user != "" {
|
||||
req.filter.User = &user
|
||||
}
|
||||
|
||||
orgID := qp.Get("orgID")
|
||||
if orgID != "" {
|
||||
id, err := influxdb.IDFromString(orgID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.filter.OrgID = id
|
||||
}
|
||||
|
||||
org := qp.Get("org")
|
||||
if org != "" {
|
||||
req.filter.Org = &org
|
||||
}
|
||||
|
||||
authID := qp.Get("id")
|
||||
if authID != "" {
|
||||
id, err := influxdb.IDFromString(authID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.filter.ID = id
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (h *AuthHandler) handleGetAuthorization(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
id, err := influxdb.IDFromString(chi.URLParam(r, "id"))
|
||||
if err != nil {
|
||||
h.log.Info("Failed to decode request", zap.String("handler", "getAuthorization"), zap.Error(err))
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
a, err := h.authSvc.FindAuthorizationByID(ctx, *id)
|
||||
if err != nil {
|
||||
// Don't log here, it should already be handled by the service
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
ps, err := newPermissionsResponse(ctx, a.Permissions, h.lookupService)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.log.Debug("Auth retrieved ", zap.String("auth", fmt.Sprint(a)))
|
||||
|
||||
resp, err := h.newAuthResponse(ctx, a, ps)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.api.Respond(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// type getAuthorizationRequest struct {
|
||||
// ID influxdb.ID
|
||||
// }
|
||||
|
||||
// func decodeGetAuthorizationRequest(ctx context.Context, r *http.Request) (*getAuthorizationRequest, error) {
|
||||
// params := httprouter.ParamsFromContext(ctx)
|
||||
// id := params.ByName("id")
|
||||
// if id == "" {
|
||||
// return nil, &influxdb.Error{
|
||||
// Code: influxdb.EInvalid,
|
||||
// Msg: "url missing id",
|
||||
// }
|
||||
// }
|
||||
|
||||
// var i influxdb.ID
|
||||
// if err := i.DecodeFromString(id); err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
// return &getAuthorizationRequest{
|
||||
// ID: i,
|
||||
// }, nil
|
||||
// }
|
||||
|
||||
// handleUpdateAuthorization is the HTTP handler for the PATCH /api/v2/authorizations/:id route that updates the authorization's status and desc.
|
||||
func (h *AuthHandler) handleUpdateAuthorization(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
req, err := decodeUpdateAuthorizationRequest(ctx, r)
|
||||
if err != nil {
|
||||
h.log.Info("Failed to decode request", zap.String("handler", "updateAuthorization"), zap.Error(err))
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
a, err := h.authSvc.FindAuthorizationByID(ctx, req.ID)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
a, err = h.authSvc.UpdateAuthorization(ctx, a.ID, req.AuthorizationUpdate)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
ps, err := newPermissionsResponse(ctx, a.Permissions, h.lookupService)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
h.log.Debug("Auth updated", zap.String("auth", fmt.Sprint(a)))
|
||||
|
||||
resp, err := h.newAuthResponse(ctx, a, ps)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.api.Respond(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
type updateAuthorizationRequest struct {
|
||||
ID influxdb.ID
|
||||
*influxdb.AuthorizationUpdate
|
||||
}
|
||||
|
||||
func decodeUpdateAuthorizationRequest(ctx context.Context, r *http.Request) (*updateAuthorizationRequest, error) {
|
||||
id, err := influxdb.IDFromString(chi.URLParam(r, "id"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
upd := &influxdb.AuthorizationUpdate{}
|
||||
if err := json.NewDecoder(r.Body).Decode(upd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &updateAuthorizationRequest{
|
||||
ID: *id,
|
||||
AuthorizationUpdate: upd,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// handleDeleteAuthorization is the HTTP handler for the DELETE /api/v2/authorizations/:id route.
|
||||
func (h *AuthHandler) handleDeleteAuthorization(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
req, err := decodeDeleteAuthorizationRequest(ctx, r)
|
||||
if err != nil {
|
||||
h.log.Info("Failed to decode request", zap.String("handler", "deleteAuthorization"), zap.Error(err))
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.authSvc.DeleteAuthorization(ctx, req.ID); err != nil {
|
||||
// Don't log here, it should already be handled by the service
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.log.Debug("Auth deleted", zap.String("authID", fmt.Sprint(req.ID)))
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
type deleteAuthorizationRequest struct {
|
||||
ID influxdb.ID
|
||||
}
|
||||
|
||||
// we can clean up and remove these decode functions todo (al)
|
||||
func decodeDeleteAuthorizationRequest(ctx context.Context, r *http.Request) (*deleteAuthorizationRequest, error) {
|
||||
id, err := influxdb.IDFromString(chi.URLParam(r, "id"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &deleteAuthorizationRequest{
|
||||
ID: *id,
|
||||
}, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,89 @@
|
|||
package authorization_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi"
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/authorization"
|
||||
ihttp "github.com/influxdata/influxdb/v2/http"
|
||||
"github.com/influxdata/influxdb/v2/inmem"
|
||||
"github.com/influxdata/influxdb/v2/kv"
|
||||
"github.com/influxdata/influxdb/v2/mock"
|
||||
"github.com/influxdata/influxdb/v2/tenant"
|
||||
itesting "github.com/influxdata/influxdb/v2/testing"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
func initAuthorizationService(f itesting.AuthorizationFields, t *testing.T) (influxdb.AuthorizationService, string, func()) {
|
||||
t.Helper()
|
||||
|
||||
s, stCloser, err := NewTestInmemStore(t)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
storage, err := authorization.NewStore(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// set up tenant service
|
||||
store, err := tenant.NewStore(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ts := tenant.NewService(store)
|
||||
|
||||
ctx := context.Background()
|
||||
svc := authorization.NewService(storage, ts)
|
||||
|
||||
for _, u := range f.Users {
|
||||
if err := ts.CreateUser(ctx, u); err != nil {
|
||||
t.Fatalf("failed to populate users")
|
||||
}
|
||||
}
|
||||
|
||||
for _, o := range f.Orgs {
|
||||
if err := ts.CreateOrganization(ctx, o); err != nil {
|
||||
t.Fatalf("failed to populate orgs")
|
||||
}
|
||||
}
|
||||
|
||||
for _, a := range f.Authorizations {
|
||||
if err := svc.CreateAuthorization(ctx, a); err != nil {
|
||||
t.Fatalf("failed to populate authorizations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
handler := authorization.NewHTTPAuthHandler(zaptest.NewLogger(t), svc, ts, mock.NewLookupService())
|
||||
r := chi.NewRouter()
|
||||
r.Mount(handler.Prefix(), handler)
|
||||
server := httptest.NewServer(r)
|
||||
|
||||
httpClient, err := ihttp.NewHTTPClient(server.URL, "", false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
client := authorization.AuthorizationClientService{
|
||||
Client: httpClient,
|
||||
}
|
||||
|
||||
return &client, "http_authorization", func() {
|
||||
server.Close()
|
||||
stCloser()
|
||||
}
|
||||
}
|
||||
|
||||
func NewTestInmemStore(t *testing.T) (kv.Store, func(), error) {
|
||||
return inmem.NewKVStore(), func() {}, nil
|
||||
}
|
||||
|
||||
func TestAuthorizationService(t *testing.T) {
|
||||
t.Parallel()
|
||||
// skip FindByToken test here because this function is not supported by the API
|
||||
itesting.AuthorizationService(initAuthorizationService, t, itesting.WithoutFindByToken())
|
||||
}
|
||||
|
|
@ -0,0 +1,133 @@
|
|||
package authorization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/authorizer"
|
||||
icontext "github.com/influxdata/influxdb/v2/context"
|
||||
)
|
||||
|
||||
type AuthedAuthorizationService struct {
|
||||
s influxdb.AuthorizationService
|
||||
ts influxdb.TenantService
|
||||
}
|
||||
|
||||
var _ influxdb.AuthorizationService = (*AuthedAuthorizationService)(nil)
|
||||
|
||||
func NewAuthedAuthorizationService(s influxdb.AuthorizationService, ts influxdb.TenantService) *AuthedAuthorizationService {
|
||||
return &AuthedAuthorizationService{
|
||||
s: s,
|
||||
ts: ts,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *AuthedAuthorizationService) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error {
|
||||
if a.UserID == 0 {
|
||||
auth, err := icontext.GetAuthorizer(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
user, err := s.ts.FindUserByID(ctx, auth.GetUserID())
|
||||
if err != nil {
|
||||
// if we could not get the user from the Authorization object or the Context,
|
||||
// then we cannot authorize the user
|
||||
return err
|
||||
}
|
||||
a.UserID = user.ID
|
||||
}
|
||||
|
||||
if _, _, err := authorizer.AuthorizeCreate(ctx, influxdb.AuthorizationsResourceType, a.OrgID); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := authorizer.VerifyPermissions(ctx, a.Permissions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.s.CreateAuthorization(ctx, a)
|
||||
}
|
||||
|
||||
func (s *AuthedAuthorizationService) FindAuthorizationByToken(ctx context.Context, t string) (*influxdb.Authorization, error) {
|
||||
a, err := s.s.FindAuthorizationByToken(ctx, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, _, err := authorizer.AuthorizeReadResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (s *AuthedAuthorizationService) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) {
|
||||
a, err := s.s.FindAuthorizationByID(ctx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, _, err := authorizer.AuthorizeReadResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (s *AuthedAuthorizationService) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) {
|
||||
// TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data
|
||||
// will likely be expensive.
|
||||
as, _, err := s.s.FindAuthorizations(ctx, filter, opt...)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
return authorizer.AuthorizeFindAuthorizations(ctx, as)
|
||||
}
|
||||
|
||||
func (s *AuthedAuthorizationService) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) {
|
||||
a, err := s.s.FindAuthorizationByID(ctx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.s.UpdateAuthorization(ctx, id, upd)
|
||||
}
|
||||
|
||||
func (s *AuthedAuthorizationService) DeleteAuthorization(ctx context.Context, id influxdb.ID) error {
|
||||
a, err := s.s.FindAuthorizationByID(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.s.DeleteAuthorization(ctx, id)
|
||||
}
|
||||
|
||||
// VerifyPermissions ensures that an authorization is allowed all of the appropriate permissions.
|
||||
func VerifyPermissions(ctx context.Context, ps []influxdb.Permission) error {
|
||||
for _, p := range ps {
|
||||
if err := authorizer.IsAllowed(ctx, p); err != nil {
|
||||
return &influxdb.Error{
|
||||
Err: err,
|
||||
Msg: fmt.Sprintf("permission %s is not allowed", p),
|
||||
Code: influxdb.EForbidden,
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -0,0 +1,460 @@
|
|||
package authorization_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/authorization"
|
||||
influxdbcontext "github.com/influxdata/influxdb/v2/context"
|
||||
"github.com/influxdata/influxdb/v2/inmem"
|
||||
"github.com/influxdata/influxdb/v2/mock"
|
||||
"github.com/influxdata/influxdb/v2/tenant"
|
||||
influxdbtesting "github.com/influxdata/influxdb/v2/testing"
|
||||
)
|
||||
|
||||
// authorizationCmpOptions configures cmp.Diff for comparing authorization
// slices: byte slices are compared by content and slices are sorted by ID so
// ordering differences do not produce spurious diffs.
var authorizationCmpOptions = cmp.Options{
	cmp.Comparer(func(x, y []byte) bool {
		return bytes.Equal(x, y)
	}),
	cmp.Transformer("Sort", func(in []*influxdb.Authorization) []*influxdb.Authorization {
		out := append([]*influxdb.Authorization(nil), in...) // Copy input to avoid mutating it
		sort.Slice(out, func(i, j int) bool {
			return out[i].ID.String() > out[j].ID.String()
		})
		return out
	}),
}
|
||||
|
||||
// TestAuthorizationService_ReadAuthorization exercises the read paths of the
// authorizing middleware (find by ID, by token, and list) against a mocked
// underlying service, varying only the caller's permissions per case.
func TestAuthorizationService_ReadAuthorization(t *testing.T) {
	type args struct {
		permissions []influxdb.Permission
	}
	type wants struct {
		err            error
		authorizations []*influxdb.Authorization
	}

	tests := []struct {
		name  string
		args  args
		wants wants
	}{
		{
			// Caller may read authorizations in org 1 and user 1: full access.
			name: "authorized to access id",
			args: args{
				permissions: []influxdb.Permission{
					{
						Action: influxdb.ReadAction,
						Resource: influxdb.Resource{
							Type:  influxdb.AuthorizationsResourceType,
							OrgID: influxdbtesting.IDPtr(1),
						},
					},
					{
						Action: influxdb.ReadAction,
						Resource: influxdb.Resource{
							Type: influxdb.UsersResourceType,
							ID:   influxdbtesting.IDPtr(1),
						},
					},
				},
			},
			wants: wants{
				err: nil,
				authorizations: []*influxdb.Authorization{
					{
						ID:     10,
						UserID: 1,
						OrgID:  1,
					},
				},
			},
		},
		{
			// Authorization read permission is scoped to org 2, but the mock
			// returns org 1 resources, so point lookups fail.
			name: "unauthorized to access id - wrong org",
			args: args{
				permissions: []influxdb.Permission{
					{
						Action: influxdb.ReadAction,
						Resource: influxdb.Resource{
							Type:  influxdb.AuthorizationsResourceType,
							OrgID: influxdbtesting.IDPtr(2),
						},
					},
					{
						Action: influxdb.ReadAction,
						Resource: influxdb.Resource{
							Type: influxdb.UsersResourceType,
							ID:   influxdbtesting.IDPtr(1),
						},
					},
				},
			},
			wants: wants{
				err: &influxdb.Error{
					Msg:  "read:orgs/0000000000000001/authorizations/000000000000000a is unauthorized",
					Code: influxdb.EUnauthorized,
				},
				authorizations: []*influxdb.Authorization{},
			},
		},
		{
			// User read permission is scoped to user 2, but the returned
			// authorizations belong to user 1.
			name: "unauthorized to access id - wrong user",
			args: args{
				permissions: []influxdb.Permission{
					{
						Action: influxdb.ReadAction,
						Resource: influxdb.Resource{
							Type:  influxdb.AuthorizationsResourceType,
							OrgID: influxdbtesting.IDPtr(1),
						},
					},
					{
						Action: influxdb.ReadAction,
						Resource: influxdb.Resource{
							Type: influxdb.UsersResourceType,
							ID:   influxdbtesting.IDPtr(2),
						},
					},
				},
			},
			wants: wants{
				err: &influxdb.Error{
					Msg:  "read:users/0000000000000001 is unauthorized",
					Code: influxdb.EUnauthorized,
				},
				authorizations: []*influxdb.Authorization{},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The mock always returns an authorization owned by user 1 / org 1.
			m := &mock.AuthorizationService{}
			m.FindAuthorizationByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) {
				return &influxdb.Authorization{
					ID:     id,
					UserID: 1,
					OrgID:  1,
				}, nil
			}
			m.FindAuthorizationByTokenFn = func(ctx context.Context, t string) (*influxdb.Authorization, error) {
				return &influxdb.Authorization{
					ID:     10,
					UserID: 1,
					OrgID:  1,
				}, nil
			}
			m.FindAuthorizationsFn = func(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) {
				return []*influxdb.Authorization{
					{
						ID:     10,
						UserID: 1,
						OrgID:  1,
					},
				}, 1, nil
			}
			// set up tenant service
			st := inmem.NewKVStore()
			store, err := tenant.NewStore(st)
			if err != nil {
				t.Fatal(err)
			}
			ts := tenant.NewService(store)
			s := authorization.NewAuthedAuthorizationService(m, ts)

			ctx := context.Background()
			ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions))

			t.Run("find authorization by id", func(t *testing.T) {
				_, err := s.FindAuthorizationByID(ctx, 10)
				influxdbtesting.ErrorsEqual(t, err, tt.wants.err)
			})
			t.Run("find authorization by token", func(t *testing.T) {
				_, err := s.FindAuthorizationByToken(ctx, "10")
				influxdbtesting.ErrorsEqual(t, err, tt.wants.err)
			})

			// Listing never errors on missing permission; unauthorized entries
			// are filtered out of the result instead.
			t.Run("find authorizations", func(t *testing.T) {
				as, _, err := s.FindAuthorizations(ctx, influxdb.AuthorizationFilter{})
				influxdbtesting.ErrorsEqual(t, err, nil)

				if diff := cmp.Diff(as, tt.wants.authorizations, authorizationCmpOptions...); diff != "" {
					t.Errorf("authorizations are different -got/+want\ndiff %s", diff)
				}
			})
		})
	}
}
|
||||
|
||||
// TestAuthorizationService_WriteAuthorization exercises the update and delete
// paths of the authorizing middleware against a mocked underlying service,
// varying only the caller's permissions per case.
func TestAuthorizationService_WriteAuthorization(t *testing.T) {
	type args struct {
		permissions []influxdb.Permission
	}
	type wants struct {
		err error
	}

	tests := []struct {
		name  string
		args  args
		wants wants
	}{
		{
			// Write access to both org 1 authorizations and user 1 succeeds.
			name: "authorized to write authorization",
			args: args{
				permissions: []influxdb.Permission{
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type:  influxdb.AuthorizationsResourceType,
							OrgID: influxdbtesting.IDPtr(1),
						},
					},
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type: influxdb.UsersResourceType,
							ID:   influxdbtesting.IDPtr(1),
						},
					},
				},
			},
			wants: wants{
				err: nil,
			},
		},
		{
			// Authorization write permission scoped to org 2 does not cover
			// the org 1 resource returned by the mock.
			name: "unauthorized to write authorization - wrong org",
			args: args{
				permissions: []influxdb.Permission{
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type:  influxdb.AuthorizationsResourceType,
							OrgID: influxdbtesting.IDPtr(2),
						},
					},
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type: influxdb.UsersResourceType,
							ID:   influxdbtesting.IDPtr(1),
						},
					},
				},
			},
			wants: wants{
				err: &influxdb.Error{
					Msg:  "write:orgs/0000000000000001/authorizations/000000000000000a is unauthorized",
					Code: influxdb.EUnauthorized,
				},
			},
		},
		{
			// User write permission scoped to user 2 does not cover the
			// authorization's actual owner, user 1.
			name: "unauthorized to write authorization - wrong user",
			args: args{
				permissions: []influxdb.Permission{
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type:  influxdb.AuthorizationsResourceType,
							OrgID: influxdbtesting.IDPtr(1),
						},
					},
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type: influxdb.UsersResourceType,
							ID:   influxdbtesting.IDPtr(2),
						},
					},
				},
			},
			wants: wants{
				err: &influxdb.Error{
					Msg:  "write:users/0000000000000001 is unauthorized",
					Code: influxdb.EUnauthorized,
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The mock always returns an authorization owned by user 1 / org 1
			// and succeeds on every mutation.
			m := &mock.AuthorizationService{}
			m.FindAuthorizationByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) {
				return &influxdb.Authorization{
					ID:     id,
					UserID: 1,
					OrgID:  1,
				}, nil
			}
			m.CreateAuthorizationFn = func(ctx context.Context, a *influxdb.Authorization) error {
				return nil
			}
			m.DeleteAuthorizationFn = func(ctx context.Context, id influxdb.ID) error {
				return nil
			}
			m.UpdateAuthorizationFn = func(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) {
				return nil, nil
			}
			// set up tenant service
			st := inmem.NewKVStore()
			store, err := tenant.NewStore(st)
			if err != nil {
				t.Fatal(err)
			}
			ts := tenant.NewService(store)
			s := authorization.NewAuthedAuthorizationService(m, ts)

			ctx := context.Background()
			ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions))

			t.Run("update authorization", func(t *testing.T) {
				_, err := s.UpdateAuthorization(ctx, 10, &influxdb.AuthorizationUpdate{Status: influxdb.Active.Ptr()})
				influxdbtesting.ErrorsEqual(t, err, tt.wants.err)
			})

			t.Run("delete authorization", func(t *testing.T) {
				err := s.DeleteAuthorization(ctx, 10)
				influxdbtesting.ErrorsEqual(t, err, tt.wants.err)
			})

		})
	}
}
|
||||
|
||||
// TestAuthorizationService_CreateAuthorization exercises the create path of
// the authorizing middleware against a mocked underlying service, varying
// only the caller's permissions per case.
func TestAuthorizationService_CreateAuthorization(t *testing.T) {
	type args struct {
		permissions []influxdb.Permission
	}
	type wants struct {
		err error
	}

	tests := []struct {
		name  string
		args  args
		wants wants
	}{
		{
			// Write access to both org 1 authorizations and user 1 succeeds.
			name: "authorized to write authorization",
			args: args{
				permissions: []influxdb.Permission{
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type:  influxdb.AuthorizationsResourceType,
							OrgID: influxdbtesting.IDPtr(1),
						},
					},
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type: influxdb.UsersResourceType,
							ID:   influxdbtesting.IDPtr(1),
						},
					},
				},
			},
			wants: wants{
				err: nil,
			},
		},
		{
			// Create is authorized against the org, so the error message has
			// no trailing resource ID (unlike the update/delete cases).
			name: "unauthorized to write authorization - wrong org",
			args: args{
				permissions: []influxdb.Permission{
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type:  influxdb.AuthorizationsResourceType,
							OrgID: influxdbtesting.IDPtr(2),
						},
					},
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type: influxdb.UsersResourceType,
							ID:   influxdbtesting.IDPtr(1),
						},
					},
				},
			},
			wants: wants{
				err: &influxdb.Error{
					Msg:  "write:orgs/0000000000000001/authorizations is unauthorized",
					Code: influxdb.EUnauthorized,
				},
			},
		},
		{
			// User write permission scoped to user 2 does not cover the
			// authorization's target owner, user 1.
			name: "unauthorized to write authorization - wrong user",
			args: args{
				permissions: []influxdb.Permission{
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type:  influxdb.AuthorizationsResourceType,
							OrgID: influxdbtesting.IDPtr(1),
						},
					},
					{
						Action: influxdb.WriteAction,
						Resource: influxdb.Resource{
							Type: influxdb.UsersResourceType,
							ID:   influxdbtesting.IDPtr(2),
						},
					},
				},
			},
			wants: wants{
				err: &influxdb.Error{
					Msg:  "write:users/0000000000000001 is unauthorized",
					Code: influxdb.EUnauthorized,
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The mock succeeds on every operation; only the middleware's
			// permission checks can produce errors here.
			m := &mock.AuthorizationService{}
			m.FindAuthorizationByIDFn = func(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) {
				return &influxdb.Authorization{
					ID:     id,
					UserID: 1,
					OrgID:  1,
				}, nil
			}
			m.CreateAuthorizationFn = func(ctx context.Context, a *influxdb.Authorization) error {
				return nil
			}
			m.DeleteAuthorizationFn = func(ctx context.Context, id influxdb.ID) error {
				return nil
			}
			m.UpdateAuthorizationFn = func(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) {
				return nil, nil
			}
			// set up tenant service
			st := inmem.NewKVStore()
			store, err := tenant.NewStore(st)
			if err != nil {
				t.Fatal(err)
			}
			ts := tenant.NewService(store)
			s := authorization.NewAuthedAuthorizationService(m, ts)

			ctx := context.Background()
			ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions))

			err = s.CreateAuthorization(ctx, &influxdb.Authorization{OrgID: 1, UserID: 1})
			influxdbtesting.ErrorsEqual(t, err, tt.wants.err)
		})
	}
}
|
||||
|
|
@ -0,0 +1,100 @@
|
|||
package authorization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// AuthLogger is a logging middleware for an AuthorizationService: it logs the
// duration of every call and any error, then delegates to the wrapped service.
type AuthLogger struct {
	logger      *zap.Logger                   // destination for debug/error records
	authService influxdb.AuthorizationService // wrapped service
}

// NewAuthLogger returns a logging service middleware for the Authorization Service.
func NewAuthLogger(log *zap.Logger, s influxdb.AuthorizationService) *AuthLogger {
	return &AuthLogger{
		logger:      log,
		authService: s,
	}
}

// Compile-time check that the middleware satisfies the service interface.
var _ influxdb.AuthorizationService = (*AuthLogger)(nil)
|
||||
|
||||
func (l *AuthLogger) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) (err error) {
|
||||
defer func(start time.Time) {
|
||||
dur := zap.Duration("took", time.Since(start))
|
||||
if err != nil {
|
||||
l.logger.Error("failed to create authorization", zap.Error(err), dur)
|
||||
return
|
||||
}
|
||||
l.logger.Debug("authorization create", dur)
|
||||
}(time.Now())
|
||||
return l.authService.CreateAuthorization(ctx, a)
|
||||
}
|
||||
|
||||
func (l *AuthLogger) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (a *influxdb.Authorization, err error) {
|
||||
defer func(start time.Time) {
|
||||
dur := zap.Duration("took", time.Since(start))
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("failed to find authorization with ID %v", id)
|
||||
l.logger.Error(msg, zap.Error(err), dur)
|
||||
return
|
||||
}
|
||||
l.logger.Debug("auth find by ID", dur)
|
||||
}(time.Now())
|
||||
return l.authService.FindAuthorizationByID(ctx, id)
|
||||
}
|
||||
|
||||
func (l *AuthLogger) FindAuthorizationByToken(ctx context.Context, t string) (a *influxdb.Authorization, err error) {
|
||||
defer func(start time.Time) {
|
||||
dur := zap.Duration("took", time.Since(start))
|
||||
if err != nil {
|
||||
l.logger.Error("failed to find authorization with token", zap.Error(err), dur)
|
||||
return
|
||||
}
|
||||
l.logger.Debug("auth find", dur)
|
||||
|
||||
}(time.Now())
|
||||
return l.authService.FindAuthorizationByToken(ctx, t)
|
||||
}
|
||||
|
||||
func (l *AuthLogger) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) (as []*influxdb.Authorization, count int, err error) {
|
||||
defer func(start time.Time) {
|
||||
dur := zap.Duration("took", time.Since(start))
|
||||
if err != nil {
|
||||
l.logger.Error("failed to find authorizations matching the given filter", zap.Error(err), dur)
|
||||
return
|
||||
}
|
||||
l.logger.Debug("authorizations find", dur)
|
||||
}(time.Now())
|
||||
return l.authService.FindAuthorizations(ctx, filter)
|
||||
}
|
||||
|
||||
func (l *AuthLogger) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (a *influxdb.Authorization, err error) {
|
||||
defer func(start time.Time) {
|
||||
dur := zap.Duration("took", time.Since(start))
|
||||
if err != nil {
|
||||
l.logger.Error("failed to update authorization", zap.Error(err), dur)
|
||||
return
|
||||
}
|
||||
l.logger.Debug("authorizationauthorization update", dur)
|
||||
}(time.Now())
|
||||
return l.authService.UpdateAuthorization(ctx, id, upd)
|
||||
}
|
||||
|
||||
func (l *AuthLogger) DeleteAuthorization(ctx context.Context, id influxdb.ID) (err error) {
|
||||
defer func(start time.Time) {
|
||||
dur := zap.Duration("took", time.Since(start))
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("failed to delete authorization with ID %v", id)
|
||||
l.logger.Error(msg, zap.Error(err), dur)
|
||||
return
|
||||
}
|
||||
l.logger.Debug("authorization delete", dur)
|
||||
}(time.Now())
|
||||
return l.authService.DeleteAuthorization(ctx, id)
|
||||
}
|
||||
|
|
@ -0,0 +1,95 @@
|
|||
package authorization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/kit/metric"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// AuthMetrics is a metrics middleware for an AuthorizationService: it records
// RED (rate/errors/duration) metrics for every call before delegating.
type AuthMetrics struct {
	// RED metrics
	rec *metric.REDClient

	authService influxdb.AuthorizationService // wrapped service
}

// Compile-time check that the middleware satisfies the service interface.
var _ influxdb.AuthorizationService = (*AuthMetrics)(nil)

// NewAuthMetrics registers RED metrics (service name "token", optionally
// suffixed via opts) on reg and wraps s with recording middleware.
func NewAuthMetrics(reg prometheus.Registerer, s influxdb.AuthorizationService, opts ...MetricsOption) *AuthMetrics {
	o := applyOpts(opts...)
	return &AuthMetrics{
		rec:         metric.New(reg, o.applySuffix("token")),
		authService: s,
	}
}
|
||||
|
||||
func (m *AuthMetrics) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error {
|
||||
rec := m.rec.Record("create_authorization")
|
||||
err := m.authService.CreateAuthorization(ctx, a)
|
||||
return rec(err)
|
||||
}
|
||||
|
||||
func (m *AuthMetrics) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) {
|
||||
rec := m.rec.Record("find_authorization_by_id")
|
||||
a, err := m.authService.FindAuthorizationByID(ctx, id)
|
||||
return a, rec(err)
|
||||
}
|
||||
func (m *AuthMetrics) FindAuthorizationByToken(ctx context.Context, t string) (*influxdb.Authorization, error) {
|
||||
rec := m.rec.Record("find_authorization_by_token")
|
||||
a, err := m.authService.FindAuthorizationByToken(ctx, t)
|
||||
return a, rec(err)
|
||||
}
|
||||
func (m *AuthMetrics) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) {
|
||||
rec := m.rec.Record("find_authorization_by_token")
|
||||
a, n, err := m.authService.FindAuthorizations(ctx, filter, opt...)
|
||||
return a, n, rec(err)
|
||||
}
|
||||
|
||||
func (m *AuthMetrics) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) {
|
||||
rec := m.rec.Record("update_authorization")
|
||||
a, err := m.authService.UpdateAuthorization(ctx, id, upd)
|
||||
return a, rec(err)
|
||||
}
|
||||
|
||||
func (m *AuthMetrics) DeleteAuthorization(ctx context.Context, id influxdb.ID) error {
|
||||
rec := m.rec.Record("delete_authorization")
|
||||
err := m.authService.DeleteAuthorization(ctx, id)
|
||||
return rec(err)
|
||||
}
|
||||
|
||||
// Metrics options

// metricOpts holds configuration for the metrics middleware.
type metricOpts struct {
	serviceSuffix string // appended (with "_") to the metric service name when non-empty
}

// defaultOpts returns the zero-value option set (no suffix).
func defaultOpts() *metricOpts {
	return new(metricOpts)
}

// applySuffix appends the configured suffix to prefix, if one is set.
func (o *metricOpts) applySuffix(prefix string) string {
	if o.serviceSuffix == "" {
		return prefix
	}
	return fmt.Sprintf("%s_%s", prefix, o.serviceSuffix)
}

// MetricsOption is an option used by a metric middleware.
type MetricsOption func(*metricOpts)

// WithSuffix returns a metric option that applies a suffix to the service name of the metric.
func WithSuffix(suffix string) MetricsOption {
	return func(opts *metricOpts) {
		opts.serviceSuffix = suffix
	}
}

// applyOpts builds a metricOpts from the defaults plus the supplied options,
// applied in order.
func applyOpts(opts ...MetricsOption) *metricOpts {
	o := defaultOpts()
	for _, apply := range opts {
		apply(o)
	}
	return o
}
|
||||
|
|
@ -0,0 +1,216 @@
|
|||
package authorization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/kv"
|
||||
"github.com/influxdata/influxdb/v2/rand"
|
||||
)
|
||||
|
||||
// Compile-time check that Service satisfies the service interface.
var _ influxdb.AuthorizationService = (*Service)(nil)

// Service implements influxdb.AuthorizationService on top of a kv-backed
// Store, generating tokens on create and validating users/orgs via the
// tenant service.
type Service struct {
	store          *Store                  // kv-backed persistence
	tokenGenerator influxdb.TokenGenerator // produces tokens when none supplied
	tenantService  influxdb.TenantService  // validates referenced users/orgs
}

// NewService constructs an authorization service over st, validating
// referenced users and organizations via ts. Tokens are generated with a
// 64-byte random token generator.
func NewService(st *Store, ts influxdb.TenantService) influxdb.AuthorizationService {
	return &Service{
		store:          st,
		tokenGenerator: rand.NewTokenGenerator(64),
		tenantService:  ts,
	}
}
|
||||
|
||||
// CreateAuthorization validates a, verifies its user and org exist, ensures
// its token (if pre-set) is unique, generates a token when none is supplied,
// stamps timestamps, and persists the authorization.
func (s *Service) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error {
	if err := a.Valid(); err != nil {
		return &influxdb.Error{
			Err: err,
		}
	}

	// A missing user or org is reported uniformly as ErrUnableToCreateToken,
	// intentionally hiding which reference was invalid.
	if _, err := s.tenantService.FindUserByID(ctx, a.UserID); err != nil {
		return influxdb.ErrUnableToCreateToken
	}

	if _, err := s.tenantService.FindOrganizationByID(ctx, a.OrgID); err != nil {
		return influxdb.ErrUnableToCreateToken
	}

	err := s.store.View(ctx, func(tx kv.Tx) error {
		if err := s.store.uniqueAuthToken(ctx, tx, a); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		// NOTE(review): any error from the View transaction (including storage
		// failures unrelated to uniqueness) is reported as
		// ErrTokenAlreadyExistsError — confirm this masking is intended.
		return ErrTokenAlreadyExistsError
	}

	if a.Token == "" {
		// NOTE(review): the uniqueness check above ran before this token was
		// generated, so generated tokens are not re-checked here — presumably
		// collisions are left to the store's CreateAuthorization; confirm.
		token, err := s.tokenGenerator.Token()
		if err != nil {
			return &influxdb.Error{
				Err: err,
			}
		}
		a.Token = token
	}

	// Stamp both timestamps with the same instant on create.
	now := time.Now()
	a.SetCreatedAt(now)
	a.SetUpdatedAt(now)

	return s.store.Update(ctx, func(tx kv.Tx) error {
		return s.store.CreateAuthorization(ctx, tx, a)
	})
}
|
||||
|
||||
func (s *Service) FindAuthorizationByID(ctx context.Context, id influxdb.ID) (*influxdb.Authorization, error) {
|
||||
var a *influxdb.Authorization
|
||||
err := s.store.View(ctx, func(tx kv.Tx) error {
|
||||
auth, err := s.store.GetAuthorizationByID(ctx, tx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a = auth
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// FindAuthorizationByToken returns a authorization by token for a particular authorization.
|
||||
func (s *Service) FindAuthorizationByToken(ctx context.Context, n string) (*influxdb.Authorization, error) {
|
||||
var a *influxdb.Authorization
|
||||
err := s.store.View(ctx, func(tx kv.Tx) error {
|
||||
auth, err := s.store.GetAuthorizationByToken(ctx, tx, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a = auth
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// FindAuthorizations retrives all authorizations that match an arbitrary authorization filter.
|
||||
// Filters using ID, or Token should be efficient.
|
||||
// Other filters will do a linear scan across all authorizations searching for a match.
|
||||
func (s *Service) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) {
|
||||
if filter.ID != nil {
|
||||
var auth *influxdb.Authorization
|
||||
err := s.store.View(ctx, func(tx kv.Tx) error {
|
||||
a, e := s.store.GetAuthorizationByID(ctx, tx, *filter.ID)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
auth = a
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, 0, &influxdb.Error{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return []*influxdb.Authorization{auth}, 1, nil
|
||||
}
|
||||
|
||||
if filter.Token != nil {
|
||||
var auth *influxdb.Authorization
|
||||
err := s.store.View(ctx, func(tx kv.Tx) error {
|
||||
a, e := s.store.GetAuthorizationByToken(ctx, tx, *filter.Token)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
auth = a
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, 0, &influxdb.Error{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return []*influxdb.Authorization{auth}, 1, nil
|
||||
}
|
||||
|
||||
as := []*influxdb.Authorization{}
|
||||
err := s.store.View(ctx, func(tx kv.Tx) error {
|
||||
auths, err := s.store.ListAuthorizations(ctx, tx, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
as = auths
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, 0, &influxdb.Error{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return as, len(as), nil
|
||||
}
|
||||
|
||||
// UpdateAuthorization updates the status and description if available.
// Unset fields on upd leave the stored values unchanged.
func (s *Service) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) {
	var auth *influxdb.Authorization
	// NOTE(review): the read happens in a separate View transaction from the
	// later Update, so a concurrent writer can interleave between them —
	// confirm whether this read-modify-write race is acceptable here.
	err := s.store.View(ctx, func(tx kv.Tx) error {
		a, e := s.store.GetAuthorizationByID(ctx, tx, id)
		if e != nil {
			return e
		}
		auth = a
		return nil
	})

	if err != nil {
		// NOTE(review): every read failure is coded ENotFound, even errors
		// that are not missing-key errors — confirm this is intended.
		return nil, &influxdb.Error{
			Code: influxdb.ENotFound,
			Err:  err,
		}
	}

	// Apply only the fields present on the update.
	if upd.Status != nil {
		auth.Status = *upd.Status
	}
	if upd.Description != nil {
		auth.Description = *upd.Description
	}

	auth.SetUpdatedAt(time.Now())

	err = s.store.Update(ctx, func(tx kv.Tx) error {
		a, e := s.store.UpdateAuthorization(ctx, tx, id, auth)
		if e != nil {
			return e
		}
		auth = a
		return nil
	})
	return auth, err
}
|
||||
|
||||
func (s *Service) DeleteAuthorization(ctx context.Context, id influxdb.ID) error {
|
||||
return s.store.Update(ctx, func(tx kv.Tx) (err error) {
|
||||
return s.store.DeleteAuthorization(ctx, tx, id)
|
||||
})
|
||||
}
|
||||
|
|
@ -0,0 +1,95 @@
|
|||
package authorization_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/authorization"
|
||||
"github.com/influxdata/influxdb/v2/bolt"
|
||||
"github.com/influxdata/influxdb/v2/kv"
|
||||
"github.com/influxdata/influxdb/v2/tenant"
|
||||
influxdbtesting "github.com/influxdata/influxdb/v2/testing"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
func initBoltAuthService(f influxdbtesting.AuthorizationFields, t *testing.T) (influxdb.AuthorizationService, string, func()) {
|
||||
s, closeBolt, err := NewTestBoltStore(t)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new kv store: %v", err)
|
||||
}
|
||||
|
||||
svc, closeSvc := initAuthService(s, f, t)
|
||||
return svc, "service_auth", func() {
|
||||
closeSvc()
|
||||
closeBolt()
|
||||
}
|
||||
}
|
||||
|
||||
func initAuthService(s kv.Store, f influxdbtesting.AuthorizationFields, t *testing.T) (influxdb.AuthorizationService, func()) {
|
||||
st, err := tenant.NewStore(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ts := tenant.NewService(st)
|
||||
storage, err := authorization.NewStore(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
svc := authorization.NewService(storage, ts)
|
||||
|
||||
for _, u := range f.Users {
|
||||
if err := ts.CreateUser(context.Background(), u); err != nil {
|
||||
t.Fatalf("error populating users: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, o := range f.Orgs {
|
||||
if err := ts.CreateOrganization(context.Background(), o); err != nil {
|
||||
t.Fatalf("failed to populate organizations: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, m := range f.Authorizations {
|
||||
if err := svc.CreateAuthorization(context.Background(), m); err != nil {
|
||||
t.Fatalf("failed to populate authorizations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return svc, func() {
|
||||
for _, m := range f.Authorizations {
|
||||
if err := svc.DeleteAuthorization(context.Background(), m.ID); err != nil {
|
||||
t.Logf("failed to remove user resource mapping: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func NewTestBoltStore(t *testing.T) (kv.Store, func(), error) {
|
||||
f, err := ioutil.TempFile("", "influxdata-bolt-")
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("unable to open temporary boltdb file")
|
||||
}
|
||||
f.Close()
|
||||
|
||||
path := f.Name()
|
||||
s := bolt.NewKVStore(zaptest.NewLogger(t), path)
|
||||
if err := s.Open(context.Background()); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
close := func() {
|
||||
s.Close()
|
||||
os.Remove(path)
|
||||
}
|
||||
|
||||
return s, close, nil
|
||||
}
|
||||
|
||||
// TestBoltAuthService runs the shared AuthorizationService conformance
// suite against the bolt-backed implementation.
func TestBoltAuthService(t *testing.T) {
	t.Parallel()
	influxdbtesting.AuthorizationService(initBoltAuthService, t)
}
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
package authorization
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/kit/tracing"
|
||||
"github.com/influxdata/influxdb/v2/kv"
|
||||
"github.com/influxdata/influxdb/v2/snowflake"
|
||||
)
|
||||
|
||||
// MaxIDGenerationN is the maximum number of attempts made to generate a
// unique, non-reserved ID before giving up.
const MaxIDGenerationN = 100

// ReservedIDs is the lowest ID handed out; generated IDs below this
// value are skipped.
const ReservedIDs = 1000

var (
	// authBucket stores serialized authorizations keyed by encoded ID.
	authBucket = []byte("authorizationsv1")
	// authIndex maps authorization tokens to encoded authorization IDs.
	authIndex = []byte("authorizationindexv1")
)
|
||||
|
||||
// Store persists authorizations in a kv.Store.
type Store struct {
	kvStore kv.Store
	// IDGen issues IDs for newly created authorizations; exported so
	// tests can substitute a deterministic generator.
	IDGen influxdb.IDGenerator
}
|
||||
|
||||
func NewStore(kvStore kv.Store) (*Store, error) {
|
||||
st := &Store{
|
||||
kvStore: kvStore,
|
||||
IDGen: snowflake.NewDefaultIDGenerator(),
|
||||
}
|
||||
return st, st.setup()
|
||||
}
|
||||
|
||||
// View opens up a transaction that will not write to any data. Implementing interfaces
// should take care to ensure that all view transactions do not mutate any data.
// It delegates directly to the underlying kv.Store's read-only transaction.
func (s *Store) View(ctx context.Context, fn func(kv.Tx) error) error {
	return s.kvStore.View(ctx, fn)
}
|
||||
|
||||
// Update opens up a transaction that will mutate data. It delegates
// directly to the underlying kv.Store's read-write transaction.
func (s *Store) Update(ctx context.Context, fn func(kv.Tx) error) error {
	return s.kvStore.Update(ctx, fn)
}
|
||||
|
||||
func (s *Store) setup() error {
|
||||
return s.Update(context.Background(), func(tx kv.Tx) error {
|
||||
if _, err := tx.Bucket(authBucket); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := authIndexBucket(tx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// generateSafeID attempts to create ids for buckets
|
||||
// and orgs that are without backslash, commas, and spaces, BUT ALSO do not already exist.
|
||||
func (s *Store) generateSafeID(ctx context.Context, tx kv.Tx, bucket []byte) (influxdb.ID, error) {
|
||||
for i := 0; i < MaxIDGenerationN; i++ {
|
||||
id := s.IDGen.ID()
|
||||
|
||||
// TODO: this is probably unnecessary but for testing we need to keep it in.
|
||||
// After KV is cleaned out we can update the tests and remove this.
|
||||
if id < ReservedIDs {
|
||||
continue
|
||||
}
|
||||
|
||||
err := s.uniqueID(ctx, tx, bucket, id)
|
||||
if err == nil {
|
||||
return id, nil
|
||||
}
|
||||
|
||||
if err == NotUniqueIDError {
|
||||
continue
|
||||
}
|
||||
|
||||
return influxdb.InvalidID(), err
|
||||
}
|
||||
return influxdb.InvalidID(), ErrFailureGeneratingID
|
||||
}
|
||||
|
||||
func (s *Store) uniqueID(ctx context.Context, tx kv.Tx, bucket []byte, id influxdb.ID) error {
|
||||
span, _ := tracing.StartSpanFromContext(ctx)
|
||||
defer span.Finish()
|
||||
|
||||
encodedID, err := id.Encode()
|
||||
if err != nil {
|
||||
return &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
b, err := tx.Bucket(bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = b.Get(encodedID)
|
||||
if kv.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return NotUniqueIDError
|
||||
}
|
||||
|
|
@ -0,0 +1,446 @@
|
|||
package authorization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/buger/jsonparser"
|
||||
influxdb "github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/kv"
|
||||
jsonp "github.com/influxdata/influxdb/v2/pkg/jsonparser"
|
||||
)
|
||||
|
||||
// authIndexKey converts an authorization token into the byte key used
// by the token index bucket.
func authIndexKey(n string) []byte {
	key := []byte(n)
	return key
}
|
||||
|
||||
func authIndexBucket(tx kv.Tx) (kv.Bucket, error) {
|
||||
b, err := tx.Bucket([]byte(authIndex))
|
||||
if err != nil {
|
||||
return nil, UnexpectedAuthIndexError(err)
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func encodeAuthorization(a *influxdb.Authorization) ([]byte, error) {
|
||||
switch a.Status {
|
||||
case influxdb.Active, influxdb.Inactive:
|
||||
case "":
|
||||
a.Status = influxdb.Active
|
||||
default:
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: "unknown authorization status",
|
||||
}
|
||||
}
|
||||
|
||||
return json.Marshal(a)
|
||||
}
|
||||
|
||||
func decodeAuthorization(b []byte, a *influxdb.Authorization) error {
|
||||
if err := json.Unmarshal(b, a); err != nil {
|
||||
return err
|
||||
}
|
||||
if a.Status == "" {
|
||||
a.Status = influxdb.Active
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateAuthorization takes an Authorization object and saves it in storage using its token
|
||||
// using its token property as an index
|
||||
func (s *Store) CreateAuthorization(ctx context.Context, tx kv.Tx, a *influxdb.Authorization) error {
|
||||
// if the provided ID is invalid, or already maps to an existing Auth, then generate a new one
|
||||
if !a.ID.Valid() {
|
||||
id, err := s.generateSafeID(ctx, tx, authBucket)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
a.ID = id
|
||||
} else if err := uniqueID(ctx, tx, a.ID); err != nil {
|
||||
id, err := s.generateSafeID(ctx, tx, authBucket)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
a.ID = id
|
||||
}
|
||||
|
||||
if err := s.uniqueAuthToken(ctx, tx, a); err != nil {
|
||||
return ErrTokenAlreadyExistsError
|
||||
}
|
||||
|
||||
v, err := encodeAuthorization(a)
|
||||
if err != nil {
|
||||
return &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
encodedID, err := a.ID.Encode()
|
||||
if err != nil {
|
||||
return ErrInvalidAuthIDError(err)
|
||||
}
|
||||
|
||||
idx, err := authIndexBucket(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := idx.Put(authIndexKey(a.Token), encodedID); err != nil {
|
||||
return &influxdb.Error{
|
||||
Code: influxdb.EInternal,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
b, err := tx.Bucket(authBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.Put(encodedID, v); err != nil {
|
||||
return &influxdb.Error{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAuthorization gets an authorization by its ID from the auth bucket in kv
|
||||
func (s *Store) GetAuthorizationByID(ctx context.Context, tx kv.Tx, id influxdb.ID) (*influxdb.Authorization, error) {
|
||||
encodedID, err := id.Encode()
|
||||
if err != nil {
|
||||
return nil, ErrInvalidAuthID
|
||||
}
|
||||
|
||||
b, err := tx.Bucket(authBucket)
|
||||
if err != nil {
|
||||
return nil, ErrInternalServiceError(err)
|
||||
}
|
||||
|
||||
v, err := b.Get(encodedID)
|
||||
if kv.IsNotFound(err) {
|
||||
return nil, ErrAuthNotFound
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, ErrInternalServiceError(err)
|
||||
}
|
||||
|
||||
a := &influxdb.Authorization{}
|
||||
if err := decodeAuthorization(v, a); err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (s *Store) GetAuthorizationByToken(ctx context.Context, tx kv.Tx, token string) (*influxdb.Authorization, error) {
|
||||
idx, err := authIndexBucket(tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// use the token to look up the authorization's ID
|
||||
idKey, err := idx.Get(authIndexKey(token))
|
||||
if kv.IsNotFound(err) {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.ENotFound,
|
||||
Msg: "authorization not found",
|
||||
}
|
||||
}
|
||||
|
||||
var id influxdb.ID
|
||||
if err := id.Decode(idKey); err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return s.GetAuthorizationByID(ctx, tx, id)
|
||||
}
|
||||
|
||||
// ListAuthorizations returns all the authorizations matching a set of FindOptions. This function is used for
|
||||
// FindAuthorizationByID, FindAuthorizationByToken, and FindAuthorizations in the AuthorizationService implementation
|
||||
func (s *Store) ListAuthorizations(ctx context.Context, tx kv.Tx, f influxdb.AuthorizationFilter) ([]*influxdb.Authorization, error) {
|
||||
var as []*influxdb.Authorization
|
||||
pred := authorizationsPredicateFn(f)
|
||||
filterFn := filterAuthorizationsFn(f)
|
||||
err := s.forEachAuthorization(ctx, tx, pred, func(a *influxdb.Authorization) bool {
|
||||
if filterFn(a) {
|
||||
as = append(as, a)
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return as, nil
|
||||
}
|
||||
|
||||
// forEachAuthorization will iterate through all authorizations while fn returns true.
|
||||
func (s *Store) forEachAuthorization(ctx context.Context, tx kv.Tx, pred kv.CursorPredicateFunc, fn func(*influxdb.Authorization) bool) error {
|
||||
b, err := tx.Bucket(authBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var cur kv.Cursor
|
||||
if pred != nil {
|
||||
cur, err = b.Cursor(kv.WithCursorHintPredicate(pred))
|
||||
} else {
|
||||
cur, err = b.Cursor()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, v := cur.First(); k != nil; k, v = cur.Next() {
|
||||
// preallocate Permissions to reduce multiple slice re-allocations
|
||||
a := &influxdb.Authorization{
|
||||
Permissions: make([]influxdb.Permission, 64),
|
||||
}
|
||||
|
||||
if err := decodeAuthorization(v, a); err != nil {
|
||||
return err
|
||||
}
|
||||
if !fn(a) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateAuthorization updates the status and description only of an authorization
|
||||
func (s *Store) UpdateAuthorization(ctx context.Context, tx kv.Tx, id influxdb.ID, a *influxdb.Authorization) (*influxdb.Authorization, error) {
|
||||
v, err := encodeAuthorization(a)
|
||||
if err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
encodedID, err := a.ID.Encode()
|
||||
if err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.ENotFound,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
idx, err := authIndexBucket(tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := idx.Put(authIndexKey(a.Token), encodedID); err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInternal,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
b, err := tx.Bucket(authBucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := b.Put(encodedID, v); err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return a, nil
|
||||
|
||||
}
|
||||
|
||||
// DeleteAuthorization removes an authorization from storage
|
||||
func (s *Store) DeleteAuthorization(ctx context.Context, tx kv.Tx, id influxdb.ID) error {
|
||||
a, err := s.GetAuthorizationByID(ctx, tx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
encodedID, err := id.Encode()
|
||||
if err != nil {
|
||||
return ErrInvalidAuthID
|
||||
}
|
||||
|
||||
idx, err := authIndexBucket(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b, err := tx.Bucket(authBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := idx.Delete([]byte(a.Token)); err != nil {
|
||||
return ErrInternalServiceError(err)
|
||||
}
|
||||
|
||||
if err := b.Delete(encodedID); err != nil {
|
||||
return ErrInternalServiceError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) uniqueAuthToken(ctx context.Context, tx kv.Tx, a *influxdb.Authorization) error {
|
||||
err := unique(ctx, tx, authIndex, authIndexKey(a.Token))
|
||||
if err == kv.NotUniqueError {
|
||||
// by returning a generic error we are trying to hide when
|
||||
// a token is non-unique.
|
||||
return influxdb.ErrUnableToCreateToken
|
||||
}
|
||||
// otherwise, this is some sort of internal server error and we
|
||||
// should provide some debugging information.
|
||||
return err
|
||||
}
|
||||
|
||||
func unique(ctx context.Context, tx kv.Tx, indexBucket, indexKey []byte) error {
|
||||
bucket, err := tx.Bucket(indexBucket)
|
||||
if err != nil {
|
||||
return kv.UnexpectedIndexError(err)
|
||||
}
|
||||
|
||||
_, err = bucket.Get(indexKey)
|
||||
// if not found then this token is unique.
|
||||
if kv.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// no error means this is not unique
|
||||
if err == nil {
|
||||
return kv.NotUniqueError
|
||||
}
|
||||
|
||||
// any other error is some sort of internal server error
|
||||
return kv.UnexpectedIndexError(err)
|
||||
}
|
||||
|
||||
// uniqueID returns nil if the ID provided is unique, returns an error otherwise
|
||||
func uniqueID(ctx context.Context, tx kv.Tx, id influxdb.ID) error {
|
||||
encodedID, err := id.Encode()
|
||||
if err != nil {
|
||||
return ErrInvalidAuthID
|
||||
}
|
||||
|
||||
b, err := tx.Bucket(authBucket)
|
||||
if err != nil {
|
||||
return ErrInternalServiceError(err)
|
||||
}
|
||||
|
||||
_, err = b.Get(encodedID)
|
||||
// if not found then the ID is unique
|
||||
if kv.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
// no error means this is not unique
|
||||
if err == nil {
|
||||
return kv.NotUniqueError
|
||||
}
|
||||
|
||||
// any other error is some sort of internal server error
|
||||
return kv.UnexpectedIndexError(err)
|
||||
}
|
||||
|
||||
// authorizationsPredicateFn builds a raw-value cursor predicate from f
// by peeking at the stored JSON without fully decoding it. Filter
// precedence: ID, then Token, then a conjunction of OrgID and UserID.
// A nil return means "no predicate" (scan everything).
func authorizationsPredicateFn(f influxdb.AuthorizationFilter) kv.CursorPredicateFunc {
	// if any errors occur reading the JSON data, the predicate will always return true
	// to ensure the value is included and handled higher up.

	if f.ID != nil {
		exp := *f.ID
		return func(_, value []byte) bool {
			got, err := jsonp.GetID(value, "id")
			if err != nil {
				return true
			}
			return got == exp
		}
	}

	if f.Token != nil {
		exp := *f.Token
		return func(_, value []byte) bool {
			// it is assumed that token never has escaped string data
			got, _, _, err := jsonparser.Get(value, "token")
			if err != nil {
				return true
			}
			return string(got) == exp
		}
	}

	var pred kv.CursorPredicateFunc
	if f.OrgID != nil {
		exp := *f.OrgID
		pred = func(_, value []byte) bool {
			got, err := jsonp.GetID(value, "orgID")
			if err != nil {
				return true
			}

			return got == exp
		}
	}

	if f.UserID != nil {
		exp := *f.UserID
		// chain onto any OrgID predicate built above: both must match.
		prevFn := pred
		pred = func(key, value []byte) bool {
			// a nil prevFn means no OrgID constraint, which counts as a match.
			prev := prevFn == nil || prevFn(key, value)
			got, exists, err := jsonp.GetOptionalID(value, "userID")
			// include on JSON read errors so the value is filtered higher up.
			return prev && ((exp == got && exists) || err != nil)
		}
	}

	return pred
}
|
||||
|
||||
func filterAuthorizationsFn(filter influxdb.AuthorizationFilter) func(a *influxdb.Authorization) bool {
|
||||
if filter.ID != nil {
|
||||
return func(a *influxdb.Authorization) bool {
|
||||
return a.ID == *filter.ID
|
||||
}
|
||||
}
|
||||
|
||||
if filter.Token != nil {
|
||||
return func(a *influxdb.Authorization) bool {
|
||||
return a.Token == *filter.Token
|
||||
}
|
||||
}
|
||||
|
||||
// Filter by org and user
|
||||
if filter.OrgID != nil && filter.UserID != nil {
|
||||
return func(a *influxdb.Authorization) bool {
|
||||
return a.OrgID == *filter.OrgID && a.UserID == *filter.UserID
|
||||
}
|
||||
}
|
||||
|
||||
if filter.OrgID != nil {
|
||||
return func(a *influxdb.Authorization) bool {
|
||||
return a.OrgID == *filter.OrgID
|
||||
}
|
||||
}
|
||||
|
||||
if filter.UserID != nil {
|
||||
return func(a *influxdb.Authorization) bool {
|
||||
return a.UserID == *filter.UserID
|
||||
}
|
||||
}
|
||||
|
||||
return func(a *influxdb.Authorization) bool { return true }
|
||||
}
|
||||
|
|
@ -0,0 +1,126 @@
|
|||
package authorization_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/authorization"
|
||||
"github.com/influxdata/influxdb/v2/inmem"
|
||||
"github.com/influxdata/influxdb/v2/kv"
|
||||
)
|
||||
|
||||
// TestAuth exercises the authorization Store against an in-memory kv
// store using a table of scenarios, each with optional setup, update,
// and results phases run in separate transactions.
func TestAuth(t *testing.T) {
	// s returns a fresh in-memory kv store per scenario.
	s := func() kv.Store {
		return inmem.NewKVStore()
	}

	// setup seeds ten authorizations with predictable IDs and tokens.
	setup := func(t *testing.T, store *authorization.Store, tx kv.Tx) {
		for i := 1; i <= 10; i++ {
			err := store.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{
				ID: influxdb.ID(i),
				Token: fmt.Sprintf("randomtoken%d", i),
				OrgID: influxdb.ID(i),
				UserID: influxdb.ID(i),
			})

			if err != nil {
				t.Fatal(err)
			}
		}
	}

	tt := []struct {
		name string
		setup func(*testing.T, *authorization.Store, kv.Tx)
		update func(*testing.T, *authorization.Store, kv.Tx)
		results func(*testing.T, *authorization.Store, kv.Tx)
	}{
		{
			name: "create",
			setup: setup,
			results: func(t *testing.T, store *authorization.Store, tx kv.Tx) {
				auths, err := store.ListAuthorizations(context.Background(), tx, influxdb.AuthorizationFilter{})
				if err != nil {
					t.Fatal(err)
				}

				if len(auths) != 10 {
					t.Fatalf("expected 10 authorizations, got: %d", len(auths))
				}

				// Status is defaulted to "active" on encode, so the
				// expected records carry it explicitly.
				expected := []*influxdb.Authorization{}
				for i := 1; i <= 10; i++ {
					expected = append(expected, &influxdb.Authorization{
						ID: influxdb.ID(i),
						Token: fmt.Sprintf("randomtoken%d", i),
						OrgID: influxdb.ID(i),
						UserID: influxdb.ID(i),
						Status: "active",
					})
				}
				if !reflect.DeepEqual(auths, expected) {
					t.Fatalf("expected identical authorizations: \n%+v\n%+v", auths, expected)
				}

				// should not be able to create two authorizations with identical tokens
				err = store.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{
					ID: influxdb.ID(1),
					Token: fmt.Sprintf("randomtoken%d", 1),
					OrgID: influxdb.ID(1),
					UserID: influxdb.ID(1),
				})
				if err == nil {
					t.Fatalf("expected to be unable to create authorizations with identical tokens")
				}
			},
		},
	}

	for _, testScenario := range tt {
		t.Run(testScenario.name, func(t *testing.T) {
			ts, err := authorization.NewStore(s())
			if err != nil {
				t.Fatal(err)
			}

			// setup
			if testScenario.setup != nil {
				err := ts.Update(context.Background(), func(tx kv.Tx) error {
					testScenario.setup(t, ts, tx)
					return nil
				})

				if err != nil {
					t.Fatal(err)
				}
			}

			// update
			if testScenario.update != nil {
				err := ts.Update(context.Background(), func(tx kv.Tx) error {
					testScenario.update(t, ts, tx)
					return nil
				})

				if err != nil {
					t.Fatal(err)
				}
			}

			// results
			if testScenario.results != nil {
				err := ts.View(context.Background(), func(tx kv.Tx) error {
					testScenario.results(t, ts, tx)
					return nil
				})

				if err != nil {
					t.Fatal(err)
				}
			}
		})
	}
}
|
||||
|
|
@ -27,6 +27,8 @@ import (
|
|||
"github.com/influxdata/influxdb/v2/inmem"
|
||||
"github.com/influxdata/influxdb/v2/internal/fs"
|
||||
"github.com/influxdata/influxdb/v2/kit/cli"
|
||||
"github.com/influxdata/influxdb/v2/kit/feature"
|
||||
overrideflagger "github.com/influxdata/influxdb/v2/kit/feature/override"
|
||||
"github.com/influxdata/influxdb/v2/kit/prom"
|
||||
"github.com/influxdata/influxdb/v2/kit/signals"
|
||||
"github.com/influxdata/influxdb/v2/kit/tracing"
|
||||
|
|
@ -265,14 +267,8 @@ func buildLauncherCommand(l *Launcher, cmd *cobra.Command) {
|
|||
{
|
||||
DestP: &l.enableNewMetaStore,
|
||||
Flag: "new-meta-store",
|
||||
Default: false,
|
||||
Desc: "enables the new meta store",
|
||||
},
|
||||
{
|
||||
DestP: &l.newMetaStoreReadOnly,
|
||||
Flag: "new-meta-store-read-only",
|
||||
Default: true,
|
||||
Desc: "toggles read-only mode for the new meta store, if so, the reads are duplicated between the old and new store (has meaning only if the new meta store is enabled)",
|
||||
Desc: "enables the new meta store",
|
||||
},
|
||||
{
|
||||
DestP: &l.noTasks,
|
||||
|
|
@ -310,8 +306,12 @@ func buildLauncherCommand(l *Launcher, cmd *cobra.Command) {
|
|||
Default: 10,
|
||||
Desc: "the number of queries that are allowed to be awaiting execution before new queries are rejected",
|
||||
},
|
||||
{
|
||||
DestP: &l.featureFlags,
|
||||
Flag: "feature-flags",
|
||||
Desc: "feature flag overrides",
|
||||
},
|
||||
}
|
||||
|
||||
cli.BindOptions(cmd, opts)
|
||||
cmd.AddCommand(inspect.NewCommand())
|
||||
}
|
||||
|
|
@ -337,8 +337,9 @@ type Launcher struct {
|
|||
enginePath string
|
||||
secretStore string
|
||||
|
||||
enableNewMetaStore bool
|
||||
newMetaStoreReadOnly bool
|
||||
enableNewMetaStore bool
|
||||
|
||||
featureFlags map[string]string
|
||||
|
||||
// Query options.
|
||||
concurrencyQuota int
|
||||
|
|
@ -585,14 +586,16 @@ func (m *Launcher) run(ctx context.Context) (err error) {
|
|||
m.reg.MustRegister(m.boltClient)
|
||||
|
||||
var (
|
||||
orgSvc platform.OrganizationService = m.kvService
|
||||
userSvc platform.UserService = m.kvService
|
||||
orgSvc platform.OrganizationService = m.kvService
|
||||
userResourceSvc platform.UserResourceMappingService = m.kvService
|
||||
bucketSvc platform.BucketService = m.kvService
|
||||
passwdsSvc platform.PasswordsService = m.kvService
|
||||
|
||||
authSvc platform.AuthorizationService = m.kvService
|
||||
userSvc platform.UserService = m.kvService
|
||||
variableSvc platform.VariableService = m.kvService
|
||||
bucketSvc platform.BucketService = m.kvService
|
||||
sourceSvc platform.SourceService = m.kvService
|
||||
sessionSvc platform.SessionService = m.kvService
|
||||
passwdsSvc platform.PasswordsService = m.kvService
|
||||
dashboardSvc platform.DashboardService = m.kvService
|
||||
dashboardLogSvc platform.DashboardOperationLogService = m.kvService
|
||||
userLogSvc platform.UserOperationLogService = m.kvService
|
||||
|
|
@ -600,7 +603,6 @@ func (m *Launcher) run(ctx context.Context) (err error) {
|
|||
orgLogSvc platform.OrganizationOperationLogService = m.kvService
|
||||
scraperTargetSvc platform.ScraperTargetStoreService = m.kvService
|
||||
telegrafSvc platform.TelegrafConfigStore = m.kvService
|
||||
userResourceSvc platform.UserResourceMappingService = m.kvService
|
||||
labelSvc platform.LabelService = m.kvService
|
||||
secretSvc platform.SecretService = m.kvService
|
||||
lookupSvc platform.LookupService = m.kvService
|
||||
|
|
@ -613,28 +615,13 @@ func (m *Launcher) run(ctx context.Context) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
userSvcForAuth := userSvc
|
||||
if m.enableNewMetaStore {
|
||||
var ts platform.TenantService
|
||||
if m.newMetaStoreReadOnly {
|
||||
store, err := tenant.NewReadOnlyStore(m.kvStore)
|
||||
if err != nil {
|
||||
m.log.Error("Failed creating new meta store", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
oldSvc := m.kvService
|
||||
newSvc := tenant.NewService(store)
|
||||
ts = tenant.NewDuplicateReadTenantService(m.log, oldSvc, newSvc)
|
||||
} else {
|
||||
ts = tenant.NewService(store)
|
||||
}
|
||||
userSvcForAuth = ts
|
||||
|
||||
userSvc = tenant.NewAuthedUserService(tenant.NewUserLogger(m.log.With(zap.String("store", "new")), tenant.NewUserMetrics(m.reg, ts, tenant.WithSuffix("new"))))
|
||||
orgSvc = tenant.NewAuthedOrgService(tenant.NewOrgLogger(m.log.With(zap.String("store", "new")), tenant.NewOrgMetrics(m.reg, ts, tenant.WithSuffix("new"))))
|
||||
userResourceSvc = tenant.NewAuthedURMService(ts, tenant.NewURMLogger(m.log.With(zap.String("store", "new")), tenant.NewUrmMetrics(m.reg, ts, tenant.WithSuffix("new"))))
|
||||
bucketSvc = tenant.NewAuthedBucketService(tenant.NewBucketLogger(m.log.With(zap.String("store", "new")), tenant.NewBucketMetrics(m.reg, ts, tenant.WithSuffix("new"))), userResourceSvc)
|
||||
passwdsSvc = tenant.NewAuthedPasswordService(tenant.NewPasswordLogger(m.log.With(zap.String("store", "new")), tenant.NewPasswordMetrics(m.reg, ts, tenant.WithSuffix("new"))))
|
||||
ts := tenant.NewService(store)
|
||||
userSvc = tenant.NewUserLogger(m.log.With(zap.String("store", "new")), tenant.NewUserMetrics(m.reg, ts, tenant.WithSuffix("new")))
|
||||
orgSvc = tenant.NewOrgLogger(m.log.With(zap.String("store", "new")), tenant.NewOrgMetrics(m.reg, ts, tenant.WithSuffix("new")))
|
||||
userResourceSvc = tenant.NewURMLogger(m.log.With(zap.String("store", "new")), tenant.NewUrmMetrics(m.reg, ts, tenant.WithSuffix("new")))
|
||||
bucketSvc = tenant.NewBucketLogger(m.log.With(zap.String("store", "new")), tenant.NewBucketMetrics(m.reg, ts, tenant.WithSuffix("new")))
|
||||
passwdsSvc = tenant.NewPasswordLogger(m.log.With(zap.String("store", "new")), tenant.NewPasswordMetrics(m.reg, ts, tenant.WithSuffix("new")))
|
||||
}
|
||||
|
||||
switch m.secretStore {
|
||||
|
|
@ -866,6 +853,18 @@ func (m *Launcher) run(ctx context.Context) (err error) {
|
|||
Addr: m.httpBindAddress,
|
||||
}
|
||||
|
||||
flagger := feature.DefaultFlagger()
|
||||
if len(m.featureFlags) > 0 {
|
||||
f, err := overrideflagger.Make(m.featureFlags)
|
||||
if err != nil {
|
||||
m.log.Error("Failed to configure feature flag overrides",
|
||||
zap.Error(err), zap.Any("overrides", m.featureFlags))
|
||||
return err
|
||||
}
|
||||
m.log.Info("Running with feature flag overrides", zap.Any("config", m.featureFlags))
|
||||
flagger = f
|
||||
}
|
||||
|
||||
m.apibackend = &http.APIBackend{
|
||||
AssetsPath: m.assetsPath,
|
||||
HTTPErrorHandler: kithttp.ErrorHandler(0),
|
||||
|
|
@ -909,6 +908,7 @@ func (m *Launcher) run(ctx context.Context) (err error) {
|
|||
OrgLookupService: m.kvService,
|
||||
WriteEventRecorder: infprom.NewEventRecorder("write"),
|
||||
QueryEventRecorder: infprom.NewEventRecorder("query"),
|
||||
Flagger: flagger,
|
||||
}
|
||||
|
||||
m.reg.MustRegister(m.apibackend.PrometheusCollectors()...)
|
||||
|
|
@ -959,7 +959,7 @@ func (m *Launcher) run(ctx context.Context) (err error) {
|
|||
}
|
||||
|
||||
{
|
||||
platformHandler := http.NewPlatformHandler(m.apibackend, userSvcForAuth, http.WithResourceHandler(pkgHTTPServer), http.WithResourceHandler(onboardHTTPServer))
|
||||
platformHandler := http.NewPlatformHandler(m.apibackend, http.WithResourceHandler(pkgHTTPServer), http.WithResourceHandler(onboardHTTPServer))
|
||||
|
||||
httpLogger := m.log.With(zap.String("service", "http"))
|
||||
m.httpServer.Handler = http.NewHandlerFromRegistry(
|
||||
|
|
|
|||
|
|
@ -4,6 +4,9 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
nethttp "net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
|
@ -176,6 +179,66 @@ func TestLauncher_Pkger(t *testing.T) {
|
|||
assert.NotZero(t, newStack.CRUDLog)
|
||||
})
|
||||
|
||||
t.Run("apply with only a stackID succeeds when stack has URLs", func(t *testing.T) {
|
||||
svr := httptest.NewServer(nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) {
|
||||
pkg := newPkg(newBucketObject("bucket_0", "", ""))
|
||||
b, err := pkg.Encode(pkger.EncodingJSON)
|
||||
if err != nil {
|
||||
w.WriteHeader(nethttp.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Write(b)
|
||||
}))
|
||||
defer svr.Close()
|
||||
|
||||
f, err := ioutil.TempFile("", "pkg.yml")
|
||||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
|
||||
pkg := newPkg(newBucketObject("bucket_1", "", ""))
|
||||
b, err := pkg.Encode(pkger.EncodingYAML)
|
||||
require.NoError(t, err)
|
||||
f.Write(b)
|
||||
require.NoError(t, f.Close())
|
||||
|
||||
expectedURLs := []string{
|
||||
// URL for http call
|
||||
svr.URL + "/pkg.json",
|
||||
// URL for file
|
||||
"file://" + f.Name(),
|
||||
}
|
||||
|
||||
newStack, err := svc.InitStack(ctx, l.User.ID, pkger.Stack{
|
||||
OrgID: l.Org.ID,
|
||||
URLs: expectedURLs,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotZero(t, newStack.ID)
|
||||
assert.Equal(t, l.Org.ID, newStack.OrgID)
|
||||
assert.Equal(t, expectedURLs, newStack.URLs)
|
||||
|
||||
sumEquals := func(t *testing.T, sum pkger.Summary) {
|
||||
t.Helper()
|
||||
require.Len(t, sum.Buckets, 2)
|
||||
assert.Equal(t, "bucket_0", sum.Buckets[0].PkgName)
|
||||
assert.Equal(t, "bucket_0", sum.Buckets[0].Name)
|
||||
assert.Equal(t, "bucket_1", sum.Buckets[1].PkgName)
|
||||
assert.Equal(t, "bucket_1", sum.Buckets[1].Name)
|
||||
}
|
||||
|
||||
sum, _, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, nil, pkger.ApplyWithStackID(newStack.ID))
|
||||
require.NoError(t, err)
|
||||
sumEquals(t, sum)
|
||||
|
||||
sum, _, err = svc.Apply(ctx, l.Org.ID, l.User.ID, nil, pkger.ApplyWithStackID(newStack.ID))
|
||||
require.NoError(t, err)
|
||||
sumEquals(t, sum)
|
||||
|
||||
defer resourceCheck.mustDeleteBucket(t, influxdb.ID(sum.Buckets[0].ID))
|
||||
defer resourceCheck.mustDeleteBucket(t, influxdb.ID(sum.Buckets[1].ID))
|
||||
})
|
||||
|
||||
t.Run("apply a pkg with a stack and associations", func(t *testing.T) {
|
||||
testLabelMappingFn := func(t *testing.T, stackID influxdb.ID, pkg *pkger.Pkg, assertAssociatedLabelsFn func(pkger.Summary, []*influxdb.Label, influxdb.ResourceType)) pkger.Summary {
|
||||
t.Helper()
|
||||
|
|
|
|||
|
|
@ -0,0 +1,28 @@
|
|||
# This file defines feature flags.
|
||||
#
|
||||
# It is used for code generation in the ./kit/feature package.
|
||||
# If you change this file, run `make flags` to regenerate.
|
||||
#
|
||||
# Format details:
|
||||
#
|
||||
# - name: Human-readable name
|
||||
# description: Human-readable description
|
||||
# key: Programmatic name
|
||||
# default: Used when unable to reach server and to infer flag type
|
||||
# contact: Contact for information or issues regarding the flag
|
||||
# lifetime: Expected lifetime of the flag; temporary or permanent, default temporary
|
||||
# expose: Boolean indicating whether the flag should be exposed to callers; default false
|
||||
|
||||
- name: Backend Example
|
||||
description: A permanent backend example boolean flag
|
||||
key: backendExample
|
||||
default: false
|
||||
contact: Gavin Cabbage
|
||||
lifetime: permanent
|
||||
|
||||
- name: Frontend Example
|
||||
description: A temporary frontend example integer flag
|
||||
key: frontendExample
|
||||
default: 42
|
||||
contact: Gavin Cabbage
|
||||
expose: true
|
||||
|
|
@ -8,6 +8,7 @@ import (
|
|||
"github.com/influxdata/influxdb/v2/authorizer"
|
||||
"github.com/influxdata/influxdb/v2/chronograf/server"
|
||||
"github.com/influxdata/influxdb/v2/http/metric"
|
||||
"github.com/influxdata/influxdb/v2/kit/feature"
|
||||
"github.com/influxdata/influxdb/v2/kit/prom"
|
||||
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
|
||||
"github.com/influxdata/influxdb/v2/query"
|
||||
|
|
@ -83,6 +84,7 @@ type APIBackend struct {
|
|||
DocumentService influxdb.DocumentService
|
||||
NotificationRuleStore influxdb.NotificationRuleStore
|
||||
NotificationEndpointService influxdb.NotificationEndpointService
|
||||
Flagger feature.Flagger
|
||||
}
|
||||
|
||||
// PrometheusCollectors exposes the prometheus collectors associated with an APIBackend.
|
||||
|
|
@ -204,6 +206,7 @@ func NewAPIHandler(b *APIBackend, opts ...APIHandlerOptFn) *APIHandler {
|
|||
userHandler := NewUserHandler(b.Logger, userBackend)
|
||||
h.Mount(prefixMe, userHandler)
|
||||
h.Mount(prefixUsers, userHandler)
|
||||
h.Mount("/api/v2/flags", serveFlagsHandler(b.HTTPErrorHandler))
|
||||
|
||||
variableBackend := NewVariableBackend(b.Logger.With(zap.String("handler", "variable")), b)
|
||||
variableBackend.VariableService = authorizer.NewVariableService(b.VariableService)
|
||||
|
|
@ -237,6 +240,7 @@ var apiLinks = map[string]interface{}{
|
|||
"external": map[string]string{
|
||||
"statusFeed": "https://www.influxdata.com/feed/json",
|
||||
},
|
||||
"flags": "/api/v2/flags",
|
||||
"labels": "/api/v2/labels",
|
||||
"variables": "/api/v2/variables",
|
||||
"me": "/api/v2/me",
|
||||
|
|
@ -278,3 +282,16 @@ func serveLinksHandler(errorHandler influxdb.HTTPErrorHandler) http.Handler {
|
|||
}
|
||||
return http.HandlerFunc(fn)
|
||||
}
|
||||
|
||||
func serveFlagsHandler(errorHandler influxdb.HTTPErrorHandler) http.Handler {
|
||||
fn := func(w http.ResponseWriter, r *http.Request) {
|
||||
var (
|
||||
ctx = r.Context()
|
||||
flags = feature.ExposedFlagsFromContext(ctx)
|
||||
)
|
||||
if err := encodeResponse(ctx, w, http.StatusOK, flags); err != nil {
|
||||
errorHandler.HandleHTTPError(ctx, err, w)
|
||||
}
|
||||
}
|
||||
return http.HandlerFunc(fn)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/influxdata/httprouter"
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
platform "github.com/influxdata/influxdb/v2"
|
||||
platcontext "github.com/influxdata/influxdb/v2/context"
|
||||
"github.com/influxdata/influxdb/v2/pkg/httpc"
|
||||
|
|
@ -657,7 +658,7 @@ func (s *AuthorizationService) FindAuthorizationByToken(ctx context.Context, tok
|
|||
// FindAuthorizations returns a list of authorizations that match filter and the total count of matching authorizations.
|
||||
// Additional options provide pagination & sorting.
|
||||
func (s *AuthorizationService) FindAuthorizations(ctx context.Context, filter platform.AuthorizationFilter, opt ...platform.FindOptions) ([]*platform.Authorization, int, error) {
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.ID != nil {
|
||||
params = append(params, [2]string{"id", filter.ID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -312,7 +312,7 @@ func newBucketsResponse(ctx context.Context, opts influxdb.FindOptions, f influx
|
|||
rs = append(rs, NewBucketResponse(b, labels))
|
||||
}
|
||||
return &bucketsResponse{
|
||||
Links: newPagingLinks(prefixBuckets, opts, f, len(bs)),
|
||||
Links: influxdb.NewPagingLinks(prefixBuckets, opts, f, len(bs)),
|
||||
Buckets: rs,
|
||||
}
|
||||
}
|
||||
|
|
@ -428,7 +428,7 @@ func (h *BucketHandler) handleGetBucketLog(w http.ResponseWriter, r *http.Reques
|
|||
return
|
||||
}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
|
|
@ -505,7 +505,7 @@ func (h *BucketHandler) handleGetBuckets(w http.ResponseWriter, r *http.Request)
|
|||
filter.OrganizationID = &orgID
|
||||
}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
h.api.Err(w, err)
|
||||
return
|
||||
|
|
@ -530,7 +530,7 @@ func decodeGetBucketsRequest(r *http.Request) (*getBucketsRequest, error) {
|
|||
qp := r.URL.Query()
|
||||
req := &getBucketsRequest{}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -701,7 +701,7 @@ func (s *BucketService) FindBuckets(ctx context.Context, filter influxdb.BucketF
|
|||
span, _ := tracing.StartSpanFromContext(ctx)
|
||||
defer span.Finish()
|
||||
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.OrganizationID != nil {
|
||||
params = append(params, [2]string{"orgID", filter.OrganizationID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -237,7 +237,7 @@ func (h *CheckHandler) newCheckResponse(ctx context.Context, chk influxdb.Check,
|
|||
func (h *CheckHandler) newChecksResponse(ctx context.Context, chks []influxdb.Check, labelService influxdb.LabelService, f influxdb.PagingFilter, opts influxdb.FindOptions) *checksResponse {
|
||||
resp := &checksResponse{
|
||||
Checks: []*checkResponse{},
|
||||
Links: newPagingLinks(prefixChecks, opts, f, len(chks)),
|
||||
Links: influxdb.NewPagingLinks(prefixChecks, opts, f, len(chks)),
|
||||
}
|
||||
for _, chk := range chks {
|
||||
labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: chk.GetID(), ResourceType: influxdb.ChecksResourceType})
|
||||
|
|
@ -367,7 +367,7 @@ func decodeCheckFilter(ctx context.Context, r *http.Request) (*influxdb.CheckFil
|
|||
},
|
||||
}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
return f, nil, err
|
||||
}
|
||||
|
|
@ -748,7 +748,7 @@ func (s *CheckService) FindChecks(ctx context.Context, filter influxdb.CheckFilt
|
|||
span, _ := tracing.StartSpanFromContext(ctx)
|
||||
defer span.Finish()
|
||||
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.OrgID != nil {
|
||||
params = append(params, [2]string{"orgID", filter.OrgID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -399,7 +399,7 @@ func decodeGetDashboardsRequest(ctx context.Context, r *http.Request) (*getDashb
|
|||
qp := r.URL.Query()
|
||||
req := &getDashboardsRequest{}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -447,7 +447,7 @@ func (d getDashboardsResponse) toinfluxdb() []*influxdb.Dashboard {
|
|||
|
||||
func newGetDashboardsResponse(ctx context.Context, dashboards []*influxdb.Dashboard, filter influxdb.DashboardFilter, opts influxdb.FindOptions, labelService influxdb.LabelService) getDashboardsResponse {
|
||||
res := getDashboardsResponse{
|
||||
Links: newPagingLinks(prefixDashboards, opts, filter, len(dashboards)),
|
||||
Links: influxdb.NewPagingLinks(prefixDashboards, opts, filter, len(dashboards)),
|
||||
Dashboards: make([]dashboardResponse, 0, len(dashboards)),
|
||||
}
|
||||
|
||||
|
|
@ -591,7 +591,7 @@ func decodeGetDashboardLogRequest(ctx context.Context, r *http.Request) (*getDas
|
|||
return nil, err
|
||||
}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -1106,7 +1106,7 @@ func (s *DashboardService) FindDashboardByID(ctx context.Context, id influxdb.ID
|
|||
// FindDashboards returns a list of dashboards that match filter and the total count of matching dashboards.
|
||||
// Additional options provide pagination & sorting.
|
||||
func (s *DashboardService) FindDashboards(ctx context.Context, filter influxdb.DashboardFilter, opts influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) {
|
||||
queryPairs := findOptionParams(opts)
|
||||
queryPairs := influxdb.FindOptionParams(opts)
|
||||
for _, id := range filter.IDs {
|
||||
queryPairs = append(queryPairs, [2]string{"id", id.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -108,17 +108,30 @@ func NewHandlerFromRegistry(name string, reg *prom.Registry, opts ...HandlerOptF
|
|||
h.initMetrics()
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Use(
|
||||
kithttp.Trace(name),
|
||||
kithttp.Metrics(name, h.requests, h.requestDur),
|
||||
)
|
||||
{
|
||||
r.Mount(MetricsPath, opt.metricsHandler)
|
||||
r.Mount(ReadyPath, opt.readyHandler)
|
||||
r.Mount(HealthPath, opt.healthHandler)
|
||||
r.Mount(DebugPath, opt.debugHandler)
|
||||
r.Mount("/", opt.apiHandler)
|
||||
}
|
||||
// only gather metrics for system handlers
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(
|
||||
kithttp.Metrics(name, h.requests, h.requestDur),
|
||||
)
|
||||
{
|
||||
r.Mount(MetricsPath, opt.metricsHandler)
|
||||
r.Mount(ReadyPath, opt.readyHandler)
|
||||
r.Mount(HealthPath, opt.healthHandler)
|
||||
r.Mount(DebugPath, opt.debugHandler)
|
||||
}
|
||||
})
|
||||
|
||||
// gather metrics and traces for everything else
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(
|
||||
kithttp.Trace(name),
|
||||
kithttp.Metrics(name, h.requests, h.requestDur),
|
||||
)
|
||||
{
|
||||
r.Mount("/", opt.apiHandler)
|
||||
}
|
||||
})
|
||||
|
||||
h.r = r
|
||||
|
||||
reg.MustRegister(h.PrometheusCollectors()...)
|
||||
|
|
|
|||
|
|
@ -541,7 +541,7 @@ func (s *LabelService) FindLabelByID(ctx context.Context, id influxdb.ID) (*infl
|
|||
|
||||
// FindLabels is a client for the find labels response from the server.
|
||||
func (s *LabelService) FindLabels(ctx context.Context, filter influxdb.LabelFilter, opt ...influxdb.FindOptions) ([]*influxdb.Label, error) {
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.OrgID != nil {
|
||||
params = append(params, [2]string{"orgID", filter.OrgID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -193,7 +193,7 @@ func newNotificationEndpointResponse(edp influxdb.NotificationEndpoint, labels [
|
|||
func newNotificationEndpointsResponse(ctx context.Context, edps []influxdb.NotificationEndpoint, labelService influxdb.LabelService, f influxdb.PagingFilter, opts influxdb.FindOptions) *notificationEndpointsResponse {
|
||||
resp := ¬ificationEndpointsResponse{
|
||||
NotificationEndpoints: make([]notificationEndpointResponse, len(edps)),
|
||||
Links: newPagingLinks(prefixNotificationEndpoints, opts, f, len(edps)),
|
||||
Links: influxdb.NewPagingLinks(prefixNotificationEndpoints, opts, f, len(edps)),
|
||||
}
|
||||
for i, edp := range edps {
|
||||
labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: edp.GetID(), ResourceType: influxdb.NotificationEndpointResourceType})
|
||||
|
|
@ -272,7 +272,7 @@ func decodeNotificationEndpointFilter(ctx context.Context, r *http.Request) (inf
|
|||
},
|
||||
}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
return influxdb.NotificationEndpointFilter{}, influxdb.FindOptions{}, err
|
||||
}
|
||||
|
|
@ -590,7 +590,7 @@ func (s *NotificationEndpointService) FindNotificationEndpointByID(ctx context.C
|
|||
// FindNotificationEndpoints returns a list of notification endpoints that match filter and the total count of matching notification endpoints.
|
||||
// Additional options provide pagination & sorting.
|
||||
func (s *NotificationEndpointService) FindNotificationEndpoints(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) {
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.ID != nil {
|
||||
params = append(params, [2]string{"id", filter.ID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -230,7 +230,7 @@ func (h *NotificationRuleHandler) newNotificationRuleResponse(ctx context.Contex
|
|||
func (h *NotificationRuleHandler) newNotificationRulesResponse(ctx context.Context, nrs []influxdb.NotificationRule, labelService influxdb.LabelService, f influxdb.PagingFilter, opts influxdb.FindOptions) (*notificationRulesResponse, error) {
|
||||
resp := ¬ificationRulesResponse{
|
||||
NotificationRules: []*notificationRuleResponse{},
|
||||
Links: newPagingLinks(prefixNotificationRules, opts, f, len(nrs)),
|
||||
Links: influxdb.NewPagingLinks(prefixNotificationRules, opts, f, len(nrs)),
|
||||
}
|
||||
for _, nr := range nrs {
|
||||
labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: nr.GetID(), ResourceType: influxdb.NotificationRuleResourceType})
|
||||
|
|
@ -358,7 +358,7 @@ func decodeNotificationRuleFilter(ctx context.Context, r *http.Request) (*influx
|
|||
f.UserResourceMappingFilter = *urm
|
||||
}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
return f, nil, err
|
||||
}
|
||||
|
|
@ -789,7 +789,7 @@ func (s *NotificationRuleService) FindNotificationRuleByID(ctx context.Context,
|
|||
// FindNotificationRules returns a list of notification rules that match filter and the total count of matching notification rules.
|
||||
// Additional options provide pagination & sorting.
|
||||
func (s *NotificationRuleService) FindNotificationRules(ctx context.Context, filter influxdb.NotificationRuleFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) {
|
||||
var params = findOptionParams(opt...)
|
||||
var params = influxdb.FindOptionParams(opt...)
|
||||
if filter.OrgID != nil {
|
||||
params = append(params, [2]string{"orgID", filter.OrgID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -410,7 +410,7 @@ func (h *OrgHandler) handleGetOrgLog(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
h.API.Err(w, err)
|
||||
return
|
||||
|
|
@ -567,7 +567,7 @@ func (s *OrganizationService) FindOrganizations(ctx context.Context, filter infl
|
|||
span, _ := tracing.StartSpanFromContext(ctx)
|
||||
defer span.Finish()
|
||||
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.Name != nil {
|
||||
span.LogKV("org", *filter.Name)
|
||||
params = append(params, [2]string{"org", *filter.Name})
|
||||
|
|
|
|||
133
http/paging.go
133
http/paging.go
|
|
@ -1,133 +0,0 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
platform "github.com/influxdata/influxdb/v2"
|
||||
)
|
||||
|
||||
// decodeFindOptions returns a FindOptions decoded from http request.
|
||||
func decodeFindOptions(r *http.Request) (*platform.FindOptions, error) {
|
||||
opts := &platform.FindOptions{}
|
||||
qp := r.URL.Query()
|
||||
|
||||
if offset := qp.Get("offset"); offset != "" {
|
||||
o, err := strconv.Atoi(offset)
|
||||
if err != nil {
|
||||
return nil, &platform.Error{
|
||||
Code: platform.EInvalid,
|
||||
Msg: "offset is invalid",
|
||||
}
|
||||
}
|
||||
|
||||
opts.Offset = o
|
||||
}
|
||||
|
||||
if limit := qp.Get("limit"); limit != "" {
|
||||
l, err := strconv.Atoi(limit)
|
||||
if err != nil {
|
||||
return nil, &platform.Error{
|
||||
Code: platform.EInvalid,
|
||||
Msg: "limit is invalid",
|
||||
}
|
||||
}
|
||||
|
||||
if l < 1 || l > platform.MaxPageSize {
|
||||
return nil, &platform.Error{
|
||||
Code: platform.EInvalid,
|
||||
Msg: fmt.Sprintf("limit must be between 1 and %d", platform.MaxPageSize),
|
||||
}
|
||||
}
|
||||
|
||||
opts.Limit = l
|
||||
} else {
|
||||
opts.Limit = platform.DefaultPageSize
|
||||
}
|
||||
|
||||
if sortBy := qp.Get("sortBy"); sortBy != "" {
|
||||
opts.SortBy = sortBy
|
||||
}
|
||||
|
||||
if descending := qp.Get("descending"); descending != "" {
|
||||
desc, err := strconv.ParseBool(descending)
|
||||
if err != nil {
|
||||
return nil, &platform.Error{
|
||||
Code: platform.EInvalid,
|
||||
Msg: "descending is invalid",
|
||||
}
|
||||
}
|
||||
|
||||
opts.Descending = desc
|
||||
}
|
||||
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
func findOptionParams(opts ...platform.FindOptions) [][2]string {
|
||||
var out [][2]string
|
||||
for _, o := range opts {
|
||||
for k, vals := range o.QueryParams() {
|
||||
for _, v := range vals {
|
||||
out = append(out, [2]string{k, v})
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// newPagingLinks returns a PagingLinks.
|
||||
// num is the number of returned results.
|
||||
func newPagingLinks(basePath string, opts platform.FindOptions, f platform.PagingFilter, num int) *platform.PagingLinks {
|
||||
u := url.URL{
|
||||
Path: basePath,
|
||||
}
|
||||
|
||||
values := url.Values{}
|
||||
for k, vs := range f.QueryParams() {
|
||||
for _, v := range vs {
|
||||
if v != "" {
|
||||
values.Add(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var self, next, prev string
|
||||
for k, vs := range opts.QueryParams() {
|
||||
for _, v := range vs {
|
||||
if v != "" {
|
||||
values.Add(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
u.RawQuery = values.Encode()
|
||||
self = u.String()
|
||||
|
||||
if num >= opts.Limit {
|
||||
nextOffset := opts.Offset + opts.Limit
|
||||
values.Set("offset", strconv.Itoa(nextOffset))
|
||||
u.RawQuery = values.Encode()
|
||||
next = u.String()
|
||||
}
|
||||
|
||||
if opts.Offset > 0 {
|
||||
prevOffset := opts.Offset - opts.Limit
|
||||
if prevOffset < 0 {
|
||||
prevOffset = 0
|
||||
}
|
||||
values.Set("offset", strconv.Itoa(prevOffset))
|
||||
u.RawQuery = values.Encode()
|
||||
prev = u.String()
|
||||
}
|
||||
|
||||
links := &platform.PagingLinks{
|
||||
Prev: prev,
|
||||
Self: self,
|
||||
Next: next,
|
||||
}
|
||||
|
||||
return links
|
||||
}
|
||||
|
|
@ -4,11 +4,12 @@ import (
|
|||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
platform "github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/mock"
|
||||
)
|
||||
|
||||
func TestPaging_decodeFindOptions(t *testing.T) {
|
||||
func TestPaging_DecodeFindOptions(t *testing.T) {
|
||||
type args struct {
|
||||
queryParams map[string]string
|
||||
}
|
||||
|
|
@ -67,28 +68,28 @@ func TestPaging_decodeFindOptions(t *testing.T) {
|
|||
}
|
||||
r.URL.RawQuery = qp.Encode()
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
t.Errorf("%q failed, err: %s", tt.name, err.Error())
|
||||
}
|
||||
|
||||
if opts.Offset != tt.wants.opts.Offset {
|
||||
t.Errorf("%q. decodeFindOptions() = %v, want %v", tt.name, opts.Offset, tt.wants.opts.Offset)
|
||||
t.Errorf("%q. influxdb.DecodeFindOptions() = %v, want %v", tt.name, opts.Offset, tt.wants.opts.Offset)
|
||||
}
|
||||
if opts.Limit != tt.wants.opts.Limit {
|
||||
t.Errorf("%q. decodeFindOptions() = %v, want %v", tt.name, opts.Limit, tt.wants.opts.Limit)
|
||||
t.Errorf("%q. influxdb.DecodeFindOptions() = %v, want %v", tt.name, opts.Limit, tt.wants.opts.Limit)
|
||||
}
|
||||
if opts.SortBy != tt.wants.opts.SortBy {
|
||||
t.Errorf("%q. decodeFindOptions() = %v, want %v", tt.name, opts.SortBy, tt.wants.opts.SortBy)
|
||||
t.Errorf("%q. influxdb.DecodeFindOptions() = %v, want %v", tt.name, opts.SortBy, tt.wants.opts.SortBy)
|
||||
}
|
||||
if opts.Descending != tt.wants.opts.Descending {
|
||||
t.Errorf("%q. decodeFindOptions() = %v, want %v", tt.name, opts.Descending, tt.wants.opts.Descending)
|
||||
t.Errorf("%q. influxdb.DecodeFindOptions() = %v, want %v", tt.name, opts.Descending, tt.wants.opts.Descending)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaging_newPagingLinks(t *testing.T) {
|
||||
func TestPaging_NewPagingLinks(t *testing.T) {
|
||||
type args struct {
|
||||
basePath string
|
||||
num int
|
||||
|
|
@ -177,18 +178,18 @@ func TestPaging_newPagingLinks(t *testing.T) {
|
|||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
links := newPagingLinks(tt.args.basePath, tt.args.opts, tt.args.filter, tt.args.num)
|
||||
links := influxdb.NewPagingLinks(tt.args.basePath, tt.args.opts, tt.args.filter, tt.args.num)
|
||||
|
||||
if links.Prev != tt.wants.links.Prev {
|
||||
t.Errorf("%q. newPagingLinks() = %v, want %v", tt.name, links.Prev, tt.wants.links.Prev)
|
||||
t.Errorf("%q. influxdb.NewPagingLinks() = %v, want %v", tt.name, links.Prev, tt.wants.links.Prev)
|
||||
}
|
||||
|
||||
if links.Self != tt.wants.links.Self {
|
||||
t.Errorf("%q. newPagingLinks() = %v, want %v", tt.name, links.Self, tt.wants.links.Self)
|
||||
t.Errorf("%q. influxdb.NewPagingLinks() = %v, want %v", tt.name, links.Self, tt.wants.links.Self)
|
||||
}
|
||||
|
||||
if links.Next != tt.wants.links.Next {
|
||||
t.Errorf("%q. newPagingLinks() = %v, want %v", tt.name, links.Next, tt.wants.links.Next)
|
||||
t.Errorf("%q. influxdb.NewPagingLinks() = %v, want %v", tt.name, links.Next, tt.wants.links.Next)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ import (
|
|||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/kit/feature"
|
||||
kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
|
||||
)
|
||||
|
||||
|
|
@ -16,13 +16,13 @@ type PlatformHandler struct {
|
|||
}
|
||||
|
||||
// NewPlatformHandler returns a platform handler that serves the API and associated assets.
|
||||
func NewPlatformHandler(b *APIBackend, us influxdb.UserService, opts ...APIHandlerOptFn) *PlatformHandler {
|
||||
func NewPlatformHandler(b *APIBackend, opts ...APIHandlerOptFn) *PlatformHandler {
|
||||
h := NewAuthenticationHandler(b.Logger, b.HTTPErrorHandler)
|
||||
h.Handler = NewAPIHandler(b, opts...)
|
||||
h.Handler = feature.NewHandler(b.Logger, b.Flagger, feature.Flags(), NewAPIHandler(b, opts...))
|
||||
h.AuthorizationService = b.AuthorizationService
|
||||
h.SessionService = b.SessionService
|
||||
h.SessionRenewDisabled = b.SessionRenewDisabled
|
||||
h.UserService = us
|
||||
h.UserService = b.UserService
|
||||
|
||||
h.RegisterNoAuthRoute("GET", "/api/v2")
|
||||
h.RegisterNoAuthRoute("POST", "/api/v2/signin")
|
||||
|
|
|
|||
|
|
@ -4936,6 +4936,27 @@ paths:
|
|||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
/flags:
|
||||
get:
|
||||
operationId: GetFlags
|
||||
tags:
|
||||
- Users
|
||||
summary: Return the feature flags for the currently authenticated user
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/TraceSpan'
|
||||
responses:
|
||||
'200':
|
||||
description: Feature flags for the currently authenticated user
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Flags"
|
||||
default:
|
||||
description: Unexpected error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
/me:
|
||||
get:
|
||||
operationId: GetMe
|
||||
|
|
@ -8138,6 +8159,9 @@ components:
|
|||
type: array
|
||||
items:
|
||||
$ref: "#/components/schemas/User"
|
||||
Flags:
|
||||
type: object
|
||||
additionalProperties: true
|
||||
ResourceMember:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/User"
|
||||
|
|
@ -8223,6 +8247,9 @@ components:
|
|||
me:
|
||||
type: string
|
||||
format: uri
|
||||
flags:
|
||||
type: string
|
||||
format: uri
|
||||
orgs:
|
||||
type: string
|
||||
format: uri
|
||||
|
|
|
|||
|
|
@ -439,7 +439,7 @@ func (s *TelegrafService) FindTelegrafConfigByID(ctx context.Context, id platfor
|
|||
// FindTelegrafConfigs returns a list of telegraf configs that match filter and the total count of matching telegraf configs.
|
||||
// Additional options provide pagination & sorting.
|
||||
func (s *TelegrafService) FindTelegrafConfigs(ctx context.Context, f platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) {
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if f.OrgID != nil {
|
||||
params = append(params, [2]string{"orgID", f.OrgID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -378,7 +378,7 @@ func decodeGetUserLogRequest(ctx context.Context, r *http.Request) (*getUserLogR
|
|||
return nil, err
|
||||
}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -610,7 +610,7 @@ func (s *UserService) FindUser(ctx context.Context, filter influxdb.UserFilter)
|
|||
// FindUsers returns a list of users that match filter and the total count of matching users.
|
||||
// Additional options provide pagination & sorting.
|
||||
func (s *UserService) FindUsers(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) {
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.ID != nil {
|
||||
params = append(params, [2]string{"id", filter.ID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -98,7 +98,7 @@ func newGetVariablesResponse(ctx context.Context, variables []*influxdb.Variable
|
|||
num := len(variables)
|
||||
resp := getVariablesResponse{
|
||||
Variables: make([]variableResponse, 0, num),
|
||||
Links: newPagingLinks(prefixVariables, opts, f, num),
|
||||
Links: influxdb.NewPagingLinks(prefixVariables, opts, f, num),
|
||||
}
|
||||
|
||||
for _, variable := range variables {
|
||||
|
|
@ -115,7 +115,7 @@ type getVariablesRequest struct {
|
|||
}
|
||||
|
||||
func decodeGetVariablesRequest(ctx context.Context, r *http.Request) (*getVariablesRequest, error) {
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -457,7 +457,7 @@ func (s *VariableService) FindVariableByID(ctx context.Context, id influxdb.ID)
|
|||
// FindVariables returns a list of variables that match filter.
|
||||
// Additional options provide pagination & sorting.
|
||||
func (s *VariableService) FindVariables(ctx context.Context, filter influxdb.VariableFilter, opts ...influxdb.FindOptions) ([]*influxdb.Variable, error) {
|
||||
params := findOptionParams(opts...)
|
||||
params := influxdb.FindOptionParams(opts...)
|
||||
if filter.OrganizationID != nil {
|
||||
params = append(params, [2]string{"orgID", filter.OrganizationID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -150,6 +150,18 @@ func BindOptions(cmd *cobra.Command, opts []Opt) {
|
|||
}
|
||||
mustBindPFlag(o.Flag, flagset)
|
||||
*destP = viper.GetStringSlice(envVar)
|
||||
case *map[string]string:
|
||||
var d map[string]string
|
||||
if o.Default != nil {
|
||||
d = o.Default.(map[string]string)
|
||||
}
|
||||
if hasShort {
|
||||
flagset.StringToStringVarP(destP, o.Flag, string(o.Short), d, o.Desc)
|
||||
} else {
|
||||
flagset.StringToStringVar(destP, o.Flag, d, o.Desc)
|
||||
}
|
||||
mustBindPFlag(o.Flag, flagset)
|
||||
*destP = viper.GetStringMapString(envVar)
|
||||
case pflag.Value:
|
||||
if hasShort {
|
||||
flagset.VarP(destP, o.Flag, string(o.Short), o.Desc)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,271 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/Masterminds/sprig"
|
||||
"github.com/influxdata/influxdb/v2/kit/feature"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
const tmpl = `// Code generated by the feature package; DO NOT EDIT.
|
||||
|
||||
package feature
|
||||
|
||||
{{ .Qualify | import }}
|
||||
|
||||
{{ range $_, $flag := .Flags }}
|
||||
var {{ $flag.Key }} = {{ $.Qualify | package }}{{ $flag.Default | maker }}(
|
||||
{{ $flag.Name | quote }},
|
||||
{{ $flag.Key | quote }},
|
||||
{{ $flag.Contact | quote }},
|
||||
{{ $flag.Default | conditionalQuote }},
|
||||
{{ $.Qualify | package }}{{ $flag.Lifetime | lifetime }},
|
||||
{{ $flag.Expose }},
|
||||
)
|
||||
|
||||
// {{ $flag.Name | replace " " "_" | camelcase }} - {{ $flag.Description }}
|
||||
func {{ $flag.Name | replace " " "_" | camelcase }}() {{ $.Qualify | package }}{{ $flag.Default | flagType }} {
|
||||
return {{ $flag.Key }}
|
||||
}
|
||||
{{ end }}
|
||||
|
||||
var all = []{{ .Qualify | package }}Flag{
|
||||
{{ range $_, $flag := .Flags }} {{ $flag.Key }},
|
||||
{{ end }}}
|
||||
|
||||
var byKey = map[string]{{ $.Qualify | package }}Flag{
|
||||
{{ range $_, $flag := .Flags }} {{ $flag.Key | quote }}: {{ $flag.Key }},
|
||||
{{ end }}}
|
||||
`
|
||||
|
||||
type flagConfig struct {
|
||||
Name string
|
||||
Description string
|
||||
Key string
|
||||
Default interface{}
|
||||
Contact string
|
||||
Lifetime feature.Lifetime
|
||||
Expose bool
|
||||
}
|
||||
|
||||
func (f flagConfig) Valid() error {
|
||||
var problems []string
|
||||
if f.Key == "" {
|
||||
problems = append(problems, "missing key")
|
||||
}
|
||||
if f.Contact == "" {
|
||||
problems = append(problems, "missing contact")
|
||||
}
|
||||
if f.Default == nil {
|
||||
problems = append(problems, "missing default")
|
||||
}
|
||||
if f.Description == "" {
|
||||
problems = append(problems, "missing description")
|
||||
}
|
||||
|
||||
if len(problems) > 0 {
|
||||
name := f.Name
|
||||
if name == "" {
|
||||
if f.Key != "" {
|
||||
name = f.Key
|
||||
} else {
|
||||
name = "anonymous"
|
||||
}
|
||||
}
|
||||
// e.g. "my flag: missing key; missing default"
|
||||
return fmt.Errorf("%s: %s\n", name, strings.Join(problems, "; "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type flagValidationError struct {
|
||||
errs []error
|
||||
}
|
||||
|
||||
func newFlagValidationError(errs []error) *flagValidationError {
|
||||
if len(errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return &flagValidationError{errs}
|
||||
}
|
||||
|
||||
func (e *flagValidationError) Error() string {
|
||||
var s strings.Builder
|
||||
s.WriteString("flag validation error: \n")
|
||||
for _, err := range e.errs {
|
||||
s.WriteString(err.Error())
|
||||
}
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func validate(flags []flagConfig) error {
|
||||
var (
|
||||
errs []error
|
||||
seen = make(map[string]bool, len(flags))
|
||||
)
|
||||
for _, flag := range flags {
|
||||
if err := flag.Valid(); err != nil {
|
||||
errs = append(errs, err)
|
||||
} else if _, repeated := seen[flag.Key]; repeated {
|
||||
errs = append(errs, fmt.Errorf("duplicate flag key '%s'\n", flag.Key))
|
||||
}
|
||||
seen[flag.Key] = true
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return newFlagValidationError(errs)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var argv = struct {
|
||||
in, out *string
|
||||
qualify *bool
|
||||
}{
|
||||
in: flag.String("in", "", "flag configuration path"),
|
||||
out: flag.String("out", "", "flag generation destination path"),
|
||||
qualify: flag.Bool("qualify", false, "qualify types with imported package name"),
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := run(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func run() error {
|
||||
flag.Parse()
|
||||
|
||||
in, err := os.Open(*argv.in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
configuration, err := ioutil.ReadAll(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var flags []flagConfig
|
||||
err = yaml.Unmarshal(configuration, &flags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = validate(flags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t, err := template.New("flags").Funcs(templateFunctions()).Parse(tmpl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := os.Create(*argv.out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
var (
|
||||
buf = new(bytes.Buffer)
|
||||
vars = struct {
|
||||
Qualify bool
|
||||
Flags []flagConfig
|
||||
}{
|
||||
Qualify: *argv.qualify,
|
||||
Flags: flags,
|
||||
}
|
||||
)
|
||||
if err := t.Execute(buf, vars); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := ioutil.ReadAll(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
formatted, err := format.Source(raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = out.Write(formatted)
|
||||
return err
|
||||
}
|
||||
|
||||
func templateFunctions() template.FuncMap {
|
||||
functions := sprig.TxtFuncMap()
|
||||
|
||||
functions["lifetime"] = func(t interface{}) string {
|
||||
switch t {
|
||||
case feature.Permanent:
|
||||
return "Permanent"
|
||||
default:
|
||||
return "Temporary"
|
||||
}
|
||||
}
|
||||
|
||||
functions["conditionalQuote"] = func(t interface{}) string {
|
||||
switch t.(type) {
|
||||
case string:
|
||||
return fmt.Sprintf("%q", t)
|
||||
default:
|
||||
return fmt.Sprintf("%v", t)
|
||||
}
|
||||
}
|
||||
|
||||
functions["flagType"] = func(t interface{}) string {
|
||||
switch t.(type) {
|
||||
case bool:
|
||||
return "BoolFlag"
|
||||
case float64:
|
||||
return "FloatFlag"
|
||||
case int:
|
||||
return "IntFlag"
|
||||
default:
|
||||
return "StringFlag"
|
||||
}
|
||||
}
|
||||
|
||||
functions["maker"] = func(t interface{}) string {
|
||||
switch t.(type) {
|
||||
case bool:
|
||||
return "MakeBoolFlag"
|
||||
case float64:
|
||||
return "MakeFloatFlag"
|
||||
case int:
|
||||
return "MakeIntFlag"
|
||||
default:
|
||||
return "MakeStringFlag"
|
||||
}
|
||||
}
|
||||
|
||||
functions["package"] = func(t interface{}) string {
|
||||
if t.(bool) {
|
||||
return "feature."
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
functions["import"] = func(t interface{}) string {
|
||||
if t.(bool) {
|
||||
return "import \"github.com/influxdata/influxdb/v2/kit/feature\""
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
return functions
|
||||
}
|
||||
|
|
@ -0,0 +1,75 @@
|
|||
// Package feature provides feature flagging capabilities for InfluxDB servers.
|
||||
// This document describes this package and how it is used to control
|
||||
// experimental features in `influxd`.
|
||||
//
|
||||
// Flags are configured in `flags.yml` at the top of this repository.
|
||||
// Running `make flags` generates Go code based on this configuration
|
||||
// to programmatically test flag values in a given request context.
|
||||
// Boolean flags are the most common case, but integers, floats and
|
||||
// strings are supported for more complicated experiments.
|
||||
//
|
||||
// The `Flagger` interface is the crux of this package.
|
||||
// It computes a map of feature flag values for a given request context.
|
||||
// The default implementation always returns the flag default configured
|
||||
// in `flags.yml`. The override implementation allows an operator to
|
||||
// override feature flag defaults at startup. Changing these overrides
|
||||
// requires a restart.
|
||||
//
|
||||
// In `influxd`, a `Flagger` instance is provided to a `Handler` middleware
|
||||
// configured to intercept all API requests and annotate their request context
|
||||
// with a map of feature flags.
|
||||
//
|
||||
// A flag can opt in to be exposed externally in `flags.yml`. If exposed,
|
||||
// this flag will be included in the response from the `/api/v2/flags`
|
||||
// endpoint. This allows the UI and other API clients to control their
|
||||
// behavior according to the flag in addition to the server itself.
|
||||
//
|
||||
// A concrete example to illustrate the above:
|
||||
//
|
||||
// I have a feature called "My Feature" that will involve turning on new code
|
||||
// in both the UI and the server.
|
||||
//
|
||||
// First, I add an entry to `flags.yml`.
|
||||
//
|
||||
// ```yaml
|
||||
// - name: My Feature
|
||||
// description: My feature is awesome
|
||||
// key: myFeature
|
||||
// default: false
|
||||
// expose: true
|
||||
// contact: My Name
|
||||
// ```
|
||||
//
|
||||
// My flag type is inferred to be boolean by my default of `false` when I run
|
||||
// `make flags` and the `feature` package now includes `func MyFeature() BoolFlag`.
|
||||
//
|
||||
// I use this to control my backend code with
|
||||
//
|
||||
// ```go
|
||||
// if feature.MyFeature.Enabled(ctx) {
|
||||
// // new code...
|
||||
// } else {
|
||||
//	    // old code...
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// and the `/api/v2/flags` response provides the same information to the frontend.
|
||||
//
|
||||
// ```json
|
||||
// {
|
||||
// "myFeature": false
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// While `false` by default, I can turn on my experimental feature by starting
|
||||
// my server with a flag override.
|
||||
//
|
||||
// ```
|
||||
//	env INFLUXD_FEATURE_FLAGS="{\"flag1\":\"value1\",\"flag2\":\"value2\"}" influxd
|
||||
// ```
|
||||
//
|
||||
// ```
|
||||
// influxd --feature-flags flag1:value1,flag2:value2
|
||||
// ```
|
||||
//
|
||||
package feature
|
||||
|
|
@ -0,0 +1,133 @@
|
|||
package feature
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
type contextKey string
|
||||
|
||||
const featureContextKey contextKey = "influx/feature/v1"
|
||||
|
||||
// Flagger returns flag values.
|
||||
type Flagger interface {
|
||||
// Flags returns a map of flag keys to flag values.
|
||||
//
|
||||
// If an authorization is present on the context, it may be used to compute flag
|
||||
// values according to the affiliated user ID and its organization and other mappings.
|
||||
// Otherwise, they should be computed generally or return a default.
|
||||
//
|
||||
// One or more flags may be provided to restrict the results.
|
||||
// Otherwise, all flags should be computed.
|
||||
Flags(context.Context, ...Flag) (map[string]interface{}, error)
|
||||
}
|
||||
|
||||
// Annotate the context with a map computed of computed flags.
|
||||
func Annotate(ctx context.Context, f Flagger, flags ...Flag) (context.Context, error) {
|
||||
computed, err := f.Flags(ctx, flags...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
span := opentracing.SpanFromContext(ctx)
|
||||
if span != nil {
|
||||
for k, v := range computed {
|
||||
span.LogKV(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
return context.WithValue(ctx, featureContextKey, computed), nil
|
||||
}
|
||||
|
||||
// FlagsFromContext returns the map of flags attached to the context
|
||||
// by Annotate, or nil if none is found.
|
||||
func FlagsFromContext(ctx context.Context) map[string]interface{} {
|
||||
v, ok := ctx.Value(featureContextKey).(map[string]interface{})
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// ExposedFlagsFromContext returns the filtered map of exposed flags attached
|
||||
// to the context by Annotate, or nil if none is found.
|
||||
func ExposedFlagsFromContext(ctx context.Context) map[string]interface{} {
|
||||
m := FlagsFromContext(ctx)
|
||||
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
filtered := make(map[string]interface{})
|
||||
for k, v := range m {
|
||||
if flag := byKey[k]; flag != nil && flag.Expose() {
|
||||
filtered[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
|
||||
// Lifetime represents the intended lifetime of the feature flag.
//
// The zero value is Temporary, the most common case, but Permanent
// is included to mark special cases where a flag is not intended
// to be removed, e.g. enabling debug tracing for an organization.
// Lifetimes are parsed case-insensitively from YAML; see UnmarshalYAML.
//
// TODO(gavincabbage): This may become a stale date, which can then
// be used to trigger a notification to the contact when the flag
// has become stale, to encourage flag cleanup.
type Lifetime int

const (
	// Temporary indicates a flag is intended to be removed after a feature is no longer in development.
	Temporary Lifetime = iota
	// Permanent indicates a flag is not intended to be removed.
	Permanent
)
|
||||
|
||||
// UnmarshalYAML implements yaml.Unmarshaler and interprets a case-insensitive text
|
||||
// representation as a lifetime constant.
|
||||
func (l *Lifetime) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var s string
|
||||
if err := unmarshal(&s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch strings.ToLower(s) {
|
||||
case "permanent":
|
||||
*l = Permanent
|
||||
default:
|
||||
*l = Temporary
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// defaultFlagger computes every flag value from its configured default.
type defaultFlagger struct{}

// DefaultFlagger returns a flagger that always returns default values.
func DefaultFlagger() Flagger {
	return &defaultFlagger{}
}
|
||||
|
||||
// Flags returns a map of default values. It never returns an error.
|
||||
func (*defaultFlagger) Flags(_ context.Context, flags ...Flag) (map[string]interface{}, error) {
|
||||
if len(flags) == 0 {
|
||||
flags = Flags()
|
||||
}
|
||||
|
||||
m := make(map[string]interface{}, len(flags))
|
||||
for _, flag := range flags {
|
||||
m[flag.Key()] = flag.Default()
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Flags returns all feature flags.
func Flags() []Flag {
	// all is populated by the generated flag list (see the generated list.go).
	return all
}
|
||||
|
|
@ -0,0 +1,185 @@
|
|||
package feature_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/kit/feature"
|
||||
)
|
||||
|
||||
func Test_feature(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
flag feature.Flag
|
||||
err error
|
||||
values map[string]interface{}
|
||||
ctx context.Context
|
||||
expected interface{}
|
||||
}{
|
||||
{
|
||||
name: "bool happy path",
|
||||
flag: newFlag("test", false),
|
||||
values: map[string]interface{}{
|
||||
"test": true,
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "int happy path",
|
||||
flag: newFlag("test", 0),
|
||||
values: map[string]interface{}{
|
||||
"test": int32(42),
|
||||
},
|
||||
expected: int32(42),
|
||||
},
|
||||
{
|
||||
name: "float happy path",
|
||||
flag: newFlag("test", 0.0),
|
||||
values: map[string]interface{}{
|
||||
"test": 42.42,
|
||||
},
|
||||
expected: 42.42,
|
||||
},
|
||||
{
|
||||
name: "string happy path",
|
||||
flag: newFlag("test", ""),
|
||||
values: map[string]interface{}{
|
||||
"test": "restaurantattheendoftheuniverse",
|
||||
},
|
||||
expected: "restaurantattheendoftheuniverse",
|
||||
},
|
||||
{
|
||||
name: "bool missing use default",
|
||||
flag: newFlag("test", false),
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "bool missing use default true",
|
||||
flag: newFlag("test", true),
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "int missing use default",
|
||||
flag: newFlag("test", 65),
|
||||
expected: int32(65),
|
||||
},
|
||||
{
|
||||
name: "float missing use default",
|
||||
flag: newFlag("test", 65.65),
|
||||
expected: 65.65,
|
||||
},
|
||||
{
|
||||
name: "string missing use default",
|
||||
flag: newFlag("test", "mydefault"),
|
||||
expected: "mydefault",
|
||||
},
|
||||
|
||||
{
|
||||
name: "bool invalid use default",
|
||||
flag: newFlag("test", true),
|
||||
values: map[string]interface{}{
|
||||
"test": "notabool",
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "int invalid use default",
|
||||
flag: newFlag("test", 42),
|
||||
values: map[string]interface{}{
|
||||
"test": 99.99,
|
||||
},
|
||||
expected: int32(42),
|
||||
},
|
||||
{
|
||||
name: "float invalid use default",
|
||||
flag: newFlag("test", 42.42),
|
||||
values: map[string]interface{}{
|
||||
"test": 99,
|
||||
},
|
||||
expected: 42.42,
|
||||
},
|
||||
{
|
||||
name: "string invalid use default",
|
||||
flag: newFlag("test", "restaurantattheendoftheuniverse"),
|
||||
values: map[string]interface{}{
|
||||
"test": true,
|
||||
},
|
||||
expected: "restaurantattheendoftheuniverse",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range cases {
|
||||
t.Run("flagger "+test.name, func(t *testing.T) {
|
||||
flagger := testFlagsFlagger{
|
||||
m: test.values,
|
||||
err: test.err,
|
||||
}
|
||||
|
||||
var actual interface{}
|
||||
switch flag := test.flag.(type) {
|
||||
case feature.BoolFlag:
|
||||
actual = flag.Enabled(test.ctx, flagger)
|
||||
case feature.FloatFlag:
|
||||
actual = flag.Float(test.ctx, flagger)
|
||||
case feature.IntFlag:
|
||||
actual = flag.Int(test.ctx, flagger)
|
||||
case feature.StringFlag:
|
||||
actual = flag.String(test.ctx, flagger)
|
||||
default:
|
||||
t.Errorf("unknown flag type %T (%#v)", flag, flag)
|
||||
}
|
||||
|
||||
if actual != test.expected {
|
||||
t.Errorf("unexpected flag value: got %v, want %v", actual, test.expected)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("annotate "+test.name, func(t *testing.T) {
|
||||
flagger := testFlagsFlagger{
|
||||
m: test.values,
|
||||
err: test.err,
|
||||
}
|
||||
|
||||
ctx, err := feature.Annotate(context.Background(), flagger)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
var actual interface{}
|
||||
switch flag := test.flag.(type) {
|
||||
case feature.BoolFlag:
|
||||
actual = flag.Enabled(ctx)
|
||||
case feature.FloatFlag:
|
||||
actual = flag.Float(ctx)
|
||||
case feature.IntFlag:
|
||||
actual = flag.Int(ctx)
|
||||
case feature.StringFlag:
|
||||
actual = flag.String(ctx)
|
||||
default:
|
||||
t.Errorf("unknown flag type %T (%#v)", flag, flag)
|
||||
}
|
||||
|
||||
if actual != test.expected {
|
||||
t.Errorf("unexpected flag value: got %v, want %v", actual, test.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type testFlagsFlagger struct {
|
||||
m map[string]interface{}
|
||||
err error
|
||||
}
|
||||
|
||||
func (f testFlagsFlagger) Flags(ctx context.Context, flags ...feature.Flag) (map[string]interface{}, error) {
|
||||
if f.err != nil {
|
||||
return nil, f.err
|
||||
}
|
||||
|
||||
return f.m, nil
|
||||
}
|
||||
|
||||
// newFlag builds a test flag whose name and key are both key, with the
// concrete flag type inferred from defaultValue.
func newFlag(key string, defaultValue interface{}) feature.Flag {
	return feature.MakeFlag(key, key, "", defaultValue, feature.Temporary, false)
}
|
||||
|
|
@ -0,0 +1,216 @@
|
|||
//go:generate go run ./_codegen/main.go --in ../../flags.yml --out ./list.go
|
||||
|
||||
package feature
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Flag represents a generic feature flag with a key and a default.
|
||||
type Flag interface {
|
||||
// Key returns the programmatic backend identifier for the flag.
|
||||
Key() string
|
||||
// Default returns the type-agnostic zero value for the flag.
|
||||
// Type-specific flag implementations may expose a typed default
|
||||
// (e.g. BoolFlag includes a boolean Default field).
|
||||
Default() interface{}
|
||||
// Expose the flag.
|
||||
Expose() bool
|
||||
}
|
||||
|
||||
// MakeFlag constructs a Flag. The concrete implementation is inferred from the provided default.
|
||||
func MakeFlag(name, key, owner string, defaultValue interface{}, lifetime Lifetime, expose bool) Flag {
|
||||
b := MakeBase(name, key, owner, defaultValue, lifetime, expose)
|
||||
switch v := defaultValue.(type) {
|
||||
case bool:
|
||||
return BoolFlag{b, v}
|
||||
case float64:
|
||||
return FloatFlag{b, v}
|
||||
case int32:
|
||||
return IntFlag{b, v}
|
||||
case int:
|
||||
return IntFlag{b, int32(v)}
|
||||
case string:
|
||||
return StringFlag{b, v}
|
||||
default:
|
||||
return StringFlag{b, fmt.Sprintf("%v", v)}
|
||||
}
|
||||
}
|
||||
|
||||
// Base carries the metadata common to every concrete flag type.
type Base struct {
	// name of the flag.
	name string
	// key is the programmatic backend identifier for the flag.
	key string
	// defaultValue for the flag.
	defaultValue interface{}
	// owner is an individual or team responsible for the flag.
	owner string
	// lifetime of the feature flag.
	lifetime Lifetime
	// expose the flag.
	expose bool
}

var _ Flag = Base{}

// MakeBase constructs a flag Base.
func MakeBase(name, key, owner string, defaultValue interface{}, lifetime Lifetime, expose bool) Base {
	return Base{
		name:         name,
		key:          key,
		owner:        owner,
		defaultValue: defaultValue,
		lifetime:     lifetime,
		expose:       expose,
	}
}

// Key returns the programmatic backend identifier for the flag.
func (f Base) Key() string {
	return f.key
}

// Default returns the type-agnostic zero value for the flag.
func (f Base) Default() interface{} {
	return f.defaultValue
}

// Expose reports whether the flag is exposed to external API clients.
func (f Base) Expose() bool {
	return f.expose
}
|
||||
|
||||
func (f Base) value(ctx context.Context, flagger ...Flagger) (interface{}, bool) {
|
||||
var (
|
||||
m map[string]interface{}
|
||||
ok bool
|
||||
)
|
||||
if len(flagger) < 1 {
|
||||
m, ok = ctx.Value(featureContextKey).(map[string]interface{})
|
||||
} else {
|
||||
var err error
|
||||
m, err = flagger[0].Flags(ctx, f)
|
||||
ok = err == nil
|
||||
}
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
v, ok := m[f.Key()]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return v, true
|
||||
}
|
||||
|
||||
// StringFlag implements Flag for string values.
type StringFlag struct {
	Base
	// defaultString is returned when no usable value is found on the context.
	defaultString string
}

var _ Flag = StringFlag{}

// MakeStringFlag returns a string flag with the given Base and default.
func MakeStringFlag(name, key, owner string, defaultValue string, lifetime Lifetime, expose bool) StringFlag {
	b := MakeBase(name, key, owner, defaultValue, lifetime, expose)
	return StringFlag{b, defaultValue}
}

// String value of the flag on the request context. The default is returned
// when the value is missing or is not a string.
func (f StringFlag) String(ctx context.Context, flagger ...Flagger) string {
	i, ok := f.value(ctx, flagger...)
	if !ok {
		return f.defaultString
	}
	s, ok := i.(string)
	if !ok {
		return f.defaultString
	}
	return s
}
|
||||
|
||||
// FloatFlag implements Flag for float values.
type FloatFlag struct {
	Base
	// defaultFloat is returned when no usable value is found on the context.
	defaultFloat float64
}

var _ Flag = FloatFlag{}

// MakeFloatFlag returns a float flag with the given Base and default.
func MakeFloatFlag(name, key, owner string, defaultValue float64, lifetime Lifetime, expose bool) FloatFlag {
	b := MakeBase(name, key, owner, defaultValue, lifetime, expose)
	return FloatFlag{b, defaultValue}
}

// Float value of the flag on the request context. The default is returned
// when the value is missing or is not a float64.
func (f FloatFlag) Float(ctx context.Context, flagger ...Flagger) float64 {
	i, ok := f.value(ctx, flagger...)
	if !ok {
		return f.defaultFloat
	}
	v, ok := i.(float64)
	if !ok {
		return f.defaultFloat
	}
	return v
}
|
||||
|
||||
// IntFlag implements Flag for integer values.
type IntFlag struct {
	Base
	// defaultInt is returned when no usable value is found on the context.
	defaultInt int32
}

var _ Flag = IntFlag{}

// MakeIntFlag returns an integer flag with the given Base and default.
func MakeIntFlag(name, key, owner string, defaultValue int32, lifetime Lifetime, expose bool) IntFlag {
	b := MakeBase(name, key, owner, defaultValue, lifetime, expose)
	return IntFlag{b, defaultValue}
}

// Int value of the flag on the request context. The default is returned
// when the value is missing or is not an int32.
func (f IntFlag) Int(ctx context.Context, flagger ...Flagger) int32 {
	i, ok := f.value(ctx, flagger...)
	if !ok {
		return f.defaultInt
	}
	v, ok := i.(int32)
	if !ok {
		return f.defaultInt
	}
	return v
}
|
||||
|
||||
// BoolFlag implements Flag for boolean values.
type BoolFlag struct {
	Base
	// defaultBool is returned when no usable value is found on the context.
	defaultBool bool
}

var _ Flag = BoolFlag{}

// MakeBoolFlag returns a boolean flag with the given Base and default.
func MakeBoolFlag(name, key, owner string, defaultValue bool, lifetime Lifetime, expose bool) BoolFlag {
	b := MakeBase(name, key, owner, defaultValue, lifetime, expose)
	return BoolFlag{b, defaultValue}
}

// Enabled indicates whether flag is true or false on the request context.
// The default is returned when the value is missing or is not a bool.
func (f BoolFlag) Enabled(ctx context.Context, flagger ...Flagger) bool {
	i, ok := f.value(ctx, flagger...)
	if !ok {
		return f.defaultBool
	}
	v, ok := i.(bool)
	if !ok {
		return f.defaultBool
	}
	return v
}
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
// Code generated by the feature package; DO NOT EDIT.
|
||||
|
||||
package feature
|
||||
|
||||
var backendExample = MakeBoolFlag(
|
||||
"Backend Example",
|
||||
"backendExample",
|
||||
"Gavin Cabbage",
|
||||
false,
|
||||
Permanent,
|
||||
false,
|
||||
)
|
||||
|
||||
// BackendExample - A permanent backend example boolean flag
|
||||
func BackendExample() BoolFlag {
|
||||
return backendExample
|
||||
}
|
||||
|
||||
var frontendExample = MakeIntFlag(
|
||||
"Frontend Example",
|
||||
"frontendExample",
|
||||
"Gavin Cabbage",
|
||||
42,
|
||||
Temporary,
|
||||
true,
|
||||
)
|
||||
|
||||
// FrontendExample - A temporary frontend example integer flag
|
||||
func FrontendExample() IntFlag {
|
||||
return frontendExample
|
||||
}
|
||||
|
||||
var all = []Flag{
|
||||
backendExample,
|
||||
frontendExample,
|
||||
}
|
||||
|
||||
var byKey = map[string]Flag{
|
||||
"backendExample": backendExample,
|
||||
"frontendExample": frontendExample,
|
||||
}
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
package feature
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Handler is a middleware that annotates the context with a map of computed feature flags.
// To accurately compute identity-scoped flags, this middleware should be executed after any
// authorization middleware has annotated the request context with an authorizer.
type Handler struct {
	// log receives warnings when flag computation fails.
	log *zap.Logger
	// next is the wrapped handler; may be nil.
	next http.Handler
	// flagger computes flag values per request.
	flagger Flagger
	// flags restricts which flags are computed.
	flags []Flag
}

// NewHandler returns a configured feature flag middleware that will annotate request context
// with a computed map of the given flags using the provided Flagger.
func NewHandler(log *zap.Logger, flagger Flagger, flags []Flag, next http.Handler) http.Handler {
	return &Handler{
		log:     log,
		next:    next,
		flagger: flagger,
		flags:   flags,
	}
}
|
||||
|
||||
// ServeHTTP annotates the request context with a map of computed feature flags before
|
||||
// continuing to serve the request.
|
||||
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, err := Annotate(r.Context(), h.flagger, h.flags...)
|
||||
if err != nil {
|
||||
h.log.Warn("Unable to annotate context with feature flags", zap.Error(err))
|
||||
} else {
|
||||
r = r.WithContext(ctx)
|
||||
}
|
||||
|
||||
if h.next != nil {
|
||||
h.next.ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
package feature_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/kit/feature"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
// Test_Handler verifies the middleware swaps in an annotated context and
// still calls the wrapped handler.
func Test_Handler(t *testing.T) {
	var (
		w = &httptest.ResponseRecorder{}
		r = httptest.NewRequest(http.MethodGet, "http://nowhere.test", new(bytes.Buffer)).
			WithContext(context.Background())

		// original is retained to detect that the handler received a derived context.
		original = r.Context()
	)

	handler := &checkHandler{t: t, f: func(t *testing.T, r *http.Request) {
		// Annotate returns a child context, so the wrapped handler must not
		// observe the identical context value the request started with.
		if r.Context() == original {
			t.Error("expected annotated context")
		}
	}}

	subject := feature.NewHandler(zaptest.NewLogger(t), feature.DefaultFlagger(), feature.Flags(), handler)

	subject.ServeHTTP(w, r)

	if !handler.called {
		t.Error("expected handler to be called")
	}
}
|
||||
|
||||
// checkHandler records that it was invoked and delegates per-request
// assertions to the configured check function f.
type checkHandler struct {
	t *testing.T
	f func(t *testing.T, r *http.Request)
	// called reports whether ServeHTTP was ever invoked.
	called bool
}

// ServeHTTP marks the handler as called and runs the configured check.
func (h *checkHandler) ServeHTTP(_ http.ResponseWriter, r *http.Request) {
	h.called = true
	h.f(h.t, r)
}
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
package override
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/kit/feature"
|
||||
)
|
||||
|
||||
// Flagger can override default flag values.
type Flagger struct {
	// overrides maps flag keys to their string-typed replacement values;
	// values are coerced to the flag's concrete type on lookup.
	overrides map[string]string
}

// Make returns a Flagger that serves defaults overridden by the provided
// key-to-value map. It currently never returns an error.
func Make(m map[string]string) (Flagger, error) {
	return Flagger{
		overrides: m,
	}, nil
}
|
||||
|
||||
// Flags returns a map of default values. It never returns an error.
|
||||
func (f Flagger) Flags(_ context.Context, flags ...feature.Flag) (map[string]interface{}, error) {
|
||||
if len(flags) == 0 {
|
||||
flags = feature.Flags()
|
||||
}
|
||||
|
||||
m := make(map[string]interface{}, len(flags))
|
||||
for _, flag := range flags {
|
||||
if s, overridden := f.overrides[flag.Key()]; overridden {
|
||||
iface, err := f.coerce(s, flag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[flag.Key()] = iface
|
||||
} else {
|
||||
m[flag.Key()] = flag.Default()
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (Flagger) coerce(s string, flag feature.Flag) (iface interface{}, err error) {
|
||||
switch flag.(type) {
|
||||
case feature.BoolFlag:
|
||||
iface, err = strconv.ParseBool(s)
|
||||
case feature.IntFlag:
|
||||
iface, err = strconv.Atoi(s)
|
||||
case feature.FloatFlag:
|
||||
iface, err = strconv.ParseFloat(s, 64)
|
||||
default:
|
||||
iface = s
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("coercing string %q based on flag type %T: %v", s, flag, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
@ -0,0 +1,114 @@
|
|||
package override
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/kit/feature"
|
||||
)
|
||||
|
||||
func TestFlagger(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
env map[string]string
|
||||
defaults []feature.Flag
|
||||
expected map[string]interface{}
|
||||
expectMakeErr bool
|
||||
expectFlagsErr bool
|
||||
}{
|
||||
{
|
||||
name: "enabled happy path filtering",
|
||||
env: map[string]string{
|
||||
"flag1": "new1",
|
||||
"flag3": "new3",
|
||||
},
|
||||
defaults: []feature.Flag{
|
||||
newFlag("flag0", "original0"),
|
||||
newFlag("flag1", "original1"),
|
||||
newFlag("flag2", "original2"),
|
||||
newFlag("flag3", "original3"),
|
||||
newFlag("flag4", "original4"),
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
"flag0": "original0",
|
||||
"flag1": "new1",
|
||||
"flag2": "original2",
|
||||
"flag3": "new3",
|
||||
"flag4": "original4",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "enabled happy path types",
|
||||
env: map[string]string{
|
||||
"intflag": "43",
|
||||
"floatflag": "43.43",
|
||||
"boolflag": "true",
|
||||
},
|
||||
defaults: []feature.Flag{
|
||||
newFlag("intflag", 42),
|
||||
newFlag("floatflag", 42.42),
|
||||
newFlag("boolflag", false),
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
"intflag": 43,
|
||||
"floatflag": 43.43,
|
||||
"boolflag": true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "type coerce error",
|
||||
env: map[string]string{
|
||||
"key": "not_an_int",
|
||||
},
|
||||
defaults: []feature.Flag{
|
||||
newFlag("key", 42),
|
||||
},
|
||||
expectFlagsErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range cases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
subject, err := Make(test.env)
|
||||
if err != nil {
|
||||
if test.expectMakeErr {
|
||||
return
|
||||
}
|
||||
t.Fatalf("unexpected error making Flagger: %v", err)
|
||||
}
|
||||
|
||||
computed, err := subject.Flags(context.Background(), test.defaults...)
|
||||
if err != nil {
|
||||
if test.expectFlagsErr {
|
||||
return
|
||||
}
|
||||
t.Fatalf("unexpected error calling Flags: %v", err)
|
||||
}
|
||||
|
||||
if len(computed) != len(test.expected) {
|
||||
t.Fatalf("incorrect number of flags computed: expected %d, got %d", len(test.expected), len(computed))
|
||||
}
|
||||
|
||||
// check for extra or incorrect keys
|
||||
for k, v := range computed {
|
||||
if xv, found := test.expected[k]; !found {
|
||||
t.Errorf("unexpected key %s", k)
|
||||
} else if v != xv {
|
||||
t.Errorf("incorrect value for key %s: expected %v, got %v", k, xv, v)
|
||||
}
|
||||
}
|
||||
|
||||
// check for missing keys
|
||||
for k := range test.expected {
|
||||
if _, found := computed[k]; !found {
|
||||
t.Errorf("missing expected key %s", k)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// newFlag builds a test flag whose name and key are both key, with the
// concrete flag type inferred from defaultValue.
func newFlag(key string, defaultValue interface{}) feature.Flag {
	return feature.MakeFlag(key, key, "", defaultValue, feature.Temporary, false)
}
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
package feature
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
icontext "github.com/influxdata/influxdb/v2/context"
|
||||
)
|
||||
|
||||
var ErrMissingTargetInfo = errors.New("unable to determine any user or org IDs from authorizer on context")
|
||||
|
||||
// Target against which to match a feature flag rule.
|
||||
type Target struct {
|
||||
// UserID to Target.
|
||||
UserID influxdb.ID
|
||||
// OrgIDs to Target.
|
||||
OrgIDs []influxdb.ID
|
||||
}
|
||||
|
||||
// MakeTarget returns a populated feature flag Target for the given environment,
// including user and org information from the provided context, if available.
//
// If the authorizer on the context provides a user ID, it is used to fetch associated org IDs.
// If a user ID is not provided, an org ID is taken directly off the authorizer if possible.
// If no user or org information can be determined, a sentinel error is returned.
func MakeTarget(ctx context.Context, urms influxdb.UserResourceMappingService) (Target, error) {
	auth, err := icontext.GetAuthorizer(ctx)
	if err != nil {
		return Target{}, ErrMissingTargetInfo
	}
	userID := auth.GetUserID()

	var orgIDs []influxdb.ID
	if userID.Valid() {
		// Preferred path: resolve every org the user belongs to.
		orgIDs, err = fromURMs(ctx, userID, urms)
		if err != nil {
			return Target{}, err
		}
	} else if a, ok := auth.(*influxdb.Authorization); ok {
		// Fall back to the single org carried by the authorization itself.
		orgIDs = []influxdb.ID{a.OrgID}
	} else {
		return Target{}, ErrMissingTargetInfo
	}

	return Target{
		UserID: userID,
		OrgIDs: orgIDs,
	}, nil
}
|
||||
|
||||
func fromURMs(ctx context.Context, userID influxdb.ID, urms influxdb.UserResourceMappingService) ([]influxdb.ID, error) {
|
||||
m, _, err := urms.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{
|
||||
UserID: userID,
|
||||
ResourceType: influxdb.OrgsResourceType,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("finding organization mappings for user %s: %v", userID, err)
|
||||
}
|
||||
|
||||
orgIDs := make([]influxdb.ID, 0, len(m))
|
||||
for _, o := range m {
|
||||
orgIDs = append(orgIDs, o.ResourceID)
|
||||
}
|
||||
|
||||
return orgIDs, nil
|
||||
}
|
||||
|
|
@ -354,7 +354,7 @@ func (s *Service) updateUser(ctx context.Context, tx Tx, id influxdb.ID, upd inf
|
|||
}
|
||||
|
||||
if upd.Name != nil {
|
||||
if err := s.removeUserFromIndex(ctx, tx, id, *upd.Name); err != nil {
|
||||
if err := s.removeUserFromIndex(ctx, tx, id, u.Name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,27 @@
|
|||
package mock
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/influxdata/influxdb/v2/kit/feature"
|
||||
)
|
||||
|
||||
// Flagger is a mock feature.Flagger backed by a fixed map of flag values.
type Flagger struct {
	// m maps flag keys to the values the mock reports.
	m map[string]interface{}
}

// NewFlagger returns a mock Flagger that reports the given value for each flag.
func NewFlagger(flags map[feature.Flag]interface{}) *Flagger {
	m := make(map[string]interface{}, len(flags))
	for k, v := range flags {
		m[k.Key()] = v
	}
	return &Flagger{m}
}

// Flags returns a map of flag keys to flag values according to its configured flag map.
// It never returns an error. Note: the variadic flag arguments are ignored; the
// full configured map is always returned.
func (f Flagger) Flags(context.Context, ...feature.Flag) (map[string]interface{}, error) {
	return f.m, nil
}
|
||||
71
paging.go
71
paging.go
|
|
@ -1,6 +1,8 @@
|
|||
package influxdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
|
@ -31,6 +33,75 @@ type FindOptions struct {
|
|||
Descending bool
|
||||
}
|
||||
|
||||
// DecodeFindOptions returns a FindOptions decoded from http request.
|
||||
func DecodeFindOptions(r *http.Request) (*FindOptions, error) {
|
||||
opts := &FindOptions{}
|
||||
qp := r.URL.Query()
|
||||
|
||||
if offset := qp.Get("offset"); offset != "" {
|
||||
o, err := strconv.Atoi(offset)
|
||||
if err != nil {
|
||||
return nil, &Error{
|
||||
Code: EInvalid,
|
||||
Msg: "offset is invalid",
|
||||
}
|
||||
}
|
||||
|
||||
opts.Offset = o
|
||||
}
|
||||
|
||||
if limit := qp.Get("limit"); limit != "" {
|
||||
l, err := strconv.Atoi(limit)
|
||||
if err != nil {
|
||||
return nil, &Error{
|
||||
Code: EInvalid,
|
||||
Msg: "limit is invalid",
|
||||
}
|
||||
}
|
||||
|
||||
if l < 1 || l > MaxPageSize {
|
||||
return nil, &Error{
|
||||
Code: EInvalid,
|
||||
Msg: fmt.Sprintf("limit must be between 1 and %d", MaxPageSize),
|
||||
}
|
||||
}
|
||||
|
||||
opts.Limit = l
|
||||
} else {
|
||||
opts.Limit = DefaultPageSize
|
||||
}
|
||||
|
||||
if sortBy := qp.Get("sortBy"); sortBy != "" {
|
||||
opts.SortBy = sortBy
|
||||
}
|
||||
|
||||
if descending := qp.Get("descending"); descending != "" {
|
||||
desc, err := strconv.ParseBool(descending)
|
||||
if err != nil {
|
||||
return nil, &Error{
|
||||
Code: EInvalid,
|
||||
Msg: "descending is invalid",
|
||||
}
|
||||
}
|
||||
|
||||
opts.Descending = desc
|
||||
}
|
||||
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
func FindOptionParams(opts ...FindOptions) [][2]string {
|
||||
var out [][2]string
|
||||
for _, o := range opts {
|
||||
for k, vals := range o.QueryParams() {
|
||||
for _, v := range vals {
|
||||
out = append(out, [2]string{k, v})
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// QueryParams returns a map containing url query params.
|
||||
func (f FindOptions) QueryParams() map[string][]string {
|
||||
qp := map[string][]string{
|
||||
|
|
|
|||
|
|
@ -119,9 +119,13 @@ func (s *HTTPRemoteService) Apply(ctx context.Context, orgID, userID influxdb.ID
|
|||
func (s *HTTPRemoteService) apply(ctx context.Context, orgID influxdb.ID, pkg *Pkg, dryRun bool, opts ...ApplyOptFn) (Summary, Diff, error) {
|
||||
opt := applyOptFromOptFns(opts...)
|
||||
|
||||
b, err := pkg.Encode(EncodingJSON)
|
||||
if err != nil {
|
||||
return Summary{}, Diff{}, err
|
||||
var rawPkg []byte
|
||||
if pkg != nil {
|
||||
b, err := pkg.Encode(EncodingJSON)
|
||||
if err != nil {
|
||||
return Summary{}, Diff{}, err
|
||||
}
|
||||
rawPkg = b
|
||||
}
|
||||
|
||||
reqBody := ReqApplyPkg{
|
||||
|
|
@ -129,7 +133,7 @@ func (s *HTTPRemoteService) apply(ctx context.Context, orgID influxdb.ID, pkg *P
|
|||
DryRun: dryRun,
|
||||
EnvRefs: opt.EnvRefs,
|
||||
Secrets: opt.MissingSecrets,
|
||||
RawPkg: b,
|
||||
RawPkg: rawPkg,
|
||||
}
|
||||
if opt.StackID != 0 {
|
||||
stackID := opt.StackID.String()
|
||||
|
|
@ -137,7 +141,7 @@ func (s *HTTPRemoteService) apply(ctx context.Context, orgID influxdb.ID, pkg *P
|
|||
}
|
||||
|
||||
var resp RespApplyPkg
|
||||
err = s.Client.
|
||||
err := s.Client.
|
||||
PostJSON(reqBody, RoutePrefix, "/apply").
|
||||
DecodeJSON(&resp).
|
||||
Do(ctx)
|
||||
|
|
|
|||
|
|
@ -295,7 +295,7 @@ func (r ReqApplyPkg) Pkgs(encoding Encoding) (*Pkg, error) {
|
|||
rawPkgs = append(rawPkgs, pkg)
|
||||
}
|
||||
|
||||
return Combine(rawPkgs)
|
||||
return Combine(rawPkgs, ValidWithoutResources())
|
||||
}
|
||||
|
||||
// RespApplyPkg is the response body for the apply pkg endpoint.
|
||||
|
|
|
|||
|
|
@ -293,6 +293,10 @@ type Pkg struct {
|
|||
|
||||
// Encode is a helper for encoding the pkg correctly.
|
||||
func (p *Pkg) Encode(encoding Encoding) ([]byte, error) {
|
||||
if p == nil {
|
||||
panic("attempted to encode a nil Pkg")
|
||||
}
|
||||
|
||||
var (
|
||||
buf bytes.Buffer
|
||||
err error
|
||||
|
|
|
|||
|
|
@ -3580,7 +3580,7 @@ spec:
|
|||
t.Run("with valid fields", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/telegraf", func(t *testing.T, pkg *Pkg) {
|
||||
sum := pkg.Summary()
|
||||
require.Len(t, sum.TelegrafConfigs, 1)
|
||||
require.Len(t, sum.TelegrafConfigs, 2)
|
||||
|
||||
actual := sum.TelegrafConfigs[0]
|
||||
assert.Equal(t, "display name", actual.TelegrafConfig.Name)
|
||||
|
|
@ -3590,6 +3590,10 @@ spec:
|
|||
assert.Equal(t, "label_1", actual.LabelAssociations[0].Name)
|
||||
assert.Equal(t, "label_2", actual.LabelAssociations[1].Name)
|
||||
|
||||
actual = sum.TelegrafConfigs[1]
|
||||
assert.Equal(t, "tele_2", actual.TelegrafConfig.Name)
|
||||
assert.Empty(t, actual.LabelAssociations)
|
||||
|
||||
require.Len(t, sum.LabelMappings, 2)
|
||||
expectedMapping := SummaryLabelMapping{
|
||||
Status: StateStatusNew,
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
|
@ -644,28 +645,37 @@ func (s *Service) filterOrgResourceKinds(resourceKindFilters []Kind) []struct {
|
|||
// for later calls to Apply. This func will be run on an Apply if it has not been run
|
||||
// already.
|
||||
func (s *Service) DryRun(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (Summary, Diff, error) {
|
||||
state, err := s.dryRun(ctx, orgID, pkg, opts...)
|
||||
opt := applyOptFromOptFns(opts...)
|
||||
|
||||
if opt.StackID != 0 {
|
||||
remotePkgs, err := s.getStackRemotePackages(ctx, opt.StackID)
|
||||
if err != nil {
|
||||
return Summary{}, Diff{}, err
|
||||
}
|
||||
pkg, err = Combine(append(remotePkgs, pkg), ValidWithoutResources())
|
||||
if err != nil {
|
||||
return Summary{}, Diff{}, err
|
||||
}
|
||||
}
|
||||
|
||||
state, err := s.dryRun(ctx, orgID, pkg, opt)
|
||||
if err != nil {
|
||||
return Summary{}, Diff{}, err
|
||||
}
|
||||
return newSummaryFromStatePkg(state, pkg), state.diff(), nil
|
||||
}
|
||||
|
||||
func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (*stateCoordinator, error) {
|
||||
func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opt ApplyOpt) (*stateCoordinator, error) {
|
||||
// so here's the deal, when we have issues with the parsing validation, we
|
||||
// continue to do the diff anyhow. any resource that does not have a name
|
||||
// will be skipped, and won't bleed into the dry run here. We can now return
|
||||
// a error (parseErr) and valid diff/summary.
|
||||
var parseErr error
|
||||
if !pkg.isParsed {
|
||||
err := pkg.Validate()
|
||||
if err != nil && !IsParseErr(err) {
|
||||
return nil, internalErr(err)
|
||||
}
|
||||
parseErr = err
|
||||
err := pkg.Validate()
|
||||
if err != nil && !IsParseErr(err) {
|
||||
return nil, internalErr(err)
|
||||
}
|
||||
|
||||
opt := applyOptFromOptFns(opts...)
|
||||
parseErr = err
|
||||
|
||||
if len(opt.EnvRefs) > 0 {
|
||||
err := pkg.applyEnvRefs(opt.EnvRefs)
|
||||
|
|
@ -694,7 +704,8 @@ func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts
|
|||
s.dryRunTasks(ctx, orgID, state.mTasks)
|
||||
s.dryRunTelegrafConfigs(ctx, orgID, state.mTelegrafs)
|
||||
s.dryRunVariables(ctx, orgID, state.mVariables)
|
||||
err := s.dryRunNotificationEndpoints(ctx, orgID, state.mEndpoints)
|
||||
|
||||
err = s.dryRunNotificationEndpoints(ctx, orgID, state.mEndpoints)
|
||||
if err != nil {
|
||||
return nil, ierrors.Wrap(err, "failed to dry run notification endpoints")
|
||||
}
|
||||
|
|
@ -1119,19 +1130,29 @@ func applyOptFromOptFns(opts ...ApplyOptFn) ApplyOpt {
|
|||
// in its entirety. If a failure happens midway then the entire pkg will be rolled back to the state
|
||||
// from before the pkg were applied.
|
||||
func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (sum Summary, diff Diff, e error) {
|
||||
if !pkg.isParsed {
|
||||
if err := pkg.Validate(); err != nil {
|
||||
return Summary{}, Diff{}, failedValidationErr(err)
|
||||
opt := applyOptFromOptFns(opts...)
|
||||
|
||||
if opt.StackID != 0 {
|
||||
remotePkgs, err := s.getStackRemotePackages(ctx, opt.StackID)
|
||||
if err != nil {
|
||||
return Summary{}, Diff{}, err
|
||||
}
|
||||
|
||||
pkg, err = Combine(append(remotePkgs, pkg), ValidWithoutResources())
|
||||
if err != nil {
|
||||
return Summary{}, Diff{}, err
|
||||
}
|
||||
}
|
||||
|
||||
opt := applyOptFromOptFns(opts...)
|
||||
if err := pkg.Validate(ValidWithoutResources()); err != nil {
|
||||
return Summary{}, Diff{}, failedValidationErr(err)
|
||||
}
|
||||
|
||||
if err := pkg.applyEnvRefs(opt.EnvRefs); err != nil {
|
||||
return Summary{}, Diff{}, failedValidationErr(err)
|
||||
}
|
||||
|
||||
state, err := s.dryRun(ctx, orgID, pkg, opts...)
|
||||
state, err := s.dryRun(ctx, orgID, pkg, opt)
|
||||
if err != nil {
|
||||
return Summary{}, Diff{}, err
|
||||
}
|
||||
|
|
@ -2525,6 +2546,47 @@ func (s *Service) rollbackLabelMappings(ctx context.Context, mappings []stateLab
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) getStackRemotePackages(ctx context.Context, stackID influxdb.ID) ([]*Pkg, error) {
|
||||
stack, err := s.store.ReadStackByID(ctx, stackID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var remotePkgs []*Pkg
|
||||
for _, rawURL := range stack.URLs {
|
||||
u, err := url.Parse(rawURL)
|
||||
if err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInternal,
|
||||
Msg: "failed to parse url",
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
encoding := EncodingSource
|
||||
switch path.Ext(u.String()) {
|
||||
case ".jsonnet":
|
||||
encoding = EncodingJsonnet
|
||||
case ".json":
|
||||
encoding = EncodingJSON
|
||||
case ".yaml", ".yml":
|
||||
encoding = EncodingYAML
|
||||
}
|
||||
|
||||
readerFn := FromHTTPRequest(u.String())
|
||||
if u.Scheme == "file" {
|
||||
readerFn = FromFile(u.Path)
|
||||
}
|
||||
|
||||
pkg, err := Parse(encoding, readerFn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remotePkgs = append(remotePkgs, pkg)
|
||||
}
|
||||
return remotePkgs, nil
|
||||
}
|
||||
|
||||
func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.ID, state *stateCoordinator) error {
|
||||
stack, err := s.store.ReadStackByID(ctx, stackID)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -1332,9 +1332,10 @@ func TestService(t *testing.T) {
|
|||
sum, _, err := svc.Apply(context.TODO(), orgID, 0, pkg)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, sum.TelegrafConfigs, 1)
|
||||
require.Len(t, sum.TelegrafConfigs, 2)
|
||||
assert.Equal(t, "display name", sum.TelegrafConfigs[0].TelegrafConfig.Name)
|
||||
assert.Equal(t, "desc", sum.TelegrafConfigs[0].TelegrafConfig.Description)
|
||||
assert.Equal(t, "tele_2", sum.TelegrafConfigs[1].TelegrafConfig.Name)
|
||||
})
|
||||
})
|
||||
|
||||
|
|
@ -1356,13 +1357,6 @@ func TestService(t *testing.T) {
|
|||
return nil
|
||||
}
|
||||
|
||||
stubTele := &telegraf{
|
||||
identity: identity{
|
||||
name: &references{val: "stub"},
|
||||
},
|
||||
}
|
||||
pkg.mTelegrafs[stubTele.PkgName()] = stubTele
|
||||
|
||||
svc := newTestService(WithTelegrafSVC(fakeTeleSVC))
|
||||
|
||||
orgID := influxdb.ID(9000)
|
||||
|
|
|
|||
|
|
@ -34,6 +34,16 @@
|
|||
],
|
||||
"config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. 
It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:9999\n urls = [\"http://localhost:9999\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]"
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "influxdata.com/v2alpha1",
|
||||
"kind": "Telegraf",
|
||||
"metadata": {
|
||||
"name": "tele_2"
|
||||
},
|
||||
"spec": {
|
||||
"config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. 
It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:9999\n urls = [\"http://localhost:9999\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
|
|
|
|||
|
|
@ -114,3 +114,58 @@ spec:
|
|||
[[inputs.system]]
|
||||
|
||||
|
||||
---
|
||||
apiVersion: influxdata.com/v2alpha1
|
||||
kind: Telegraf
|
||||
metadata:
|
||||
name: tele_2
|
||||
spec:
|
||||
config: |
|
||||
# Configuration for telegraf agent
|
||||
[agent]
|
||||
## Default data collection interval for all inputs
|
||||
interval = "10s"
|
||||
## Rounds collection interval to 'interval'
|
||||
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||
round_interval = true
|
||||
|
||||
## Telegraf will send metrics to outputs in batches of at most
|
||||
## metric_batch_size metrics.
|
||||
## This controls the size of writes that Telegraf sends to output plugins.
|
||||
metric_batch_size = 1000
|
||||
|
||||
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
|
||||
## output, and will flush this buffer on a successful write. Oldest metrics
|
||||
## are dropped first when this buffer fills.
|
||||
## This buffer only fills when writes fail to output plugin(s).
|
||||
metric_buffer_limit = 10000
|
||||
|
||||
## Collection jitter is used to jitter the collection by a random amount.
|
||||
## Each plugin will sleep for a random time within jitter before collecting.
|
||||
## This can be used to avoid many plugins querying things like sysfs at the
|
||||
## same time, which can have a measurable effect on the system.
|
||||
collection_jitter = "0s"
|
||||
|
||||
## Default flushing interval for all outputs. Maximum flush_interval will be
|
||||
## flush_interval + flush_jitter
|
||||
flush_interval = "10s"
|
||||
## Jitter the flush interval by a random amount. This is primarily to avoid
|
||||
## large write spikes for users running a large number of telegraf instances.
|
||||
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||
flush_jitter = "0s"
|
||||
|
||||
## By default or when set to "0s", precision will be set to the same
|
||||
## timestamp order as the collection interval, with the maximum being 1s.
|
||||
## ie, when interval = "10s", precision will be "1s"
|
||||
## when interval = "250ms", precision will be "1ms"
|
||||
## Precision will NOT be used for service inputs. It is up to each individual
|
||||
## service input to set the timestamp at the appropriate precision.
|
||||
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
|
||||
precision = ""
|
||||
debug = false
|
||||
quiet = false
|
||||
logfile = ""
|
||||
|
||||
hostname = ""
|
||||
omit_hostname = false
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
# This script regenerates the flag list and checks for differences to ensure flags
|
||||
# have been regenerated in case of changes to flags.yml.
|
||||
|
||||
make flags
|
||||
|
||||
if ! git --no-pager diff --exit-code -- ./kit/feature/list.go
|
||||
then
|
||||
echo "Differences detected! Run 'make flags' to regenerate feature flag list."
|
||||
exit 1
|
||||
fi
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
/*
|
||||
The tenant domain encapsulates all the storage critical metadata services:
|
||||
User
|
||||
Organization
|
||||
Bucket
|
||||
URM's
|
||||
|
||||
These services are the cornerstone of all other metadata services. The intent is to have
|
||||
a single location for all tenant related code. THis should facilitate faster bug resolution and
|
||||
allow us to make changes to this service without effecting any dependant services.
|
||||
|
||||
When a new request for the tenant service comes in it should follow this pattern:
|
||||
1 http_server_resource - this is where the request is parsed and rejected if the client didn't send
|
||||
the right information
|
||||
2 middleware_resource_auth - We now confirm the user that generated the request has sufficient permission
|
||||
to accomplish this task, in some cases we adjust the request if the user is without the correct permissions
|
||||
3 middleware_resource_metrics - Track RED metrics for this request
|
||||
4 middleware_resource_logging - add logging around request duration and status.
|
||||
5 service_resource - When a request reaches the service we verify the content for compatibility with the existing dataset,
|
||||
for instance if a resource has a "orgID" we will ensure the organization exists
|
||||
6 storage_resource - Basic CRUD actions for the system.
|
||||
|
||||
This pattern of api -> middleware -> service -> basic crud helps us to break down the responsibilities into digestible
|
||||
chunks and allows us to swap in or out any pieces we need depending on the situation. Currently the storage layer is using
|
||||
a kv store but by breaking the crud actions into its own independent set of concerns we allow ourselves to move away from kv
|
||||
if the need arises without having to be concerned about messing up some other pieces of logic.
|
||||
*/
|
||||
package tenant
|
||||
|
|
@ -99,7 +99,7 @@ func (s *BucketClientService) FindBuckets(ctx context.Context, filter influxdb.B
|
|||
span, _ := tracing.StartSpanFromContext(ctx)
|
||||
defer span.Finish()
|
||||
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.OrganizationID != nil {
|
||||
params = append(params, [2]string{"orgID", filter.OrganizationID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -65,7 +65,7 @@ func (s *OrgClientService) FindOrganizations(ctx context.Context, filter influxd
|
|||
span, _ := tracing.StartSpanFromContext(ctx)
|
||||
defer span.Finish()
|
||||
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.Name != nil {
|
||||
span.LogKV("org", *filter.Name)
|
||||
params = append(params, [2]string{"org", *filter.Name})
|
||||
|
|
|
|||
|
|
@ -72,7 +72,7 @@ func (s *UserClientService) FindUser(ctx context.Context, filter influxdb.UserFi
|
|||
// FindUsers returns a list of users that match filter and the total count of matching users.
|
||||
// Additional options provide pagination & sorting.
|
||||
func (s *UserClientService) FindUsers(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) {
|
||||
params := findOptionParams(opt...)
|
||||
params := influxdb.FindOptionParams(opt...)
|
||||
if filter.ID != nil {
|
||||
params = append(params, [2]string{"id", filter.ID.String()})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,9 +2,7 @@ package tenant
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-chi/chi"
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
|
|
@ -15,76 +13,6 @@ type tenantContext string
|
|||
|
||||
const ctxOrgKey tenantContext = "orgID"
|
||||
|
||||
// findOptionsParams converts find options into a paramiterizated key pair
|
||||
func findOptionParams(opts ...influxdb.FindOptions) [][2]string {
|
||||
var out [][2]string
|
||||
for _, o := range opts {
|
||||
for k, vals := range o.QueryParams() {
|
||||
for _, v := range vals {
|
||||
out = append(out, [2]string{k, v})
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// decodeFindOptions returns a FindOptions decoded from http request.
|
||||
func decodeFindOptions(r *http.Request) (*influxdb.FindOptions, error) {
|
||||
opts := &influxdb.FindOptions{}
|
||||
qp := r.URL.Query()
|
||||
|
||||
if offset := qp.Get("offset"); offset != "" {
|
||||
o, err := strconv.Atoi(offset)
|
||||
if err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: "offset is invalid",
|
||||
}
|
||||
}
|
||||
|
||||
opts.Offset = o
|
||||
}
|
||||
|
||||
if limit := qp.Get("limit"); limit != "" {
|
||||
l, err := strconv.Atoi(limit)
|
||||
if err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: "limit is invalid",
|
||||
}
|
||||
}
|
||||
|
||||
if l < 1 || l > influxdb.MaxPageSize {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: fmt.Sprintf("limit must be between 1 and %d", influxdb.MaxPageSize),
|
||||
}
|
||||
}
|
||||
|
||||
opts.Limit = l
|
||||
} else {
|
||||
opts.Limit = influxdb.DefaultPageSize
|
||||
}
|
||||
|
||||
if sortBy := qp.Get("sortBy"); sortBy != "" {
|
||||
opts.SortBy = sortBy
|
||||
}
|
||||
|
||||
if descending := qp.Get("descending"); descending != "" {
|
||||
desc, err := strconv.ParseBool(descending)
|
||||
if err != nil {
|
||||
return nil, &influxdb.Error{
|
||||
Code: influxdb.EInvalid,
|
||||
Msg: "descending is invalid",
|
||||
}
|
||||
}
|
||||
|
||||
opts.Descending = desc
|
||||
}
|
||||
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// ValidResource make sure a resource exists when a sub system needs to be mounted to an api
|
||||
func ValidResource(api *kit.API, lookupOrgByResourceID func(context.Context, influxdb.ID) (influxdb.ID, error)) kit.Middleware {
|
||||
return func(next http.Handler) http.Handler {
|
||||
|
|
|
|||
|
|
@ -394,7 +394,7 @@ func decodeGetBucketsRequest(r *http.Request) (*getBucketsRequest, error) {
|
|||
qp := r.URL.Query()
|
||||
req := &getBucketsRequest{}
|
||||
|
||||
opts, err := decodeFindOptions(r)
|
||||
opts, err := influxdb.DecodeFindOptions(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
267
testing/auth.go
267
testing/auth.go
|
|
@ -8,6 +8,8 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/influxdata/influxdb/v2"
|
||||
platform "github.com/influxdata/influxdb/v2"
|
||||
"github.com/influxdata/influxdb/v2/mock"
|
||||
)
|
||||
|
|
@ -20,6 +22,8 @@ const (
|
|||
)
|
||||
|
||||
var authorizationCmpOptions = cmp.Options{
|
||||
cmpopts.EquateEmpty(),
|
||||
cmpopts.IgnoreFields(influxdb.Authorization{}, "ID", "Token", "CreatedAt", "UpdatedAt"),
|
||||
cmp.Comparer(func(x, y []byte) bool {
|
||||
return bytes.Equal(x, y)
|
||||
}),
|
||||
|
|
@ -32,6 +36,18 @@ var authorizationCmpOptions = cmp.Options{
|
|||
}),
|
||||
}
|
||||
|
||||
type AuthTestOpts struct {
|
||||
WithoutFindByToken bool
|
||||
}
|
||||
|
||||
// WithoutFindByToken allows the Find By Token test case to be skipped when we are testing the http server,
|
||||
// since finding by token is not supported by the HTTP API
|
||||
func WithoutFindByToken() AuthTestOpts {
|
||||
return AuthTestOpts{
|
||||
WithoutFindByToken: true,
|
||||
}
|
||||
}
|
||||
|
||||
// AuthorizationFields will include the IDGenerator, and authorizations
|
||||
type AuthorizationFields struct {
|
||||
IDGenerator platform.IDGenerator
|
||||
|
|
@ -44,8 +60,9 @@ type AuthorizationFields struct {
|
|||
|
||||
// AuthorizationService tests all the service functions.
|
||||
func AuthorizationService(
|
||||
init func(AuthorizationFields, *testing.T) (platform.AuthorizationService, string, func()), t *testing.T,
|
||||
) {
|
||||
init func(AuthorizationFields, *testing.T) (platform.AuthorizationService, string, func()),
|
||||
t *testing.T,
|
||||
opts ...AuthTestOpts) {
|
||||
tests := []struct {
|
||||
name string
|
||||
fn func(init func(AuthorizationFields, *testing.T) (platform.AuthorizationService, string, func()),
|
||||
|
|
@ -77,6 +94,9 @@ func AuthorizationService(
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if tt.name == "FindAuthorizationByToken" && len(opts) > 0 && opts[0].WithoutFindByToken {
|
||||
continue
|
||||
}
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.fn(init, t)
|
||||
})
|
||||
|
|
@ -172,73 +192,6 @@ func CreateAuthorization(
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if auth ID supplied it is ignored",
|
||||
fields: AuthorizationFields{
|
||||
IDGenerator: mock.NewIDGenerator(authTwoID, t),
|
||||
TimeGenerator: &mock.TimeGenerator{
|
||||
FakeValue: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
},
|
||||
TokenGenerator: &mock.TokenGenerator{
|
||||
TokenFn: func() (string, error) {
|
||||
return "rand", nil
|
||||
},
|
||||
},
|
||||
Users: []*platform.User{
|
||||
{
|
||||
Name: "cooluser",
|
||||
ID: MustIDBase16(userOneID),
|
||||
},
|
||||
},
|
||||
Orgs: []*platform.Organization{
|
||||
{
|
||||
Name: "o1",
|
||||
ID: MustIDBase16(orgOneID),
|
||||
},
|
||||
},
|
||||
Authorizations: []*platform.Authorization{
|
||||
{
|
||||
ID: MustIDBase16(authOneID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "supersecret",
|
||||
Permissions: allUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
authorization: &platform.Authorization{
|
||||
ID: platform.ID(1), // Should be ignored.
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Permissions: createUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
},
|
||||
wants: wants{
|
||||
authorizations: []*platform.Authorization{
|
||||
{
|
||||
ID: MustIDBase16(authOneID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Status: platform.Active,
|
||||
Token: "supersecret",
|
||||
Permissions: allUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
{
|
||||
ID: MustIDBase16(authTwoID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand",
|
||||
Status: platform.Active,
|
||||
Permissions: createUsersPermission(MustIDBase16(orgOneID)),
|
||||
CRUDLog: platform.CRUDLog{
|
||||
CreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
UpdatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "providing a non existing user is invalid",
|
||||
fields: AuthorizationFields{
|
||||
|
|
@ -387,18 +340,14 @@ func FindAuthorizationByID(
|
|||
init func(AuthorizationFields, *testing.T) (platform.AuthorizationService, string, func()),
|
||||
t *testing.T,
|
||||
) {
|
||||
type args struct {
|
||||
id platform.ID
|
||||
}
|
||||
type wants struct {
|
||||
err error
|
||||
authorization *platform.Authorization
|
||||
err error
|
||||
authorizations []*platform.Authorization
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields AuthorizationFields
|
||||
args args
|
||||
wants wants
|
||||
}{
|
||||
{
|
||||
|
|
@ -437,17 +386,24 @@ func FindAuthorizationByID(
|
|||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
id: MustIDBase16(authTwoID),
|
||||
},
|
||||
wants: wants{
|
||||
authorization: &platform.Authorization{
|
||||
ID: MustIDBase16(authTwoID),
|
||||
UserID: MustIDBase16(userTwoID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Status: platform.Active,
|
||||
Token: "rand2",
|
||||
Permissions: createUsersPermission(MustIDBase16(orgOneID)),
|
||||
authorizations: []*platform.Authorization{
|
||||
{
|
||||
ID: MustIDBase16(authOneID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand1",
|
||||
Status: "active",
|
||||
Permissions: allUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
{
|
||||
ID: MustIDBase16(authTwoID),
|
||||
UserID: MustIDBase16(userTwoID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand2",
|
||||
Status: "active",
|
||||
Permissions: createUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -459,12 +415,15 @@ func FindAuthorizationByID(
|
|||
defer done()
|
||||
ctx := context.Background()
|
||||
|
||||
authorization, err := s.FindAuthorizationByID(ctx, tt.args.id)
|
||||
diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t)
|
||||
for i := range tt.fields.Authorizations {
|
||||
authorization, err := s.FindAuthorizationByID(ctx, tt.fields.Authorizations[i].ID)
|
||||
diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t)
|
||||
|
||||
if diff := cmp.Diff(authorization, tt.wants.authorization, authorizationCmpOptions...); diff != "" {
|
||||
t.Errorf("authorization is different -got/+want\ndiff %s", diff)
|
||||
if diff := cmp.Diff(authorization, tt.wants.authorizations[i], authorizationCmpOptions...); diff != "" {
|
||||
t.Errorf("authorization is different -got/+want\ndiff %s", diff)
|
||||
}
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -820,6 +779,70 @@ func FindAuthorizationByToken(
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "find authorization by token",
|
||||
fields: AuthorizationFields{
|
||||
Users: []*platform.User{
|
||||
{
|
||||
Name: "cooluser",
|
||||
ID: MustIDBase16(userOneID),
|
||||
},
|
||||
{
|
||||
Name: "regularuser",
|
||||
ID: MustIDBase16(userTwoID),
|
||||
},
|
||||
},
|
||||
Orgs: []*platform.Organization{
|
||||
{
|
||||
Name: "o1",
|
||||
ID: MustIDBase16(orgOneID),
|
||||
},
|
||||
},
|
||||
Authorizations: []*platform.Authorization{
|
||||
{
|
||||
ID: MustIDBase16(authZeroID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand1",
|
||||
Permissions: deleteUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
{
|
||||
ID: MustIDBase16(authTwoID),
|
||||
UserID: MustIDBase16(userTwoID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand2",
|
||||
Permissions: createUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
{
|
||||
ID: MustIDBase16(authOneID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand3",
|
||||
Permissions: allUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
{
|
||||
ID: MustIDBase16(authThreeID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand4",
|
||||
Permissions: deleteUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
token: "rand2",
|
||||
},
|
||||
wants: wants{
|
||||
authorization: &platform.Authorization{
|
||||
ID: MustIDBase16(authTwoID),
|
||||
UserID: MustIDBase16(userTwoID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand2",
|
||||
Status: platform.Active,
|
||||
Permissions: createUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
|
@ -1131,72 +1154,6 @@ func FindAuthorizations(
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "find authorization by token",
|
||||
fields: AuthorizationFields{
|
||||
Users: []*platform.User{
|
||||
{
|
||||
Name: "cooluser",
|
||||
ID: MustIDBase16(userOneID),
|
||||
},
|
||||
{
|
||||
Name: "regularuser",
|
||||
ID: MustIDBase16(userTwoID),
|
||||
},
|
||||
},
|
||||
Orgs: []*platform.Organization{
|
||||
{
|
||||
Name: "o1",
|
||||
ID: MustIDBase16(orgOneID),
|
||||
},
|
||||
},
|
||||
Authorizations: []*platform.Authorization{
|
||||
{
|
||||
ID: MustIDBase16(authZeroID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand1",
|
||||
Permissions: deleteUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
{
|
||||
ID: MustIDBase16(authOneID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand1",
|
||||
Permissions: allUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
{
|
||||
ID: MustIDBase16(authTwoID),
|
||||
UserID: MustIDBase16(userTwoID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand2",
|
||||
Permissions: createUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
{
|
||||
ID: MustIDBase16(authThreeID),
|
||||
UserID: MustIDBase16(userOneID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand3",
|
||||
Permissions: deleteUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
token: "rand2",
|
||||
},
|
||||
wants: wants{
|
||||
authorizations: []*platform.Authorization{
|
||||
{
|
||||
ID: MustIDBase16(authTwoID),
|
||||
UserID: MustIDBase16(userTwoID),
|
||||
OrgID: MustIDBase16(orgOneID),
|
||||
Token: "rand2",
|
||||
Status: platform.Active,
|
||||
Permissions: createUsersPermission(MustIDBase16(orgOneID)),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
|
|
|||
|
|
@ -51,7 +51,7 @@ type BucketSvcOpts struct {
|
|||
NoHooks bool
|
||||
}
|
||||
|
||||
// WithoutHooks allows the test suite to be run without being able to hook into the underlieing implementation of theservice
|
||||
// WithoutHooks allows the test suite to be run without being able to hook into the underlying implementation of theservice
|
||||
// in most cases that is to remove specific id generation controls.
|
||||
func WithoutHooks() BucketSvcOpts {
|
||||
return BucketSvcOpts{
|
||||
|
|
|
|||
|
|
@ -79,6 +79,10 @@ func UserService(
|
|||
name: "UpdateUser",
|
||||
fn: UpdateUser,
|
||||
},
|
||||
{
|
||||
name: "UpdateUser_IndexHygiene",
|
||||
fn: UpdateUser_IndexHygiene,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
|
@ -977,3 +981,53 @@ func UpdateUser(
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func UpdateUser_IndexHygiene(
|
||||
init func(UserFields, *testing.T) (platform.UserService, string, func()),
|
||||
t *testing.T,
|
||||
) {
|
||||
|
||||
oldUserName := "user1"
|
||||
users := UserFields{
|
||||
Users: []*platform.User{
|
||||
{
|
||||
ID: MustIDBase16(userOneID),
|
||||
Name: oldUserName,
|
||||
Status: "active",
|
||||
},
|
||||
},
|
||||
}
|
||||
s, _, done := init(users, t)
|
||||
defer done()
|
||||
|
||||
newUserName := "user1Updated"
|
||||
upd := platform.UserUpdate{
|
||||
Name: &newUserName,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
_, err := s.UpdateUser(ctx, MustIDBase16(userOneID), upd)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Ensure we can find the user with the new name.
|
||||
_, nerr := s.FindUser(ctx, platform.UserFilter{
|
||||
Name: &newUserName,
|
||||
})
|
||||
if nerr != nil {
|
||||
t.Error("unexpected error when finding user by name", nerr)
|
||||
}
|
||||
|
||||
// Ensure we cannot find a user with the old name. The index used when
|
||||
// searching by name should have been cleared out by the UpdateUser
|
||||
// operation.
|
||||
_, oerr := s.FindUser(ctx, platform.UserFilter{
|
||||
Name: &oldUserName,
|
||||
})
|
||||
ErrorsEqual(t, oerr, &platform.Error{
|
||||
Code: platform.ENotFound,
|
||||
Op: platform.OpFindUser,
|
||||
Msg: "user not found",
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ describe('The Query Builder', () => {
|
|||
// wait for the notification since it's highly animated
|
||||
// we close the notification since it contains the name of the dashboard and interfers with cy.contains
|
||||
cy.wait(250)
|
||||
cy.get('.notification-close').click()
|
||||
cy.get('.cf-notification--dismiss').click()
|
||||
cy.wait(250)
|
||||
|
||||
// force a click on the hidden dashboard nav item (cypress can't do the hover)
|
||||
|
|
|
|||
|
|
@ -122,7 +122,7 @@
|
|||
"ts-loader": "^5.3.3",
|
||||
"ts-node": "^8.3.0",
|
||||
"tslib": "^1.9.0",
|
||||
"typescript": "3.6.4",
|
||||
"typescript": "3.8.3",
|
||||
"webpack": "^4.41.4",
|
||||
"webpack-cli": "^3.3.10",
|
||||
"webpack-dev-server": "^3.7.2",
|
||||
|
|
|
|||
|
|
@ -33,16 +33,10 @@ import {
|
|||
checkBucketLimits as checkBucketLimitsAction,
|
||||
LimitStatus,
|
||||
} from 'src/cloud/actions/limits'
|
||||
import {
|
||||
getDemoDataBuckets as getDemoDataBucketsAction,
|
||||
getDemoDataBucketMembership as getDemoDataBucketMembershipAction,
|
||||
} from 'src/cloud/actions/demodata'
|
||||
|
||||
// Utils
|
||||
import {getNewDemoBuckets} from 'src/cloud/selectors/demodata'
|
||||
import {extractBucketLimits} from 'src/cloud/utils/limits'
|
||||
import {getAll} from 'src/resources/selectors'
|
||||
import {isFlagEnabled} from 'src/shared/utils/featureFlag'
|
||||
import {SortTypes} from 'src/shared/utils/sort'
|
||||
|
||||
// Types
|
||||
|
|
@ -52,7 +46,6 @@ import {BucketSortKey} from 'src/shared/components/resource_sort_dropdown/genera
|
|||
interface StateProps {
|
||||
buckets: Bucket[]
|
||||
limitStatus: LimitStatus
|
||||
demoDataBuckets: Bucket[]
|
||||
}
|
||||
|
||||
interface DispatchProps {
|
||||
|
|
@ -60,8 +53,6 @@ interface DispatchProps {
|
|||
updateBucket: typeof updateBucket
|
||||
deleteBucket: typeof deleteBucket
|
||||
checkBucketLimits: typeof checkBucketLimitsAction
|
||||
getDemoDataBuckets: typeof getDemoDataBucketsAction
|
||||
getDemoDataBucketMembership: typeof getDemoDataBucketMembershipAction
|
||||
}
|
||||
|
||||
interface State {
|
||||
|
|
@ -90,18 +81,10 @@ class BucketsTab extends PureComponent<Props, State> {
|
|||
|
||||
public componentDidMount() {
|
||||
this.props.checkBucketLimits()
|
||||
if (isFlagEnabled('demodata')) {
|
||||
this.props.getDemoDataBuckets()
|
||||
}
|
||||
}
|
||||
|
||||
public render() {
|
||||
const {
|
||||
buckets,
|
||||
limitStatus,
|
||||
demoDataBuckets,
|
||||
getDemoDataBucketMembership,
|
||||
} = this.props
|
||||
const {buckets, limitStatus} = this.props
|
||||
const {searchTerm, sortKey, sortDirection, sortType} = this.state
|
||||
|
||||
const leftHeaderItems = (
|
||||
|
|
@ -125,12 +108,7 @@ class BucketsTab extends PureComponent<Props, State> {
|
|||
const rightHeaderItems = (
|
||||
<>
|
||||
<FeatureFlag name="demodata">
|
||||
{demoDataBuckets.length > 0 && (
|
||||
<DemoDataDropdown
|
||||
buckets={demoDataBuckets}
|
||||
getMembership={getDemoDataBucketMembership}
|
||||
/>
|
||||
)}
|
||||
<DemoDataDropdown />
|
||||
</FeatureFlag>
|
||||
<CreateBucketButton />
|
||||
</>
|
||||
|
|
@ -229,7 +207,6 @@ const mstp = (state: AppState): StateProps => {
|
|||
return {
|
||||
buckets,
|
||||
limitStatus: extractBucketLimits(state.cloud.limits),
|
||||
demoDataBuckets: getNewDemoBuckets(state, buckets),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -238,8 +215,6 @@ const mdtp: DispatchProps = {
|
|||
updateBucket,
|
||||
deleteBucket,
|
||||
checkBucketLimits: checkBucketLimitsAction,
|
||||
getDemoDataBuckets: getDemoDataBucketsAction,
|
||||
getDemoDataBucketMembership: getDemoDataBucketMembershipAction,
|
||||
}
|
||||
|
||||
export default connect<StateProps, DispatchProps, {}>(
|
||||
|
|
|
|||
|
|
@ -0,0 +1,20 @@
|
|||
.demodata-dropdown--item-contents {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.demodata-dropdown--item-icon {
|
||||
margin-right: $cf-marg-b;
|
||||
opacity: 0;
|
||||
}
|
||||
|
||||
.demodata-dropdown--item__added,
|
||||
.demodata-dropdown--item__added:hover {
|
||||
.demodata-dropdown--item-icon {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
cursor: default;
|
||||
background: none !important;
|
||||
color: $c-honeydew !important;
|
||||
}
|
||||
|
|
@ -1,31 +1,97 @@
|
|||
// Libraries
|
||||
import React, {FC} from 'react'
|
||||
import _ from 'lodash'
|
||||
import React, {FC, useEffect} from 'react'
|
||||
import {connect} from 'react-redux'
|
||||
import {get, sortBy} from 'lodash'
|
||||
|
||||
// Utils
|
||||
import {getAll} from 'src/resources/selectors'
|
||||
|
||||
// Actions
|
||||
import {
|
||||
getDemoDataBucketMembership as getDemoDataBucketMembershipAction,
|
||||
getDemoDataBuckets as getDemoDataBucketsAction,
|
||||
} from 'src/cloud/actions/demodata'
|
||||
|
||||
// Components
|
||||
import {IconFont, ComponentColor, Dropdown} from '@influxdata/clockface'
|
||||
import {ComponentColor, Dropdown, Icon, IconFont} from '@influxdata/clockface'
|
||||
|
||||
// Types
|
||||
import {Bucket} from 'src/types'
|
||||
import {getDemoDataBucketMembership} from 'src/cloud/actions/demodata'
|
||||
import {AppState, Bucket, ResourceType} from 'src/types'
|
||||
|
||||
interface Props {
|
||||
buckets: Bucket[]
|
||||
getMembership: typeof getDemoDataBucketMembership
|
||||
interface StateProps {
|
||||
ownBuckets: Bucket[]
|
||||
demoDataBuckets: Bucket[]
|
||||
}
|
||||
|
||||
const DemoDataDropdown: FC<Props> = ({buckets, getMembership}) => {
|
||||
const demoDataItems = buckets.map(b => (
|
||||
<Dropdown.Item
|
||||
testID={`dropdown-item--demodata-${b.name}`}
|
||||
id={b.id}
|
||||
key={b.id}
|
||||
value={b}
|
||||
onClick={getMembership}
|
||||
>
|
||||
{b.name}
|
||||
</Dropdown.Item>
|
||||
))
|
||||
interface DispatchProps {
|
||||
getDemoDataBucketMembership: typeof getDemoDataBucketMembershipAction
|
||||
getDemoDataBuckets: typeof getDemoDataBucketsAction
|
||||
}
|
||||
|
||||
type Props = DispatchProps & StateProps
|
||||
|
||||
const DemoDataDropdown: FC<Props> = ({
|
||||
ownBuckets,
|
||||
demoDataBuckets,
|
||||
getDemoDataBucketMembership,
|
||||
getDemoDataBuckets,
|
||||
}) => {
|
||||
useEffect(() => {
|
||||
getDemoDataBuckets()
|
||||
}, [])
|
||||
|
||||
if (!demoDataBuckets.length) {
|
||||
return null
|
||||
}
|
||||
|
||||
const ownBucketNames = ownBuckets.map(o => o.name.toLocaleLowerCase())
|
||||
|
||||
const sortedBuckets = sortBy(demoDataBuckets, d => {
|
||||
return d.name.toLocaleLowerCase()
|
||||
})
|
||||
|
||||
const dropdownItems = sortedBuckets.map(b => {
|
||||
if (ownBucketNames.includes(b.name.toLocaleLowerCase())) {
|
||||
return (
|
||||
<Dropdown.Item
|
||||
testID={`dropdown-item--demodata-${b.name}`}
|
||||
className="demodata-dropdown--item__added"
|
||||
id={b.id}
|
||||
key={b.id}
|
||||
value={b}
|
||||
selected={true}
|
||||
>
|
||||
<div className="demodata-dropdown--item-contents">
|
||||
<Icon
|
||||
glyph={IconFont.Checkmark}
|
||||
className="demodata-dropdown--item-icon"
|
||||
/>
|
||||
{b.name}
|
||||
</div>
|
||||
</Dropdown.Item>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<Dropdown.Item
|
||||
testID={`dropdown-item--demodata-${b.name}`}
|
||||
className="demodata-dropdown--item"
|
||||
id={b.id}
|
||||
key={b.id}
|
||||
value={b}
|
||||
onClick={getDemoDataBucketMembership}
|
||||
selected={false}
|
||||
>
|
||||
<div className="demodata-dropdown--item-contents">
|
||||
<Icon
|
||||
glyph={IconFont.Checkmark}
|
||||
className="demodata-dropdown--item-icon"
|
||||
/>
|
||||
{b.name}
|
||||
</div>
|
||||
</Dropdown.Item>
|
||||
)
|
||||
})
|
||||
|
||||
return (
|
||||
<Dropdown
|
||||
|
|
@ -43,10 +109,23 @@ const DemoDataDropdown: FC<Props> = ({buckets, getMembership}) => {
|
|||
</Dropdown.Button>
|
||||
)}
|
||||
menu={onCollapse => (
|
||||
<Dropdown.Menu onCollapse={onCollapse}>{demoDataItems}</Dropdown.Menu>
|
||||
<Dropdown.Menu onCollapse={onCollapse}>{dropdownItems}</Dropdown.Menu>
|
||||
)}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export default DemoDataDropdown
|
||||
const mstp = (state: AppState): StateProps => ({
|
||||
ownBuckets: getAll<Bucket>(state, ResourceType.Buckets),
|
||||
demoDataBuckets: get(state, 'cloud.demoData.buckets', []) as Bucket[],
|
||||
})
|
||||
|
||||
const mdtp: DispatchProps = {
|
||||
getDemoDataBucketMembership: getDemoDataBucketMembershipAction,
|
||||
getDemoDataBuckets: getDemoDataBucketsAction,
|
||||
}
|
||||
|
||||
export default connect<StateProps, DispatchProps, {}>(
|
||||
mstp,
|
||||
mdtp
|
||||
)(DemoDataDropdown)
|
||||
|
|
|
|||
|
|
@ -3,21 +3,26 @@ import {
|
|||
getDemoDataBuckets as getDemoDataBucketsAJAX,
|
||||
getDemoDataBucketMembership as getDemoDataBucketMembershipAJAX,
|
||||
deleteDemoDataBucketMembership as deleteDemoDataBucketMembershipAJAX,
|
||||
getNormalizedDemoDataBucket,
|
||||
} from 'src/cloud/apis/demodata'
|
||||
import {createDashboardFromTemplate} from 'src/templates/api'
|
||||
import {deleteDashboard, getBucket} from 'src/client'
|
||||
import {getBucket} from 'src/client'
|
||||
|
||||
// Actions
|
||||
import {getDashboards} from 'src/dashboards/actions/thunks'
|
||||
import {addBucket, removeBucket} from 'src/buckets/actions/creators'
|
||||
import {notify} from 'src/shared/actions/notifications'
|
||||
|
||||
// Selectors
|
||||
import {getOrg} from 'src/organizations/selectors'
|
||||
import {getAll} from 'src/resources/selectors/getAll'
|
||||
import {normalize} from 'normalizr'
|
||||
import {getAll} from 'src/resources/selectors'
|
||||
|
||||
// Constants
|
||||
import {DemoDataTemplates, DemoDataDashboards} from 'src/cloud/constants'
|
||||
import {
|
||||
demoDataAddBucketFailed,
|
||||
demoDataDeleteBucketFailed,
|
||||
demoDataSucceeded,
|
||||
} from 'src/shared/copy/notifications'
|
||||
|
||||
// Types
|
||||
import {
|
||||
|
|
@ -25,11 +30,10 @@ import {
|
|||
RemoteDataState,
|
||||
GetState,
|
||||
DemoBucket,
|
||||
Dashboard,
|
||||
ResourceType,
|
||||
BucketEntities,
|
||||
Dashboard,
|
||||
} from 'src/types'
|
||||
import {bucketSchema} from 'src/schemas'
|
||||
import {reportError} from 'src/shared/utils/errors'
|
||||
|
||||
export type Actions =
|
||||
| ReturnType<typeof setDemoDataStatus>
|
||||
|
|
@ -57,88 +61,74 @@ export const getDemoDataBuckets = () => async (
|
|||
if (status === RemoteDataState.NotStarted) {
|
||||
dispatch(setDemoDataStatus(RemoteDataState.Loading))
|
||||
}
|
||||
|
||||
try {
|
||||
const buckets = await getDemoDataBucketsAJAX()
|
||||
|
||||
dispatch(setDemoDataStatus(RemoteDataState.Done))
|
||||
dispatch(setDemoDataBuckets(buckets))
|
||||
} catch (error) {
|
||||
console.error(error)
|
||||
|
||||
reportError(error, {
|
||||
name: 'getDemoDataBuckets function',
|
||||
})
|
||||
|
||||
dispatch(setDemoDataStatus(RemoteDataState.Error))
|
||||
}
|
||||
}
|
||||
|
||||
export const getDemoDataBucketMembership = (bucket: DemoBucket) => async (
|
||||
dispatch,
|
||||
getState: GetState
|
||||
) => {
|
||||
export const getDemoDataBucketMembership = ({
|
||||
name: bucketName,
|
||||
id: bucketID,
|
||||
}) => async (dispatch, getState: GetState) => {
|
||||
const state = getState()
|
||||
|
||||
const {
|
||||
me: {id: userID},
|
||||
} = state
|
||||
|
||||
const {id: orgID} = getOrg(state)
|
||||
|
||||
try {
|
||||
await getDemoDataBucketMembershipAJAX(bucket.id, userID)
|
||||
await getDemoDataBucketMembershipAJAX(bucketID, userID)
|
||||
|
||||
const template = await DemoDataTemplates[bucket.name]
|
||||
if (template) {
|
||||
await createDashboardFromTemplate(template, orgID)
|
||||
} else {
|
||||
const normalizedBucket = await getNormalizedDemoDataBucket(bucketID)
|
||||
dispatch(addBucket(normalizedBucket))
|
||||
|
||||
const template = await DemoDataTemplates[bucketName]
|
||||
if (!template) {
|
||||
throw new Error(
|
||||
`Could not find template for demodata bucket ${bucket.name}`
|
||||
`Could not find dashboard template for demodata bucket ${bucketName}`
|
||||
)
|
||||
}
|
||||
|
||||
const resp = await getBucket({bucketID: bucket.id})
|
||||
|
||||
if (resp.status !== 200) {
|
||||
throw new Error('Request for demo data bucket membership did not succeed')
|
||||
}
|
||||
|
||||
const newBucket = {
|
||||
...resp.data,
|
||||
type: 'demodata' as 'demodata',
|
||||
labels: [],
|
||||
} as DemoBucket
|
||||
|
||||
const normalizedBucket = normalize<Bucket, BucketEntities, string>(
|
||||
newBucket,
|
||||
bucketSchema
|
||||
)
|
||||
|
||||
dispatch(addBucket(normalizedBucket))
|
||||
|
||||
// TODO: notify success and error appropriately
|
||||
} catch (error) {
|
||||
console.error(error)
|
||||
}
|
||||
}
|
||||
export const deleteDemoDataDashboard = (dashboardName: string) => async (
|
||||
dispatch,
|
||||
getState: GetState
|
||||
) => {
|
||||
try {
|
||||
await dispatch(getDashboards())
|
||||
|
||||
await createDashboardFromTemplate(template, orgID)
|
||||
const updatedState = getState()
|
||||
|
||||
const ddDashboard = getAll(updatedState, ResourceType.Dashboards).find(
|
||||
d => {
|
||||
d.name === dashboardName
|
||||
}
|
||||
) as Dashboard
|
||||
const allDashboards = getAll<Dashboard>(
|
||||
updatedState,
|
||||
ResourceType.Dashboards
|
||||
)
|
||||
|
||||
if (ddDashboard) {
|
||||
const deleteResp = await deleteDashboard({
|
||||
dashboardID: ddDashboard.id,
|
||||
})
|
||||
if (deleteResp.status !== 204) {
|
||||
throw new Error(deleteResp.data.message)
|
||||
}
|
||||
const createdDashboard = allDashboards.find(
|
||||
d => d.name === DemoDataDashboards[bucketName]
|
||||
)
|
||||
|
||||
if (!createdDashboard) {
|
||||
throw new Error(
|
||||
`Could not create dashboard for demodata bucket ${bucketName}`
|
||||
)
|
||||
}
|
||||
|
||||
const url = `/orgs/${orgID}/dashboards/${createdDashboard.id}`
|
||||
|
||||
dispatch(notify(demoDataSucceeded(bucketName, url)))
|
||||
} catch (error) {
|
||||
throw new Error(error)
|
||||
dispatch(notify(demoDataAddBucketFailed(error)))
|
||||
|
||||
reportError(error, {
|
||||
name: 'getDemoDataBucketMembership function',
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -160,18 +150,11 @@ export const deleteDemoDataBucketMembership = (bucket: DemoBucket) => async (
|
|||
}
|
||||
|
||||
dispatch(removeBucket(bucket.id))
|
||||
|
||||
const demoDashboardName = DemoDataDashboards[bucket.name]
|
||||
|
||||
if (!demoDashboardName) {
|
||||
throw new Error(
|
||||
`Could not find dashboard name for demo data bucket ${bucket.name}`
|
||||
)
|
||||
}
|
||||
|
||||
dispatch(deleteDemoDataDashboard(demoDashboardName))
|
||||
// TODO: notify for success and error appropriately
|
||||
} catch (error) {
|
||||
console.error(error)
|
||||
dispatch(notify(demoDataDeleteBucketFailed(bucket.name, error)))
|
||||
|
||||
reportError(error, {
|
||||
name: 'deleteDemoDataBucketMembership function',
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,56 +1,50 @@
|
|||
// Libraries
|
||||
import {get} from 'lodash'
|
||||
import {getBuckets} from 'src/client'
|
||||
import {getBuckets, getBucket} from 'src/client'
|
||||
import AJAX from 'src/utils/ajax'
|
||||
|
||||
//Utils
|
||||
import {isFlagEnabled} from 'src/shared/utils/featureFlag'
|
||||
|
||||
//Types
|
||||
import {Bucket, DemoBucket} from 'src/types'
|
||||
import {Bucket, DemoBucket, BucketEntities} from 'src/types'
|
||||
import {LIMIT} from 'src/resources/constants'
|
||||
import {normalize} from 'normalizr'
|
||||
import {bucketSchema} from 'src/schemas'
|
||||
import {NormalizedSchema} from 'normalizr'
|
||||
|
||||
const baseURL = '/api/v2/experimental/sampledata'
|
||||
|
||||
export const getDemoDataBuckets = async (): Promise<Bucket[]> => {
|
||||
try {
|
||||
const {data} = await AJAX({
|
||||
method: 'GET',
|
||||
url: `${baseURL}/buckets`,
|
||||
})
|
||||
//todo (deniz) convert to fetch
|
||||
const {data} = await AJAX({
|
||||
method: 'GET',
|
||||
url: `${baseURL}/buckets`,
|
||||
})
|
||||
|
||||
// if sampledata endpoints are not available in a cluster
|
||||
// gateway responds with a list of links where 'buckets' field is a string
|
||||
const buckets = get(data, 'buckets', false)
|
||||
if (!Array.isArray(buckets)) {
|
||||
throw new Error('Could not reach demodata endpoint')
|
||||
}
|
||||
|
||||
return buckets.filter(b => b.type == 'user') as Bucket[] // remove returned _tasks and _monitoring buckets
|
||||
} catch (error) {
|
||||
console.error(error)
|
||||
throw error
|
||||
// if sampledata endpoints are not available in a cluster
|
||||
// gateway responds with a list of links where 'buckets' field is a string
|
||||
const buckets = get(data, 'buckets', null)
|
||||
if (!Array.isArray(buckets)) {
|
||||
throw new Error('Could not reach demodata endpoint')
|
||||
}
|
||||
|
||||
return buckets.filter(b => b.type == 'user') as Bucket[] // remove returned _tasks and _monitoring buckets
|
||||
}
|
||||
|
||||
export const getDemoDataBucketMembership = async (
|
||||
bucketID: string,
|
||||
userID: string
|
||||
) => {
|
||||
try {
|
||||
const response = await AJAX({
|
||||
method: 'POST',
|
||||
url: `${baseURL}/buckets/${bucketID}/members`,
|
||||
data: {userID},
|
||||
})
|
||||
const response = await AJAX({
|
||||
method: 'POST',
|
||||
url: `${baseURL}/buckets/${bucketID}/members`,
|
||||
data: {userID},
|
||||
})
|
||||
|
||||
if (response.status === '200') {
|
||||
// a failed or successful membership POST to sampledata should return 204
|
||||
throw new Error('Could not reach demodata endpoint')
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(error)
|
||||
throw error
|
||||
if (response.status === '200') {
|
||||
// a failed or successful membership POST to sampledata should return 204
|
||||
throw new Error('Could not reach demodata endpoint')
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -106,3 +100,30 @@ export const fetchDemoDataBuckets = async (): Promise<Bucket[]> => {
|
|||
return [] // demodata bucket fetching errors should not effect regular bucket fetching
|
||||
}
|
||||
}
|
||||
|
||||
export const getNormalizedDemoDataBucket = async (
|
||||
bucketID: string
|
||||
): Promise<NormalizedSchema<BucketEntities, string>> => {
|
||||
const resp = await getBucket({bucketID})
|
||||
|
||||
if (resp.status !== 200) {
|
||||
throw new Error(
|
||||
`Request for demo data bucket membership did not succeed: ${
|
||||
resp.data.message
|
||||
}`
|
||||
)
|
||||
}
|
||||
|
||||
const newBucket = {
|
||||
...resp.data,
|
||||
type: 'demodata' as 'demodata',
|
||||
labels: [],
|
||||
} as DemoBucket
|
||||
|
||||
const normalizedBucket = normalize<Bucket, BucketEntities, string>(
|
||||
newBucket,
|
||||
bucketSchema
|
||||
)
|
||||
|
||||
return normalizedBucket
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,20 +0,0 @@
|
|||
import {get, differenceBy, sortBy} from 'lodash'
|
||||
import {AppState, Bucket, DemoBucket} from 'src/types'
|
||||
|
||||
export const getNewDemoBuckets = (state: AppState, ownBuckets: Bucket[]) => {
|
||||
const demoDataBuckets = get(
|
||||
state,
|
||||
'cloud.demoData.buckets',
|
||||
[]
|
||||
) as DemoBucket[]
|
||||
|
||||
const newDemoDataBuckets = differenceBy(
|
||||
demoDataBuckets,
|
||||
ownBuckets,
|
||||
b => b.id
|
||||
)
|
||||
|
||||
return sortBy(newDemoDataBuckets, d => {
|
||||
return d.name.toLocaleLowerCase()
|
||||
})
|
||||
}
|
||||
|
|
@ -1,11 +1,11 @@
|
|||
// Libraries
|
||||
import qs from 'qs'
|
||||
import {replace, RouterAction} from 'react-router-redux'
|
||||
import {Dispatch, Action} from 'redux'
|
||||
import {Dispatch} from 'redux'
|
||||
import {get, pickBy} from 'lodash'
|
||||
|
||||
// Actions
|
||||
import {notify} from 'src/shared/actions/notifications'
|
||||
import {notify, Action as NotifyAction} from 'src/shared/actions/notifications'
|
||||
|
||||
// Utils
|
||||
import {stripPrefix} from 'src/utils/basepath'
|
||||
|
|
@ -104,7 +104,7 @@ export const updateQueryVars = varsObj => {
|
|||
}
|
||||
|
||||
export const updateTimeRangeFromQueryParams = (dashboardID: string) => (
|
||||
dispatch: Dispatch<Action>,
|
||||
dispatch: Dispatch<Action | NotifyAction>,
|
||||
getState
|
||||
): void => {
|
||||
const {ranges} = getState()
|
||||
|
|
|
|||
|
|
@ -22,11 +22,12 @@ import {
|
|||
} from 'src/dashboards/actions/notes'
|
||||
|
||||
// Types
|
||||
import {AppState} from 'src/types'
|
||||
import {AppState, NoteEditorMode} from 'src/types'
|
||||
|
||||
interface StateProps {
|
||||
note: string
|
||||
showNoteWhenEmpty: boolean
|
||||
hasQuery: boolean
|
||||
}
|
||||
|
||||
interface DispatchProps {
|
||||
|
|
@ -79,7 +80,11 @@ class NoteEditor extends PureComponent<Props, State> {
|
|||
}
|
||||
|
||||
private get visibilityToggle(): JSX.Element {
|
||||
const {showNoteWhenEmpty, onToggleShowNoteWhenEmpty} = this.props
|
||||
const {hasQuery, showNoteWhenEmpty, onToggleShowNoteWhenEmpty} = this.props
|
||||
|
||||
if (!hasQuery) {
|
||||
return null
|
||||
}
|
||||
|
||||
return (
|
||||
<FlexBox
|
||||
|
|
@ -109,9 +114,14 @@ class NoteEditor extends PureComponent<Props, State> {
|
|||
}
|
||||
|
||||
const mstp = (state: AppState) => {
|
||||
const {note, isPreviewing, showNoteWhenEmpty} = state.noteEditor
|
||||
const {note, mode, viewID, isPreviewing, showNoteWhenEmpty} = state.noteEditor
|
||||
const hasQuery =
|
||||
mode === NoteEditorMode.Editing &&
|
||||
viewID &&
|
||||
state.resources.views.byID[viewID] &&
|
||||
state.resources.views.byID[viewID].properties.type !== 'markdown'
|
||||
|
||||
return {note, isPreviewing, showNoteWhenEmpty}
|
||||
return {note, hasQuery, isPreviewing, showNoteWhenEmpty}
|
||||
}
|
||||
|
||||
const mdtp = {
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ export interface NoteEditorState {
|
|||
note: string
|
||||
showNoteWhenEmpty: boolean
|
||||
isPreviewing: boolean
|
||||
viewID?: string
|
||||
}
|
||||
|
||||
const initialState = (): NoteEditorState => ({
|
||||
|
|
|
|||
|
|
@ -61,11 +61,10 @@ export function comments(editor: EditorType) {
|
|||
}
|
||||
|
||||
export function submit(editor: EditorType, submitFn: () => any) {
|
||||
editor.onKeyUp(evt => {
|
||||
const {ctrlKey, code} = evt
|
||||
|
||||
if (ctrlKey && code === 'Enter') {
|
||||
editor.addCommand(
|
||||
window.monaco.KeyMod.CtrlCmd | window.monaco.KeyCode.Enter,
|
||||
() => {
|
||||
submitFn()
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,11 +21,11 @@ export const reducer = (state: RuleState, action: Action) => {
|
|||
let newState: RuleState = state
|
||||
|
||||
if (schedule === 'every') {
|
||||
newState = omit(state, 'cron')
|
||||
newState = omit(state, 'cron') as NotificationRuleDraft
|
||||
}
|
||||
|
||||
if (schedule === 'cron') {
|
||||
newState = omit(state, 'every')
|
||||
newState = omit(state, 'every') as NotificationRuleDraft
|
||||
}
|
||||
|
||||
return {...newState, [schedule]: ''}
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import auth0js, {WebAuth} from 'auth0-js'
|
|||
// Components
|
||||
import {LoginForm} from 'src/onboarding/components/LoginForm'
|
||||
import {SocialButton} from 'src/shared/components/SocialButton'
|
||||
import {GoogleLogo, GithubLogo} from 'src/clientLibraries/graphics'
|
||||
import {GoogleLogo} from 'src/clientLibraries/graphics'
|
||||
|
||||
// Types
|
||||
import {Auth0Connection, FormFieldValidation} from 'src/types'
|
||||
|
|
@ -127,14 +127,6 @@ class LoginPageContents extends PureComponent<DispatchProps> {
|
|||
>
|
||||
<GoogleLogo className="signup-icon" />
|
||||
</SocialButton>
|
||||
<SocialButton
|
||||
buttonText="Github"
|
||||
handleClick={() => {
|
||||
this.handleSocialClick(Auth0Connection.Github)
|
||||
}}
|
||||
>
|
||||
<GithubLogo className="signup-icon" />
|
||||
</SocialButton>
|
||||
</FlexBox>
|
||||
</Grid.Row>
|
||||
</Grid>
|
||||
|
|
|
|||
|
|
@ -104,7 +104,7 @@ export class OnboardingWizardPage extends PureComponent<Props, State> {
|
|||
loading={this.state.loading}
|
||||
spinnerComponent={<TechnoSpinner />}
|
||||
>
|
||||
<Notifications inPresentationMode={true} />
|
||||
<Notifications />
|
||||
<OnboardingWizard
|
||||
onDecrementCurrentStepIndex={this.handleDecrementStepIndex}
|
||||
onIncrementCurrentStepIndex={this.handleIncrementStepIndex}
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ const EventMarker: FC<Props> = ({xScale, xDomain, events, xFormatter}) => {
|
|||
let triggerRect: DOMRect = null
|
||||
|
||||
if (trigger.current) {
|
||||
triggerRect = trigger.current.getBoundingClientRect() as DOMRect
|
||||
triggerRect = trigger.current.getBoundingClientRect()
|
||||
}
|
||||
|
||||
const {time} = events[0]
|
||||
|
|
|
|||
|
|
@ -20,6 +20,8 @@ $cell--header-button-active-color: $c-pool;
|
|||
.cell--view {
|
||||
flex: 1 0 0;
|
||||
position: relative;
|
||||
overflow: hidden;
|
||||
height: 100%;
|
||||
|
||||
> .single-stat {
|
||||
border-radius: $radius;
|
||||
|
|
|
|||
|
|
@ -0,0 +1,10 @@
|
|||
.notification--button {
|
||||
display: inline-block;
|
||||
margin: $cf-marg-a;
|
||||
margin-right: 0;
|
||||
}
|
||||
|
||||
.notification--message {
|
||||
display: inline-block;
|
||||
margin-right: $cf-marg-a;
|
||||
}
|
||||
|
|
@ -1,137 +0,0 @@
|
|||
import React, {Component, CSSProperties} from 'react'
|
||||
import {connect} from 'react-redux'
|
||||
import {bindActionCreators} from 'redux'
|
||||
import {Notification as NotificationType} from 'src/types/notifications'
|
||||
|
||||
import classnames from 'classnames'
|
||||
|
||||
import {dismissNotification as dismissNotificationAction} from 'src/shared/actions/notifications'
|
||||
|
||||
import {NOTIFICATION_TRANSITION} from 'src/shared/constants/index'
|
||||
import {ErrorHandling} from 'src/shared/decorators/errors'
|
||||
|
||||
interface Props {
|
||||
notification: NotificationType
|
||||
dismissNotification: (id: string) => void
|
||||
}
|
||||
|
||||
interface State {
|
||||
opacity: number
|
||||
height: number
|
||||
dismissed: boolean
|
||||
}
|
||||
|
||||
@ErrorHandling
|
||||
class Notification extends Component<Props, State> {
|
||||
private notificationRef: HTMLElement
|
||||
private dismissalTimer: number
|
||||
private deletionTimer: number
|
||||
|
||||
constructor(props) {
|
||||
super(props)
|
||||
|
||||
this.state = {
|
||||
opacity: 1,
|
||||
height: 0,
|
||||
dismissed: false,
|
||||
}
|
||||
}
|
||||
|
||||
public componentDidMount() {
|
||||
const {
|
||||
notification: {duration},
|
||||
} = this.props
|
||||
|
||||
this.updateHeight()
|
||||
|
||||
if (duration >= 0) {
|
||||
// Automatically dismiss notification after duration prop
|
||||
this.dismissalTimer = window.setTimeout(this.handleDismiss, duration)
|
||||
}
|
||||
}
|
||||
|
||||
public componentWillUnmount() {
|
||||
clearTimeout(this.dismissalTimer)
|
||||
clearTimeout(this.deletionTimer)
|
||||
}
|
||||
|
||||
public render() {
|
||||
const {
|
||||
notification: {message, icon},
|
||||
} = this.props
|
||||
|
||||
return (
|
||||
<div className={this.containerClassname} style={this.notificationStyle}>
|
||||
<div
|
||||
className={this.notificationClassname}
|
||||
ref={this.handleNotificationRef}
|
||||
data-testid={this.dataTestID}
|
||||
>
|
||||
<span className={`icon ${icon}`} />
|
||||
<div className="notification-message">{message}</div>
|
||||
<button className="notification-close" onClick={this.handleDismiss} />
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
private get dataTestID(): string {
|
||||
const {style} = this.props.notification
|
||||
return `notification-${style}`
|
||||
}
|
||||
|
||||
private get notificationClassname(): string {
|
||||
const {
|
||||
notification: {style},
|
||||
} = this.props
|
||||
|
||||
return `notification notification-${style}`
|
||||
}
|
||||
|
||||
private get containerClassname(): string {
|
||||
const {height, dismissed} = this.state
|
||||
|
||||
return classnames('notification-container', {
|
||||
show: !!height,
|
||||
'notification-dismissed': dismissed,
|
||||
})
|
||||
}
|
||||
|
||||
private get notificationStyle(): CSSProperties {
|
||||
return {height: '100%'}
|
||||
}
|
||||
|
||||
private updateHeight = (): void => {
|
||||
if (this.notificationRef) {
|
||||
const {height} = this.notificationRef.getBoundingClientRect()
|
||||
this.setState({height})
|
||||
}
|
||||
}
|
||||
|
||||
private handleDismiss = (): void => {
|
||||
const {
|
||||
notification: {id},
|
||||
dismissNotification,
|
||||
} = this.props
|
||||
|
||||
this.setState({dismissed: true})
|
||||
this.deletionTimer = window.setTimeout(
|
||||
() => dismissNotification(id),
|
||||
NOTIFICATION_TRANSITION
|
||||
)
|
||||
}
|
||||
|
||||
private handleNotificationRef = (ref: HTMLElement): void => {
|
||||
this.notificationRef = ref
|
||||
this.updateHeight()
|
||||
}
|
||||
}
|
||||
|
||||
const mapDispatchToProps = dispatch => ({
|
||||
dismissNotification: bindActionCreators(dismissNotificationAction, dispatch),
|
||||
})
|
||||
|
||||
export default connect(
|
||||
null,
|
||||
mapDispatchToProps
|
||||
)(Notification)
|
||||
|
|
@ -1,196 +0,0 @@
|
|||
/*
|
||||
Notifications
|
||||
-----------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
$notification-margin: 12px;
|
||||
|
||||
.notification-center {
|
||||
position: fixed;
|
||||
right: $notification-margin;
|
||||
width: 360px;
|
||||
top: $chronograf-page-header-height + $notification-margin;
|
||||
z-index: 9999;
|
||||
}
|
||||
|
||||
.notification-center__presentation-mode {
|
||||
@extend .notification-center;
|
||||
top: $notification-margin;
|
||||
}
|
||||
|
||||
.notification {
|
||||
border-style: solid;
|
||||
border-width: 0;
|
||||
border-radius: $ix-radius;
|
||||
position: relative;
|
||||
padding: 12px 40px;
|
||||
@extend %no-user-select;
|
||||
transform: translateX(105%);
|
||||
transition: transform 0.25s ease 0.25s, opacity 0.25s ease;
|
||||
|
||||
> span.icon {
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 20px;
|
||||
transform: translate(-50%, -50%);
|
||||
font-size: $ix-text-base-2;
|
||||
}
|
||||
}
|
||||
|
||||
.notification-message {
|
||||
&:first-letter {
|
||||
text-transform: uppercase;
|
||||
}
|
||||
font-weight: 500;
|
||||
font-size: 14px;
|
||||
line-height: 16px;
|
||||
}
|
||||
|
||||
.notification-close {
|
||||
outline: none;
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
border: 0;
|
||||
background-color: transparent;
|
||||
transform: translateY(-50%);
|
||||
right: ($ix-marg-c - $ix-marg-a);
|
||||
font-size: $ix-text-base;
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
opacity: 0.25;
|
||||
transition: opacity 0.25s ease;
|
||||
|
||||
&:before,
|
||||
&:after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
width: 16px;
|
||||
height: 2px;
|
||||
border-radius: 1px;
|
||||
background-color: $g20-white;
|
||||
}
|
||||
&:before {
|
||||
transform: translate(-50%, -50%) rotate(-45deg);
|
||||
}
|
||||
&:after {
|
||||
transform: translate(-50%, -50%) rotate(45deg);
|
||||
}
|
||||
|
||||
&:hover {
|
||||
cursor: pointer;
|
||||
opacity: 1;
|
||||
}
|
||||
}
|
||||
.notification-container {
|
||||
overflow: hidden;
|
||||
height: 0;
|
||||
margin-bottom: $ix-marg-a;
|
||||
transition: height 0.25s ease;
|
||||
|
||||
&.show .notification {
|
||||
transform: translateX(0);
|
||||
}
|
||||
&.notification-dismissed {
|
||||
height: 0 !important;
|
||||
.notification {
|
||||
opacity: 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Mixin for Alert Themes
|
||||
// ----------------------------------------------------------------------------
|
||||
@mixin notification-styles(
|
||||
$bg-color,
|
||||
$bg-color-2,
|
||||
$text-color,
|
||||
$link-color,
|
||||
$link-hover
|
||||
) {
|
||||
font-size: 16px;
|
||||
|
||||
@include gradient-h($bg-color, $bg-color-2);
|
||||
color: $text-color;
|
||||
|
||||
a:link,
|
||||
a:visited {
|
||||
color: $link-color;
|
||||
font-weight: 700;
|
||||
text-decoration: underline;
|
||||
transition: color 0.25s ease;
|
||||
}
|
||||
a:hover {
|
||||
color: $link-hover;
|
||||
border-color: $link-hover;
|
||||
}
|
||||
span.icon {
|
||||
color: $text-color;
|
||||
}
|
||||
.notification-close:before,
|
||||
.notification-close:after {
|
||||
background-color: $text-color;
|
||||
}
|
||||
}
|
||||
|
||||
// Alert Themes
|
||||
// ----------------------------------------------------------------------------
|
||||
.notification-success {
|
||||
@include notification-styles(
|
||||
$c-rainforest,
|
||||
$c-pool,
|
||||
$g20-white,
|
||||
$c-wasabi,
|
||||
$g20-white
|
||||
);
|
||||
}
|
||||
.notification-primary {
|
||||
@include notification-styles(
|
||||
$c-pool,
|
||||
$c-ocean,
|
||||
$g20-white,
|
||||
$c-neutrino,
|
||||
$g20-white
|
||||
);
|
||||
}
|
||||
.notification-warning {
|
||||
@include notification-styles(
|
||||
$c-star,
|
||||
$c-pool,
|
||||
$g20-white,
|
||||
$c-neutrino,
|
||||
$g20-white
|
||||
);
|
||||
}
|
||||
.notification-error {
|
||||
@include notification-styles(
|
||||
$c-curacao,
|
||||
$c-star,
|
||||
$g20-white,
|
||||
$c-marmelade,
|
||||
$g20-white
|
||||
);
|
||||
}
|
||||
.notification-info {
|
||||
@include notification-styles(
|
||||
$g20-white,
|
||||
$g16-pearl,
|
||||
$g8-storm,
|
||||
$ix-link-default,
|
||||
$ix-link-default-hover
|
||||
);
|
||||
}
|
||||
.notification-dark {
|
||||
@include notification-styles(
|
||||
$c-sapphire,
|
||||
$c-shadow,
|
||||
$c-moonstone,
|
||||
$ix-link-default,
|
||||
$ix-link-default-hover
|
||||
);
|
||||
}
|
||||
|
||||
.endpoint-description--textarea {
|
||||
max-height: 150;
|
||||
}
|
||||
|
|
@ -1,16 +1,42 @@
|
|||
import React, {PureComponent} from 'react'
|
||||
import {Link} from 'react-router'
|
||||
import {connect} from 'react-redux'
|
||||
import {Notification as NotificationType} from 'src/types/notifications'
|
||||
import Notification from 'src/shared/components/notifications/Notification'
|
||||
import {get} from 'lodash'
|
||||
|
||||
interface Props {
|
||||
//Actions
|
||||
import {dismissNotification as dismissNotificationAction} from 'src/shared/actions/notifications'
|
||||
|
||||
import {Notification, ComponentSize, Gradients} from '@influxdata/clockface'
|
||||
|
||||
//Types
|
||||
import {
|
||||
Notification as NotificationType,
|
||||
NotificationStyle,
|
||||
} from 'src/types/notifications'
|
||||
|
||||
interface StateProps {
|
||||
notifications: NotificationType[]
|
||||
inPresentationMode: boolean
|
||||
}
|
||||
|
||||
interface DispatchProps {
|
||||
dismissNotification: typeof dismissNotificationAction
|
||||
}
|
||||
|
||||
type Props = StateProps & DispatchProps
|
||||
|
||||
const matchGradientToColor = (style: NotificationStyle): Gradients => {
|
||||
const converter = {
|
||||
[NotificationStyle.Primary]: Gradients.Primary,
|
||||
[NotificationStyle.Warning]: Gradients.WarningLight,
|
||||
[NotificationStyle.Success]: Gradients.HotelBreakfast,
|
||||
[NotificationStyle.Error]: Gradients.DangerDark,
|
||||
[NotificationStyle.Info]: Gradients.DefaultLight,
|
||||
}
|
||||
return get(converter, style, Gradients.DefaultLight)
|
||||
}
|
||||
|
||||
class Notifications extends PureComponent<Props> {
|
||||
public static defaultProps = {
|
||||
inPresentationMode: false,
|
||||
notifications: [],
|
||||
}
|
||||
|
||||
|
|
@ -18,36 +44,56 @@ class Notifications extends PureComponent<Props> {
|
|||
const {notifications} = this.props
|
||||
|
||||
return (
|
||||
<div className={this.className}>
|
||||
{notifications.map(n => (
|
||||
<Notification key={n.id} notification={n} />
|
||||
))}
|
||||
</div>
|
||||
<>
|
||||
{notifications.map(
|
||||
({id, style, icon, duration, message, link, linkText}) => {
|
||||
const gradient = matchGradientToColor(style)
|
||||
|
||||
let button
|
||||
|
||||
if (link && linkText) {
|
||||
button = (
|
||||
<Link
|
||||
to={link}
|
||||
className="notification--button cf-button cf-button-xs cf-button-default"
|
||||
>
|
||||
{linkText}
|
||||
</Link>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<Notification
|
||||
key={id}
|
||||
id={id}
|
||||
icon={icon}
|
||||
duration={duration}
|
||||
size={ComponentSize.ExtraSmall}
|
||||
gradient={gradient}
|
||||
onTimeout={this.props.dismissNotification}
|
||||
onDismiss={this.props.dismissNotification}
|
||||
testID={`notification-${style}`}
|
||||
>
|
||||
<span className="notification--message">{message}</span>
|
||||
{button}
|
||||
</Notification>
|
||||
)
|
||||
}
|
||||
)}
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
private get className(): string {
|
||||
const {inPresentationMode} = this.props
|
||||
|
||||
if (inPresentationMode) {
|
||||
return 'notification-center__presentation-mode'
|
||||
}
|
||||
|
||||
return 'notification-center'
|
||||
}
|
||||
}
|
||||
|
||||
const mapStateToProps = ({
|
||||
const mapStateToProps = ({notifications}): StateProps => ({
|
||||
notifications,
|
||||
app: {
|
||||
ephemeral: {inPresentationMode},
|
||||
},
|
||||
}): Props => ({
|
||||
notifications,
|
||||
inPresentationMode,
|
||||
})
|
||||
|
||||
const mdtp: DispatchProps = {
|
||||
dismissNotification: dismissNotificationAction,
|
||||
}
|
||||
|
||||
export default connect(
|
||||
mapStateToProps,
|
||||
null
|
||||
mdtp
|
||||
)(Notifications)
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ export const DASHBOARD_LAYOUT_ROW_HEIGHT = 83.5
|
|||
export const NOTIFICATION_TRANSITION = 250
|
||||
export const FIVE_SECONDS = 5000
|
||||
export const TEN_SECONDS = 10000
|
||||
export const INFINITE = -1
|
||||
export const FIFTEEN_SECONDS = 15000
|
||||
|
||||
export const HOMEPAGE_PATHNAME = 'me'
|
||||
|
||||
|
|
|
|||
|
|
@ -2,13 +2,17 @@
|
|||
import {binaryPrefixFormatter} from '@influxdata/giraffe'
|
||||
|
||||
// Types
|
||||
import {Notification} from 'src/types'
|
||||
import {NotificationStyle} from 'src/types/notifications'
|
||||
import {Notification, NotificationStyle} from 'src/types'
|
||||
|
||||
// Constants
|
||||
import {FIVE_SECONDS, TEN_SECONDS, INFINITE} from 'src/shared/constants/index'
|
||||
import {
|
||||
FIVE_SECONDS,
|
||||
TEN_SECONDS,
|
||||
FIFTEEN_SECONDS,
|
||||
} from 'src/shared/constants/index'
|
||||
import {QUICKSTART_SCRAPER_TARGET_URL} from 'src/dataLoaders/constants/pluginConfigs'
|
||||
import {QUICKSTART_DASHBOARD_NAME} from 'src/onboarding/constants/index'
|
||||
import {IconFont} from '@influxdata/clockface'
|
||||
|
||||
const bytesFormatter = binaryPrefixFormatter({
|
||||
suffix: 'B',
|
||||
|
|
@ -23,19 +27,19 @@ type NotificationExcludingMessage = Pick<
|
|||
|
||||
const defaultErrorNotification: NotificationExcludingMessage = {
|
||||
style: NotificationStyle.Error,
|
||||
icon: 'alert-triangle',
|
||||
icon: IconFont.AlertTriangle,
|
||||
duration: TEN_SECONDS,
|
||||
}
|
||||
|
||||
const defaultSuccessNotification: NotificationExcludingMessage = {
|
||||
style: NotificationStyle.Success,
|
||||
icon: 'checkmark',
|
||||
icon: IconFont.Checkmark,
|
||||
duration: FIVE_SECONDS,
|
||||
}
|
||||
|
||||
const defaultDeletionNotification: NotificationExcludingMessage = {
|
||||
style: NotificationStyle.Primary,
|
||||
icon: 'trash',
|
||||
icon: IconFont.Trash,
|
||||
duration: FIVE_SECONDS,
|
||||
}
|
||||
|
||||
|
|
@ -44,8 +48,7 @@ const defaultDeletionNotification: NotificationExcludingMessage = {
|
|||
|
||||
export const newVersion = (version: string): Notification => ({
|
||||
style: NotificationStyle.Info,
|
||||
icon: 'cubo-uniform',
|
||||
duration: INFINITE,
|
||||
icon: IconFont.Cubouniform,
|
||||
message: `Welcome to the latest Chronograf${version}. Local settings cleared.`,
|
||||
})
|
||||
|
||||
|
|
@ -56,21 +59,20 @@ export const loadLocalSettingsFailed = (error: string): Notification => ({
|
|||
|
||||
export const presentationMode = (): Notification => ({
|
||||
style: NotificationStyle.Primary,
|
||||
icon: 'expand-b',
|
||||
icon: IconFont.ExpandB,
|
||||
duration: 7500,
|
||||
message: 'Press ESC to exit Presentation Mode.',
|
||||
})
|
||||
|
||||
export const sessionTimedOut = (): Notification => ({
|
||||
style: NotificationStyle.Primary,
|
||||
icon: 'triangle',
|
||||
duration: INFINITE,
|
||||
icon: IconFont.Triangle,
|
||||
message: 'Your session has timed out. Log in again to continue.',
|
||||
})
|
||||
|
||||
export const resultTooLarge = (bytesRead: number): Notification => ({
|
||||
style: NotificationStyle.Error,
|
||||
icon: 'triangle',
|
||||
icon: IconFont.Triangle,
|
||||
duration: FIVE_SECONDS,
|
||||
message: `Large response truncated to first ${bytesFormatter(bytesRead)}`,
|
||||
})
|
||||
|
|
@ -145,19 +147,19 @@ export const dashboardGetFailed = (
|
|||
error: string
|
||||
): Notification => ({
|
||||
...defaultErrorNotification,
|
||||
icon: 'dash-h',
|
||||
icon: IconFont.DashH,
|
||||
message: `Failed to load dashboard with id "${dashboardID}": ${error}`,
|
||||
})
|
||||
|
||||
export const dashboardUpdateFailed = (): Notification => ({
|
||||
...defaultErrorNotification,
|
||||
icon: 'dash-h',
|
||||
icon: IconFont.DashH,
|
||||
message: 'Could not update dashboard',
|
||||
})
|
||||
|
||||
export const dashboardDeleted = (name: string): Notification => ({
|
||||
...defaultSuccessNotification,
|
||||
icon: 'dash-h',
|
||||
icon: IconFont.DashH,
|
||||
message: `Dashboard ${name} deleted successfully.`,
|
||||
})
|
||||
|
||||
|
|
@ -194,7 +196,7 @@ export const cellAdded = (
|
|||
dashboardName?: string
|
||||
): Notification => ({
|
||||
...defaultSuccessNotification,
|
||||
icon: 'dash-h',
|
||||
icon: IconFont.DashH,
|
||||
message: `Added new cell ${cellName + ' '}to dashboard ${dashboardName}`,
|
||||
})
|
||||
|
||||
|
|
@ -217,7 +219,7 @@ export const cellUpdateFailed = (): Notification => ({
|
|||
|
||||
export const cellDeleted = (): Notification => ({
|
||||
...defaultDeletionNotification,
|
||||
icon: 'dash-h',
|
||||
icon: IconFont.DashH,
|
||||
duration: 1900,
|
||||
message: `Cell deleted from dashboard.`,
|
||||
})
|
||||
|
|
@ -235,7 +237,7 @@ export const removedDashboardLabelFailed = (): Notification => ({
|
|||
// Variables & URL Queries
|
||||
export const invalidTimeRangeValueInURLQuery = (): Notification => ({
|
||||
...defaultErrorNotification,
|
||||
icon: 'cube',
|
||||
icon: IconFont.Cube,
|
||||
message: `Invalid URL query value supplied for lower or upper time range.`,
|
||||
})
|
||||
|
||||
|
|
@ -251,37 +253,37 @@ export const getVariableFailed = (): Notification => ({
|
|||
|
||||
export const createVariableFailed = (error: string): Notification => ({
|
||||
...defaultErrorNotification,
|
||||
icon: 'cube',
|
||||
icon: IconFont.Cube,
|
||||
message: `Failed to create variable: ${error}`,
|
||||
})
|
||||
|
||||
export const createVariableSuccess = (name: string): Notification => ({
|
||||
...defaultSuccessNotification,
|
||||
icon: 'cube',
|
||||
icon: IconFont.Cube,
|
||||
message: `Successfully created new variable: ${name}.`,
|
||||
})
|
||||
|
||||
export const deleteVariableFailed = (error: string): Notification => ({
|
||||
...defaultErrorNotification,
|
||||
icon: 'cube',
|
||||
icon: IconFont.Cube,
|
||||
message: `Failed to delete variable: ${error}`,
|
||||
})
|
||||
|
||||
export const deleteVariableSuccess = (): Notification => ({
|
||||
...defaultSuccessNotification,
|
||||
icon: 'cube',
|
||||
icon: IconFont.Cube,
|
||||
message: 'Successfully deleted the variable',
|
||||
})
|
||||
|
||||
export const updateVariableFailed = (error: string): Notification => ({
|
||||
...defaultErrorNotification,
|
||||
icon: 'cube',
|
||||
icon: IconFont.Cube,
|
||||
message: `Failed to update variable: ${error}`,
|
||||
})
|
||||
|
||||
export const updateVariableSuccess = (name: string): Notification => ({
|
||||
...defaultSuccessNotification,
|
||||
icon: 'cube',
|
||||
icon: IconFont.Cube,
|
||||
message: `Successfully updated variable: ${name}.`,
|
||||
})
|
||||
|
||||
|
|
@ -290,7 +292,7 @@ export const copyToClipboardSuccess = (
|
|||
title: string = ''
|
||||
): Notification => ({
|
||||
...defaultSuccessNotification,
|
||||
icon: 'dash-h',
|
||||
icon: IconFont.Cube,
|
||||
type: 'copyToClipboardSuccess',
|
||||
message: `${title} '${text}' has been copied to clipboard.`,
|
||||
})
|
||||
|
|
@ -448,6 +450,32 @@ export const getBucketFailed = (
|
|||
message: `Failed to fetch bucket with id ${bucketID}: ${error}`,
|
||||
})
|
||||
|
||||
// Demodata buckets
|
||||
|
||||
export const demoDataAddBucketFailed = (error: string): Notification => ({
|
||||
...defaultErrorNotification,
|
||||
message: error,
|
||||
})
|
||||
|
||||
export const demoDataDeleteBucketFailed = (
|
||||
bucketName: string,
|
||||
error: string
|
||||
): Notification => ({
|
||||
...defaultErrorNotification,
|
||||
message: `Failed to delete demo data bucket: ${bucketName}: ${error}`,
|
||||
})
|
||||
|
||||
export const demoDataSucceeded = (
|
||||
bucketName: string,
|
||||
link: string
|
||||
): Notification => ({
|
||||
...defaultSuccessNotification,
|
||||
message: `Successfully added demodata bucket ${bucketName}, and demodata dashboard.`,
|
||||
duration: FIFTEEN_SECONDS,
|
||||
linkText: 'Go to dashboard',
|
||||
link,
|
||||
})
|
||||
|
||||
// Limits
|
||||
export const readWriteCardinalityLimitReached = (
|
||||
message: string
|
||||
|
|
@ -537,7 +565,7 @@ export const taskUpdateSuccess = (): Notification => ({
|
|||
|
||||
export const taskImportFailed = (errorMessage: string): Notification => ({
|
||||
...defaultErrorNotification,
|
||||
duration: INFINITE,
|
||||
duration: undefined,
|
||||
message: `Failed to import Task: ${errorMessage}.`,
|
||||
})
|
||||
|
||||
|
|
|
|||
|
|
@ -6,6 +6,8 @@ import {
|
|||
import {notify, dismissNotification} from 'src/shared/actions/notifications'
|
||||
|
||||
import {FIVE_SECONDS} from 'src/shared/constants/index'
|
||||
|
||||
import {IconFont} from '@influxdata/clockface'
|
||||
import {NotificationStyle} from 'src/types/notifications'
|
||||
|
||||
const notificationID = '000'
|
||||
|
|
@ -15,7 +17,7 @@ const exampleNotification = {
|
|||
style: NotificationStyle.Success,
|
||||
message: 'Hell yeah you are a real notification!',
|
||||
duration: FIVE_SECONDS,
|
||||
icon: 'zap',
|
||||
icon: IconFont.Zap,
|
||||
}
|
||||
|
||||
const exampleNotifications = [exampleNotification]
|
||||
|
|
@ -41,7 +43,7 @@ describe('Shared.Reducers.notifications', () => {
|
|||
style: NotificationStyle.Error,
|
||||
message: 'new notification',
|
||||
duration: FIVE_SECONDS,
|
||||
icon: 'zap',
|
||||
icon: IconFont.Zap,
|
||||
}
|
||||
|
||||
const actual = notificationsReducer(
|
||||
|
|
|
|||
|
|
@ -13,7 +13,6 @@
|
|||
@import 'src/shared/components/ColorDropdown.scss';
|
||||
@import 'src/shared/components/avatar/Avatar.scss';
|
||||
@import 'src/shared/components/tables/TableGraphs.scss';
|
||||
@import 'src/shared/components/notifications/Notifications.scss';
|
||||
@import 'src/shared/components/graph_tips/GraphTips.scss';
|
||||
@import 'src/shared/components/cells/Dashboards.scss';
|
||||
@import 'src/shared/components/code_mirror/CodeMirror.scss';
|
||||
|
|
@ -123,12 +122,13 @@
|
|||
@import 'src/clientLibraries/components/ClientLibraryOverlay.scss';
|
||||
@import 'src/dashboards/components/DashboardsCardGrid.scss';
|
||||
@import 'src/dashboards/components/DashboardLightMode.scss';
|
||||
@import 'src/buckets/components/DemoDataDropdown.scss';
|
||||
@import 'src/shared/components/notifications/Notification.scss';
|
||||
|
||||
// External
|
||||
@import '../../node_modules/@influxdata/react-custom-scrollbars/dist/styles.css';
|
||||
|
||||
|
||||
// TODO: delete this later when it's addressed in Clockface
|
||||
.cf-resource-card {
|
||||
margin-bottom: $cf-border;
|
||||
margin-bottom: $cf-border;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,15 +1,21 @@
|
|||
.editor-shortcuts {
|
||||
width: 250px;
|
||||
|
||||
h5 {
|
||||
margin: 8px 0 12px;
|
||||
}
|
||||
}
|
||||
|
||||
.editor-shortcuts--body {
|
||||
font-size: 14px;
|
||||
line-height: 16px;
|
||||
dt {
|
||||
float: left;
|
||||
padding-right: $ix-marg-a;
|
||||
font-weight: 700;
|
||||
color: $g18-cloud;
|
||||
margin-top: 8px;
|
||||
}
|
||||
dd {
|
||||
white-space: nowrap;
|
||||
display: block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -12,8 +12,10 @@ const EditorShortcutsTooltip: FC = () => {
|
|||
<div className="editor-shortcuts">
|
||||
<h5>Shortcuts</h5>
|
||||
<dl className="editor-shortcuts--body">
|
||||
<dt>Ctl-/:</dt> <dd>Toggle comment for line or lines</dd>
|
||||
<dt>Ctl-Enter:</dt> <dd>Submit Script</dd>
|
||||
<dt>[Ctl or ⌘] + /:</dt>
|
||||
<dd>Toggle comment for line or lines</dd>
|
||||
<dt>[Ctl or ⌘] + [Enter]:</dt>
|
||||
<dd>Submit Script</dd>
|
||||
</dl>
|
||||
</div>
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,22 +31,7 @@ interface DispatchProps {
|
|||
|
||||
type Props = StateProps & DispatchProps
|
||||
|
||||
interface State {
|
||||
didClick: boolean
|
||||
}
|
||||
|
||||
class SubmitQueryButton extends PureComponent<Props, State> {
|
||||
public state: State = {didClick: false}
|
||||
|
||||
public componentDidUpdate(prevProps: Props) {
|
||||
if (
|
||||
prevProps.queryStatus === RemoteDataState.Loading &&
|
||||
this.props.queryStatus === RemoteDataState.Done
|
||||
) {
|
||||
this.setState({didClick: false})
|
||||
}
|
||||
}
|
||||
|
||||
class SubmitQueryButton extends PureComponent<Props> {
|
||||
public render() {
|
||||
return (
|
||||
<Button
|
||||
|
|
@ -62,14 +47,12 @@ class SubmitQueryButton extends PureComponent<Props, State> {
|
|||
|
||||
private get buttonStatus(): ComponentStatus {
|
||||
const {queryStatus, submitButtonDisabled} = this.props
|
||||
const {didClick} = this.state
|
||||
|
||||
if (submitButtonDisabled) {
|
||||
return ComponentStatus.Disabled
|
||||
}
|
||||
|
||||
if (queryStatus === RemoteDataState.Loading && didClick) {
|
||||
// Only show loading state for button if it was just clicked
|
||||
if (queryStatus === RemoteDataState.Loading) {
|
||||
return ComponentStatus.Loading
|
||||
}
|
||||
|
||||
|
|
@ -78,7 +61,6 @@ class SubmitQueryButton extends PureComponent<Props, State> {
|
|||
|
||||
private handleClick = (): void => {
|
||||
this.props.onSubmit()
|
||||
this.setState({didClick: true})
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -9,12 +9,12 @@ import {
|
|||
SMTPNotificationRuleBase,
|
||||
PagerDutyNotificationRuleBase,
|
||||
HTTPNotificationRuleBase,
|
||||
Check as GenCheck,
|
||||
ThresholdCheck as GenThresholdCheck,
|
||||
DeadmanCheck as GenDeadmanCheck,
|
||||
Check as GCheck,
|
||||
ThresholdCheck as GThresholdCheck,
|
||||
DeadmanCheck as GDeadmanCheck,
|
||||
CustomCheck as GenCustomCheck,
|
||||
NotificationRule as GenRule,
|
||||
NotificationEndpoint as GenEndpoint,
|
||||
NotificationRule as GRule,
|
||||
NotificationEndpoint as GEndpoint,
|
||||
TaskStatusType,
|
||||
Threshold,
|
||||
CheckBase as GenCheckBase,
|
||||
|
|
@ -37,7 +37,7 @@ type EndpointOverrides = {
|
|||
labels: string[]
|
||||
}
|
||||
// GenEndpoint is the shape of a NotificationEndpoint from the server -- before any UI specific fields are or modified
|
||||
export type GenEndpoint = GenEndpoint
|
||||
export type GenEndpoint = GEndpoint
|
||||
export type NotificationEndpoint =
|
||||
| (Omit<SlackNotificationEndpoint, 'status' | 'labels'> & EndpointOverrides)
|
||||
| (Omit<PagerDutyNotificationEndpoint, 'status' | 'labels'> &
|
||||
|
|
@ -50,7 +50,7 @@ export type NotificationEndpointBase = Omit<GenEndpointBase, 'labels'> &
|
|||
type RuleOverrides = {status: RemoteDataState; activeStatus: TaskStatusType}
|
||||
|
||||
// GenRule is the shape of a NotificationRule from the server -- before any UI specific fields are added or modified
|
||||
export type GenRule = GenRule
|
||||
export type GenRule = GRule
|
||||
export type NotificationRule = GenRule & RuleOverrides
|
||||
|
||||
export type StatusRuleDraft = WithClientID<StatusRule>
|
||||
|
|
@ -127,9 +127,9 @@ type CheckOverrides = {
|
|||
export type CheckBase = Omit<GenCheckBase, 'status'> & CheckOverrides
|
||||
|
||||
// GenCheck is the shape of a Check from the server -- before UI specific properties are added
|
||||
export type GenCheck = GenCheck
|
||||
export type GenThresholdCheck = GenThresholdCheck
|
||||
export type GenDeadmanCheck = GenDeadmanCheck
|
||||
export type GenCheck = GCheck
|
||||
export type GenThresholdCheck = GThresholdCheck
|
||||
export type GenDeadmanCheck = GDeadmanCheck
|
||||
|
||||
export type ThresholdCheck = Omit<GenThresholdCheck, 'status' | 'labels'> &
|
||||
CheckOverrides
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
import {Label as GenLabel} from 'src/client'
|
||||
import {Label as GLabel} from 'src/client'
|
||||
import {RemoteDataState} from 'src/types'
|
||||
|
||||
// GenLabel is the shape of a Label returned from the server -- before it has
|
||||
// been modified with UI specific fields
|
||||
export type GenLabel = GenLabel
|
||||
export type GenLabel = GLabel
|
||||
export interface Label extends GenLabel {
|
||||
status: RemoteDataState
|
||||
properties: LabelProperties
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue