diff --git a/.circleci/config.yml b/.circleci/config.yml
index 96445e307e..107ba5c9f1 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -5,9 +5,41 @@ jobs:
     steps:
      - run: docker login -u=$QUAY_USER -p=$QUAY_PASS quay.io
      - run: docker run --entrypoint "./run_litmus_tests_oss.sh" -e TEST_LIST=tests_lists/gateway_api_tests.list --net host -v /var/run/docker.sock:/var/run/docker.sock -v ~/project:/Litmus/result quay.io/influxdb/litmus:latest
+     - run:
+         name: Litmus Nightly Tests Success
+         when: on_success
+         command: |
+           curl -X POST https://slack.com/api/chat.postMessage \
+           -H "Authorization: Bearer $SLACK_TOKEN" \
+           -H "Content-type: application/json; charset=utf-8" \
+           --data @<(cat <<EOF
[... source truncated: the Slack message payload and the header of the next file's diff are missing ...]
@@ ... @@ from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, b
 	// Poll for the task to have started and finished.
 	deadline := time.Now().Add(10 * time.Second) // Arbitrary deadline; 10s seems safe for -race on a resource-constrained system.
 	ndrString := time.Unix(ndr, 0).UTC().Format(time.RFC3339)
-	var targetRun platform.Run
+	var targetRun influxdb.Run
 	i := 0
 	for {
 		t.Logf("Looking for created run...")
@@ -122,7 +122,7 @@ from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, b
 		}
 		time.Sleep(5 * time.Millisecond)
-		runs, _, err := be.TaskService().FindRuns(ctx, platform.RunFilter{Org: &org.ID, Task: &created.ID, Limit: 1})
+		runs, _, err := be.TaskService().FindRuns(ctx, influxdb.RunFilter{Org: &org.ID, Task: &created.ID, Limit: 1})
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -219,7 +219,7 @@ from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, b
 	})
 	// now lets see a logs
-	logs, _, err := be.TaskService().FindLogs(ctx, platform.LogFilter{Org: &org.ID, Task: &created.ID, Run: &targetRun.ID})
+	logs, _, err := be.TaskService().FindLogs(ctx, influxdb.LogFilter{Org: &org.ID, Task: &created.ID, Run: &targetRun.ID})
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/docker/influxd/Dockerfile b/docker/influxd/Dockerfile
index c0385ef69b..3c78907c21 100644
--- a/docker/influxd/Dockerfile
+++ b/docker/influxd/Dockerfile
@@ -1,13 +1,19 @@
 FROM debian:stable-slim
-COPY influxd /usr/bin/influxd
-COPY influx /usr/bin/influx
+COPY influx influxd /usr/bin/
 EXPOSE 9999
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    ca-certificates \
-    && rm -rf /var/lib/apt/lists/*
+ENV DEBIAN_FRONTEND noninteractive
 COPY docker/influxd/entrypoint.sh /entrypoint.sh
+
+RUN apt-get update \
+    && apt-get install -y \
+      ca-certificates \
+      tzdata \
+    && apt-get clean autoclean \
+    && apt-get autoremove --yes \
+    && rm -rf /var/lib/{apt,dpkg,cache,log}
+
 ENTRYPOINT ["/entrypoint.sh"]
 CMD ["influxd"]
diff --git a/http/Makefile b/http/Makefile
index 910531c251..50bbe4259d 100644
--- a/http/Makefile
+++ b/http/Makefile
@@ -1,12 +1,8 @@
-# List any generated files here
-TARGETS = ../ui/src/api/api.ts
-# List any source files used to generate the targets here
-SOURCES = cur_swagger.yml
 # List any directories that have their own Makefile here
 SUBDIRS =
 
 # Default target
-all: $(SUBDIRS) $(TARGETS) swagger_gen.go
+all: $(SUBDIRS) swagger_gen.go
 
 # Recurse into subdirs for same make goal
 $(SUBDIRS):
@@ -14,7 +10,6 @@ $(SUBDIRS):
 
 # Clean all targets recursively
 clean: $(SUBDIRS)
-	rm -f $(TARGETS)
 	rm -f swagger_gen.go
 
 swagger_gen.go: swagger.go redoc.go swagger.yml
@@ -23,9 +18,4 @@ swagger_gen.go: swagger.go redoc.go swagger.yml
 
 GO_RUN := env GO111MODULE=on go run
 
-ifndef SKIP_CLIENT
-../ui/src/api/api.ts: $(SOURCES)
-	openapi-generator generate -g typescript-axios -o ../ui/src/api -i cur_swagger.yml
-endif
-
 .PHONY: all clean $(SUBDIRS)
diff --git a/http/api_handler.go b/http/api_handler.go
index 28e3097486..f91232e145 100644
--- a/http/api_handler.go
+++ b/http/api_handler.go
@@ -70,89 +70,69 @@ type APIBackend struct {
 	ChronografService *server.Service
 	ProtoService influxdb.ProtoService
 	OrgLookupService authorizer.OrganizationService
+	ViewService influxdb.ViewService
 }
 
 // NewAPIHandler constructs all api handlers beneath it and returns an APIHandler
 func NewAPIHandler(b *APIBackend) *APIHandler {
 	h := &APIHandler{}
 
-	sessionBackend := NewSessionBackend(b)
-	h.SessionHandler = NewSessionHandler(sessionBackend)
+	internalURM := b.UserResourceMappingService
 	b.UserResourceMappingService = authorizer.NewURMService(b.OrgLookupService, b.UserResourceMappingService)
 
-	h.BucketHandler = NewBucketHandler(b.UserResourceMappingService, b.LabelService, b.UserService)
-	h.BucketHandler.BucketService = authorizer.NewBucketService(b.BucketService)
-	h.BucketHandler.OrganizationService = b.OrganizationService
-	h.BucketHandler.BucketOperationLogService = b.BucketOperationLogService
+	sessionBackend := NewSessionBackend(b)
+	h.SessionHandler = NewSessionHandler(sessionBackend)
 
-	h.LabelHandler = NewLabelHandler()
-	h.LabelHandler.LabelService = b.LabelService
+	bucketBackend := NewBucketBackend(b)
+	bucketBackend.BucketService = authorizer.NewBucketService(b.BucketService)
+	h.BucketHandler = NewBucketHandler(bucketBackend)
 
-	h.OrgHandler = NewOrgHandler(b.UserResourceMappingService, b.LabelService, b.UserService)
-	h.OrgHandler.OrganizationService = authorizer.NewOrgService(b.OrganizationService)
-	h.OrgHandler.OrganizationOperationLogService = b.OrganizationOperationLogService
-	h.OrgHandler.SecretService = b.SecretService
+	orgBackend := NewOrgBackend(b)
+	orgBackend.OrganizationService = authorizer.NewOrgService(b.OrganizationService)
+	h.OrgHandler = NewOrgHandler(orgBackend)
 
-	h.UserHandler = NewUserHandler()
-	h.UserHandler.UserService = authorizer.NewUserService(b.UserService)
-	h.UserHandler.BasicAuthService = b.BasicAuthService
-	h.UserHandler.UserOperationLogService = b.UserOperationLogService
+	userBackend := NewUserBackend(b)
+	userBackend.UserService = authorizer.NewUserService(b.UserService)
+	h.UserHandler = NewUserHandler(userBackend)
 
-	h.DashboardHandler = NewDashboardHandler(b.UserResourceMappingService, b.LabelService, b.UserService)
-	h.DashboardHandler.DashboardService = authorizer.NewDashboardService(b.DashboardService)
-	h.DashboardHandler.DashboardOperationLogService = b.DashboardOperationLogService
+	dashboardBackend := NewDashboardBackend(b)
+	dashboardBackend.DashboardService = authorizer.NewDashboardService(b.DashboardService)
+	h.DashboardHandler = NewDashboardHandler(dashboardBackend)
 
-	h.MacroHandler = NewMacroHandler()
-	h.MacroHandler.MacroService = authorizer.NewMacroService(b.MacroService)
+	macroBackend := NewMacroBackend(b)
+	macroBackend.MacroService = authorizer.NewMacroService(b.MacroService)
+	h.MacroHandler = NewMacroHandler(macroBackend)
 
-	h.AuthorizationHandler = NewAuthorizationHandler(b.UserService)
-	h.AuthorizationHandler.OrganizationService = b.OrganizationService
-	h.AuthorizationHandler.AuthorizationService = authorizer.NewAuthorizationService(b.AuthorizationService)
-	h.AuthorizationHandler.LookupService = b.LookupService
-	h.AuthorizationHandler.Logger = b.Logger.With(zap.String("handler", "auth"))
+	authorizationBackend := NewAuthorizationBackend(b)
+	authorizationBackend.AuthorizationService = authorizer.NewAuthorizationService(b.AuthorizationService)
+	h.AuthorizationHandler = NewAuthorizationHandler(authorizationBackend)
 
-	h.ScraperHandler = NewScraperHandler(
-		b.Logger.With(zap.String("handler", "scraper")),
-		b.UserService,
-		b.UserResourceMappingService,
-		b.LabelService,
-		authorizer.NewScraperTargetStoreService(b.ScraperTargetStoreService, b.UserResourceMappingService),
-		b.BucketService,
-		b.OrganizationService,
-	)
+	scraperBackend := NewScraperBackend(b)
+	scraperBackend.ScraperStorageService = authorizer.NewScraperTargetStoreService(b.ScraperTargetStoreService, b.UserResourceMappingService)
+	h.ScraperHandler = NewScraperHandler(scraperBackend)
 
-	h.SourceHandler = NewSourceHandler()
-	h.SourceHandler.SourceService = authorizer.NewSourceService(b.SourceService)
-	h.SourceHandler.NewBucketService = b.NewBucketService
-	h.SourceHandler.NewQueryService = b.NewQueryService
+	sourceBackend := NewSourceBackend(b)
+	sourceBackend.SourceService = authorizer.NewSourceService(b.SourceService)
+	sourceBackend.NewBucketService = b.NewBucketService
+	sourceBackend.NewQueryService = b.NewQueryService
+	h.SourceHandler = NewSourceHandler(sourceBackend)
 
-	h.SetupHandler = NewSetupHandler()
-	h.SetupHandler.OnboardingService = b.OnboardingService
+	setupBackend := NewSetupBackend(b)
+	h.SetupHandler = NewSetupHandler(setupBackend)
 
-	h.TaskHandler = NewTaskHandler(b.UserResourceMappingService, b.LabelService, b.Logger, b.UserService)
-	h.TaskHandler.TaskService = b.TaskService
-	h.TaskHandler.AuthorizationService = b.AuthorizationService
-	h.TaskHandler.OrganizationService = b.OrganizationService
-	h.TaskHandler.UserResourceMappingService = b.UserResourceMappingService
+	taskBackend := NewTaskBackend(b)
+	h.TaskHandler = NewTaskHandler(taskBackend)
+	h.TaskHandler.UserResourceMappingService = internalURM
 
-	h.TelegrafHandler = NewTelegrafHandler(
-		b.Logger.With(zap.String("handler", "telegraf")),
-		b.UserResourceMappingService,
-		b.LabelService,
-		authorizer.NewTelegrafConfigService(b.TelegrafService, b.UserResourceMappingService),
-		b.UserService,
-		b.OrganizationService,
-	)
+	telegrafBackend := NewTelegrafBackend(b)
+	telegrafBackend.TelegrafService = authorizer.NewTelegrafConfigService(b.TelegrafService, b.UserResourceMappingService)
+	h.TelegrafHandler = NewTelegrafHandler(telegrafBackend)
 
-	h.WriteHandler = NewWriteHandler(b.PointsWriter)
-	h.WriteHandler.OrganizationService = b.OrganizationService
-	h.WriteHandler.BucketService = b.BucketService
-	h.WriteHandler.Logger = b.Logger.With(zap.String("handler", "write"))
+	writeBackend := NewWriteBackend(b)
+	h.WriteHandler = NewWriteHandler(writeBackend)
 
-	h.QueryHandler = NewFluxHandler()
-	h.QueryHandler.OrganizationService = b.OrganizationService
-	h.QueryHandler.Logger = b.Logger.With(zap.String("handler", "query"))
-	h.QueryHandler.ProxyQueryService = b.ProxyQueryService
+	fluxBackend := NewFluxBackend(b)
+	h.QueryHandler = NewFluxHandler(fluxBackend)
 
 	h.ProtoHandler = NewProtoHandler(NewProtoBackend(b))
diff --git a/http/auth_service.go b/http/auth_service.go
index 4ce1df17fb..85f76c3eaa 100644
--- a/http/auth_service.go
+++ b/http/auth_service.go
@@ -16,6 +16,29 @@ import (
 	"github.com/julienschmidt/httprouter"
 )
 
+// AuthorizationBackend is all services and associated parameters required to construct
+// the AuthorizationHandler.
+type AuthorizationBackend struct {
+	Logger *zap.Logger
+
+	AuthorizationService platform.AuthorizationService
+	OrganizationService  platform.OrganizationService
+	UserService          platform.UserService
+	LookupService        platform.LookupService
+}
+
+// NewAuthorizationBackend returns a new instance of AuthorizationBackend.
+func NewAuthorizationBackend(b *APIBackend) *AuthorizationBackend {
+	return &AuthorizationBackend{
+		Logger: b.Logger.With(zap.String("handler", "authorization")),
+
+		AuthorizationService: b.AuthorizationService,
+		OrganizationService:  b.OrganizationService,
+		UserService:          b.UserService,
+		LookupService:        b.LookupService,
+	}
+}
+
 // AuthorizationHandler represents an HTTP API handler for authorizations.
 type AuthorizationHandler struct {
 	*httprouter.Router
@@ -28,11 +51,15 @@ type AuthorizationHandler struct {
 }
 
 // NewAuthorizationHandler returns a new instance of AuthorizationHandler.
-func NewAuthorizationHandler(userService platform.UserService) *AuthorizationHandler {
+func NewAuthorizationHandler(b *AuthorizationBackend) *AuthorizationHandler {
 	h := &AuthorizationHandler{
-		Router:      NewRouter(),
-		Logger:      zap.NewNop(),
-		UserService: userService,
+		Router: NewRouter(),
+		Logger: b.Logger,
+
+		AuthorizationService: b.AuthorizationService,
+		OrganizationService:  b.OrganizationService,
+		UserService:          b.UserService,
+		LookupService:        b.LookupService,
 	}
 
 	h.HandlerFunc("POST", "/api/v2/authorizations", h.handlePostAuthorization)
@@ -161,25 +188,10 @@ func (h *AuthorizationHandler) handlePostAuthorization(w http.ResponseWriter, r
 		return
 	}
 
-	var user *platform.User
-	// allow the user id to be specified optionally, if it is not set
-	// we use the id from the authorizer
-	if req.UserID == nil {
-		u, err := getAuthorizedUser(r, h.UserService)
-		if err != nil {
-			EncodeError(ctx, platform.ErrUnableToCreateToken, w)
-			return
-		}
-
-		user = u
-	} else {
-		u, err := h.UserService.FindUserByID(ctx, *req.UserID)
-		if err != nil {
-			EncodeError(ctx, platform.ErrUnableToCreateToken, w)
-			return
-		}
-
-		user = u
-	}
+	user, err := getAuthorizedUser(r, h.UserService)
+	if err != nil {
+		EncodeError(ctx, platform.ErrUnableToCreateToken, w)
+		return
+	}
 
 	auth := req.toPlatform(user.ID)
diff --git a/http/auth_test.go b/http/auth_test.go
index b2e8214d14..b9c57eb5d9 100644
--- a/http/auth_test.go
+++ b/http/auth_test.go
@@ -10,6 +10,8 @@ import (
 	"net/http/httptest"
 	"testing"
 
+	"go.uber.org/zap"
+
 	platform "github.com/influxdata/influxdb"
 	pcontext "github.com/influxdata/influxdb/context"
 	"github.com/influxdata/influxdb/inmem"
@@ -18,6 +20,18 @@ import (
 	"github.com/julienschmidt/httprouter"
 )
 
+// NewMockAuthorizationBackend returns an AuthorizationBackend with mock services.
+func NewMockAuthorizationBackend() *AuthorizationBackend { + return &AuthorizationBackend{ + Logger: zap.NewNop().With(zap.String("handler", "authorization")), + + AuthorizationService: mock.NewAuthorizationService(), + OrganizationService: mock.NewOrganizationService(), + UserService: mock.NewUserService(), + LookupService: mock.NewLookupService(), + } +} + func TestService_handleGetAuthorizations(t *testing.T) { type fields struct { AuthorizationService platform.AuthorizationService @@ -156,10 +170,11 @@ func TestService_handleGetAuthorizations(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewAuthorizationHandler(mock.NewUserService()) - h.AuthorizationService = tt.fields.AuthorizationService - h.UserService = tt.fields.UserService - h.OrganizationService = tt.fields.OrganizationService + authorizationBackend := NewMockAuthorizationBackend() + authorizationBackend.AuthorizationService = tt.fields.AuthorizationService + authorizationBackend.UserService = tt.fields.UserService + authorizationBackend.OrganizationService = tt.fields.OrganizationService + h := NewAuthorizationHandler(authorizationBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -335,11 +350,12 @@ func TestService_handleGetAuthorization(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewAuthorizationHandler(mock.NewUserService()) - h.AuthorizationService = tt.fields.AuthorizationService - h.UserService = tt.fields.UserService - h.OrganizationService = tt.fields.OrganizationService - h.LookupService = tt.fields.LookupService + authorizationBackend := NewMockAuthorizationBackend() + authorizationBackend.AuthorizationService = tt.fields.AuthorizationService + authorizationBackend.UserService = tt.fields.UserService + authorizationBackend.OrganizationService = tt.fields.OrganizationService + authorizationBackend.LookupService = tt.fields.LookupService + h := NewAuthorizationHandler(authorizationBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -507,128 +523,16 @@ func TestService_handlePostAuthorization(t *testing.T) { `, }, }, - { - name: "create a new authorization with user id set explicitly", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - CreateAuthorizationFn: func(ctx context.Context, c *platform.Authorization) error { - c.ID = platformtesting.MustIDBase16("020f755c3c082000") - c.Token = "new-test-token" - return nil - }, - }, - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*platform.User, error) { - if !id.Valid() { - return nil, &platform.Error{ - Code: platform.EInvalid, - Msg: "invalid user id", - } - } - return &platform.User{ - ID: id, - Name: "u1", - }, nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) { - if !id.Valid() { - return nil, &platform.Error{ - Code: platform.EInvalid, - Msg: "invalid org ID", - } - } - return &platform.Organization{ - ID: id, - Name: "o1", - }, nil - }, - }, - LookupService: &mock.LookupService{ - NameFn: func(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error) { - switch resource { - case platform.BucketsResourceType: - return "b1", nil - case platform.OrgsResourceType: - return "o1", nil - } - return "", fmt.Errorf("bad resource type %s", resource) - }, - }, - }, - args: args{ - session: &platform.Authorization{ - Token: "session-token", - ID: 
platformtesting.MustIDBase16("020f755c3c082000"), - UserID: platformtesting.MustIDBase16("aaaaaaaaaaaaaaaa"), - OrgID: platformtesting.MustIDBase16("020f755c3c083000"), - Description: "can write to authorization resource", - Permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.AuthorizationsResourceType, - }, - }, - }, - }, - authorization: &platform.Authorization{ - ID: platformtesting.MustIDBase16("020f755c3c082000"), - UserID: platformtesting.MustIDBase16("bbbbbbbbbbbbbbbb"), - OrgID: platformtesting.MustIDBase16("020f755c3c083000"), - Description: "only read dashboards sucka", - Permissions: []platform.Permission{ - { - Action: platform.ReadAction, - Resource: platform.Resource{ - Type: platform.DashboardsResourceType, - OrgID: platformtesting.IDPtr(platformtesting.MustIDBase16("020f755c3c083000")), - }, - }, - }, - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "user": "/api/v2/users/bbbbbbbbbbbbbbbb", - "self": "/api/v2/authorizations/020f755c3c082000" - }, - "id": "020f755c3c082000", - "user": "u1", - "userID": "bbbbbbbbbbbbbbbb", - "orgID": "020f755c3c083000", - "org": "o1", - "token": "new-test-token", - "status": "active", - "description": "only read dashboards sucka", - "permissions": [ - { - "action": "read", - "resource": { - "type": "dashboards", - "orgID": "020f755c3c083000", - "org": "o1" - } - } - ] -} - `, - }, - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewAuthorizationHandler(tt.fields.UserService) - h.AuthorizationService = tt.fields.AuthorizationService - h.UserService = tt.fields.UserService - h.OrganizationService = tt.fields.OrganizationService - h.LookupService = tt.fields.LookupService + authorizationBackend := NewMockAuthorizationBackend() + authorizationBackend.AuthorizationService = tt.fields.AuthorizationService + authorizationBackend.UserService = tt.fields.UserService + authorizationBackend.OrganizationService = tt.fields.OrganizationService + authorizationBackend.LookupService = tt.fields.LookupService + h := NewAuthorizationHandler(authorizationBackend) req, err := newPostAuthorizationRequest(tt.args.authorization) if err != nil { @@ -734,10 +638,11 @@ func TestService_handleDeleteAuthorization(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewAuthorizationHandler(mock.NewUserService()) - h.AuthorizationService = tt.fields.AuthorizationService - h.UserService = tt.fields.UserService - h.OrganizationService = tt.fields.OrganizationService + authorizationBackend := NewMockAuthorizationBackend() + authorizationBackend.AuthorizationService = tt.fields.AuthorizationService + authorizationBackend.UserService = tt.fields.UserService + authorizationBackend.OrganizationService = tt.fields.OrganizationService + h := NewAuthorizationHandler(authorizationBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -815,11 +720,11 @@ func initAuthorizationService(f platformtesting.AuthorizationFields, t *testing. 
token = a.Token } - authZ := NewAuthorizationHandler(mock.NewUserService()) - authZ.AuthorizationService = svc - authZ.UserService = svc - authZ.OrganizationService = svc - authZ.LookupService = &mock.LookupService{ + authorizationBackend := NewMockAuthorizationBackend() + authorizationBackend.AuthorizationService = svc + authorizationBackend.UserService = svc + authorizationBackend.OrganizationService = svc + authorizationBackend.LookupService = &mock.LookupService{ NameFn: func(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error) { switch resource { case platform.BucketsResourceType: @@ -831,6 +736,7 @@ func initAuthorizationService(f platformtesting.AuthorizationFields, t *testing. }, } + authZ := NewAuthorizationHandler(authorizationBackend) authN := NewAuthenticationHandler() authN.AuthorizationService = svc authN.Handler = authZ diff --git a/http/bucket_service.go b/http/bucket_service.go index aec64081b6..1de06c062c 100644 --- a/http/bucket_service.go +++ b/http/bucket_service.go @@ -7,7 +7,6 @@ import ( "fmt" "net/http" "path" - "strconv" "time" platform "github.com/influxdata/influxdb" @@ -15,6 +14,33 @@ import ( "go.uber.org/zap" ) +// BucketBackend is all services and associated parameters required to construct +// the BucketHandler. +type BucketBackend struct { + Logger *zap.Logger + + BucketService platform.BucketService + BucketOperationLogService platform.BucketOperationLogService + UserResourceMappingService platform.UserResourceMappingService + LabelService platform.LabelService + UserService platform.UserService + OrganizationService platform.OrganizationService +} + +// NewBucketBackend returns a new instance of BucketBackend. +func NewBucketBackend(b *APIBackend) *BucketBackend { + return &BucketBackend{ + Logger: b.Logger.With(zap.String("handler", "bucket")), + + BucketService: b.BucketService, + BucketOperationLogService: b.BucketOperationLogService, + UserResourceMappingService: b.UserResourceMappingService, + LabelService: b.LabelService, + UserService: b.UserService, + OrganizationService: b.OrganizationService, + } +} + // BucketHandler represents an HTTP API handler for buckets. type BucketHandler struct { *httprouter.Router @@ -42,14 +68,17 @@ const ( ) // NewBucketHandler returns a new instance of BucketHandler. 
-func NewBucketHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, userService platform.UserService) *BucketHandler { +func NewBucketHandler(b *BucketBackend) *BucketHandler { h := &BucketHandler{ Router: NewRouter(), - Logger: zap.NewNop(), + Logger: b.Logger, - UserResourceMappingService: mappingService, - LabelService: labelService, - UserService: userService, + BucketService: b.BucketService, + BucketOperationLogService: b.BucketOperationLogService, + UserResourceMappingService: b.UserResourceMappingService, + LabelService: b.LabelService, + UserService: b.UserService, + OrganizationService: b.OrganizationService, } h.HandlerFunc("POST", bucketsPath, h.handlePostBucket) @@ -59,17 +88,36 @@ func NewBucketHandler(mappingService platform.UserResourceMappingService, labelS h.HandlerFunc("PATCH", bucketsIDPath, h.handlePatchBucket) h.HandlerFunc("DELETE", bucketsIDPath, h.handleDeleteBucket) - h.HandlerFunc("POST", bucketsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.BucketsResourceType, platform.Member)) - h.HandlerFunc("GET", bucketsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.BucketsResourceType, platform.Member)) - h.HandlerFunc("DELETE", bucketsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member)) + memberBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.BucketsResourceType, + UserType: platform.Member, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", bucketsIDMembersPath, newPostMemberHandler(memberBackend)) + h.HandlerFunc("GET", bucketsIDMembersPath, newGetMembersHandler(memberBackend)) + h.HandlerFunc("DELETE", bucketsIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - h.HandlerFunc("POST", bucketsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.BucketsResourceType, platform.Owner)) - h.HandlerFunc("GET", bucketsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.BucketsResourceType, platform.Owner)) - h.HandlerFunc("DELETE", bucketsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner)) + ownerBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.BucketsResourceType, + UserType: platform.Owner, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", bucketsIDOwnersPath, newPostMemberHandler(ownerBackend)) + h.HandlerFunc("GET", bucketsIDOwnersPath, newGetMembersHandler(ownerBackend)) + h.HandlerFunc("DELETE", bucketsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - h.HandlerFunc("GET", bucketsIDLabelsPath, newGetLabelsHandler(h.LabelService)) - h.HandlerFunc("POST", bucketsIDLabelsPath, newPostLabelHandler(h.LabelService)) - h.HandlerFunc("DELETE", bucketsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService)) + labelBackend := &LabelBackend{ + Logger: b.Logger.With(zap.String("handler", "label")), + LabelService: b.LabelService, + } + h.HandlerFunc("GET", bucketsIDLabelsPath, newGetLabelsHandler(labelBackend)) + h.HandlerFunc("POST", bucketsIDLabelsPath, newPostLabelHandler(labelBackend)) + h.HandlerFunc("DELETE", bucketsIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) + h.HandlerFunc("PATCH", bucketsIDLabelsIDPath, newPatchLabelHandler(labelBackend)) 
return h } @@ -810,29 +858,14 @@ func decodeGetBucketLogRequest(ctx context.Context, r *http.Request) (*getBucket return nil, err } - opts := platform.DefaultOperationLogFindOptions - qp := r.URL.Query() - if v := qp.Get("desc"); v == "false" { - opts.Descending = false - } - if v := qp.Get("limit"); v != "" { - i, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - opts.Limit = i - } - if v := qp.Get("offset"); v != "" { - i, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - opts.Offset = i + opts, err := decodeFindOptions(ctx, r) + if err != nil { + return nil, err } return &getBucketLogRequest{ BucketID: i, - opts: opts, + opts: *opts, }, nil } diff --git a/http/bucket_test.go b/http/bucket_test.go index adbdc2a515..fa6e310b07 100644 --- a/http/bucket_test.go +++ b/http/bucket_test.go @@ -16,8 +16,23 @@ import ( "github.com/influxdata/influxdb/mock" platformtesting "github.com/influxdata/influxdb/testing" "github.com/julienschmidt/httprouter" + "go.uber.org/zap" ) +// NewMockBucketBackend returns a BucketBackend with mock services. +func NewMockBucketBackend() *BucketBackend { + return &BucketBackend{ + Logger: zap.NewNop().With(zap.String("handler", "bucket")), + + BucketService: mock.NewBucketService(), + BucketOperationLogService: mock.NewBucketOperationLogService(), + UserResourceMappingService: mock.NewUserResourceMappingService(), + LabelService: mock.NewLabelService(), + UserService: mock.NewUserService(), + OrganizationService: mock.NewOrganizationService(), + } +} + func TestService_handleGetBuckets(t *testing.T) { type fields struct { BucketService platform.BucketService @@ -167,11 +182,10 @@ func TestService_handleGetBuckets(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := tt.fields.LabelService - userService := mock.NewUserService() - h := NewBucketHandler(mappingService, labelService, userService) - h.BucketService = tt.fields.BucketService + bucketBackend := NewMockBucketBackend() + bucketBackend.BucketService = tt.fields.BucketService + bucketBackend.LabelService = tt.fields.LabelService + h := NewBucketHandler(bucketBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -287,11 +301,9 @@ func TestService_handleGetBucket(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewBucketHandler(mappingService, labelService, userService) - h.BucketService = tt.fields.BucketService + bucketBackend := NewMockBucketBackend() + bucketBackend.BucketService = tt.fields.BucketService + h := NewBucketHandler(bucketBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -392,12 +404,10 @@ func TestService_handlePostBucket(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewBucketHandler(mappingService, labelService, userService) - h.BucketService = tt.fields.BucketService - h.OrganizationService = tt.fields.OrganizationService + bucketBackend := NewMockBucketBackend() + bucketBackend.BucketService = tt.fields.BucketService + bucketBackend.OrganizationService = tt.fields.OrganizationService + h := NewBucketHandler(bucketBackend) b, err := json.Marshal(newBucket(tt.args.bucket)) if err != nil { @@ 
-488,11 +498,9 @@ func TestService_handleDeleteBucket(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewBucketHandler(mappingService, labelService, userService) - h.BucketService = tt.fields.BucketService + bucketBackend := NewMockBucketBackend() + bucketBackend.BucketService = tt.fields.BucketService + h := NewBucketHandler(bucketBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -770,11 +778,9 @@ func TestService_handlePatchBucket(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewBucketHandler(mappingService, labelService, userService) - h.BucketService = tt.fields.BucketService + bucketBackend := NewMockBucketBackend() + bucketBackend.BucketService = tt.fields.BucketService + h := NewBucketHandler(bucketBackend) upd := platform.BucketUpdate{} if tt.args.name != "" { @@ -881,7 +887,9 @@ func TestService_handlePostBucketMember(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewBucketHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), tt.fields.UserService) + bucketBackend := NewMockBucketBackend() + bucketBackend.UserService = tt.fields.UserService + h := NewBucketHandler(bucketBackend) b, err := json.Marshal(tt.args.user) if err != nil { @@ -969,7 +977,9 @@ func TestService_handlePostBucketOwner(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewBucketHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), tt.fields.UserService) + bucketBackend := NewMockBucketBackend() + bucketBackend.UserService = tt.fields.UserService + h := NewBucketHandler(bucketBackend) b, err := json.Marshal(tt.args.user) if err != nil { @@ -1015,13 +1025,10 @@ func initBucketService(f platformtesting.BucketFields, t *testing.T) (platform.B } } - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - handler := NewBucketHandler(mappingService, labelService, userService) - handler.BucketService = svc - handler.OrganizationService = svc - + bucketBackend := NewMockBucketBackend() + bucketBackend.BucketService = svc + bucketBackend.OrganizationService = svc + handler := NewBucketHandler(bucketBackend) server := httptest.NewServer(handler) client := BucketService{ Addr: server.URL, diff --git a/http/cur_swagger.yml b/http/cur_swagger.yml deleted file mode 100644 index cf14facd40..0000000000 --- a/http/cur_swagger.yml +++ /dev/null @@ -1,6610 +0,0 @@ -openapi: "3.0.0" -info: - title: Influx API Service - version: 0.1.0 -servers: - - url: "http://localhost:9999/api/v2" -paths: - /signin: - post: - x-generated: true - summary: Exchange basic auth credentials for session - security: - - basicAuth: [] - responses: - '204': - description: succesfully authenticated - default: - description: unsuccessful authentication - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /signout: - post: - x-generated: true - summary: Expire the current session - responses: - '204': - description: session successfully expired - default: - description: unsuccessful session exipry - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /: - get: 
- summary: Map of all top level routes available - responses: - default: - description: All routes - content: - application/json: - schema: - $ref: "#/components/schemas/Routes" - /protos: - get: - tags: - - Protos - summary: List of available protos (templates of tasks/dashboards/etc) - parameters: - - $ref: '#/components/parameters/TraceSpan' - responses: - '200': - description: List of protos - content: - application/json: - schema: - $ref: "#/components/schemas/Protos" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /protos/{protoID}/dashboards: - post: - tags: - - Protos - summary: Create instance of a proto dashboard - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: protoID - schema: - type: string - required: true - description: ID of proto - requestBody: - description: organization that the dashboard will be created as - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateProtoResourcesRequest" - responses: - '201': - description: List of dashboards that was created - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboards" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /setup: - get: - tags: - - Setup - summary: check if database has default user, org, bucket created, returns true if not. - responses: - '200': - description: - allowed true or false - content: - application/json: - schema: - $ref: "#/components/schemas/IsOnboarding" - post: - tags: - - Setup - summary: post onboarding request, to setup initial user, org and bucket - requestBody: - description: source to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/OnboardingRequest" - responses: - '201': - description: Created default user, bucket, org - content: - application/json: - schema: - $ref: "#/components/schemas/OnboardingResponse" - /telegrafs: - get: - tags: - - Telegrafs - parameters: - - in: query - name: orgID - description: specifies the organization of the resource - required: true - schema: - type: string - responses: - '200': - description: a list of telegraf configs - content: - application/json: - schema: - $ref: "#/components/schemas/Telegrafs" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Telegrafs - summary: Create a telegraf config - requestBody: - description: telegraf config to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/TelegrafRequest" - responses: - '201': - description: Telegraf config created - content: - application/json: - schema: - $ref: "#/components/schemas/Telegraf" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/telegrafs/{telegrafID}': - get: - tags: - - Telegrafs - summary: Retrieve a telegraf config - parameters: - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of telegraf config - responses: - '200': - description: telegraf config details - content: - application/json: - schema: - $ref: "#/components/schemas/Telegraf" - application/toml: - example: "[agent]\ninterval = \"10s\"" - schema: - type: string - application/octet-stream: - example: "[agent]\ninterval = \"10s\"" - schema: - type: string - default: - description: unexpected error - 
content: - application/json: - schema: - $ref: "#/components/schemas/Error" - put: - tags: - - Telegrafs - summary: Update a telegraf config - parameters: - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of telegraf config - requestBody: - description: telegraf config update to apply - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/TelegrafRequest" - responses: - '200': - description: An updated telegraf - content: - application/json: - schema: - $ref: "#/components/schemas/Telegraf" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - tags: - - Telegrafs - summary: delete a telegraf config - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of telegraf config - responses: - '200': - description: An updated telegraf - content: - application/json: - schema: - $ref: "#/components/schemas/Telegraf" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/telegrafs/{telegrafID}/labels': - get: - tags: - - Telegrafs - summary: list all labels for a telegraf config - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of the telegraf config - responses: - '200': - description: a list of all labels for a telegraf config - content: - application/json: - schema: - $ref: "#/components/schemas/LabelsResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Telegrafs - summary: add a label to a telegraf config - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of the telegraf config - requestBody: - description: label to add - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LabelMapping" - responses: - '200': - description: a label for a telegraf config - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/telegrafs/{telegrafID}/labels/{labelID}': - delete: - tags: - - Telegrafs - summary: delete a label from a telegraf config - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of the telegraf config - - in: path - name: labelID - schema: - type: string - required: true - description: the label ID - responses: - '204': - description: delete has been accepted - '404': - description: telegraf config not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/telegrafs/{telegrafID}/members': - get: - tags: - - Users - - Telegrafs - summary: List all users with member privileges for a telegraf config - parameters: - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of the telegraf config - responses: - '200': - description: a list of telegraf config members - content: - application/json: - schema: - $ref: 
"#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Telegrafs - summary: Add telegraf config member - parameters: - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of the telegraf config - requestBody: - description: user to add as member - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: member added to telegraf - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/telegrafs/{telegrafID}/members/{userID}': - delete: - tags: - - Users - - Telegrafs - summary: removes a member from a telegraf config - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of member to remove - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of the telegraf - responses: - '204': - description: member removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/telegrafs/{telegrafID}/owners': - get: - tags: - - Users - - Telegrafs - summary: List all owners of a telegraf config - parameters: - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of the telegraf config - responses: - '200': - description: a list of telegraf config owners - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Telegrafs - summary: Add telegraf config owner - parameters: - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of the telegraf config - requestBody: - description: user to add as owner - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: telegraf config owner added - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/telegrafs/{telegrafID}/owners/{userID}': - delete: - tags: - - Users - - Telegrafs - summary: removes an owner from a telegraf config - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of owner to remove - - in: path - name: telegrafID - schema: - type: string - required: true - description: ID of the telegraf config - responses: - '204': - description: owner removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /scrapers: - get: - tags: - - ScraperTargets - summary: get all scraper targets - responses: - '200': - description: all scraper targets - content: - application/json: - schema: - $ref: "#/components/schemas/ScraperTargetResponses" - post: - summary: create a scraper target - tags: - - ScraperTargets - requestBody: - description: scraper target to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ScraperTargetRequest" - responses: - '201': - description: scraper target created - content: - application/json: - 
schema: - $ref: "#/components/schemas/ScraperTargetResponse" - default: - description: internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/scrapers/{scraperTargetID}': - delete: - tags: - - ScraperTargets - summary: delete a scraper target - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: scraperTargetID - required: true - schema: - type: string - description: id of the scraper target - responses: - '204': - description: scraper target deleted - default: - description: internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - summary: update a scraper target - tags: - - ScraperTargets - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: scraperTargetID - required: true - schema: - type: string - description: id of the scraper target - requestBody: - description: scraper target update to apply - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ScraperTargetRequest" - responses: - '200': - description: scraper target updated - content: - application/json: - schema: - $ref: "#/components/schemas/ScraperTargetResponse" - default: - description: internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /macros: - get: - tags: - - Macros - summary: keywords that specify how input data gets mapped to a replacement output sequence - parameters: - - in: header - name: Authorization - description: the authorization header should be in the format of `Token ` - required: true - schema: - type: string - - in: query - name: org - required: true - schema: - type: string - description: filter macros to a specific organization name - responses: - '200': - description: all macros for an organization - content: - application/json: - schema: - $ref: "#/components/schemas/Macros" - default: - description: internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - summary: keywords that specify how input data gets mapped to a replacement output sequence - tags: - - Macros - parameters: - - in: header - name: Authorization - description: the authorization header should be in the format of `Token ` - required: true - schema: - type: string - - in: query - name: org - required: true - schema: - type: string - description: filter macros to a specific organization name - responses: - '201': - description: macro created - content: - application/json: - schema: - $ref: "#/components/schemas/Macros" - default: - description: internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/macros/{macroID}': - delete: - tags: - - Macros - summary: keywords that specify how input data gets mapped to a replacement output sequence - parameters: - - in: header - name: Authorization - description: the authorization header should be in the format of `Token ` - required: true - schema: - type: string - - in: path - name: macroID - required: true - schema: - type: string - description: id of the macro - - in: query - name: org - required: true - schema: - type: string - description: filter macros to a specific organization name - responses: - '204': - description: macro deleted - default: - description: internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - summary: keywords that specify how input data gets mapped to a replacement output 
sequence - tags: - - Macros - parameters: - - in: header - name: Authorization - description: the authorization header should be in the format of `Token ` - required: true - schema: - type: string - - in: path - name: macroID - required: true - schema: - type: string - description: id of the macro - - in: query - name: org - required: true - schema: - type: string - description: filter macros to a specific organization name - responses: - '200': - description: macro updated - content: - application/json: - schema: - $ref: "#/components/schemas/Macros" - default: - description: internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /write: - post: - tags: - - Write - summary: write time-series data into influxdb - requestBody: - description: line protocol body - required: true - content: - text/plain: - schema: - type: string - parameters: - - in: header - name: Content-Encoding - description: when present, its value indicates to the database that compression is applied to the line-protocol body. - schema: - type: string - description: specifies that the line protocol in the body is encoded with gzip or not encoded with identity. - default: identity - enum: - - gzip - - identity - - in: header - name: Content-Type - description: Content-Type is used to indicate the format of the data sent to the server. - schema: - type: string - description: text/plain specifies the text line protocol; charset is assumed to be utf-8. - default: text/plain; charset=utf-8 - enum: - - text/plain - - text/plain; charset=utf-8 - - application/vnd.influx.arrow - - in: header - name: Content-Length - description: Content-Length is an entity header is indicating the size of the entity-body, in bytes, sent to the database. If the length is greater than the database max body configuration option, a 413 response is sent. - schema: - type: integer - description: The length in decimal number of octets. - - in: header - name: Accept - description: specifies the return content format. - schema: - type: string - description: return format of any errors - default: application/json - enum: - - application/json - - in: query - name: org - description: specifies the destination organization for writes - required: true - schema: - type: string - description: all points within batch are written to this organization. - - in: query - name: bucket - description: specifies the destination bucket for writes - required: true - schema: - type: string - description: all points within batch are written to this bucket. - - in: query - name: precision - description: specifies the precision for the unix timestamps within the body line-protocol - schema: - $ref: "#/components/schemas/WritePrecision" - responses: - '204': - description: write data is correctly formatted and accepted for writing to the bucket. - '400': - description: line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written. - content: - application/json: - schema: - $ref: "#/components/schemas/LineProtocolError" - '401': - description: token does not have sufficient permissions to write to this organization and bucket or the organization and bucket do not exist. - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '403': - description: no token was sent and they are required. 
- content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '413': - description: write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written. - content: - application/json: - schema: - $ref: "#/components/schemas/LineProtocolLengthError" - '429': - description: token is temporarily over quota. The Retry-After header describes when to try the write again. - headers: - Retry-After: - description: A non-negative decimal integer indicating the seconds to delay after the response is received. - schema: - type: integer - format: int32 - '503': - description: server is temporarily unavailable to accept writes. The Retry-After header describes when to try the write again. - headers: - Retry-After: - description: A non-negative decimal integer indicating the seconds to delay after the response is received. - schema: - type: integer - format: int32 - default: - description: internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /health: - get: - tags: - - Health - summary: Get the health of an instance - responses: - '200': - description: the instance is healthy - content: - application/json: - schema: - $ref: "#/components/schemas/Health" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /sources: - post: - x-generated: true - tags: - - Sources - summary: Creates a Source - parameters: - - in: query - name: org - description: specifies the organization of the resource - required: true - schema: - type: string - requestBody: - description: source to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Source" - responses: - '201': - description: Created Source - content: - application/json: - schema: - $ref: "#/components/schemas/Source" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - get: - x-generated: true - tags: - - Sources - summary: Get all sources - parameters: - - in: query - name: org - description: specifies the organization of the resource - required: true - schema: - type: string - responses: - '200': - description: all sources - content: - application/json: - schema: - $ref: "#/components/schemas/Sources" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /sources/{sourceID}: - delete: - x-generated: true - tags: - - Sources - summary: Delete a source - parameters: - - in: path - name: sourceID - schema: - type: string - required: true - description: ID of the source - responses: - '204': - description: delete has been accepted - '404': - description: view not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - x-generated: true - tags: - - Sources - summary: Updates a Source - parameters: - - in: path - name: sourceID - schema: - type: string - required: true - description: ID of the source - requestBody: - description: source update - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Source" - responses: - '200': - description: Created Source - content: - application/json: - schema: - $ref: "#/components/schemas/Source" - '404': - description: source not found - content: - 
application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - get: - x-generated: true - tags: - - Sources - summary: Get a source - parameters: - - in: path - name: sourceID - schema: - type: string - required: true - description: ID of the source - responses: - '200': - description: a source - content: - application/json: - schema: - $ref: "#/components/schemas/Source" - '404': - description: source not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /sources/{sourceID}/health: - get: - x-generated: true - tags: - - Sources - summary: Get a sources health - parameters: - - in: path - name: sourceID - schema: - type: string - required: true - description: ID of the source - responses: - '200': - description: a source - content: - application/json: - schema: - $ref: "#/components/schemas/Source" - '404': - description: source not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /sources/{sourceID}/buckets: - get: - tags: - - Sources - - Buckets - summary: Get a sources buckets (will return dbrps in the form of buckets if it is a v1 source) - parameters: - - in: path - name: sourceID - schema: - type: string - required: true - description: ID of the source - - in: query - name: org - description: specifies the organization of the resource - required: true - schema: - type: string - responses: - '200': - description: a source - content: - application/json: - schema: - $ref: "#/components/schemas/Buckets" - '404': - description: source not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /views: - post: - x-generated: true - tags: - - Views - summary: A view contains information about the visual representation of data - parameters: - - in: query - name: org - description: specifies the organization of the resource - required: true - schema: - type: string - requestBody: - description: view to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/View" - responses: - '201': - description: Added view - content: - application/json: - schema: - $ref: "#/components/schemas/View" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - get: - x-generated: true - tags: - - Views - summary: Get all views - parameters: - - in: query - name: org - description: specifies the organization of the resource - required: true - schema: - type: string - responses: - '200': - description: all views - content: - application/json: - schema: - $ref: "#/components/schemas/Views" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}': - get: - x-generated: true - tags: - - Views - summary: Get a single View - parameters: - - in: path - name: viewID - schema: - type: string - required: true - description: ID of view to update - responses: - '200': - description: get a single view - content: - application/json: - schema: - $ref: 
"#/components/schemas/View" - '404': - description: view not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - x-generated: true - tags: - - Views - summary: Update a single view - requestBody: - description: patching of a view - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/View" - parameters: - - in: path - name: viewID - schema: - type: string - required: true - description: ID of view to update - responses: - '200': - description: Updated view - content: - application/json: - schema: - $ref: "#/components/schemas/View" - '404': - description: view not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - x-generated: true - tags: - - Views - summary: Delete a view - parameters: - - in: path - name: viewID - schema: - type: string - required: true - description: ID of view to update - responses: - '204': - description: delete has been accepted - '404': - description: view not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /labels: - post: - tags: - - Labels - summary: Create a label - requestBody: - description: label to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Label" - responses: - '201': - description: Added label - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - get: - tags: - - Labels - summary: Get all labels - responses: - '200': - description: all labels - content: - application/json: - schema: - $ref: "#/components/schemas/LabelsResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /labels/{labelID}: - get: - tags: - - Labels - summary: Get a label - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: labelID - schema: - type: string - required: true - description: ID of label to update - responses: - '200': - description: a label - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - tags: - - Labels - summary: Update a single label - requestBody: - description: label update - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LabelUpdate" - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: labelID - schema: - type: string - required: true - description: ID of label to update - responses: - '200': - description: Updated label - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - '404': - description: label not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - tags: - - Labels - summary: Delete a label - parameters: - - $ref: 
'#/components/parameters/TraceSpan' - - in: path - name: labelID - schema: - type: string - required: true - description: ID of label to delete - responses: - '204': - description: delete has been accepted - '404': - description: label not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /dashboards: - post: - x-generated: true - tags: - - Dashboards - summary: Create a dashboard - requestBody: - description: dashboard to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - responses: - '201': - description: Added dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - get: - x-generated: true - tags: - - Dashboards - summary: Get all dashboards - parameters: - - in: query - name: org - description: specifies the organization name of the resource - schema: - type: string - - in: query - name: owner - description: specifies the owner id to return resources for - schema: - type: string - - in: query - name: sortBy - description: specifies the attribute to sort the returned dashboards by - schema: - type: string - enum: - - "ID" - - "CreatedAt" - - "UpdatedAt" - - in: query - name: id - description: ID list of dashboards to return. If both this and owner are specified, only ids is used. - schema: - type: array - items: - type: string - responses: - '200': - description: all dashboards - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboards" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}': - get: - x-generated: true - tags: - - Dashboards - summary: Get a single Dashboard - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - responses: - '200': - description: get a single dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - '404': - description: dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - x-generated: true - tags: - - Dashboards - summary: Update a single dashboard - requestBody: - description: patching of a dashboard - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - responses: - '200': - description: Updated dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - '404': - description: dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - x-generated: true - tags: - - Dashboards - summary: Delete a dashboard - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - responses: - '204': - description: delete has been accepted - '404': - description:
dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}/cells': - put: - x-generated: true - tags: - - Cells - - Dashboards - summary: Replace a dashboard's cells - requestBody: - description: batch replaces all of a dashboard's cells (this is used primarily to update the positional information of all of the cells) - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Cells" - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - responses: - '200': - description: Replaced dashboard cells - content: - application/json: - schema: - $ref: "#/components/schemas/Dashboard" - '404': - description: dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - x-generated: true - tags: - - Cells - - Dashboards - summary: Create a dashboard cell - requestBody: - description: cell that will be added - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateCell" - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - responses: - '201': - description: Cell successfully added - content: - application/json: - schema: - $ref: "#/components/schemas/Cell" - '404': - description: dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}/cells/{cellID}': - patch: - x-generated: true - tags: - - Cells - - Dashboards - summary: Update the non-positional information related to a cell (because updates to a single cell's positional data could cause grid conflicts) - requestBody: - description: updates the non-positional information related to a cell - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CellUpdate" - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - - in: path - name: cellID - schema: - type: string - required: true - description: ID of cell to update - responses: - '200': - description: Updated dashboard cell - content: - application/json: - schema: - $ref: "#/components/schemas/Cell" - '404': - description: cell or dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - x-generated: true - tags: - - Cells - - Dashboards - summary: Delete a dashboard cell - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to delete - - in: path - name: cellID - schema: - type: string - required: true - description: ID of cell to delete - responses: - '204': - description: Cell successfully deleted - '404': - description: cell or dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema:
- $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}/cells/{cellID}/view': - get: - tags: - - Cells - - Dashboards - - Views - summary: Retrieve the view for a cell in a dashboard - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard - - in: path - name: cellID - schema: - type: string - required: true - description: ID of cell - responses: - '200': - description: A dashboard cells view - content: - application/json: - schema: - $ref: "#/components/schemas/View" - '404': - description: cell or dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - tags: - - Cells - - Dashboards - - Views - summary: Update the view for a cell - requestBody: - description: updates the view for a cell - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/View" - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of dashboard to update - - in: path - name: cellID - schema: - type: string - required: true - description: ID of cell to update - responses: - '200': - description: Updated cell view - content: - application/json: - schema: - $ref: "#/components/schemas/View" - '404': - description: cell or dashboard not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}/labels': - get: - tags: - - Dashboards - summary: list all labels for a dashboard - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard - responses: - '200': - description: a list of all labels for a dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/LabelsResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Dashboards - summary: add a label to a dashboard - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard - requestBody: - description: label to add - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LabelMapping" - responses: - '200': - description: a label for a dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}/labels/{labelID}': - delete: - tags: - - Dashboards - summary: delete a label from a dashboard config - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard config - - in: path - name: labelID - schema: - type: string - required: true - description: the label ID - responses: - '204': - description: delete has been accepted - '404': - description: dashboard config not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: 
- $ref: "#/components/schemas/Error" - post: - tags: - - Dashboards - summary: add a label to a dashboard - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard - requestBody: - description: label to add - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LabelMapping" - responses: - '200': - description: a label for a dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}/members': - get: - tags: - - Users - - Dashboards - summary: List all dashboard members - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard - responses: - '200': - description: a list of users who have member privileges for a dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Dashboards - summary: Add dashboard member - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard - requestBody: - description: user to add as member - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: added to dashboard members - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}/members/{userID}': - delete: - tags: - - Users - - Dashboards - summary: removes a member from a dashboard - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of member to remove - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard - responses: - '204': - description: member removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}/owners': - get: - tags: - - Users - - Dashboards - summary: List all dashboard owners - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard - responses: - '200': - description: a list of users who have owner privileges for a dashboard - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Dashboards - summary: Add dashboard owner - parameters: - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard - requestBody: - description: user to add as owner - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: added to dashboard owners - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/dashboards/{dashboardID}/owners/{userID}':
- delete: - tags: - - Users - - Dashboards - summary: removes an owner from a dashboard - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of owner to remove - - in: path - name: dashboardID - schema: - type: string - required: true - description: ID of the dashboard - responses: - '204': - description: owner removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /query/ast: - post: - description: analyzes flux query and generates an abstract syntax tree. - tags: - - Query - parameters: - - in: header - name: Content-Type - schema: - type: string - enum: - - application/json - - in: header - name: Authorization - description: the authorization header should be in the format of `Token ` - schema: - type: string - requestBody: - description: analyzed flux query to generate abstract syntax tree. - content: - application/json: - schema: - $ref: "#/components/schemas/LanguageRequest" - responses: - '200': - description: Abstract syntax tree of flux query. - content: - application/json: - schema: - $ref: "#/components/schemas/ASTResponse" - default: - description: Any response other than 200 is an internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /query/spec: - post: - description: analyzes flux query and generates a query specification. - tags: - - Query - parameters: - - in: header - name: Content-Type - schema: - type: string - enum: - - application/json - - in: header - name: Authorization - description: the authorization header should be in the format of `Token ` - schema: - type: string - requestBody: - description: analyzed flux query to generate specification. - content: - application/json: - schema: - $ref: "#/components/schemas/LanguageRequest" - responses: - '200': - description: Specification of flux query.
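For reference, a minimal Go sketch of calling the /query/ast endpoint described above. The base URL (localhost:9999, the port the Dockerfile exposes), the token, and the {"query": ...} shape of LanguageRequest are assumptions for illustration; the schema itself is defined later in the components section.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Assumed LanguageRequest body shape: {"query": "<flux source>"}.
	body := strings.NewReader(`{"query": "from(bucket:\"b\") |> range(start:-5m)"}`)
	req, err := http.NewRequest("POST", "http://localhost:9999/query/ast", body)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Token my-token") // header format: `Token <token>`
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	ast, _ := io.ReadAll(resp.Body)
	fmt.Println(string(ast)) // 200 returns the ASTResponse schema
}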
- content: - application/json: - schema: - $ref: "#/components/schemas/QuerySpecification" - default: - description: Any response other than 200 is an internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /query/suggestions: - get: - tags: - - Query - responses: - '200': - description: Suggestions for next functions in call chain - content: - application/json: - schema: - $ref: "#/components/schemas/FluxSuggestions" - default: - description: Any response other than 200 is an internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /query/suggestions/{name}: - parameters: - - in: path - name: name - schema: - type: string - required: true - description: name of branching suggestion - get: - tags: - - Query - responses: - '200': - description: Suggestions for next functions in call chain - content: - application/json: - schema: - $ref: "#/components/schemas/FluxSuggestions" - default: - description: Any response other than 200 is an internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /authorizations: - get: - tags: - - Authorizations - summary: List all authorizations - parameters: - - in: query - name: userID - schema: - type: string - description: filter authorizations belonging to a user id - - in: query - name: user - schema: - type: string - description: filter authorizations belonging to a user name - responses: - '200': - description: A list of authorizations - content: - application/json: - schema: - $ref: "#/components/schemas/Authorizations" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Authorizations - summary: Create an authorization - requestBody: - description: authorization to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Authorization" - responses: - '201': - description: authorization created - content: - application/json: - schema: - $ref: "#/components/schemas/Authorization" - '400': - description: invalid request - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /authorizations/{authID}: - get: - tags: - - Authorizations - summary: Retrieve an authorization - parameters: - - in: path - name: authID - schema: - type: string - required: true - description: ID of authorization to get - responses: - '200': - description: authorization details - content: - application/json: - schema: - $ref: "#/components/schemas/Authorization" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - tags: - - Authorizations - summary: update authorization to be active or inactive. requests using an inactive authorization will be rejected. 
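A hedged sketch of the GET /authorizations listing with the user filter defined above; the server address, user name, and token are placeholders, not values from this spec.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// The user query parameter filters authorizations by user name.
	req, err := http.NewRequest("GET", "http://localhost:9999/authorizations?user=alice", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Token my-token")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(string(b)) // 200 returns the Authorizations schema as JSON
}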
- requestBody: - description: authorization to update to apply - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Authorization" - parameters: - - in: path - name: authID - schema: - type: string - required: true - description: ID of authorization to update - responses: - '200': - description: the active or inactive authorization - content: - application/json: - schema: - $ref: "#/components/schemas/Authorization" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - tags: - - Authorizations - summary: Delete an authorization - parameters: - - in: path - name: authID - schema: - type: string - required: true - description: ID of authorization to delete - responses: - '204': - description: authorization deleted - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /query/analyze: - post: - tags: - - Query - summary: analyze an influxql or flux query - parameters: - - in: header - name: Content-Type - schema: - type: string - enum: - - application/json - - in: header - name: Authorization - description: the authorization header should be in the format of `Token ` - schema: - type: string - requestBody: - description: flux or influxql query to analyze - content: - application/json: - schema: - $ref: "#/components/schemas/Query" - responses: - '200': - description: query analyze results. Errors will be empty if the query is valid. - content: - application/json: - schema: - $ref: "#/components/schemas/AnalyzeQueryResponse" - default: - description: internal server error - headers: - X-Influx-Error: - description: error string describing the problem - schema: - type: string - X-Influx-Reference: - description: reference code unique to the error type - schema: - type: integer - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /query: - post: - tags: - - Query - summary: query an influx - parameters: - - in: header - name: Accept - description: specifies the return content format. Each response content type will have its own dialect options. - schema: - type: string - description: return format of either CSV or Arrow buffers - default: text/csv - enum: - - text/csv - - application/vnd.influx.arrow - - in: header - name: Content-Type - schema: - type: string - enum: - - application/json - - application/vnd.flux - - in: header - name: Authorization - description: the authorization header should be in the format of `Token ` - schema: - type: string - - in: query - name: org - description: specifies the name of the organization executing the query.
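A minimal Go sketch of the /query endpoint being defined here (its request body and responses continue below): it posts raw Flux with the application/vnd.flux content type and asks for annotated CSV back. The org name, bucket, and token are placeholder values.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

func main() {
	flux := `from(bucket:"my-bucket") |> range(start:-5m)`
	req, err := http.NewRequest("POST", "http://localhost:9999/query?org=my-org",
		strings.NewReader(flux))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/vnd.flux") // raw Flux, no JSON wrapper
	req.Header.Set("Accept", "text/csv")                   // CSV results (the default)
	req.Header.Set("Authorization", "Token my-token")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// On a 400 the X-Influx-Error and X-Influx-Reference headers carry the details.
	csv, _ := io.ReadAll(resp.Body)
	fmt.Print(string(csv))
}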
- schema: - type: string - requestBody: - description: flux query or specification to execute - content: - application/json: - schema: - $ref: "#/components/schemas/Query" - application/vnd.flux: - schema: - type: string - responses: - '200': - description: query results - content: - text/csv: - schema: - type: string - example: > - result,table,_start,_stop,_time,region,host,_value - mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:00Z,east,A,15.43 - mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:20Z,east,B,59.25 - mean,0,2018-05-08T20:50:00Z,2018-05-08T20:51:00Z,2018-05-08T20:50:40Z,east,C,52.62 - application/vnd.influx.arrow: - schema: - type: string - format: binary - '400': - description: error processing query - headers: - X-Influx-Error: - description: error string describing the problem - schema: - type: string - X-Influx-Reference: - description: reference code unique to the error type - schema: - type: integer - content: - text/csv: - schema: - type: string - example: > - error,reference - Failed to parse query,897 - application/vnd.influx.arrow: - schema: - type: string - format: binary - default: - description: internal server error - headers: - X-Influx-Error: - description: error string describing the problem - schema: - type: string - X-Influx-Reference: - description: reference code unique to the error type - schema: - type: integer - content: - text/csv: - schema: - type: string - example: > - error,reference - Failed to parse query,897 - application/vnd.influx.arrow: - schema: - type: string - format: binary - /buckets: - get: - x-generated: true - tags: - - Buckets - summary: List all buckets - parameters: - - in: query - name: org - description: specifies the organization of the resource - required: true - schema: - type: string - responses: - '200': - description: a list of buckets - content: - application/json: - schema: - $ref: "#/components/schemas/Buckets" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - x-generated: true - tags: - - Buckets - summary: Create a bucket - parameters: - - in: query - name: org - description: specifies the organization of the resource - required: true - schema: - type: string - requestBody: - description: bucket to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Bucket" - responses: - '201': - description: Bucket created - content: - application/json: - schema: - $ref: "#/components/schemas/Bucket" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/buckets/{bucketID}': - get: - x-generated: true - tags: - - Buckets - summary: Retrieve a bucket - parameters: - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of bucket to get - responses: - '200': - description: bucket details - content: - application/json: - schema: - $ref: "#/components/schemas/Bucket" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - x-generated: true - tags: - - Buckets - summary: Update a bucket - requestBody: - description: bucket update to apply - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Bucket" - parameters: - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of bucket to update - responses: - '200': - description: An updated 
bucket - content: - application/json: - schema: - $ref: "#/components/schemas/Bucket" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - tags: - - Buckets - summary: Delete a bucket - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of bucket to delete - responses: - '204': - description: delete has been accepted - '404': - description: bucket not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/buckets/{bucketID}/labels': - get: - tags: - - Buckets - summary: list all labels for a bucket - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of the bucket - responses: - '200': - description: a list of all labels for a bucket - content: - application/json: - schema: - $ref: "#/components/schemas/LabelsResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Buckets - summary: add a label to a bucket - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of the bucket - requestBody: - description: label to add - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LabelMapping" - responses: - '200': - description: a label for a bucket - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/buckets/{bucketID}/labels/{labelID}': - delete: - tags: - - Buckets - summary: delete a label from a bucket config - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of the bucket config - - in: path - name: labelID - schema: - type: string - required: true - description: the label ID - responses: - '204': - description: delete has been accepted - '404': - description: bucket config not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/buckets/{bucketID}/members': - get: - tags: - - Users - - Buckets - summary: List all users with member privileges for a bucket - parameters: - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of the bucket - responses: - '200': - description: a list of bucket members - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Buckets - summary: Add bucket member - parameters: - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of the bucket - requestBody: - description: user to add as member - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: member added to bucket - content: - application/json: - 
schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/buckets/{bucketID}/members/{userID}': - delete: - tags: - - Users - - Buckets - summary: removes a member from a bucket - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of member to remove - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of the bucket - responses: - '204': - description: member removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/buckets/{bucketID}/owners': - get: - tags: - - Users - - Buckets - summary: List all owners of a bucket - parameters: - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of the bucket - responses: - '200': - description: a list of bucket owners - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Buckets - summary: Add bucket owner - parameters: - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of the bucket - requestBody: - description: user to add as owner - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: bucket owner added - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/buckets/{bucketID}/owners/{userID}': - delete: - tags: - - Users - - Buckets - summary: removes an owner from a bucket - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of owner to remove - - in: path - name: bucketID - schema: - type: string - required: true - description: ID of the bucket - responses: - '204': - description: owner removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /orgs: - get: - x-generated: true - tags: - - Organizations - summary: List all organizations - responses: - '200': - description: A list of organizations - content: - application/json: - schema: - $ref: "#/components/schemas/Organizations" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - x-generated: true - tags: - - Organizations - summary: Create an organization - requestBody: - description: organization to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Organization" - responses: - '201': - description: organization created - content: - application/json: - schema: - $ref: "#/components/schemas/Organization" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/orgs/{orgID}': - get: - x-generated: true - tags: - - Organizations - summary: Retrieve an organization - parameters: - - in: path - name: orgID - schema: - type: string - required: true - description: ID of organization to get - responses: - '200': - description: organization details - content: - application/json: - schema: - $ref: "#/components/schemas/Organization" - default: - description:
unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - x-generated: true - tags: - - Organizations - summary: Update an organization - requestBody: - description: organization update to apply - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Organization" - parameters: - - in: path - name: orgID - schema: - type: string - required: true - description: ID of organization to get - responses: - '200': - description: organization updated - content: - application/json: - schema: - $ref: "#/components/schemas/Organization" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - x-generated: true - tags: - - Organizations - summary: delete an organization - parameters: - - in: path - name: orgID - schema: - type: string - required: true - description: ID of organization to get - responses: - '200': - description: organization deleted - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/orgs/{orgID}/labels': - get: - tags: - - Organizations - summary: list all labels for an organization - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: orgID - schema: - type: string - required: true - description: ID of the organization - responses: - '200': - description: a list of all labels for an organization - content: - application/json: - schema: - $ref: "#/components/schemas/LabelsResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Organizations - summary: add a label to an organization - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: orgID - schema: - type: string - required: true - description: ID of the organization - requestBody: - description: label to add - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LabelMapping" - responses: - '200': - description: a label for an organization - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/orgs/{orgID}/labels/{labelID}': - delete: - tags: - - Organizations - summary: delete a label from an org config - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: orgID - schema: - type: string - required: true - description: ID of the org config - - in: path - name: labelID - schema: - type: string - required: true - description: the label ID - responses: - '204': - description: delete has been accepted - '404': - description: org config not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/orgs/{orgID}/members': - get: - tags: - - Users - - Organizations - summary: List all members of an organization - parameters: - - in: path - name: orgID - schema: - type: string - required: true - description: ID of the organization - responses: - '200': - description: a list of organization members - content: - application/json: - schema: - $ref: "#/components/schemas/ResourceOwners" - default: - description: unexpected error - content: - application/json: - schema: - $ref:
"#/components/schemas/Error" - post: - tags: - - Users - - Organizations - summary: Add organization member - parameters: - - in: path - name: orgID - schema: - type: string - required: true - description: ID of the organization - requestBody: - description: user to add as member - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: added to organization created - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/orgs/{orgID}/members/{userID}': - delete: - tags: - - Users - - Organizations - summary: removes a member from an organization - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of member to remove - - in: path - name: orgID - schema: - type: string - required: true - description: ID of the organization - responses: - '204': - description: member removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/orgs/{orgID}/owners': - get: - tags: - - Users - - Organizations - summary: List all owners of an organization - parameters: - - in: path - name: orgID - schema: - type: string - required: true - description: ID of the organization - responses: - '200': - description: a list of organization owners - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Organizations - summary: Add organization owner - parameters: - - in: path - name: orgID - schema: - type: string - required: true - description: ID of the organization - requestBody: - description: user to add as owner - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: organization owner added - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/orgs/{orgID}/owners/{userID}': - delete: - tags: - - Users - - Organizations - summary: removes an owner from an organization - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of owner to remove - - in: path - name: orgID - schema: - type: string - required: true - description: ID of the organization - responses: - '204': - description: owner removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /tasks: - get: - x-generated: true - tags: - - Tasks - summary: List tasks. 
- description: Lists tasks, limit 100 - parameters: - - in: query - name: after - schema: - type: string - description: returns tasks after specified ID - - in: query - name: user - schema: - type: string - description: filter tasks to a specific user name - - in: query - name: org - schema: - type: string - description: filter tasks to a specific organization name - responses: - '200': - description: A list of tasks - content: - application/json: - schema: - type: object - properties: - tasks: - type: array - items: - $ref: "#/components/schemas/Task" - links: - $ref: "#/components/schemas/Links" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - x-generated: true - tags: - - Tasks - summary: Create a new task - requestBody: - description: task to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/TaskCreateRequest" - responses: - '201': - description: Task created - content: - application/json: - schema: - $ref: "#/components/schemas/Task" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}': - get: - x-generated: true - tags: - - Tasks - summary: Retrieve a task - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of task to get - responses: - '200': - description: task details - content: - application/json: - schema: - $ref: "#/components/schemas/Task" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - x-generated: true - tags: - - Tasks - summary: Update a task - description: Update a task. This will cancel all queued runs.
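A sketch of the PATCH /tasks/{taskID} update described above, which cancels all queued runs. The Task schema lives in the components section; the {"status": "inactive"} body, the task ID, and the token are assumptions for illustration.

package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Assumed Task field: {"status": "inactive"} to pause the task.
	body := strings.NewReader(`{"status": "inactive"}`)
	req, err := http.NewRequest("PATCH", "http://localhost:9999/tasks/0000000000000001", body)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Token my-token")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 returns the updated Task; queued runs are cancelled
}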
- requestBody: - description: task update to apply - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/Task" - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of task to get - responses: - '200': - description: task updated - content: - application/json: - schema: - $ref: "#/components/schemas/Task" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - x-generated: true - tags: - - Tasks - summary: Delete a task - description: Deletes a task and all associated records - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of task to delete - responses: - '204': - description: task deleted - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/runs': - get: - tags: - - Tasks - summary: Retrieve list of run records for a task - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of task to get runs for - - in: query - name: after - schema: - type: string - description: returns runs after specified ID - - in: query - name: limit - schema: - type: integer - minimum: 1 - maximum: 100 - default: 20 - description: the number of runs to return - - in: query - name: afterTime - schema: - type: string - format: date-time - description: filter runs to those scheduled after this time, RFC3339 - - in: query - name: beforeTime - schema: - type: string - format: date-time - description: filter runs to those scheduled before this time, RFC3339 - responses: - '200': - description: a list of task runs - content: - application/json: - schema: - type: object - properties: - runs: - type: array - items: - $ref: "#/components/schemas/Run" - links: - $ref: "#/components/schemas/Links" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/runs/{runID}': - get: - tags: - - Tasks - summary: Retrieve a single run record for a task - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: task ID - - in: path - name: runID - schema: - type: string - required: true - description: run ID - responses: - '200': - description: The run record - content: - application/json: - schema: - $ref: "#/components/schemas/Run" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/runs/{runID}/retry': - post: - tags: - - Tasks - summary: Retry a task run - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: task ID - - in: path - name: runID - schema: - type: string - required: true - description: run ID - responses: - '204': - description: retry has been queued - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/logs': - get: - tags: - - Tasks - summary: Retrieve all logs for a task - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of task to get logs for - responses: - '200': - description: all logs for a task - content: - application/json: - schema: - $ref: "#/components/schemas/Logs" - default: - description: unexpected error - content: - application/json: - schema: - $ref: 
"#/components/schemas/Error" - '/tasks/{taskID}/runs/{runID}/logs': - get: - tags: - - Tasks - summary: Retrieve all logs for a run - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of task to get logs for. - - in: path - name: runID - schema: - type: string - required: true - description: ID of run to get logs for. - responses: - '200': - description: all logs for a run - content: - application/json: - schema: - $ref: "#/components/schemas/Logs" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/labels': - get: - tags: - - Tasks - summary: list all labels for a task - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: taskID - schema: - type: string - required: true - description: ID of the task - responses: - '200': - description: a list of all labels for a task - content: - application/json: - schema: - $ref: "#/components/schemas/LabelsResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Tasks - summary: add a label to a task - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: taskID - schema: - type: string - required: true - description: ID of the task - requestBody: - description: label to add - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LabelMapping" - responses: - '200': - description: a label for a task - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/labels/{labelID}': - delete: - tags: - - Tasks - summary: delete a label from a task config - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: taskID - schema: - type: string - required: true - description: ID of the task config - - in: path - name: labelID - schema: - type: string - required: true - description: the label ID - responses: - '204': - description: delete has been accepted - '404': - description: task config not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /me: - get: - x-generated: true - tags: - - Users - summary: Returns currently authenticated user - responses: - '200': - description: currently authenticated user - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /me/password: - put: - tags: - - Users - summary: Update password - security: - - basicAuth: [] - requestBody: - description: new password - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PasswordResetBody" - responses: - '204': - description: password succesfully updated - default: - description: unsuccessful authentication - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/members': - get: - tags: - - Users - - Tasks - summary: List all task members - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of the task - responses: - '200': - description: a list of users who have member 
privileges for a task - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Tasks - summary: Add task member - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of the task - requestBody: - description: user to add as member - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: added to task members - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/members/{userID}': - delete: - tags: - - Users - - Tasks - summary: removes a member from a task - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of member to remove - - in: path - name: taskID - schema: - type: string - required: true - description: ID of the task - responses: - '204': - description: member removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/owners': - get: - tags: - - Users - - Tasks - summary: List all task owners - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of the task - responses: - '200': - description: a list of users who have owner privileges for a task - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Tasks - summary: Add task owner - parameters: - - in: path - name: taskID - schema: - type: string - required: true - description: ID of the task - requestBody: - description: user to add as owner - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: added to task owners - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/tasks/{taskID}/owners/{userID}': - delete: - tags: - - Users - - Tasks - summary: removes an owner from a task - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of owner to remove - - in: path - name: taskID - schema: - type: string - required: true - description: ID of the task - responses: - '204': - description: owner removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /users: - get: - tags: - - Users - summary: List all users - responses: - '200': - description: a list of users - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - summary: Create a user - requestBody: - description: user to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: user created - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description:
unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/users/{userID}': - get: - tags: - - Users - summary: Retrieve a user - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of user to get - responses: - '200': - description: user details - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - tags: - - Users - summary: Update a user - requestBody: - description: user update to apply - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of user to update - responses: - '200': - description: user updated - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - tags: - - Users - summary: deletes a user - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of user to delete - responses: - '204': - description: user deleted - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/users/{userID}/password': - put: - tags: - - Users - summary: Update password - security: - - basicAuth: [] - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of the user - requestBody: - description: new password - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PasswordResetBody" - responses: - '200': - description: user details - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unsuccessful authentication - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/labels': - get: - tags: - - Views - summary: list all labels for a view - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '200': - description: a list of all labels for a view - content: - application/json: - schema: - $ref: "#/components/schemas/LabelsResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Views - summary: add a label to a view - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - requestBody: - description: label to add - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LabelMapping" - responses: - '200': - description: a label for a view - content: - application/json: - schema: - $ref: "#/components/schemas/LabelResponse" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/labels/{labelID}': - delete: - tags: - - Views - summary: delete a label from a view config - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view config - - in: path - name: labelID - schema: - 
type: string - required: true - description: the label ID - responses: - '204': - description: delete has been accepted - '404': - description: view config not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/members': - get: - tags: - - Users - - Views - summary: List all view members - parameters: - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '200': - description: a list of users who have member privileges for a view - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Views - summary: Add view member - parameters: - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - requestBody: - description: user to add as member - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: added to view members - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/members/{userID}': - delete: - tags: - - Users - - Views - summary: removes a member from a view - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of member to remove - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '204': - description: member removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/owners': - get: - tags: - - Users - - Views - summary: List all view owners - parameters: - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '200': - description: a list of users who have owner privileges for a view - content: - application/json: - schema: - $ref: "#/components/schemas/Users" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Views - summary: Add view owner - parameters: - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - requestBody: - description: user to add as owner - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/User" - responses: - '201': - description: added to view owners - content: - application/json: - schema: - $ref: "#/components/schemas/User" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/owners/{userID}': - delete: - tags: - - Users - - Views - summary: removes an owner from a view - parameters: - - in: path - name: userID - schema: - type: string - required: true - description: ID of owner to remove - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '204': - description: owner removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" -components: 
- parameters: - TraceSpan: - in: header - name: Zap-Trace-Span - description: OpenTracing span context - example: - trace_id: '1' - span_id: '1' - baggage: - key: value - required: false - schema: - type: string - schemas: - LanguageRequest: - description: flux query to be analyzed. - type: object - required: - - query - properties: - query: - description: flux query script to be analyzed - type: string - Query: - description: query influx with specified return formatting. The spec and query fields are mutually exclusive. - type: object - required: - - query - properties: - query: - description: query script to execute. - type: string - spec: - $ref: "#/components/schemas/QuerySpecification" - type: - description: type of query - type: string - default: flux - enum: - - flux - - influxql - db: - description: required for influxql type queries - type: string - rp: - description: required for influxql type queries - type: string - cluster: - description: required for influxql type queries - type: string - dialect: - $ref: "#/components/schemas/Dialect" - ASTResponse: - description: contains the AST for the supplied Flux query - type: object - properties: - ast: - description: the AST of the supplied Flux query - type: object - QuerySpecification: - description: consists of a set of operations and a set of edges between those operations that instruct the query engine how to operate. - type: object - properties: - operations: - type: array - items: - type: object - properties: - kind: - description: name of the operation to perform - type: string - id: - description: identifier for this operation; it must be unique per query specification; used in edges - type: string - spec: - description: set of properties that specify details of the operation. These vary by the kind of operation. - type: object - edges: - description: list of edges declaring a parent-child id relationship between operations - type: array - items: - type: object - properties: - parent: - description: id of the parent node of the child within the graph of operations - type: string - child: - description: id of child node of parent within the graph of operations - type: string - resources: - description: optional set of constraints on the resources the query can consume - type: object - properties: - priority: - description: priority of the query - oneOf: - - type: string - description: lower value will move to the front of the priority queue - pattern: '^\d+$' - - type: string - description: constants to represent the extreme high and low priorities; high is effectively 0. - enum: - - high - - low - concurrency_quota: - description: number of concurrent workers allowed to process this query; 0 indicates the planner can pick the optimal concurrency. - type: integer - default: 0 - memory_bytes_quota: - description: number of bytes of RAM this query may consume; 0 means unlimited. 
- type: integer - default: 0 - dialect: - $ref: "#/components/schemas/Dialect" - Dialect: - description: dialect is a set of options to change the default CSV output format; https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions - type: object - properties: - header: - description: if true, the results will contain a header row - type: boolean - default: true - delimiter: - description: separator between cells; the default is , - type: string - default: "," - maxLength: 1 - minLength: 1 - annotations: - description: https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns - type: array - items: - type: string - enum: - - "group" - - "datatype" - - "default" - uniqueItems: true - commentPrefix: - description: character prefixed to comment strings - type: string - default: "#" - maxLength: 1 - minLength: 0 - dateTimeFormat: - description: format of timestamps - type: string - default: "RFC3339" - enum: - - RFC3339 - - RFC3339Nano - Permission: - required: [action, resource] - properties: - action: - type: string - enum: - - read - - write - resource: - type: object - required: [type] - properties: - type: - type: string - enum: - - authorizations - - buckets - - dashboards - - orgs - - sources - - tasks - - telegrafs - - users - id: - type: string - nullable: true - description: if id is set that is a permission for a specific resource. if it is not set it is a permission for all resources of that resource type. - name: - type: string - nullable: true - description: optional name of the resource if the resource has a name field. - orgID: - type: string - nullable: true - description: if orgID is set that is a permission for all resources owned by that org. if it is not set it is a permission for all resources of that resource type. - org: - type: string - nullable: true - description: optional name of the organization identified by orgID. - Authorization: - required: [orgID, permissions] - properties: - orgID: - type: string - description: ID of org that authorization is scoped to. - status: - description: if inactive the token is inactive and requests using the token will be rejected. - default: active - type: string - enum: - - active - - inactive - description: - type: string - description: A description of the token. - permissions: - type: array - minLength: 1 - description: List of permissions for an auth. An auth must have at least one Permission. - items: - $ref: "#/components/schemas/Permission" - id: - readOnly: true - type: string - token: - readOnly: true - type: string - description: Passed via the Authorization Header and Token Authentication type. - userID: - readOnly: true - type: string - description: ID of user that created and owns the token. - user: - readOnly: true - type: string - description: Name of user that created and owns the token. - org: - readOnly: true - type: string - description: Name of the org token is scoped to. 
- links: - type: object - readOnly: true - example: - self: "/api/v2/authorizations/1" - user: "/api/v2/users/12" - properties: - self: - readOnly: true - type: string - format: uri - user: - readOnly: true - type: string - format: uri - Authorizations: - type: object - properties: - links: - readOnly: true - $ref: "#/components/schemas/Links" - authorizations: - type: array - items: - $ref: "#/components/schemas/Authorization" - Bucket: - properties: - links: - type: object - readOnly: true - example: - self: "/api/v2/buckets/1" - org: "/api/v2/orgs/2" - write: "/api/v2/write?org=myorg" - properties: - self: - readOnly: true - type: string - format: uri - org: - readOnly: true - type: string - format: uri - write: - readOnly: true - type: string - format: uri - id: - readOnly: true - type: string - owners: - $ref: "#/components/schemas/Owners" - name: - type: string - organization: - type: string - rp: - type: string - organizationID: - type: string - retentionRules: - type: array - description: rules to expire or retain data. No rules means data never expires. - items: - type: object - properties: - type: - type: string - default: expire - enum: - - expire - everySeconds: - type: integer - description: duration in seconds for how long data will be kept in the database. - example: 86400 - minimum: 1 - required: [type, everySeconds] - required: [name, retentionRules] - Buckets: - type: object - properties: - links: - readOnly: true - $ref: "#/components/schemas/Links" - buckets: - type: array - items: - $ref: "#/components/schemas/Bucket" - Link: - type: object - readOnly: true - description: URI of resource. - properties: - href: - type: string - format: uri - required: [href] - Links: - type: object - properties: - next: - $ref: "#/components/schemas/Link" - self: - $ref: "#/components/schemas/Link" - prev: - $ref: "#/components/schemas/Link" - required: [self] - Logs: - type: object - properties: - events: - readOnly: true - type: array - items: - $ref: "#/components/schemas/LogEvent" - LogEvent: - type: object - properties: - time: - readOnly: true - description: Time event occurred, RFC3339Nano. - type: string - format: date-time - message: - readOnly: true - description: A description of the event that occurred. - type: string - example: Halt and catch fire - Organization: - properties: - links: - type: object - readOnly: true - example: - self: "/api/v2/orgs/1" - users: "/api/v2/orgs/1/users" - buckets: "/api/v2/buckets?org=myorg" - tasks: "/api/v2/tasks?org=myorg" - dashboards: "/api/v2/dashboards?org=myorg" - properties: - self: - readOnly: true - type: string - format: uri - users: - readOnly: true - type: string - format: uri - buckets: - readOnly: true - type: string - format: uri - tasks: - readOnly: true - type: string - format: uri - dashboards: - readOnly: true - type: string - format: uri - id: - readOnly: true - type: string - name: - type: string - status: - description: if inactive the organization is inactive. 
- default: active - type: string - enum: - - active - - inactive - owners: - $ref: "#/components/schemas/Owners" - required: [name] - Organizations: - type: object - properties: - links: - $ref: "#/components/schemas/Links" - orgs: - type: array - items: - $ref: "#/components/schemas/Organization" - Owners: - properties: - users: - $ref: "#/components/schemas/Users" - organizations: - $ref: "#/components/schemas/Organizations" - Run: - properties: - id: - readOnly: true - type: string - taskID: - readOnly: true - type: string - status: - readOnly: true - type: string - enum: - - scheduled - - executing - - failed - - success - scheduledFor: - description: Time used for run's "now" option, RFC3339. - type: string - format: date-time - startedAt: - readOnly: true - description: Time run started executing, RFC3339Nano. - type: string - format: date-time - finishedAt: - readOnly: true - description: Time run finished executing, RFC3339Nano. - type: string - format: date-time - requestedAt: - readOnly: true - description: Time run was manually requested, RFC3339Nano. - type: string - format: date-time - error: - $ref: "#/components/schemas/Error" - log: - readOnly: true - description: Link to the full logs for a run. - type: string - TaskCreateRequest: - properties: - orgID: - description: The ID of the organization that owns this Task. - type: string - status: - description: Starting state of the task. 'inactive' tasks are not run until they are updated to 'active' - default: active - type: string - enum: - - active - - inactive - flux: - description: The Flux script to run for this task. - type: string - required: [orgID, flux] - TaskUpdateRequest: - properties: - status: - description: Starting state of the task. 'inactive' tasks are not run until they are updated to 'active' - default: active - type: string - enum: - - active - - inactive - flux: - description: The Flux script to run for this task. - type: string - Task: - type: object - properties: - id: - readOnly: true - type: string - orgID: - description: The ID of the organization that owns this Task. - type: string - name: - description: A description of the task. - type: string - status: - description: The current status of the task. When updated to 'inactive', cancels all queued jobs of this task. - default: active - type: string - enum: - - active - - inactive - owner: - $ref: "#/components/schemas/User" - labels: - $ref: "#/components/schemas/Labels" - flux: - description: The Flux script to run for this task. - type: string - every: - description: A simple task repetition schedule; parsed from Flux. - type: string - cron: - description: A task repetition schedule in the form '* * * * * *'; parsed from Flux. - type: string - offset: - description: Duration to delay after the schedule, before executing the task; parsed from flux. - type: string - latest_completed: - description: Timestamp of latest scheduled, completed run, RFC3339. 
- type: string - format: date-time - readOnly: true - links: - type: object - readOnly: true - example: - self: "/api/v2/tasks/1" - owners: "/api/v2/tasks/1/owners" - members: "/api/v2/tasks/1/members" - runs: "/api/v2/tasks/1/runs" - logs: "/api/v2/tasks/1/logs" - properties: - self: - type: string - format: uri - owners: - type: string - format: uri - members: - type: string - format: uri - runs: - type: string - format: uri - logs: - type: string - format: uri - required: [name, organization, flux] - ResourceOwner: - allOf: - - $ref: "#/components/schemas/User" - - type: object - properties: - role: - type: string - default: owner - enum: - - owner - ResourceOwners: - type: object - properties: - links: - type: object - properties: - self: - type: string - format: uri - users: - type: array - items: - $ref: "#/components/schemas/ResourceOwner" - User: - properties: - id: - readOnly: true - type: string - name: - type: string - status: - description: if inactive the user is inactive. - default: active - type: string - enum: - - active - - inactive - links: - type: object - properties: - self: - type: string - format: uri - required: [name] - Users: - type: object - properties: - links: - type: object - properties: - self: - type: string - format: uri - users: - type: array - items: - $ref: "#/components/schemas/User" - FluxSuggestions: - type: object - properties: - funcs: - type: object - properties: - name: - type: string - params: - type: object - FluxLinks: - type: object - properties: - ast: - type: string - format: uri - self: - type: string - format: uri - suggestions: - type: string - format: uri - Routes: - properties: - sources: - type: string - format: uri - dashboards: - type: string - format: uri - query: - type: string - format: uri - write: - type: string - format: uri - orgs: - type: string - format: uri - auths: - type: string - format: uri - buckets: - type: string - format: uri - users: - type: string - format: uri - tasks: - type: string - format: uri - system: - type: object - properties: - metrics: - type: string - format: uri - debug: - type: string - format: uri - health: - type: string - format: uri - external: - type: object - properties: - statusFeed: - type: string - format: uri - flux: - $ref: "#/components/schemas/FluxLinks" - Error: - properties: - code: - description: code is the machine-readable error code. - readOnly: true - type: string - enum: - - internal error - - not found - - conflict - - invalid - - empty value - message: - readOnly: true - description: message is a human-readable message. - type: string - op: - readOnly: true - description: op describes the logical code operation during error. Useful for debugging. - type: string - err: - readOnly: true - description: err is a stack of errors that occurred during processing of the request. Useful for debugging. - type: string - required: [code, message, op, err] - WritePrecision: - type: string - enum: - - ms - - s - - us - - u - - ns - LineProtocolError: - properties: - code: - description: code is the machine-readable error code. - readOnly: true - type: string - enum: - - internal error - - not found - - conflict - - invalid - - empty value - message: - readOnly: true - description: message is a human-readable message. - type: string - op: - readOnly: true - description: op describes the logical code operation during error. Useful for debugging. - type: string - err: - readOnly: true - description: err is a stack of errors that occurred during processing of the request. Useful for debugging. 
- type: string - line: - readOnly: true - description: first line within sent body containing malformed data - type: integer - format: int32 - required: [code, message, op, err] - LineProtocolLengthError: - properties: - code: - description: code is the machine-readable error code. - readOnly: true - type: string - enum: - - invalid - message: - readOnly: true - description: message is a human-readable message. - type: string - maxLength: - readOnly: true - description: max length in bytes for a body of line-protocol. - type: integer - format: int32 - required: [code, message, maxLength] - Field: - type: object - properties: - value: - description: >- - value is the value of the field. Meaning of the value is implied by - the `type` key - type: string - type: - description: >- - type describes the field type. func is a function; field is a field - reference - type: string - enum: - - func - - field - - integer - - number - - regex - - wildcard - alias: - description: >- - Alias overrides the field name in the returned response. Applies only - if type is `func` - type: string - args: - description: Args are the arguments to the function - type: array - items: - $ref: '#/components/schemas/Field' - QueryConfig: - type: object - required: - - database - - measurement - - retentionPolicy - - areTagsAccepted - - tags - - groupBy - - fields - properties: - id: - type: string - database: - type: string - measurement: - type: string - retentionPolicy: - type: string - areTagsAccepted: - type: boolean - rawText: - type: string - tags: - type: object - groupBy: - type: object - properties: - time: - type: string - tags: - type: array - items: - type: string - required: - - time - - tags - fields: - type: array - items: - $ref: '#/components/schemas/Field' - range: - type: object - properties: - lower: - type: string - upper: - type: string - required: - - lower - - upper - DashboardQuery: - type: object - required: - - query - properties: - label: - type: string - description: Optional Y-axis user-facing label - range: - description: Optional default range of the Y-axis - type: object - required: - - upper - - lower - properties: - upper: - description: Upper bound of the display range of the Y-axis - type: integer - format: int64 - lower: - description: Lower bound of the display range of the Y-axis - type: integer - format: int64 - query: - type: string - source: - type: string - format: uri - description: Optional URI for data source for this query - queryConfig: - $ref: '#/components/schemas/QueryConfig' - name: - type: string - description: An optional word or phrase that refers to the query - Axis: - type: object - description: A description of a particular axis for a visualization - properties: - bounds: - type: array - minItems: 0 - maxItems: 2 - description: >- - The extents of an axis in the form [lower, upper]. Clients determine - whether bounds are to be inclusive or exclusive of their limits - items: - type: integer - format: int64 - label: - description: label is a description of this Axis - type: string - prefix: - description: Prefix represents a label prefix for formatting axis values. - type: string - suffix: - description: Suffix represents a label suffix for formatting axis values. - type: string - base: - description: Base represents the radix for formatting axis values. - type: string - scale: - description: 'Scale is the axis formatting scale. 
Supported: "log", "linear"' - type: string - DashboardColor: - type: object - description: Color defines an encoding of data value into color space - properties: - id: - description: ID is the unique id of the view color - type: string - type: - description: Type is how the color is used. - type: string - enum: - - min - - max - - threshold - hex: - description: Hex is the hex number of the color - type: string - maxLength: 7 - minLength: 7 - name: - description: Name is the user-facing name of the hex color - type: string - value: - description: Value is the data value mapped to this color - type: number - format: float - RenamableField: - description: Describes a field that can be renamed and made visible or invisible - type: object - properties: - internalName: - description: This is the calculated name of a field - readOnly: true - type: string - displayName: - description: This is the name that a field is renamed to by the user - type: string - visible: - description: Indicates whether this field should be visible on the table - type: boolean - LogViewProperties: - description: Contains the configuration for the log viewer - type: object - required: - - columns - - shape - - type - properties: - shape: - type: string - enum: ["chronograf-v2"] - type: - type: string - enum: ["log-viewer"] - columns: - description: Defines the order, names, and visibility of columns in the log - viewer table - type: array - items: - "$ref": "#/components/schemas/LogViewerColumn" - example: - columns: - - name: severity - position: 0 - settings: - - type: label - value: icon - - type: label - value: text - - type: visibility - value: visible - - type: color - name: ruby - value: emergency - - type: color - name: rainforest - value: info - - type: displayName - value: Log Severity! - - name: messages - position: 1 - settings: - - type: visibility - value: hidden - LogViewerColumn: - description: Contains a specific column's settings. - type: object - required: - - name - - position - - settings - properties: - name: - description: Unique identifier name of the column - type: string - position: - type: integer - format: int32 - settings: - description: Composable settings options for the column - type: array - items: - description: Type and value and optional name of a setting. - type: object - required: - - type - - value - properties: - type: - type: string - value: - type: string - name: - type: string - example: - name: severity - position: 0 - settings: - - type: label - value: icon - - type: label - value: text - - type: visibility - value: visible - - type: color - name: ruby - value: emergency - - type: color - name: rainforest - value: info - - type: displayName - value: Log Severity! 
- V1ViewProperties: - properties: - type: - type: string - enum: ["chronograf-v1"] - queries: - type: array - items: - $ref: "#/components/schemas/DashboardQuery" - axes: - description: The viewport for a View's visualizations - type: object - properties: - x: - $ref: '#/components/schemas/Axis' - y: - $ref: '#/components/schemas/Axis' - y2: - $ref: '#/components/schemas/Axis' - graphType: - description: The viewport for a view's graph/visualization - type: string - enum: - - single-stat - - line - - line-plus-single-stat - - line-stacked - - line-stepplot - - bar - - gauge - - table - default: line - colors: - description: Colors define color encoding of data into a visualization - type: array - items: - $ref: "#/components/schemas/DashboardColor" - legend: - description: Legend defines the encoding of data into a view's legend - type: object - properties: - type: - description: type is the style of the legend - type: string - enum: - - static - orientation: - description: >- - orientation is the location of the legend with respect to the view - graph - type: string - enum: - - top - - bottom - - left - - right - tableOptions: - properties: - verticalTimeAxis: - description: >- - verticalTimeAxis describes the orientation of the table by - indicating whether the time axis will be displayed vertically - type: boolean - sortBy: - $ref: "#/components/schemas/RenamableField" - wrapping: - description: wrapping describes the text wrapping style to be used in table views - type: string - enum: - - truncate - - wrap - - single-line - fixFirstColumn: - description: >- - fixFirstColumn indicates whether the first column of the table - should be locked - type: boolean - fieldOptions: - description: >- - fieldOptions represent the fields retrieved by the query with - customization options - type: array - items: - $ref: '#/components/schemas/RenamableField' - timeFormat: - description: >- - timeFormat describes the display format for time values according to - moment.js date formatting - type: string - decimalPoints: - description: >- - decimalPoints indicates whether and how many digits to show after - the decimal point - type: object - properties: - isEnforced: - description: Indicates whether the decimal point setting should be enforced - type: boolean - digits: - description: The number of digits after the decimal to display - type: integer - EmptyViewProperties: - properties: - type: - type: string - enum: ["empty"] - ConstantMacroProperties: - properties: - type: - type: string - enum: ["constant"] - values: - type: array - items: - type: string - MapMacroProperties: - properties: - type: - type: string - enum: ["map"] - values: - type: object - QueryMacroProperties: - properties: - type: - type: string - enum: ["query"] - query: - type: string - queryType: - type: string - Macro: - type: object - properties: - links: - type: object - properties: - self: - type: string - format: uri - id: - readOnly: true - type: string - name: - type: string - selected: - type: array - items: - type: string - arguments: - type: object - oneOf: - - $ref: "#/components/schemas/QueryMacroProperties" - - $ref: "#/components/schemas/ConstantMacroProperties" - - $ref: "#/components/schemas/MapMacroProperties" - Macros: - type: object - example: - macros: - - id: '1221432' - name: ":ok:" - selected: - - hello - arguments: - type: constant - values: - - howdy - - hello - - hi - - yo - - oy - - id: '1221432' - name: ":ok:" - selected: - - c - arguments: - type: map - values: - a: fdjaklfdjkldsfjlkjdsa - b: dfaksjfkljekfajekdljfas - 
c: fdjksajfdkfeawfeea - - id: '1221432' - name: ":ok:" - selected: - - host - arguments: - type: query - query: 'from(bucket: "foo") |> showMeasurements()' - language: flux - properties: - macros: - $ref: "#/components/schemas/Macro" - View: - properties: - links: - type: object - readOnly: true - properties: - self: - type: string - id: - readOnly: true - type: string - name: - type: string - properties: - oneOf: - - $ref: "#/components/schemas/V1ViewProperties" - - $ref: "#/components/schemas/EmptyViewProperties" - - $ref: "#/components/schemas/LogViewProperties" - Views: - type: object - properties: - links: - type: object - properties: - self: - type: string - views: - type: array - items: - $ref: "#/components/schemas/View" - CellUpdate: - type: object - properties: - name: - type: string - CreateCell: - type: object - properties: - name: - type: string - x: - type: integer - format: int32 - "y": - type: integer - format: int32 - w: - type: integer - format: int32 - h: - type: integer - format: int32 - usingView: - type: string - description: makes a copy of the provided view - AnalyzeQueryResponse: - type: object - properties: - errors: - type: array - items: - type: object - properties: - line: - type: integer - column: - type: integer - character: - type: integer - message: - type: string - Cell: - type: object - properties: - id: - type: string - links: - type: object - properties: - self: - type: string - view: - type: string - name: - type: string - x: - type: integer - format: int32 - "y": - type: integer - format: int32 - w: - type: integer - format: int32 - h: - type: integer - format: int32 - Cells: - type: array - items: - $ref: "#/components/schemas/Cell" - Dashboard: - properties: - links: - type: object - properties: - self: - type: string - cells: - type: string - id: - readOnly: true - type: string - orgID: - type: string - description: id of organization that owns dashboard - name: - type: string - description: user-facing name of the dashboard - description: - type: string - description: user-facing description of the dashboard - meta: - type: object - properties: - createdAt: - type: string - format: date - updatedAt: - type: string - format: date - cells: - $ref: "#/components/schemas/Cells" - labels: - $ref: "#/components/schemas/Labels" - Dashboards: - type: object - properties: - links: - $ref: "#/components/schemas/Links" - dashboards: - type: array - items: - $ref: "#/components/schemas/Dashboard" - Source: - type: object - properties: - links: - type: object - properties: - self: - type: string - query: - type: string - health: - type: string - buckets: - type: string - id: - type: string - orgID: - type: string - name: - type: string - type: - type: string - enum: ["v1","v2","self"] - url: - type: string - format: uri - insecureSkipVerify: - type: boolean - telegraf: - type: string - token: - type: string - username: - type: string - password: - type: string - sharedSecret: - type: string - metaUrl: - type: string - format: uri - defaultRP: - type: string - languages: - type: array - readOnly: true - items: - type: string - enum: - - flux - - influxql - - spec - Sources: - type: object - properties: - links: - type: object - properties: - self: - type: string - format: uri - sources: - type: array - items: - $ref: "#/components/schemas/Source" - ScraperTargetRequest: - type: object - properties: - name: - type: string - description: name of the scraper target - type: - type: string - description: type of the metrics to be parsed - enum: [prometheus] - url: - 
type: string - description: url of the metrics endpoint - example: http://localhost:9090/metrics - orgID: - type: string - description: id of the organization - bucketID: - type: string - description: id of the bucket to be written - ScraperTargetResponse: - type: object - allOf: - - $ref: "#/components/schemas/ScraperTargetRequest" - - type: object - properties: - id: - type: string - readOnly: true - organization: - type: string - description: name of the organization - bucket: - type: string - description: name of the bucket - links: - readOnly: true - $ref: "#/components/schemas/Links" - ScraperTargetResponses: - type: object - properties: - configurations: - type: array - items: - $ref: "#/components/schemas/ScraperTargetResponse" - TelegrafRequest: - type: object - properties: - name: - type: string - agent: - type: object - properties: - collectionInterval: - type: integer - plugins: - type: array - items: - $ref: "#/components/schemas/TelegrafRequestPlugin" - organizationID: - type: string - TelegrafRequestPlugin: - type: object - discriminator: - propertyName: "name" - required: - - name - properties: - name: - type: string - TelegrafPluginInputCpu: - type: object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["cpu"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: "#/components/schemas/TelegrafPluginConfig" - TelegrafPluginInputCpuRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputCpu" - TelegrafPluginInputDisk: - type: object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["disk"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: "#/components/schemas/TelegrafPluginConfig" - TelegrafPluginInputDiskRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputDisk" - TelegrafPluginInputDiskio: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["diskio"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: "#/components/schemas/TelegrafPluginConfig" - TelegrafPluginInputDiskioRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputDiskio" - TelegrafPluginInputDocker: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["docker"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginInputDockerConfig' - TelegrafPluginInputDockerRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputDocker" - TelegrafPluginInputFile: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["file"] - type: - type: string - enum: [input] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginInputFileConfig' - TelegrafPluginInputFileRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputFile" - TelegrafPluginInputKernel: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["kernel"] - type: - type: string - enum: ["input"] - 
comment: - type: string - config: - $ref: "#/components/schemas/TelegrafPluginConfig" - TelegrafPluginInputKernelRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputKernel" - TelegrafPluginInputKubernetes: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["kubernetes"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginInputKubernetesConfig' - TelegrafPluginInputKubernetesRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputKubernetes" - TelegrafPluginInputLogParser: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["logparser"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginInputLogParserConfig' - TelegrafPluginInputLogParserRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputLogParser" - TelegrafPluginInputMem: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["mem"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: "#/components/schemas/TelegrafPluginConfig" - TelegrafPluginInputMemRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputMem" - TelegrafPluginInputNetResponse: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["net_response"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: "#/components/schemas/TelegrafPluginConfig" - TelegrafPluginInputNetResponseRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputNetResponse" - TelegrafPluginInputNet: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["net"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: "#/components/schemas/TelegrafPluginConfig" - TelegrafPluginInputNetRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputNet" - TelegrafPluginInputNginx: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["nginx"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: "#/components/schemas/TelegrafPluginConfig" - TelegrafPluginInputNginxRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputNginx" - TelegrafPluginInputProcesses: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["processes"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: "#/components/schemas/TelegrafPluginConfig" - TelegrafPluginInputProcessesRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputProcesses" - TelegrafPluginInputProcstat: - type: - object - required: - - name - - type - - config - properties: - name: 
- type: string - enum: ["procstat"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginInputProcstatConfig' - TelegrafPluginInputProcstatRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputProcstat" - TelegrafPluginInputPrometheus: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["prometheus"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginInputPrometheusConfig' - TelegrafPluginInputPrometheusRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputPrometheus" - TelegrafPluginInputRedis: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["redis"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginInputRedisConfig' - TelegrafPluginInputRedisRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputRedis" - TelegrafPluginInputSyslog: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["syslog"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginInputSyslogConfig' - TelegrafPluginInputSyslogRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputSyslog" - TelegrafPluginInputSwap: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["swap"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginConfig' - TelegrafPluginInputSwapRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputSwap" - TelegrafPluginInputSystem: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["system"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginConfig' - TelegrafPluginInputSystemRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputSystem" - TelegrafPluginInputTail: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["tail"] - type: - type: string - enum: ["input"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginConfig' - TelegrafPluginInputTailRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginInputTail" - TelegrafPluginOutputFile: - type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["file"] - type: - type: string - enum: ["output"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginOutputFileConfig' - TelegrafPluginOutputFileRequest: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginOutputFile" - TelegrafPluginOutputInfluxDBV2: - 
type: - object - required: - - name - - type - - config - properties: - name: - type: string - enum: ["influxdb_v2"] - type: - type: string - enum: ["output"] - comment: - type: string - config: - $ref: '#/components/schemas/TelegrafPluginOutputInfluxDBV2Config' - TelegrafPluginOutputInfluxDBV2Request: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequestPlugin" - - $ref: "#/components/schemas/TelegrafPluginOutputInfluxDBV2" - TelegrafRequestConfig: - oneOf: - - $ref: '#/components/schemas/TelegrafPluginConfig' - - $ref: '#/components/schemas/TelegrafPluginInputDockerConfig' - - $ref: '#/components/schemas/TelegrafPluginInputFileConfig' - - $ref: '#/components/schemas/TelegrafPluginInputKubernetesConfig' - - $ref: '#/components/schemas/TelegrafPluginInputLogParserConfig' - - $ref: '#/components/schemas/TelegrafPluginInputProcstatConfig' - - $ref: '#/components/schemas/TelegrafPluginInputPrometheusConfig' - - $ref: '#/components/schemas/TelegrafPluginInputRedisConfig' - - $ref: '#/components/schemas/TelegrafPluginInputSyslogConfig' - - $ref: '#/components/schemas/TelegrafPluginOutputFileConfig' - - $ref: '#/components/schemas/TelegrafPluginOutputInfluxDBV2Config' - Telegraf: - type: object - allOf: - - $ref: "#/components/schemas/TelegrafRequest" - - type: object - properties: - id: - type: string - links: - type: object - properties: - self: - type: string - owners: - $ref: "#/components/schemas/Owners" - Telegrafs: - type: object - properties: - configurations: - type: array - items: - $ref: "#/components/schemas/Telegraf" - TelegrafPluginConfig: - type: object - TelegrafPluginInputDockerConfig: - type: object - required: - - endpoint - properties: - endpoint: - type: string - TelegrafPluginInputFileConfig: - type: object - properties: - files: - type: array - items: - type: string - TelegrafPluginInputKubernetesConfig: - type: object - properties: - url: - type: string - format: uri - TelegrafPluginInputLogParserConfig: - type: object - properties: - files: - type: array - items: - type: string - TelegrafPluginInputProcstatConfig: - type: object - properties: - exe: - type: string - TelegrafPluginInputPrometheusConfig: - type: object - properties: - urls: - type: array - items: - type: string - format: uri - TelegrafPluginInputRedisConfig: - type: object - properties: - servers: - type: array - items: - type: string - password: - type: string - TelegrafPluginInputSyslogConfig: - type: object - properties: - server: - type: string - TelegrafPluginOutputFileConfig: - type: object - required: - - files - properties: - files: - type: array - items: - type: object - properties: - type: - type: string - enum: [stdout, path] - path: - type: string - TelegrafPluginOutputInfluxDBV2Config: - type: object - required: - - urls - - token - - organization - - bucket - properties: - urls: - type: array - items: - type: string - format: uri - token: - type: string - organization: - type: string - bucket: - type: string - IsOnboarding: - type: object - properties: - allowed: - type: boolean - OnboardingRequest: - type: object - properties: - username: - type: string - password: - type: string - org: - type: string - bucket: - type: string - retentionPeriodHrs: - type: integer - required: - - username - - password - - org - - bucket - OnboardingResponse: - type: object - properties: - user: - $ref: "#/components/schemas/User" - org: - $ref: "#/components/schemas/Organization" - bucket: - $ref: "#/components/schemas/Bucket" - auth: - $ref: "#/components/schemas/Authorization" - 
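The OnboardingRequest/OnboardingResponse pair above describes the initial-setup flow; a minimal client-side sketch follows. The base URL and the /api/v2/setup path are assumptions, since the onboarding path item is not shown in this excerpt; the field names come straight from the schema.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// onboardingRequest mirrors the OnboardingRequest schema: username, password,
// org, and bucket are required; retentionPeriodHrs is optional.
type onboardingRequest struct {
	Username           string `json:"username"`
	Password           string `json:"password"`
	Org                string `json:"org"`
	Bucket             string `json:"bucket"`
	RetentionPeriodHrs int    `json:"retentionPeriodHrs,omitempty"`
}

func main() {
	body, err := json.Marshal(onboardingRequest{
		Username: "admin",
		Password: "correct-horse-battery",
		Org:      "myorg",
		Bucket:   "my_bucket",
	})
	if err != nil {
		panic(err)
	}
	// http://localhost:9999/api/v2/setup is an assumed endpoint, not taken
	// from this excerpt of the spec.
	resp, err := http.Post("http://localhost:9999/api/v2/setup", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("onboarding status:", resp.Status)
}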
PasswordResetBody: - properties: - password: - type: string - required: - - password - Health: - type: object - properties: - name: - type: string - message: - type: string - checks: - type: array - items: - $ref: "#/components/schemas/Health" - status: - type: string - enum: - - unhealthy - - healthy - Labels: - type: array - items: - $ref: "#/components/schemas/Label" - Label: - type: object - properties: - id: - readOnly: true - type: string - name: - type: string - properties: - type: object - description: Key/Value pairs associated with this label. Keys can be removed by sending an update with an empty value. - example: {"color": "#ffb3b3", "description": "this is a description"} - LabelUpdate: - type: object - properties: - properties: - type: object - description: Key/Value pairs associated with this label. Keys can be removed by sending an update with an empty value. - example: {"color": "ffb3b3", "description": "this is a description"} - LabelMapping: - type: object - properties: - labelID: - type: string - LabelsResponse: - type: object - properties: - labels: - $ref: "#/components/schemas/Labels" - links: - $ref: "#/components/schemas/Links" - LabelResponse: - type: object - properties: - label: - $ref: "#/components/schemas/Label" - links: - $ref: "#/components/schemas/Links" - CreateProtoResourcesRequest: - properties: - orgID: - type: string - Proto: - properties: - links: - readOnly: true - type: object - properties: - dashboard: - type: string - format: uri - id: - readOnly: true - type: string - name: - readOnly: true - type: string - description: user-facing name of the proto - dashboards: - type: array - items: - $ref: "#/components/schemas/Dashboard" - views: - type: object - additionalProperties: - $ref: "#/components/schemas/View" - Protos: - properties: - protos: - type: array - items: - $ref: "#/components/schemas/Proto" \ No newline at end of file diff --git a/http/dashboard_service.go b/http/dashboard_service.go index 42b168dc10..40c017b36e 100644 --- a/http/dashboard_service.go +++ b/http/dashboard_service.go @@ -8,13 +8,36 @@ import ( "io/ioutil" "net/http" "path" - "strconv" platform "github.com/influxdata/influxdb" "github.com/julienschmidt/httprouter" "go.uber.org/zap" ) +// DashboardBackend is all services and associated parameters required to construct +// the DashboardHandler. +type DashboardBackend struct { + Logger *zap.Logger + + DashboardService platform.DashboardService + DashboardOperationLogService platform.DashboardOperationLogService + UserResourceMappingService platform.UserResourceMappingService + LabelService platform.LabelService + UserService platform.UserService +} + +func NewDashboardBackend(b *APIBackend) *DashboardBackend { + return &DashboardBackend{ + Logger: b.Logger.With(zap.String("handler", "dashboard")), + + DashboardService: b.DashboardService, + DashboardOperationLogService: b.DashboardOperationLogService, + UserResourceMappingService: b.UserResourceMappingService, + LabelService: b.LabelService, + UserService: b.UserService, + } +} + // DashboardHandler is the handler for the dashboard service type DashboardHandler struct { *httprouter.Router @@ -44,14 +67,16 @@ const ( ) // NewDashboardHandler returns a new instance of DashboardHandler. 
-func NewDashboardHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, userService platform.UserService) *DashboardHandler { +func NewDashboardHandler(b *DashboardBackend) *DashboardHandler { h := &DashboardHandler{ Router: NewRouter(), - Logger: zap.NewNop(), + Logger: b.Logger, - UserResourceMappingService: mappingService, - LabelService: labelService, - UserService: userService, + DashboardService: b.DashboardService, + DashboardOperationLogService: b.DashboardOperationLogService, + UserResourceMappingService: b.UserResourceMappingService, + LabelService: b.LabelService, + UserService: b.UserService, } h.HandlerFunc("POST", dashboardsPath, h.handlePostDashboard) @@ -69,17 +94,36 @@ func NewDashboardHandler(mappingService platform.UserResourceMappingService, lab h.HandlerFunc("GET", dashboardsIDCellsIDViewPath, h.handleGetDashboardCellView) h.HandlerFunc("PATCH", dashboardsIDCellsIDViewPath, h.handlePatchDashboardCellView) - h.HandlerFunc("POST", dashboardsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Member)) - h.HandlerFunc("GET", dashboardsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Member)) - h.HandlerFunc("DELETE", dashboardsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member)) + memberBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.DashboardsResourceType, + UserType: platform.Member, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", dashboardsIDMembersPath, newPostMemberHandler(memberBackend)) + h.HandlerFunc("GET", dashboardsIDMembersPath, newGetMembersHandler(memberBackend)) + h.HandlerFunc("DELETE", dashboardsIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - h.HandlerFunc("POST", dashboardsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Owner)) - h.HandlerFunc("GET", dashboardsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Owner)) - h.HandlerFunc("DELETE", dashboardsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner)) + ownerBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.DashboardsResourceType, + UserType: platform.Owner, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", dashboardsIDOwnersPath, newPostMemberHandler(ownerBackend)) + h.HandlerFunc("GET", dashboardsIDOwnersPath, newGetMembersHandler(ownerBackend)) + h.HandlerFunc("DELETE", dashboardsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - h.HandlerFunc("GET", dashboardsIDLabelsPath, newGetLabelsHandler(h.LabelService)) - h.HandlerFunc("POST", dashboardsIDLabelsPath, newPostLabelHandler(h.LabelService)) - h.HandlerFunc("DELETE", dashboardsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService)) + labelBackend := &LabelBackend{ + Logger: b.Logger.With(zap.String("handler", "label")), + LabelService: b.LabelService, + } + h.HandlerFunc("GET", dashboardsIDLabelsPath, newGetLabelsHandler(labelBackend)) + h.HandlerFunc("POST", dashboardsIDLabelsPath, newPostLabelHandler(labelBackend)) + h.HandlerFunc("DELETE", dashboardsIDLabelsIDPath, 
newDeleteLabelHandler(labelBackend)) + h.HandlerFunc("PATCH", dashboardsIDLabelsIDPath, newPatchLabelHandler(labelBackend)) return h } @@ -474,29 +518,14 @@ func decodeGetDashboardLogRequest(ctx context.Context, r *http.Request) (*getDas return nil, err } - opts := platform.DefaultOperationLogFindOptions - qp := r.URL.Query() - if v := qp.Get("desc"); v == "false" { - opts.Descending = false - } - if v := qp.Get("limit"); v != "" { - i, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - opts.Limit = i - } - if v := qp.Get("offset"); v != "" { - i, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - opts.Offset = i + opts, err := decodeFindOptions(ctx, r) + if err != nil { + return nil, err } return &getDashboardLogRequest{ DashboardID: i, - opts: opts, + opts: *opts, }, nil } diff --git a/http/dashboard_test.go b/http/dashboard_test.go index 9fd204fd1a..766b499641 100644 --- a/http/dashboard_test.go +++ b/http/dashboard_test.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "go.uber.org/zap" "io/ioutil" "net/http" "net/http/httptest" @@ -18,6 +19,19 @@ import ( "github.com/julienschmidt/httprouter" ) +// NewMockDashboardBackend returns a DashboardBackend with mock services. +func NewMockDashboardBackend() *DashboardBackend { + return &DashboardBackend{ + Logger: zap.NewNop().With(zap.String("handler", "dashboard")), + + DashboardService: mock.NewDashboardService(), + DashboardOperationLogService: mock.NewDashboardOperationLogService(), + UserResourceMappingService: mock.NewUserResourceMappingService(), + LabelService: mock.NewLabelService(), + UserService: mock.NewUserService(), + } +} + func TestService_handleGetDashboards(t *testing.T) { type fields struct { DashboardService platform.DashboardService @@ -247,7 +261,7 @@ func TestService_handleGetDashboards(t *testing.T) { }, args: args{ map[string][]string{ - "orgID": []string{"0000000000000001"}, + "orgID": {"0000000000000001"}, }, }, wants: wants{ @@ -309,11 +323,10 @@ func TestService_handleGetDashboards(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := tt.fields.LabelService - userService := mock.NewUserService() - h := NewDashboardHandler(mappingService, labelService, userService) - h.DashboardService = tt.fields.DashboardService + dashboardBackend := NewMockDashboardBackend() + dashboardBackend.LabelService = tt.fields.LabelService + dashboardBackend.DashboardService = tt.fields.DashboardService + h := NewDashboardHandler(dashboardBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -461,11 +474,9 @@ func TestService_handleGetDashboard(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewDashboardHandler(mappingService, labelService, userService) - h.DashboardService = tt.fields.DashboardService + dashboardBackend := NewMockDashboardBackend() + dashboardBackend.DashboardService = tt.fields.DashboardService + h := NewDashboardHandler(dashboardBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -594,11 +605,9 @@ func TestService_handlePostDashboard(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := 
NewDashboardHandler(mappingService, labelService, userService) - h.DashboardService = tt.fields.DashboardService + dashboardBackend := NewMockDashboardBackend() + dashboardBackend.DashboardService = tt.fields.DashboardService + h := NewDashboardHandler(dashboardBackend) b, err := json.Marshal(tt.args.dashboard) if err != nil { @@ -689,11 +698,9 @@ func TestService_handleDeleteDashboard(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewDashboardHandler(mappingService, labelService, userService) - h.DashboardService = tt.fields.DashboardService + dashboardBackend := NewMockDashboardBackend() + dashboardBackend.DashboardService = tt.fields.DashboardService + h := NewDashboardHandler(dashboardBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -868,11 +875,9 @@ func TestService_handlePatchDashboard(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewDashboardHandler(mappingService, labelService, userService) - h.DashboardService = tt.fields.DashboardService + dashboardBackend := NewMockDashboardBackend() + dashboardBackend.DashboardService = tt.fields.DashboardService + h := NewDashboardHandler(dashboardBackend) upd := platform.DashboardUpdate{} if tt.args.name != "" { @@ -977,11 +982,9 @@ func TestService_handlePostDashboardCell(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewDashboardHandler(mappingService, labelService, userService) - h.DashboardService = tt.fields.DashboardService + dashboardBackend := NewMockDashboardBackend() + dashboardBackend.DashboardService = tt.fields.DashboardService + h := NewDashboardHandler(dashboardBackend) b, err := json.Marshal(tt.args.cell) if err != nil { @@ -1062,11 +1065,9 @@ func TestService_handleDeleteDashboardCell(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewDashboardHandler(mappingService, labelService, userService) - h.DashboardService = tt.fields.DashboardService + dashboardBackend := NewMockDashboardBackend() + dashboardBackend.DashboardService = tt.fields.DashboardService + h := NewDashboardHandler(dashboardBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -1174,11 +1175,9 @@ func TestService_handlePatchDashboardCell(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mappingService := mock.NewUserResourceMappingService() - labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewDashboardHandler(mappingService, labelService, userService) - h.DashboardService = tt.fields.DashboardService + dashboardBackend := NewMockDashboardBackend() + dashboardBackend.DashboardService = tt.fields.DashboardService + h := NewDashboardHandler(dashboardBackend) upd := platform.CellUpdate{} if tt.args.x != 0 { @@ -1271,11 +1270,9 @@ func initDashboardService(f platformtesting.DashboardFields, t *testing.T) (plat } } - mappingService := mock.NewUserResourceMappingService() - 
labelService := mock.NewLabelService() - userService := mock.NewUserService() - h := NewDashboardHandler(mappingService, labelService, userService) - h.DashboardService = svc + dashboardBackend := NewMockDashboardBackend() + dashboardBackend.DashboardService = svc + h := NewDashboardHandler(dashboardBackend) server := httptest.NewServer(h) client := DashboardService{ Addr: server.URL, diff --git a/http/label_service.go b/http/label_service.go index 8f854629b9..e4f2c31524 100644 --- a/http/label_service.go +++ b/http/label_service.go @@ -5,12 +5,12 @@ import ( "context" "encoding/json" "fmt" + "go.uber.org/zap" "net/http" "path" platform "github.com/influxdata/influxdb" "github.com/julienschmidt/httprouter" - "go.uber.org/zap" ) // LabelHandler represents an HTTP API handler for labels @@ -293,8 +293,15 @@ func newLabelsResponse(ls []*platform.Label) *labelsResponse { } } +// LabelBackend is all services and associated parameters required to construct +// label handlers. +type LabelBackend struct { + Logger *zap.Logger + LabelService platform.LabelService +} + // newGetLabelsHandler returns a handler func for a GET to /labels endpoints -func newGetLabelsHandler(s platform.LabelService) http.HandlerFunc { +func newGetLabelsHandler(b *LabelBackend) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -304,17 +311,14 @@ func newGetLabelsHandler(s platform.LabelService) http.HandlerFunc { return } - labels, err := s.FindResourceLabels(ctx, req.filter) + labels, err := b.LabelService.FindResourceLabels(ctx, req.filter) if err != nil { EncodeError(ctx, err, w) return } if err := encodeResponse(ctx, w, http.StatusOK, newLabelsResponse(labels)); err != nil { - // TODO: this can potentially result in calling w.WriteHeader multiple times, we need to pass a logger in here - // some how. This isn't as simple as simply passing in a logger to this function since the time that this function - // is called is distinct from the time that a potential logger is set. - EncodeError(ctx, err, w) + logEncodingError(b.Logger, r, err) return } } @@ -346,7 +350,7 @@ func decodeGetLabelsRequest(ctx context.Context, r *http.Request) (*getLabelsReq } // newPostLabelHandler returns a handler func for a POST to /labels endpoints -func newPostLabelHandler(s platform.LabelService) http.HandlerFunc { +func newPostLabelHandler(b *LabelBackend) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -361,22 +365,19 @@ func newPostLabelHandler(s platform.LabelService) http.HandlerFunc { return } - if err := s.CreateLabelMapping(ctx, &req.Mapping); err != nil { + if err := b.LabelService.CreateLabelMapping(ctx, &req.Mapping); err != nil { EncodeError(ctx, err, w) return } - label, err := s.FindLabelByID(ctx, req.Mapping.LabelID) + label, err := b.LabelService.FindLabelByID(ctx, req.Mapping.LabelID) if err != nil { EncodeError(ctx, err, w) return } if err := encodeResponse(ctx, w, http.StatusCreated, newLabelResponse(label)); err != nil { - // TODO: this can potentially result in calling w.WriteHeader multiple times, we need to pass a logger in here - // some how. This isn't as simple as simply passing in a logger to this function since the time that this function - // is called is distinct from the time that a potential logger is set. 
- EncodeError(ctx, err, w) + logEncodingError(b.Logger, r, err) return } } @@ -419,8 +420,32 @@ func decodePostLabelMappingRequest(ctx context.Context, r *http.Request) (*postL return req, nil } +// newPatchLabelHandler returns a handler func for a PATCH to /labels endpoints +func newPatchLabelHandler(b *LabelBackend) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + req, err := decodePatchLabelRequest(ctx, r) + if err != nil { + EncodeError(ctx, err, w) + return + } + + label, err := b.LabelService.UpdateLabel(ctx, req.LabelID, req.Update) + if err != nil { + EncodeError(ctx, err, w) + return + } + + if err := encodeResponse(ctx, w, http.StatusOK, newLabelResponse(label)); err != nil { + logEncodingError(b.Logger, r, err) + return + } + } +} + // newDeleteLabelHandler returns a handler func for a DELETE to /labels endpoints -func newDeleteLabelHandler(s platform.LabelService) http.HandlerFunc { +func newDeleteLabelHandler(b *LabelBackend) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -435,7 +460,7 @@ func newDeleteLabelHandler(s platform.LabelService) http.HandlerFunc { ResourceID: req.ResourceID, } - if err := s.DeleteLabelMapping(ctx, mapping); err != nil { + if err := b.LabelService.DeleteLabelMapping(ctx, mapping); err != nil { EncodeError(ctx, err, w) return } diff --git a/http/macro_service.go b/http/macro_service.go index 651a35d6f6..5ca4dd9891 100644 --- a/http/macro_service.go +++ b/http/macro_service.go @@ -17,6 +17,20 @@ const ( macroPath = "/api/v2/macros" ) +// MacroBackend is all services and associated parameters required to construct +// the MacroHandler. +type MacroBackend struct { + Logger *zap.Logger + MacroService platform.MacroService +} + +func NewMacroBackend(b *APIBackend) *MacroBackend { + return &MacroBackend{ + Logger: b.Logger.With(zap.String("handler", "macro")), + MacroService: b.MacroService, + } +} + // MacroHandler is the handler for the macro service type MacroHandler struct { *httprouter.Router @@ -27,10 +41,12 @@ type MacroHandler struct { } // NewMacroHandler creates a new MacroHandler -func NewMacroHandler() *MacroHandler { +func NewMacroHandler(b *MacroBackend) *MacroHandler { h := &MacroHandler{ Router: NewRouter(), - Logger: zap.NewNop(), + Logger: b.Logger, + + MacroService: b.MacroService, } entityPath := fmt.Sprintf("%s/:id", macroPath) diff --git a/http/macro_test.go b/http/macro_test.go index b85c468332..6c121ef254 100644 --- a/http/macro_test.go +++ b/http/macro_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "go.uber.org/zap" "io/ioutil" "net/http" "net/http/httptest" @@ -16,6 +17,14 @@ import ( "github.com/julienschmidt/httprouter" ) +// NewMockMacroBackend returns a MacroBackend with mock services. 
+func NewMockMacroBackend() *MacroBackend { + return &MacroBackend{ + Logger: zap.NewNop().With(zap.String("handler", "macro")), + MacroService: mock.NewMacroService(), + } +} + func TestMacroService_handleGetMacros(t *testing.T) { type fields struct { MacroService platform.MacroService @@ -82,7 +91,7 @@ func TestMacroService_handleGetMacros(t *testing.T) { }, args: args{ map[string][]string{ - "limit": []string{"1"}, + "limit": {"1"}, }, }, wants: wants{ @@ -113,7 +122,7 @@ func TestMacroService_handleGetMacros(t *testing.T) { }, args: args{ map[string][]string{ - "orgID": []string{"0000000000000001"}, + "orgID": {"0000000000000001"}, }, }, wants: wants{ @@ -126,8 +135,9 @@ func TestMacroService_handleGetMacros(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewMacroHandler() - h.MacroService = tt.fields.MacroService + macroBackend := NewMockMacroBackend() + macroBackend.MacroService = tt.fields.MacroService + h := NewMacroHandler(macroBackend) r := httptest.NewRequest("GET", "http://howdy.tld", nil) qp := r.URL.Query() @@ -249,8 +259,9 @@ func TestMacroService_handleGetMacro(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewMacroHandler() - h.MacroService = tt.fields.MacroService + macroBackend := NewMockMacroBackend() + macroBackend.MacroService = tt.fields.MacroService + h := NewMacroHandler(macroBackend) r := httptest.NewRequest("GET", "http://howdy.tld", nil) r = r.WithContext(context.WithValue( context.TODO(), @@ -381,8 +392,9 @@ func TestMacroService_handlePostMacro(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewMacroHandler() - h.MacroService = tt.fields.MacroService + macroBackend := NewMockMacroBackend() + macroBackend.MacroService = tt.fields.MacroService + h := NewMacroHandler(macroBackend) r := httptest.NewRequest("GET", "http://howdy.tld", bytes.NewReader([]byte(tt.args.macro))) w := httptest.NewRecorder() @@ -474,8 +486,9 @@ func TestMacroService_handlePatchMacro(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewMacroHandler() - h.MacroService = tt.fields.MacroService + macroBackend := NewMockMacroBackend() + macroBackend.MacroService = tt.fields.MacroService + h := NewMacroHandler(macroBackend) r := httptest.NewRequest("GET", "http://howdy.tld", bytes.NewReader([]byte(tt.args.update))) r = r.WithContext(context.WithValue( context.TODO(), @@ -564,8 +577,9 @@ func TestMacroService_handleDeleteMacro(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewMacroHandler() - h.MacroService = tt.fields.MacroService + macroBackend := NewMockMacroBackend() + macroBackend.MacroService = tt.fields.MacroService + h := NewMacroHandler(macroBackend) r := httptest.NewRequest("GET", "http://howdy.tld", nil) r = r.WithContext(context.WithValue( context.TODO(), @@ -601,8 +615,9 @@ func initMacroService(f platformtesting.MacroFields, t *testing.T) (platform.Mac } } - handler := NewMacroHandler() - handler.MacroService = svc + macroBackend := NewMockMacroBackend() + macroBackend.MacroService = svc + handler := NewMacroHandler(macroBackend) server := httptest.NewServer(handler) client := MacroService{ Addr: server.URL, diff --git a/http/onboarding.go b/http/onboarding.go index 7f8a16ce15..c42a48eed2 100644 --- a/http/onboarding.go +++ b/http/onboarding.go @@ -11,6 +11,21 @@ import ( "go.uber.org/zap" ) +// SetupBackend is all services and associated parameters required to construct +// the SetupHandler. 
+type SetupBackend struct { + Logger *zap.Logger + OnboardingService platform.OnboardingService +} + +// NewSetupBackend returns a new instance of SetupBackend. +func NewSetupBackend(b *APIBackend) *SetupBackend { + return &SetupBackend{ + Logger: b.Logger.With(zap.String("handler", "setup")), + OnboardingService: b.OnboardingService, + } +} + // SetupHandler represents an HTTP API handler for onboarding setup. type SetupHandler struct { *httprouter.Router @@ -25,10 +40,11 @@ const ( ) // NewSetupHandler returns a new instance of SetupHandler. -func NewSetupHandler() *SetupHandler { +func NewSetupHandler(b *SetupBackend) *SetupHandler { h := &SetupHandler{ - Router: NewRouter(), - Logger: zap.NewNop(), + Router: NewRouter(), + Logger: b.Logger, + OnboardingService: b.OnboardingService, } h.HandlerFunc("POST", setupPath, h.handlePostSetup) h.HandlerFunc("GET", setupPath, h.isOnboarding) diff --git a/http/onboarding_test.go b/http/onboarding_test.go index 6bf1dbafe5..d7cd5b08dc 100644 --- a/http/onboarding_test.go +++ b/http/onboarding_test.go @@ -5,11 +5,22 @@ import ( "net/http/httptest" "testing" + "github.com/influxdata/influxdb/mock" + "go.uber.org/zap" + platform "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/inmem" platformtesting "github.com/influxdata/influxdb/testing" ) + +// NewMockSetupBackend returns a SetupBackend with mock services. +func NewMockSetupBackend() *SetupBackend { + return &SetupBackend{ + Logger: zap.NewNop().With(zap.String("handler", "setup")), + OnboardingService: mock.NewOnboardingService(), + } +} + func initOnboardingService(f platformtesting.OnboardingFields, t *testing.T) (platform.OnboardingService, func()) { t.Helper() svc := inmem.NewService() @@ -21,8 +32,9 @@ func initOnboardingService(f platformtesting.OnboardingFields, t *testing.T) (pl t.Fatalf("failed to set new onboarding finished: %v", err) } - handler := NewSetupHandler() - handler.OnboardingService = svc + setupBackend := NewMockSetupBackend() + setupBackend.OnboardingService = svc + handler := NewSetupHandler(setupBackend) server := httptest.NewServer(handler) client := struct { *SetupService diff --git a/http/org_service.go b/http/org_service.go index 3f9682a571..52172736fd 100644 --- a/http/org_service.go +++ b/http/org_service.go @@ -5,15 +5,39 @@ import ( "context" "encoding/json" "fmt" - "net/http" - "path" - "strconv" - platform "github.com/influxdata/influxdb" "github.com/julienschmidt/httprouter" "go.uber.org/zap" + "net/http" + "path" ) + +// OrgBackend is all services and associated parameters required to construct +// the OrgHandler. +type OrgBackend struct { + Logger *zap.Logger + + OrganizationService platform.OrganizationService + OrganizationOperationLogService platform.OrganizationOperationLogService + UserResourceMappingService platform.UserResourceMappingService + SecretService platform.SecretService + LabelService platform.LabelService + UserService platform.UserService +} + +func NewOrgBackend(b *APIBackend) *OrgBackend { + return &OrgBackend{ + Logger: b.Logger.With(zap.String("handler", "org")), + + OrganizationService: b.OrganizationService, + OrganizationOperationLogService: b.OrganizationOperationLogService, + UserResourceMappingService: b.UserResourceMappingService, + SecretService: b.SecretService, + LabelService: b.LabelService, + UserService: b.UserService, + } +} + // OrgHandler represents an HTTP API handler for orgs. type OrgHandler struct { *httprouter.Router @@ -44,15 +68,17 @@ const ( ) // NewOrgHandler returns a new instance of OrgHandler.
-func NewOrgHandler(mappingService platform.UserResourceMappingService, - labelService platform.LabelService, userService platform.UserService) *OrgHandler { +func NewOrgHandler(b *OrgBackend) *OrgHandler { h := &OrgHandler{ Router: NewRouter(), Logger: zap.NewNop(), - UserResourceMappingService: mappingService, - LabelService: labelService, - UserService: userService, + OrganizationService: b.OrganizationService, + OrganizationOperationLogService: b.OrganizationOperationLogService, + UserResourceMappingService: b.UserResourceMappingService, + SecretService: b.SecretService, + LabelService: b.LabelService, + UserService: b.UserService, } h.HandlerFunc("POST", organizationsPath, h.handlePostOrg) @@ -62,22 +88,41 @@ func NewOrgHandler(mappingService platform.UserResourceMappingService, h.HandlerFunc("PATCH", organizationsIDPath, h.handlePatchOrg) h.HandlerFunc("DELETE", organizationsIDPath, h.handleDeleteOrg) - h.HandlerFunc("POST", organizationsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.OrgsResourceType, platform.Member)) - h.HandlerFunc("GET", organizationsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.OrgsResourceType, platform.Member)) - h.HandlerFunc("DELETE", organizationsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member)) + memberBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.OrgsResourceType, + UserType: platform.Member, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", organizationsIDMembersPath, newPostMemberHandler(memberBackend)) + h.HandlerFunc("GET", organizationsIDMembersPath, newGetMembersHandler(memberBackend)) + h.HandlerFunc("DELETE", organizationsIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - h.HandlerFunc("POST", organizationsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.OrgsResourceType, platform.Owner)) - h.HandlerFunc("GET", organizationsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.OrgsResourceType, platform.Owner)) - h.HandlerFunc("DELETE", organizationsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner)) + ownerBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.OrgsResourceType, + UserType: platform.Owner, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", organizationsIDOwnersPath, newPostMemberHandler(ownerBackend)) + h.HandlerFunc("GET", organizationsIDOwnersPath, newGetMembersHandler(ownerBackend)) + h.HandlerFunc("DELETE", organizationsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) h.HandlerFunc("GET", organizationsIDSecretsPath, h.handleGetSecrets) h.HandlerFunc("PATCH", organizationsIDSecretsPath, h.handlePatchSecrets) // TODO(desa): need a way to specify which secrets to delete. 
this should work for now h.HandlerFunc("POST", organizationsIDSecretsDeletePath, h.handleDeleteSecrets) - h.HandlerFunc("GET", organizationsIDLabelsPath, newGetLabelsHandler(h.LabelService)) - h.HandlerFunc("POST", organizationsIDLabelsPath, newPostLabelHandler(h.LabelService)) - h.HandlerFunc("DELETE", organizationsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService)) + labelBackend := &LabelBackend{ + Logger: b.Logger.With(zap.String("handler", "label")), + LabelService: b.LabelService, + } + h.HandlerFunc("GET", organizationsIDLabelsPath, newGetLabelsHandler(labelBackend)) + h.HandlerFunc("POST", organizationsIDLabelsPath, newPostLabelHandler(labelBackend)) + h.HandlerFunc("DELETE", organizationsIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) + h.HandlerFunc("PATCH", organizationsIDLabelsIDPath, newPatchLabelHandler(labelBackend)) return h } @@ -758,29 +803,14 @@ func decodeGetOrganizationLogRequest(ctx context.Context, r *http.Request) (*get return nil, err } - opts := platform.DefaultOperationLogFindOptions - qp := r.URL.Query() - if v := qp.Get("desc"); v == "false" { - opts.Descending = false - } - if v := qp.Get("limit"); v != "" { - i, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - opts.Limit = i - } - if v := qp.Get("offset"); v != "" { - i, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - opts.Offset = i + opts, err := decodeFindOptions(ctx, r) + if err != nil { + return nil, err } return &getOrganizationLogRequest{ OrganizationID: i, - opts: opts, + opts: *opts, }, nil } diff --git a/http/org_test.go b/http/org_test.go index e5c02f5a6b..bf9e1aa284 100644 --- a/http/org_test.go +++ b/http/org_test.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "go.uber.org/zap" "io/ioutil" "net/http" "net/http/httptest" @@ -16,6 +17,20 @@ import ( platformtesting "github.com/influxdata/influxdb/testing" ) +// NewMockOrgBackend returns an OrgBackend with mock services.
+func NewMockOrgBackend() *OrgBackend { + return &OrgBackend{ + Logger: zap.NewNop().With(zap.String("handler", "org")), + + OrganizationService: mock.NewOrganizationService(), + OrganizationOperationLogService: mock.NewOrganizationOperationLogService(), + UserResourceMappingService: mock.NewUserResourceMappingService(), + SecretService: mock.NewSecretService(), + LabelService: mock.NewLabelService(), + UserService: mock.NewUserService(), + } +} + func initOrganizationService(f platformtesting.OrganizationFields, t *testing.T) (platform.OrganizationService, string, func()) { t.Helper() svc := inmem.NewService() @@ -28,8 +43,9 @@ func initOrganizationService(f platformtesting.OrganizationFields, t *testing.T) } } - handler := NewOrgHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService()) - handler.OrganizationService = svc + orgBackend := NewMockOrgBackend() + orgBackend.OrganizationService = svc + handler := NewOrgHandler(orgBackend) server := httptest.NewServer(handler) client := OrganizationService{ Addr: server.URL, @@ -122,8 +138,9 @@ func TestSecretService_handleGetSecrets(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewOrgHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService()) - h.SecretService = tt.fields.SecretService + orgBackend := NewMockOrgBackend() + orgBackend.SecretService = tt.fields.SecretService + h := NewOrgHandler(orgBackend) u := fmt.Sprintf("http://any.url/api/v2/orgs/%s/secrets", tt.args.orgID) r := httptest.NewRequest("GET", u, nil) @@ -192,8 +209,9 @@ func TestSecretService_handlePatchSecrets(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewOrgHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService()) - h.SecretService = tt.fields.SecretService + orgBackend := NewMockOrgBackend() + orgBackend.SecretService = tt.fields.SecretService + h := NewOrgHandler(orgBackend) b, err := json.Marshal(tt.args.secrets) if err != nil { @@ -268,8 +286,9 @@ func TestSecretService_handleDeleteSecrets(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewOrgHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService()) - h.SecretService = tt.fields.SecretService + orgBackend := NewMockOrgBackend() + orgBackend.SecretService = tt.fields.SecretService + h := NewOrgHandler(orgBackend) b, err := json.Marshal(tt.args.secrets) if err != nil { diff --git a/http/query_handler.go b/http/query_handler.go index c1a5c749fb..b1541ceabe 100644 --- a/http/query_handler.go +++ b/http/query_handler.go @@ -27,6 +27,25 @@ const ( fluxPath = "/api/v2/query" ) +// FluxBackend is all services and associated parameters required to construct +// the FluxHandler. +type FluxBackend struct { + Logger *zap.Logger + + OrganizationService platform.OrganizationService + ProxyQueryService query.ProxyQueryService +} + +// NewFluxBackend returns a new instance of FluxBackend. +func NewFluxBackend(b *APIBackend) *FluxBackend { + return &FluxBackend{ + Logger: b.Logger.With(zap.String("handler", "query")), + + ProxyQueryService: b.ProxyQueryService, + OrganizationService: b.OrganizationService, + } +} + // FluxHandler implements handling flux queries. type FluxHandler struct { *httprouter.Router @@ -39,11 +58,14 @@ type FluxHandler struct { } // NewFluxHandler returns a new handler at /api/v2/query for flux queries. 
-func NewFluxHandler() *FluxHandler { +func NewFluxHandler(b *FluxBackend) *FluxHandler { h := &FluxHandler{ Router: NewRouter(), Now: time.Now, - Logger: zap.NewNop(), + Logger: b.Logger, + + ProxyQueryService: b.ProxyQueryService, + OrganizationService: b.OrganizationService, } h.HandlerFunc("POST", fluxPath, h.handleQuery) diff --git a/http/scraper_service.go b/http/scraper_service.go index 606dbd03ed..e45d2fed5b 100644 --- a/http/scraper_service.go +++ b/http/scraper_service.go @@ -13,6 +13,30 @@ import ( "go.uber.org/zap" ) +// ScraperBackend is all services and associated parameters required to construct +// the ScraperHandler. +type ScraperBackend struct { + Logger *zap.Logger + + ScraperStorageService influxdb.ScraperTargetStoreService + BucketService influxdb.BucketService + OrganizationService influxdb.OrganizationService + UserService influxdb.UserService + UserResourceMappingService influxdb.UserResourceMappingService + LabelService influxdb.LabelService +} + +// NewScraperBackend returns a new instance of ScraperBackend. +func NewScraperBackend(b *APIBackend) *ScraperBackend { + return &ScraperBackend{ + Logger: b.Logger.With(zap.String("handler", "scraper")), + + ScraperStorageService: b.ScraperTargetStoreService, + BucketService: b.BucketService, + OrganizationService: b.OrganizationService, + } +} + // ScraperHandler represents an HTTP API handler for scraper targets. type ScraperHandler struct { *httprouter.Router @@ -36,24 +60,16 @@ const ( ) // NewScraperHandler returns a new instance of ScraperHandler. -func NewScraperHandler( - logger *zap.Logger, - userService influxdb.UserService, - userResourceMappingService influxdb.UserResourceMappingService, - labelService influxdb.LabelService, - scraperStorageService influxdb.ScraperTargetStoreService, - bucketService influxdb.BucketService, - organizationService influxdb.OrganizationService, -) *ScraperHandler { +func NewScraperHandler(b *ScraperBackend) *ScraperHandler { h := &ScraperHandler{ Router: NewRouter(), - Logger: logger, - UserService: userService, - UserResourceMappingService: userResourceMappingService, - LabelService: labelService, - ScraperStorageService: scraperStorageService, - BucketService: bucketService, - OrganizationService: organizationService, + Logger: b.Logger, + UserService: b.UserService, + UserResourceMappingService: b.UserResourceMappingService, + LabelService: b.LabelService, + ScraperStorageService: b.ScraperStorageService, + BucketService: b.BucketService, + OrganizationService: b.OrganizationService, } h.HandlerFunc("POST", targetsPath, h.handlePostScraperTarget) h.HandlerFunc("GET", targetsPath, h.handleGetScraperTargets) @@ -61,17 +77,35 @@ func NewScraperHandler( h.HandlerFunc("PATCH", targetsPath+"/:id", h.handlePatchScraperTarget) h.HandlerFunc("DELETE", targetsPath+"/:id", h.handleDeleteScraperTarget) - h.HandlerFunc("POST", targetsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, influxdb.ScraperResourceType, influxdb.Member)) - h.HandlerFunc("GET", targetsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, influxdb.ScraperResourceType, influxdb.Member)) - h.HandlerFunc("DELETE", targetsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, influxdb.Member)) + memberBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: influxdb.ScraperResourceType, + UserType: influxdb.Member, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + 
} + h.HandlerFunc("POST", targetsIDMembersPath, newPostMemberHandler(memberBackend)) + h.HandlerFunc("GET", targetsIDMembersPath, newGetMembersHandler(memberBackend)) + h.HandlerFunc("DELETE", targetsIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - h.HandlerFunc("POST", targetsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, influxdb.ScraperResourceType, influxdb.Owner)) - h.HandlerFunc("GET", targetsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, influxdb.ScraperResourceType, influxdb.Owner)) - h.HandlerFunc("DELETE", targetsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, influxdb.Owner)) + ownerBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: influxdb.ScraperResourceType, + UserType: influxdb.Owner, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", targetsIDOwnersPath, newPostMemberHandler(ownerBackend)) + h.HandlerFunc("GET", targetsIDOwnersPath, newGetMembersHandler(ownerBackend)) + h.HandlerFunc("DELETE", targetsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - h.HandlerFunc("GET", targetsIDLabelsPath, newGetLabelsHandler(h.LabelService)) - h.HandlerFunc("POST", targetsIDLabelsPath, newPostLabelHandler(h.LabelService)) - h.HandlerFunc("DELETE", targetsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService)) + labelBackend := &LabelBackend{ + Logger: b.Logger.With(zap.String("handler", "label")), + LabelService: b.LabelService, + } + h.HandlerFunc("GET", targetsIDLabelsPath, newGetLabelsHandler(labelBackend)) + h.HandlerFunc("POST", targetsIDLabelsPath, newPostLabelHandler(labelBackend)) + h.HandlerFunc("DELETE", targetsIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) return h } diff --git a/http/scraper_service_test.go b/http/scraper_service_test.go index 5b5c2c8e67..c4329f6635 100644 --- a/http/scraper_service_test.go +++ b/http/scraper_service_test.go @@ -5,17 +5,16 @@ import ( "context" "encoding/json" "fmt" + "go.uber.org/zap" "io/ioutil" "net/http" "net/http/httptest" - "os" "testing" platform "github.com/influxdata/influxdb" platcontext "github.com/influxdata/influxdb/context" httpMock "github.com/influxdata/influxdb/http/mock" "github.com/influxdata/influxdb/inmem" - "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/mock" platformtesting "github.com/influxdata/influxdb/testing" "github.com/julienschmidt/httprouter" @@ -31,6 +30,20 @@ var ( targetTwoID = platformtesting.MustIDBase16(targetTwoIDString) ) +// NewMockScraperBackend returns a ScraperBackend with mock services. 
+func NewMockScraperBackend() *ScraperBackend { + return &ScraperBackend{ + Logger: zap.NewNop().With(zap.String("handler", "scraper")), + + ScraperStorageService: &mock.ScraperTargetStoreService{}, + BucketService: mock.NewBucketService(), + OrganizationService: mock.NewOrganizationService(), + UserService: mock.NewUserService(), + UserResourceMappingService: &mock.UserResourceMappingService{}, + LabelService: mock.NewLabelService(), + } +} + func TestService_handleGetScraperTargets(t *testing.T) { type fields struct { ScraperTargetStoreService platform.ScraperTargetStoreService @@ -188,15 +201,11 @@ func TestService_handleGetScraperTargets(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewScraperHandler( - logger.New(os.Stdout), - mock.NewUserService(), - &mock.UserResourceMappingService{}, - mock.NewLabelService(), - tt.fields.ScraperTargetStoreService, - tt.fields.BucketService, - tt.fields.OrganizationService, - ) + scraperBackend := NewMockScraperBackend() + scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService + scraperBackend.OrganizationService = tt.fields.OrganizationService + scraperBackend.BucketService = tt.fields.BucketService + h := NewScraperHandler(scraperBackend) r := httptest.NewRequest("GET", "http://any.tld", nil) @@ -322,15 +331,11 @@ func TestService_handleGetScraperTarget(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewScraperHandler( - logger.New(os.Stdout), - mock.NewUserService(), - &mock.UserResourceMappingService{}, - mock.NewLabelService(), - tt.fields.ScraperTargetStoreService, - tt.fields.BucketService, - tt.fields.OrganizationService, - ) + scraperBackend := NewMockScraperBackend() + scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService + scraperBackend.OrganizationService = tt.fields.OrganizationService + scraperBackend.BucketService = tt.fields.BucketService + h := NewScraperHandler(scraperBackend) r := httptest.NewRequest("GET", "http://any.tld", nil) @@ -429,15 +434,10 @@ func TestService_handleDeleteScraperTarget(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewScraperHandler( - logger.New(os.Stdout), - mock.NewUserService(), - &mock.UserResourceMappingService{}, - mock.NewLabelService(), - tt.fields.Service, - mock.NewBucketService(), - &mock.OrganizationService{}, - ) + scraperBackend := NewMockScraperBackend() + scraperBackend.ScraperStorageService = tt.fields.Service + h := NewScraperHandler(scraperBackend) + r := httptest.NewRequest("GET", "http://any.tld", nil) r = r.WithContext(context.WithValue( @@ -558,15 +558,11 @@ func TestService_handlePostScraperTarget(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewScraperHandler( - logger.New(os.Stdout), - mock.NewUserService(), - &mock.UserResourceMappingService{}, - mock.NewLabelService(), - tt.fields.ScraperTargetStoreService, - tt.fields.BucketService, - tt.fields.OrganizationService, - ) + scraperBackend := NewMockScraperBackend() + scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService + scraperBackend.OrganizationService = tt.fields.OrganizationService + scraperBackend.BucketService = tt.fields.BucketService + h := NewScraperHandler(scraperBackend) st, err := json.Marshal(tt.args.target) if err != nil { @@ -733,15 +729,11 @@ func TestService_handlePatchScraperTarget(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewScraperHandler( - 
logger.New(os.Stdout), - mock.NewUserService(), - &mock.UserResourceMappingService{}, - mock.NewLabelService(), - tt.fields.ScraperTargetStoreService, - tt.fields.BucketService, - tt.fields.OrganizationService, - ) + scraperBackend := NewMockScraperBackend() + scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService + scraperBackend.OrganizationService = tt.fields.OrganizationService + scraperBackend.BucketService = tt.fields.BucketService + h := NewScraperHandler(scraperBackend) var err error st := make([]byte, 0) @@ -802,33 +794,30 @@ func initScraperService(f platformtesting.TargetFields, t *testing.T) (platform. } } - handler := NewScraperHandler( - logger.New(os.Stdout), - mock.NewUserService(), - &mock.UserResourceMappingService{}, - mock.NewLabelService(), - svc, - &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*platform.Bucket, error) { - return &platform.Bucket{ - ID: id, - Name: "bucket1", - }, nil - }, + scraperBackend := NewMockScraperBackend() + scraperBackend.ScraperStorageService = svc + scraperBackend.OrganizationService = &mock.OrganizationService{ + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) { + return &platform.Organization{ + ID: id, + Name: "org1", + }, nil }, - &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) { - return &platform.Organization{ - ID: id, - Name: "org1", - }, nil - }, + } + scraperBackend.BucketService = &mock.BucketService{ + FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*platform.Bucket, error) { + return &platform.Bucket{ + ID: id, + Name: "bucket1", + }, nil }, - ) - userID, _ := platform.IDFromString("020f755c3c082002") + } + + handler := NewScraperHandler(scraperBackend) server := httptest.NewServer(httpMock.NewAuthMiddlewareHandler( - handler, &platform.Authorization{ - UserID: *userID, + handler, + &platform.Authorization{ + UserID: platformtesting.MustIDBase16("020f755c3c082002"), Token: "tok", }, )) diff --git a/http/source_service.go b/http/source_service.go index d82d795616..61b602781b 100644 --- a/http/source_service.go +++ b/http/source_service.go @@ -74,6 +74,27 @@ func newSourcesResponse(srcs []*platform.Source) *sourcesResponse { return res } +// SourceBackend is all services and associated parameters required to construct +// the SourceHandler. +type SourceBackend struct { + Logger *zap.Logger + + SourceService platform.SourceService + NewBucketService func(s *platform.Source) (platform.BucketService, error) + NewQueryService func(s *platform.Source) (query.ProxyQueryService, error) +} + +// NewSourceBackend returns a new instance of SourceBackend. +func NewSourceBackend(b *APIBackend) *SourceBackend { + return &SourceBackend{ + Logger: b.Logger.With(zap.String("handler", "source")), + + SourceService: b.SourceService, + NewBucketService: b.NewBucketService, + NewQueryService: b.NewQueryService, + } +} + // SourceHandler is a handler for sources type SourceHandler struct { *httprouter.Router @@ -87,16 +108,14 @@ type SourceHandler struct { } // NewSourceHandler returns a new instance of SourceHandler. 
-func NewSourceHandler() *SourceHandler { +func NewSourceHandler(b *SourceBackend) *SourceHandler { h := &SourceHandler{ Router: NewRouter(), - Logger: zap.NewNop(), - NewBucketService: func(s *platform.Source) (platform.BucketService, error) { - return nil, fmt.Errorf("bucket service not set") - }, - NewQueryService: func(s *platform.Source) (query.ProxyQueryService, error) { - return nil, fmt.Errorf("query service not set") - }, + Logger: b.Logger, + + SourceService: b.SourceService, + NewBucketService: b.NewBucketService, + NewQueryService: b.NewQueryService, } h.HandlerFunc("POST", "/api/v2/sources", h.handlePostSource) diff --git a/http/swagger.yml b/http/swagger.yml index 4545dce17a..7d42be3cc8 100644 --- a/http/swagger.yml +++ b/http/swagger.yml @@ -308,12 +308,7 @@ paths: content: application/json: schema: - type: object - properties: - labels: - $ref: "#/components/schemas/Labels" - links: - $ref: "#/components/schemas/Links" + $ref: "#/components/schemas/LabelsResponse" default: description: unexpected error content: @@ -345,12 +340,7 @@ paths: content: application/json: schema: - type: object - properties: - labels: - $ref: "#/components/schemas/Labels" - links: - $ref: "#/components/schemas/Links" + $ref: "#/components/schemas/LabelsResponse" default: description: unexpected error content: @@ -727,7 +717,7 @@ paths: application/json: schema: $ref: "#/components/schemas/Error" - '/scrapers/{scraperTargetID}/labels/{label}': + '/scrapers/{scraperTargetID}/labels/{labelID}': delete: tags: - ScraperTargets @@ -774,7 +764,7 @@ paths: required: true description: ID of the scraper target - in: path - name: label + name: labelID schema: type: string required: true @@ -1132,6 +1122,13 @@ paths: tags: - Write summary: write time-series data into influxdb + requestBody: + description: line protocol body + required: true + content: + text/plain: + schema: + type: string parameters: - $ref: '#/components/parameters/TraceSpan' - in: header @@ -1188,14 +1185,7 @@ paths: name: precision description: specifies the precision for the unix timestamps within the body line-protocol schema: - type: string - default: ns - description: specifies the unit of time - enum: - - ns - - us - - ms - - s + $ref: "#/components/schemas/WritePrecision" responses: '204': description: write data is correctly formatted and accepted for writing to the bucket. 
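The write endpoint's new requestBody section and the shared WritePrecision schema make the contract explicit: the body is raw line protocol sent as text/plain, and precision is constrained to the enum (ms, s, us, u, ns) rather than a free-form string. A minimal Go client against that contract might look like the sketch below; the org and bucket query parameters, the host/port, and the token are illustrative assumptions, not taken from this patch:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Raw line protocol, matching the text/plain requestBody documented above.
	body := "cpu,host=server01 usage=0.64 1556813561098000000"

	q := url.Values{
		"org":       {"my-org"},    // assumed parameter
		"bucket":    {"my-bucket"}, // assumed parameter
		"precision": {"ns"},        // must be a WritePrecision enum value
	}

	req, err := http.NewRequest("POST",
		"http://localhost:9999/api/v2/write?"+q.Encode(), // host/port assumed
		strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "text/plain; charset=utf-8")
	req.Header.Set("Authorization", "Token my-token") // placeholder token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Per the responses section above, 204 means the points were accepted.
	fmt.Println(resp.Status)
}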
@@ -1350,6 +1340,32 @@ paths: schema: $ref: "#/components/schemas/Error" /sources/{sourceID}: + delete: + tags: + - Sources + summary: Delete a source + parameters: + - in: path + name: sourceID + schema: + type: string + required: true + description: ID of the source + responses: + '204': + description: delete has been accepted + '404': + description: view not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" patch: tags: - Sources @@ -1490,448 +1506,6 @@ paths: application/json: schema: $ref: "#/components/schemas/Error" - /views: - post: - tags: - - Views - summary: A view contains information about the visual representation of data - parameters: - - $ref: '#/components/parameters/TraceSpan' - requestBody: - description: view to create - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/View" - responses: - '201': - description: Added view - content: - application/json: - schema: - $ref: "#/components/schemas/View" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - get: - tags: - - Views - summary: Get all views - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: query - name: org - description: specifies the organization of the resource - required: true - schema: - type: string - - in: query - name: type - description: filters results to a specified type. Can be used multiple times in a request, to filter to multiple types. - schema: - type: string - enum: - - "xy" - - "single-stat" - - "gauge" - - "table" - - "markdown" - - "log-viewer" - - "line-plus-single-stat" - responses: - '200': - description: all views - content: - application/json: - schema: - $ref: "#/components/schemas/Views" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}': - get: - tags: - - Views - summary: Get a single View - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of view to update - responses: - '200': - description: get a single view - content: - application/json: - schema: - $ref: "#/components/schemas/View" - '404': - description: view not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - patch: - tags: - - Views - summary: Update a single view - requestBody: - description: patching of a view - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/View" - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of view to update - responses: - '200': - description: Updated view - content: - application/json: - schema: - $ref: "#/components/schemas/View" - '404': - description: view not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - delete: - tags: - - Views - summary: Delete a view - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - 
description: ID of view to update - responses: - '204': - description: delete has been accepted - '404': - description: view not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/labels': - get: - tags: - - Views - summary: list all labels for a view - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '200': - description: a list of all labels for a view - content: - application/json: - schema: - type: object - properties: - labels: - $ref: "#/components/schemas/Labels" - links: - $ref: "#/components/schemas/Links" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Views - summary: add a label to a view - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - requestBody: - description: label to add - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/LabelMapping" - responses: - '200': - description: a list of all labels for a view - content: - application/json: - schema: - type: object - properties: - labels: - $ref: "#/components/schemas/Labels" - links: - $ref: "#/components/schemas/Links" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/labels/{labelID}': - delete: - tags: - - Views - summary: delete a label from a view - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - - in: path - name: labelID - schema: - type: string - required: true - description: the label id - responses: - '204': - description: delete has been accepted - '404': - description: view not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/members': - get: - tags: - - Users - - Views - summary: List all view members - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '200': - description: a list of users who have member privileges for a view - content: - application/json: - schema: - $ref: "#/components/schemas/ResourceMembers" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Views - summary: Add view member - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - requestBody: - description: user to add as member - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/AddResourceMemberRequestBody" - responses: - '201': - description: added to view members - content: - application/json: - schema: - $ref: "#/components/schemas/ResourceMember" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - 
'/views/{viewID}/members/{userID}': - delete: - tags: - - Users - - Views - summary: removes a member from an view - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: userID - schema: - type: string - required: true - description: ID of member to remove - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '204': - description: member removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/owners': - get: - tags: - - Users - - Views - summary: List all view owners - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '200': - description: a list of users who have owner privileges for a view - content: - application/json: - schema: - $ref: "#/components/schemas/ResourceOwners" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - post: - tags: - - Users - - Views - summary: Add view owner - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - requestBody: - description: user to add as owner - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/AddResourceMemberRequestBody" - responses: - '201': - description: added to view owners - content: - application/json: - schema: - $ref: "#/components/schemas/ResourceOwner" - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - '/views/{viewID}/owners/{userID}': - delete: - tags: - - Users - - Views - summary: removes an owner from a view - parameters: - - $ref: '#/components/parameters/TraceSpan' - - in: path - name: userID - schema: - type: string - required: true - description: ID of owner to remove - - in: path - name: viewID - schema: - type: string - required: true - description: ID of the view - responses: - '204': - description: owner removed - default: - description: unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" /labels: post: tags: @@ -1950,7 +1524,7 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/Label" + $ref: "#/components/schemas/LabelResponse" default: description: unexpected error content: @@ -1967,7 +1541,7 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/Labels" + $ref: "#/components/schemas/LabelsResponse" default: description: unexpected error content: @@ -1993,7 +1567,7 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/Label" + $ref: "#/components/schemas/LabelResponse" default: description: unexpected error content: @@ -2025,7 +1599,7 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/Label" + $ref: "#/components/schemas/LabelResponse" '404': description: label not found content: @@ -2503,12 +2077,7 @@ paths: content: application/json: schema: - type: object - properties: - labels: - $ref: "#/components/schemas/Labels" - links: - $ref: "#/components/schemas/Links" + $ref: "#/components/schemas/LabelsResponse" default: description: unexpected error content: @@ -2540,12 +2109,7 @@ paths: content: application/json: schema: - type: object - properties: - labels: - $ref: "#/components/schemas/Labels" - links: - 
$ref: "#/components/schemas/Links" + $ref: "#/components/schemas/LabelResponse" default: description: unexpected error content: @@ -2787,11 +2351,9 @@ paths: '200': description: Abstract syntax tree of flux query. content: - application/json: #TODO(goller): document the AST JSON schema + application/json: schema: - properties: - todo: - type: string # swagger editor was yelling at me here + $ref: "#/components/schemas/ASTResponse" default: description: Any response other than 200 is an internal server error content: @@ -3768,16 +3330,11 @@ paths: $ref: "#/components/schemas/LabelMapping" responses: '200': - description: a list of all labels for an organization + description: returns the created label content: application/json: schema: - type: object - properties: - labels: - $ref: "#/components/schemas/Labels" - links: - $ref: "#/components/schemas/Links" + $ref: "#/components/schemas/LabelResponse" default: description: unexpected error content: @@ -3924,7 +3481,7 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/ResourceOwners" + $ref: "#/components/schemas/ResourceMembers" default: description: unexpected error content: @@ -4013,7 +3570,7 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/ResourceMembers" + $ref: "#/components/schemas/ResourceOwners" default: description: unexpected error content: @@ -4118,7 +3675,14 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/Tasks" + type: object + properties: + tasks: + type: array + items: + $ref: "#/components/schemas/Task" + links: + $ref: "#/components/schemas/Links" default: description: unexpected error content: @@ -4137,7 +3701,7 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/Task" + $ref: "#/components/schemas/TaskCreateRequest" responses: '201': description: Task created @@ -4496,14 +4060,7 @@ paths: content: application/json: schema: - type: object - properties: - labels: - type: array - items: - type: string - links: - $ref: "#/components/schemas/Links" + $ref: "#/components/schemas/LabelResponse" default: description: unexpected error content: @@ -5079,24 +4636,23 @@ components: annotations: description: https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns type: array - default: [] items: type: string enum: - - group - - datatype - - default + - "group" + - "datatype" + - "default" uniqueItems: true commentPrefix: description: character prefixed to comment strings type: string - default: \# + default: "#" maxLength: 1 minLength: 0 dateTimeFormat: description: format of timestamps type: string - default: RFC3339 + default: "RFC3339" enum: - RFC3339 - RFC3339Nano @@ -5271,7 +4827,6 @@ components: Link: type: string readOnly: true - format: uri description: URI of resource. Links: type: object @@ -5387,13 +4942,12 @@ components: status: readOnly: true type: string - enum: [ - "scheduled", - "started", - "failed", - "success", - "canceled" - ] + enum: + - scheduled + - started + - failed + - success + - canceled scheduledFor: description: Time used for run's "now" option, RFC3339. type: string @@ -5456,6 +5010,8 @@ components: name: description: A description of the task. type: string + owner: + $ref: "#/components/schemas/User" status: description: The current status of the task. When updated to 'inactive', cancels all queued jobs of this task. 
default: active @@ -5782,75 +5338,6 @@ components: type: integer format: int32 required: [code, message, maxLength] - InfluxQLResults: - properties: - error: - description: error during processing of the message - type: string - results: - type: array - description: result for each query - items: - type: object - properties: - error: - type: string - description: error during processing of the message - partial: - type: boolean - description: If a max row limit has been placed in the configuration file and the number of returned values is larger, this will be set to true and values truncated. - statement_id: - type: integer - description: statement's position in the query. - series: - description: The collection of data in InfluxDB’s data structure that share a measurement, tag set, and retention policy. - type: array - items: - type: object - description: values for a unique series - properties: - name: - description: The part of InfluxDB’s structure that describes the data stored in the associated fields. Measurements are strings. - type: string - tags: - description: The key-value pairs in InfluxDB’s data structure that records metadata. - type: object - columns: - description: list of columns describing the content of a single value array - type: array - items: - type: string - values: - description: array of arrays of the values return from the query - type: array - items: - type: array - description: single row of results in the order of the columns field. - items: - oneOf: - - type: string - - type: number - - type: integer - partial: - type: boolean - messages: - type: array - description: represents a user-facing message to be included with the result. - items: - type: object - properties: - level: - type: string - text: - type: string - InfluxqlQueryError: - properties: - error: - description: message describing why the query was rejected - readOnly: true - type: string - required: - - error Field: type: object properties: @@ -6393,6 +5880,8 @@ components: Cell: type: object properties: + id: + type: string links: type: object properties: @@ -6524,6 +6013,12 @@ components: properties: self: type: string + query: + type: string + health: + type: string + buckets: + type: string id: type: string orgID: @@ -7374,3 +6869,48 @@ components: properties: labelID: type: string + LabelsResponse: + type: object + properties: + labels: + $ref: "#/components/schemas/Labels" + links: + $ref: "#/components/schemas/Links" + LabelResponse: + type: object + properties: + label: + $ref: "#/components/schemas/Label" + links: + $ref: "#/components/schemas/Links" + ASTResponse: + description: contains the AST for the supplied Flux query + type: object + properties: + ast: + description: the AST of the supplied Flux query + type: object + WritePrecision: + type: string + enum: + - ms + - s + - us + - u + - ns + TaskCreateRequest: + properties: + orgID: + description: The ID of the organization that owns this Task. + type: string + status: + description: Starting state of the task. 'inactive' tasks are not run until they are updated to 'active' + default: active + type: string + enum: + - active + - inactive + flux: + description: The Flux script to run for this task. 
+ type: string + required: [orgID, flux] \ No newline at end of file diff --git a/http/swagger_test.go b/http/swagger_test.go index b9e8cbd099..9343cb82be 100644 --- a/http/swagger_test.go +++ b/http/swagger_test.go @@ -21,17 +21,3 @@ func TestValidSwagger(t *testing.T) { t.Errorf("invalid swagger specification: %v", err) } } - -func TestValidCurSwagger(t *testing.T) { - data, err := ioutil.ReadFile("./cur_swagger.yml") - if err != nil { - t.Fatalf("unable to read swagger specification: %v", err) - } - swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromYAMLData(data) - if err != nil { - t.Fatalf("unable to load swagger specification: %v", err) - } - if err := swagger.Validate(context.Background()); err != nil { - t.Errorf("invalid swagger specification: %v", err) - } -} diff --git a/http/task_service.go b/http/task_service.go index 41cf73034e..b275b5f392 100644 --- a/http/task_service.go +++ b/http/task_service.go @@ -20,6 +20,32 @@ import ( "go.uber.org/zap" ) +// TaskBackend is all services and associated parameters required to construct +// the TaskHandler. +type TaskBackend struct { + Logger *zap.Logger + + TaskService platform.TaskService + AuthorizationService platform.AuthorizationService + OrganizationService platform.OrganizationService + UserResourceMappingService platform.UserResourceMappingService + LabelService platform.LabelService + UserService platform.UserService +} + +// NewTaskBackend returns a new instance of TaskBackend. +func NewTaskBackend(b *APIBackend) *TaskBackend { + return &TaskBackend{ + Logger: b.Logger.With(zap.String("handler", "task")), + TaskService: b.TaskService, + AuthorizationService: b.AuthorizationService, + OrganizationService: b.OrganizationService, + UserResourceMappingService: b.UserResourceMappingService, + LabelService: b.LabelService, + UserService: b.UserService, + } +} + // TaskHandler represents an HTTP API handler for tasks. type TaskHandler struct { *httprouter.Router @@ -50,14 +76,17 @@ const ( ) // NewTaskHandler returns a new instance of TaskHandler. 
-func NewTaskHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, logger *zap.Logger, userService platform.UserService) *TaskHandler { +func NewTaskHandler(b *TaskBackend) *TaskHandler { h := &TaskHandler{ - logger: logger, Router: NewRouter(), + logger: b.Logger, - UserResourceMappingService: mappingService, - LabelService: labelService, - UserService: userService, + TaskService: b.TaskService, + AuthorizationService: b.AuthorizationService, + OrganizationService: b.OrganizationService, + UserResourceMappingService: b.UserResourceMappingService, + LabelService: b.LabelService, + UserService: b.UserService, } h.HandlerFunc("GET", tasksPath, h.handleGetTasks) @@ -70,13 +99,27 @@ func NewTaskHandler(mappingService platform.UserResourceMappingService, labelSer h.HandlerFunc("GET", tasksIDLogsPath, h.handleGetLogs) h.HandlerFunc("GET", tasksIDRunsIDLogsPath, h.handleGetLogs) - h.HandlerFunc("POST", tasksIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.TasksResourceType, platform.Member)) - h.HandlerFunc("GET", tasksIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.TasksResourceType, platform.Member)) - h.HandlerFunc("DELETE", tasksIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member)) + memberBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.TasksResourceType, + UserType: platform.Member, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", tasksIDMembersPath, newPostMemberHandler(memberBackend)) + h.HandlerFunc("GET", tasksIDMembersPath, newGetMembersHandler(memberBackend)) + h.HandlerFunc("DELETE", tasksIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - h.HandlerFunc("POST", tasksIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.TasksResourceType, platform.Owner)) - h.HandlerFunc("GET", tasksIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.TasksResourceType, platform.Owner)) - h.HandlerFunc("DELETE", tasksIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner)) + ownerBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.TasksResourceType, + UserType: platform.Owner, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", tasksIDOwnersPath, newPostMemberHandler(ownerBackend)) + h.HandlerFunc("GET", tasksIDOwnersPath, newGetMembersHandler(ownerBackend)) + h.HandlerFunc("DELETE", tasksIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) h.HandlerFunc("GET", tasksIDRunsPath, h.handleGetRuns) h.HandlerFunc("POST", tasksIDRunsPath, h.handleForceRun) @@ -84,9 +127,14 @@ func NewTaskHandler(mappingService platform.UserResourceMappingService, labelSer h.HandlerFunc("POST", tasksIDRunsIDRetryPath, h.handleRetryRun) h.HandlerFunc("DELETE", tasksIDRunsIDPath, h.handleCancelRun) - h.HandlerFunc("GET", tasksIDLabelsPath, newGetLabelsHandler(h.LabelService)) - h.HandlerFunc("POST", tasksIDLabelsPath, newPostLabelHandler(h.LabelService)) - h.HandlerFunc("DELETE", tasksIDLabelsIDPath, newDeleteLabelHandler(h.LabelService)) + labelBackend := &LabelBackend{ + Logger: b.Logger.With(zap.String("handler", "label")), + LabelService: b.LabelService, + } + h.HandlerFunc("GET", tasksIDLabelsPath, 
newGetLabelsHandler(labelBackend)) + h.HandlerFunc("POST", tasksIDLabelsPath, newPostLabelHandler(labelBackend)) + h.HandlerFunc("DELETE", tasksIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) + h.HandlerFunc("PATCH", tasksIDLabelsIDPath, newPatchLabelHandler(labelBackend)) return h } @@ -350,6 +398,29 @@ func (h *TaskHandler) handlePostTask(w http.ResponseWriter, r *http.Request) { return } + // add User resource map + urm := &platform.UserResourceMapping{ + UserID: auth.GetUserID(), + UserType: platform.Owner, + ResourceType: platform.TasksResourceType, + ResourceID: req.Task.ID, + } + if err := h.UserResourceMappingService.CreateUserResourceMapping(ctx, urm); err != nil { + // clean up the task if we fail to map the user and resource + // TODO(lh): Multi step creates could benefit from a service wide transactional request + if derr := h.TaskService.DeleteTask(ctx, req.Task.ID); derr != nil { + err = fmt.Errorf("%s: failed to clean up task: %s", err.Error(), derr.Error()) + } + + err = &platform.Error{ + Err: err, + Msg: "failed to add user permissions", + } + + EncodeError(ctx, err, w) + return + } + if err := encodeResponse(ctx, w, http.StatusCreated, newTaskResponse(*req.Task, []*platform.Label{})); err != nil { logEncodingError(h.logger, r, err) return @@ -462,10 +533,13 @@ func (h *TaskHandler) handleUpdateTask(w http.ResponseWriter, r *http.Request) { } task, err := h.TaskService.UpdateTask(ctx, req.TaskID, req.Update) if err != nil { - err = &platform.Error{ + err := &platform.Error{ Err: err, Msg: "failed to update task", } + if err.Err == backend.ErrTaskNotFound { + err.Code = platform.ENotFound + } EncodeError(ctx, err, w) return } @@ -541,13 +615,31 @@ func (h *TaskHandler) handleDeleteTask(w http.ResponseWriter, r *http.Request) { } if err := h.TaskService.DeleteTask(ctx, req.TaskID); err != nil { - err = &platform.Error{ + err := &platform.Error{ Err: err, Msg: "failed to delete task", } + if err.Err == backend.ErrTaskNotFound { + err.Code = platform.ENotFound + } EncodeError(ctx, err, w) return } + // clean up resource maps for deleted task + urms, _, err := h.UserResourceMappingService.FindUserResourceMappings(ctx, platform.UserResourceMappingFilter{ + ResourceID: req.TaskID, + ResourceType: platform.TasksResourceType, + }) + + if err != nil { + h.logger.Warn("failed to pull user resource mapping", zap.Error(err)) + } else { + for _, m := range urms { + if err := h.UserResourceMappingService.DeleteUserResourceMapping(ctx, m.ResourceID, m.UserID); err != nil { + h.logger.Warn(fmt.Sprintf("failed to remove user resource mapping for task %s", m.ResourceID.String()), zap.Error(err)) + } + } + } w.WriteHeader(http.StatusNoContent) } @@ -592,10 +684,13 @@ func (h *TaskHandler) handleGetLogs(w http.ResponseWriter, r *http.Request) { logs, _, err := h.TaskService.FindLogs(ctx, req.filter) if err != nil { - err = &platform.Error{ + err := &platform.Error{ Err: err, Msg: "failed to find task logs", } + if err.Err == backend.ErrTaskNotFound || err.Err == backend.ErrRunNotFound { + err.Code = platform.ENotFound + } EncodeError(ctx, err, w) return } @@ -671,10 +766,12 @@ func (h *TaskHandler) handleGetRuns(w http.ResponseWriter, r *http.Request) { runs, _, err := h.TaskService.FindRuns(ctx, req.filter) if err != nil { - err = &platform.Error{ - Err: err, - Code: platform.EInvalid, - Msg: "failed to find runs", + err := &platform.Error{ + Err: err, + Msg: "failed to find runs", + } + if err.Err == backend.ErrTaskNotFound { + err.Code = platform.ENotFound } EncodeError(ctx, err, w) 
return @@ -792,12 +889,12 @@ func (h *TaskHandler) handleForceRun(w http.ResponseWriter, r *http.Request) { run, err := h.TaskService.ForceRun(ctx, req.TaskID, req.Timestamp) if err != nil { - if err == backend.ErrRunNotFound { - err = &platform.Error{ - Code: platform.ENotFound, - Msg: "failed to force run", - Err: err, - } + err := &platform.Error{ + Err: err, + Msg: "failed to force run", + } + if err.Err == backend.ErrTaskNotFound { + err.Code = platform.ENotFound } EncodeError(ctx, err, w) return @@ -868,12 +965,12 @@ func (h *TaskHandler) handleGetRun(w http.ResponseWriter, r *http.Request) { run, err := h.TaskService.FindRunByID(ctx, req.TaskID, req.RunID) if err != nil { - if err == backend.ErrRunNotFound { - err = &platform.Error{ - Err: err, - Msg: "failed to find run", - Code: platform.ENotFound, - } + err := &platform.Error{ + Err: err, + Msg: "failed to find run", + } + if err.Err == backend.ErrTaskNotFound || err.Err == backend.ErrRunNotFound { + err.Code = platform.ENotFound } EncodeError(ctx, err, w) return @@ -974,10 +1071,13 @@ func (h *TaskHandler) handleCancelRun(w http.ResponseWriter, r *http.Request) { err = h.TaskService.CancelRun(ctx, req.TaskID, req.RunID) if err != nil { - err = &platform.Error{ + err := &platform.Error{ Err: err, Msg: "failed to cancel run", } + if err.Err == backend.ErrTaskNotFound || err.Err == backend.ErrRunNotFound { + err.Code = platform.ENotFound + } EncodeError(ctx, err, w) return } @@ -999,12 +1099,12 @@ func (h *TaskHandler) handleRetryRun(w http.ResponseWriter, r *http.Request) { run, err := h.TaskService.RetryRun(ctx, req.TaskID, req.RunID) if err != nil { - if err == backend.ErrRunNotFound { - err = &platform.Error{ - Code: platform.ENotFound, - Msg: "failed to retry run", - Err: err, - } + err := &platform.Error{ + Err: err, + Msg: "failed to retry run", + } + if err.Err == backend.ErrTaskNotFound || err.Err == backend.ErrRunNotFound { + err.Code = platform.ENotFound } EncodeError(ctx, err, w) return diff --git a/http/task_service_test.go b/http/task_service_test.go index 0ba937b01a..e8cf6eb41b 100644 --- a/http/task_service_test.go +++ b/http/task_service_test.go @@ -4,37 +4,50 @@ import ( "bytes" "context" "encoding/json" + "fmt" + "go.uber.org/zap" "io/ioutil" "net/http" "net/http/httptest" - "os" + "strings" "testing" platform "github.com/influxdata/influxdb" pcontext "github.com/influxdata/influxdb/context" - "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/inmem" "github.com/influxdata/influxdb/mock" _ "github.com/influxdata/influxdb/query/builtin" + "github.com/influxdata/influxdb/task/backend" platformtesting "github.com/influxdata/influxdb/testing" "github.com/julienschmidt/httprouter" ) -func mockOrgService() platform.OrganizationService { - return &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) { - return &platform.Organization{ID: id, Name: "test"}, nil - }, - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - org := &platform.Organization{} - if filter.Name != nil { - org.Name = *filter.Name - } - if filter.ID != nil { - org.ID = *filter.ID - } +// NewMockTaskBackend returns a TaskBackend with mock services. 
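+// A test can override just the service under test and keep the rest mocked,
+// e.g. (illustrative, following the pattern used by the tests below):
+//
+//	tb := NewMockTaskBackend()
+//	tb.TaskService = tt.fields.taskService
+//	h := NewTaskHandler(tb)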
+func NewMockTaskBackend() *TaskBackend { + return &TaskBackend{ + Logger: zap.NewNop().With(zap.String("handler", "task")), - return org, nil + AuthorizationService: mock.NewAuthorizationService(), + TaskService: &mock.TaskService{}, + OrganizationService: &mock.OrganizationService{ + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) { + return &platform.Organization{ID: id, Name: "test"}, nil + }, + FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { + org := &platform.Organization{} + if filter.Name != nil { + org.Name = *filter.Name + } + if filter.ID != nil { + org.ID = *filter.ID + } + + return org, nil + }, }, + UserResourceMappingService: mock.NewUserResourceMappingService(), + LabelService: mock.NewLabelService(), + UserService: mock.NewUserService(), } } @@ -161,10 +174,10 @@ func TestTaskHandler_handleGetTasks(t *testing.T) { r := httptest.NewRequest("GET", "http://any.url", nil) w := httptest.NewRecorder() - h := NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), logger.New(os.Stdout), mock.NewUserService()) - h.OrganizationService = mockOrgService() - h.TaskService = tt.fields.taskService - h.LabelService = tt.fields.labelService + taskBackend := NewMockTaskBackend() + taskBackend.TaskService = tt.fields.taskService + taskBackend.LabelService = tt.fields.labelService + h := NewTaskHandler(taskBackend) h.handleGetTasks(w, r) res := w.Result() @@ -262,9 +275,9 @@ func TestTaskHandler_handlePostTasks(t *testing.T) { w := httptest.NewRecorder() - h := NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), logger.New(os.Stdout), mock.NewUserService()) - h.OrganizationService = mockOrgService() - h.TaskService = tt.fields.taskService + taskBackend := NewMockTaskBackend() + taskBackend.TaskService = tt.fields.taskService + h := NewTaskHandler(taskBackend) h.handlePostTask(w, r) res := w.Result() @@ -367,9 +380,9 @@ func TestTaskHandler_handleGetRun(t *testing.T) { }, })) w := httptest.NewRecorder() - h := NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), logger.New(os.Stdout), mock.NewUserService()) - h.OrganizationService = mockOrgService() - h.TaskService = tt.fields.taskService + taskBackend := NewMockTaskBackend() + taskBackend.TaskService = tt.fields.taskService + h := NewTaskHandler(taskBackend) h.handleGetRun(w, r) res := w.Result() @@ -476,9 +489,9 @@ func TestTaskHandler_handleGetRuns(t *testing.T) { }, })) w := httptest.NewRecorder() - h := NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), logger.New(os.Stdout), mock.NewUserService()) - h.OrganizationService = mockOrgService() - h.TaskService = tt.fields.taskService + taskBackend := NewMockTaskBackend() + taskBackend.TaskService = tt.fields.taskService + h := NewTaskHandler(taskBackend) h.handleGetRuns(w, r) res := w.Result() @@ -497,3 +510,328 @@ func TestTaskHandler_handleGetRuns(t *testing.T) { }) } } + +func TestTaskHandler_NotFoundStatus(t *testing.T) { + // Ensure that the HTTP handlers return 404s for missing resources, and OKs for matching. 
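+	// Each case installs a mock TaskService that recognizes exactly one task
+	// ID (and, where applicable, one run ID): the matching path must return
+	// a 2xx status, and every mismatched ID combination must return 404.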
+ + im := inmem.NewService() + taskBackend := NewMockTaskBackend() + h := NewTaskHandler(taskBackend) + h.UserResourceMappingService = im + h.LabelService = im + h.UserService = im + h.OrganizationService = im + + o := platform.Organization{Name: "o"} + ctx := context.Background() + if err := h.OrganizationService.CreateOrganization(ctx, &o); err != nil { + t.Fatal(err) + } + + const taskID, runID = platform.ID(0xCCCCCC), platform.ID(0xAAAAAA) + + var ( + okTask = []interface{}{taskID} + okTaskRun = []interface{}{taskID, runID} + + notFoundTask = [][]interface{}{ + {taskID + 1}, + } + notFoundTaskRun = [][]interface{}{ + {taskID, runID + 1}, + {taskID + 1, runID}, + {taskID + 1, runID + 1}, + } + ) + + tcs := []struct { + name string + svc *mock.TaskService + method string + body string + pathFmt string + okPathArgs []interface{} + notFoundPathArgs [][]interface{} + }{ + { + name: "get task", + svc: &mock.TaskService{ + FindTaskByIDFn: func(_ context.Context, id platform.ID) (*platform.Task, error) { + if id == taskID { + return &platform.Task{ID: taskID, Organization: "o"}, nil + } + + return nil, backend.ErrTaskNotFound + }, + }, + method: http.MethodGet, + pathFmt: "/tasks/%s", + okPathArgs: okTask, + notFoundPathArgs: notFoundTask, + }, + { + name: "update task", + svc: &mock.TaskService{ + UpdateTaskFn: func(_ context.Context, id platform.ID, _ platform.TaskUpdate) (*platform.Task, error) { + if id == taskID { + return &platform.Task{ID: taskID, Organization: "o"}, nil + } + + return nil, backend.ErrTaskNotFound + }, + }, + method: http.MethodPatch, + body: "{}", + pathFmt: "/tasks/%s", + okPathArgs: okTask, + notFoundPathArgs: notFoundTask, + }, + { + name: "delete task", + svc: &mock.TaskService{ + DeleteTaskFn: func(_ context.Context, id platform.ID) error { + if id == taskID { + return nil + } + + return backend.ErrTaskNotFound + }, + }, + method: http.MethodDelete, + pathFmt: "/tasks/%s", + okPathArgs: okTask, + notFoundPathArgs: notFoundTask, + }, + { + name: "get task logs", + svc: &mock.TaskService{ + FindLogsFn: func(_ context.Context, f platform.LogFilter) ([]*platform.Log, int, error) { + if *f.Task == taskID { + return nil, 0, nil + } + + return nil, 0, backend.ErrTaskNotFound + }, + }, + method: http.MethodGet, + pathFmt: "/tasks/%s/logs", + okPathArgs: okTask, + notFoundPathArgs: notFoundTask, + }, + { + name: "get run logs", + svc: &mock.TaskService{ + FindLogsFn: func(_ context.Context, f platform.LogFilter) ([]*platform.Log, int, error) { + if *f.Task != taskID { + return nil, 0, backend.ErrTaskNotFound + } + if *f.Run != runID { + return nil, 0, backend.ErrRunNotFound + } + + return nil, 0, nil + }, + }, + method: http.MethodGet, + pathFmt: "/tasks/%s/runs/%s/logs", + okPathArgs: okTaskRun, + notFoundPathArgs: notFoundTaskRun, + }, + { + name: "get runs", + svc: &mock.TaskService{ + FindRunsFn: func(_ context.Context, f platform.RunFilter) ([]*platform.Run, int, error) { + if *f.Task != taskID { + return nil, 0, backend.ErrTaskNotFound + } + + return nil, 0, nil + }, + }, + method: http.MethodGet, + pathFmt: "/tasks/%s/runs", + okPathArgs: okTask, + notFoundPathArgs: notFoundTask, + }, + { + name: "force run", + svc: &mock.TaskService{ + ForceRunFn: func(_ context.Context, tid platform.ID, _ int64) (*platform.Run, error) { + if tid != taskID { + return nil, backend.ErrTaskNotFound + } + + return &platform.Run{ID: runID, TaskID: taskID, Status: backend.RunScheduled.String()}, nil + }, + }, + method: http.MethodPost, + body: "{}", + pathFmt: "/tasks/%s/runs", + 
okPathArgs: okTask, + notFoundPathArgs: notFoundTask, + }, + { + name: "get run", + svc: &mock.TaskService{ + FindRunByIDFn: func(_ context.Context, tid, rid platform.ID) (*platform.Run, error) { + if tid != taskID { + return nil, backend.ErrTaskNotFound + } + if rid != runID { + return nil, backend.ErrRunNotFound + } + + return &platform.Run{ID: runID, TaskID: taskID, Status: backend.RunScheduled.String()}, nil + }, + }, + method: http.MethodGet, + pathFmt: "/tasks/%s/runs/%s", + okPathArgs: okTaskRun, + notFoundPathArgs: notFoundTaskRun, + }, + { + name: "retry run", + svc: &mock.TaskService{ + RetryRunFn: func(_ context.Context, tid, rid platform.ID) (*platform.Run, error) { + if tid != taskID { + return nil, backend.ErrTaskNotFound + } + if rid != runID { + return nil, backend.ErrRunNotFound + } + + return &platform.Run{ID: runID, TaskID: taskID, Status: backend.RunScheduled.String()}, nil + }, + }, + method: http.MethodPost, + pathFmt: "/tasks/%s/runs/%s/retry", + okPathArgs: okTaskRun, + notFoundPathArgs: notFoundTaskRun, + }, + { + name: "cancel run", + svc: &mock.TaskService{ + CancelRunFn: func(_ context.Context, tid, rid platform.ID) error { + if tid != taskID { + return backend.ErrTaskNotFound + } + if rid != runID { + return backend.ErrRunNotFound + } + + return nil + }, + }, + method: http.MethodDelete, + pathFmt: "/tasks/%s/runs/%s", + okPathArgs: okTaskRun, + notFoundPathArgs: notFoundTaskRun, + }, + } + + for _, tc := range tcs { + tc := tc + t.Run(tc.name, func(t *testing.T) { + h.TaskService = tc.svc + + okPath := fmt.Sprintf(tc.pathFmt, tc.okPathArgs...) + t.Run("matching ID: "+tc.method+" "+okPath, func(t *testing.T) { + w := httptest.NewRecorder() + r := httptest.NewRequest(tc.method, "http://task.example/api/v2"+okPath, strings.NewReader(tc.body)) + + h.ServeHTTP(w, r) + + res := w.Result() + defer res.Body.Close() + + if res.StatusCode < 200 || res.StatusCode > 299 { + t.Errorf("expected OK, got %d", res.StatusCode) + b, _ := ioutil.ReadAll(res.Body) + t.Fatalf("body: %s", string(b)) + } + }) + + t.Run("mismatched ID", func(t *testing.T) { + for _, nfa := range tc.notFoundPathArgs { + path := fmt.Sprintf(tc.pathFmt, nfa...) 
+ t.Run(tc.method+" "+path, func(t *testing.T) { + w := httptest.NewRecorder() + r := httptest.NewRequest(tc.method, "http://task.example/api/v2"+path, strings.NewReader(tc.body)) + + h.ServeHTTP(w, r) + + res := w.Result() + defer res.Body.Close() + + if res.StatusCode != http.StatusNotFound { + t.Errorf("expected Not Found, got %d", res.StatusCode) + b, _ := ioutil.ReadAll(res.Body) + t.Fatalf("body: %s", string(b)) + } + }) + } + }) + }) + } +} + +func TestTaskUserResourceMap(t *testing.T) { + task := platform.Task{ + Name: "task1", + OrganizationID: 1, + } + + b, err := json.Marshal(task) + if err != nil { + t.Fatalf("failed to unmarshal task: %v", err) + } + + r := httptest.NewRequest("POST", "http://any.url/v1", bytes.NewReader(b)) + ctx := pcontext.SetAuthorizer(context.Background(), &platform.Authorization{UserID: 2}) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + + var created *platform.UserResourceMapping + var deletedUser platform.ID + var deletedResource platform.ID + + urms := &mock.UserResourceMappingService{ + CreateMappingFn: func(_ context.Context, urm *platform.UserResourceMapping) error { created = urm; return nil }, + DeleteMappingFn: func(_ context.Context, rid platform.ID, uid platform.ID) error { + deletedUser = uid + deletedResource = rid + return nil + }, + FindMappingsFn: func(context.Context, platform.UserResourceMappingFilter) ([]*platform.UserResourceMapping, int, error) { + return []*platform.UserResourceMapping{created}, 1, nil + }, + } + + taskBackend := NewMockTaskBackend() + taskBackend.UserResourceMappingService = urms + h := NewTaskHandler(taskBackend) + taskID := platform.ID(1) + + h.TaskService = &mock.TaskService{ + CreateTaskFn: func(ctx context.Context, t *platform.Task) error { + t.ID = taskID + return nil + }, + DeleteTaskFn: func(ctx context.Context, id platform.ID) error { + return nil + }, + } + h.handlePostTask(w, r) + r = httptest.NewRequest("DELETE", "http://any.url/api/v2/tasks/"+taskID.String(), nil) + + h.ServeHTTP(w, r) + + if created.UserID != deletedUser { + t.Fatalf("deleted user (%s) doesn't match created user (%s)", deletedUser, created.UserID) + } + + if created.ResourceID != deletedResource { + t.Fatalf("deleted resource (%s) doesn't match created resource (%s)", deletedResource, created.ResourceID) + } +} diff --git a/http/task_test.go b/http/task_test.go index 8ecb6854ac..7c7fbfb889 100644 --- a/http/task_test.go +++ b/http/task_test.go @@ -8,13 +8,11 @@ import ( platform "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/http" "github.com/influxdata/influxdb/inmem" - "github.com/influxdata/influxdb/mock" _ "github.com/influxdata/influxdb/query/builtin" "github.com/influxdata/influxdb/task" "github.com/influxdata/influxdb/task/backend" tmock "github.com/influxdata/influxdb/task/mock" "github.com/influxdata/influxdb/task/servicetest" - "go.uber.org/zap/zaptest" ) func httpTaskServiceFactory(t *testing.T) (*servicetest.System, context.CancelFunc) { @@ -30,23 +28,7 @@ func httpTaskServiceFactory(t *testing.T) (*servicetest.System, context.CancelFu h := http.NewAuthenticationHandler() h.AuthorizationService = i - th := http.NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), zaptest.NewLogger(t), mock.NewUserService()) - th.OrganizationService = &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) { - return &platform.Organization{ID: id, Name: "test"}, nil - }, - FindOrganizationF: func(ctx context.Context, 
filter platform.OrganizationFilter) (*platform.Organization, error) { - org := &platform.Organization{} - if filter.Name != nil { - org.Name = *filter.Name - } - if filter.ID != nil { - org.ID = *filter.ID - } - - return org, nil - }, - } + th := http.NewTaskHandler(http.NewMockTaskBackend()) th.TaskService = backingTS th.AuthorizationService = i h.Handler = th diff --git a/http/telegraf.go b/http/telegraf.go index 416fd8efa2..362525e957 100644 --- a/http/telegraf.go +++ b/http/telegraf.go @@ -14,6 +14,31 @@ import ( "go.uber.org/zap" ) +// TelegrafBackend is all services and associated parameters required to construct +// the TelegrafHandler. +type TelegrafBackend struct { + Logger *zap.Logger + + TelegrafService platform.TelegrafConfigStore + UserResourceMappingService platform.UserResourceMappingService + LabelService platform.LabelService + UserService platform.UserService + OrganizationService platform.OrganizationService +} + +// NewTelegrafBackend returns a new instance of TelegrafBackend. +func NewTelegrafBackend(b *APIBackend) *TelegrafBackend { + return &TelegrafBackend{ + Logger: b.Logger.With(zap.String("handler", "telegraf")), + + TelegrafService: b.TelegrafService, + UserResourceMappingService: b.UserResourceMappingService, + LabelService: b.LabelService, + UserService: b.UserService, + OrganizationService: b.OrganizationService, + } +} + // TelegrafHandler is the handler for the telegraf service type TelegrafHandler struct { *httprouter.Router @@ -38,23 +63,16 @@ const ( ) // NewTelegrafHandler returns a new instance of TelegrafHandler. -func NewTelegrafHandler( - logger *zap.Logger, - mappingService platform.UserResourceMappingService, - labelService platform.LabelService, - telegrafSvc platform.TelegrafConfigStore, - userService platform.UserService, - orgService platform.OrganizationService, -) *TelegrafHandler { +func NewTelegrafHandler(b *TelegrafBackend) *TelegrafHandler { h := &TelegrafHandler{ Router: NewRouter(), + Logger: b.Logger, - UserResourceMappingService: mappingService, - LabelService: labelService, - TelegrafService: telegrafSvc, - Logger: logger, - UserService: userService, - OrganizationService: orgService, + TelegrafService: b.TelegrafService, + UserResourceMappingService: b.UserResourceMappingService, + LabelService: b.LabelService, + UserService: b.UserService, + OrganizationService: b.OrganizationService, } h.HandlerFunc("POST", telegrafsPath, h.handlePostTelegraf) h.HandlerFunc("GET", telegrafsPath, h.handleGetTelegrafs) @@ -62,17 +80,36 @@ func NewTelegrafHandler( h.HandlerFunc("DELETE", telegrafsIDPath, h.handleDeleteTelegraf) h.HandlerFunc("PUT", telegrafsIDPath, h.handlePutTelegraf) - h.HandlerFunc("POST", telegrafsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.TelegrafsResourceType, platform.Member)) - h.HandlerFunc("GET", telegrafsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.TelegrafsResourceType, platform.Member)) - h.HandlerFunc("DELETE", telegrafsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member)) + memberBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.TelegrafsResourceType, + UserType: platform.Member, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", telegrafsIDMembersPath, newPostMemberHandler(memberBackend)) + h.HandlerFunc("GET", telegrafsIDMembersPath, 
newGetMembersHandler(memberBackend)) + h.HandlerFunc("DELETE", telegrafsIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - h.HandlerFunc("POST", telegrafsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.TelegrafsResourceType, platform.Owner)) - h.HandlerFunc("GET", telegrafsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.TelegrafsResourceType, platform.Owner)) - h.HandlerFunc("DELETE", telegrafsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner)) + ownerBackend := MemberBackend{ + Logger: b.Logger.With(zap.String("handler", "member")), + ResourceType: platform.TelegrafsResourceType, + UserType: platform.Owner, + UserResourceMappingService: b.UserResourceMappingService, + UserService: b.UserService, + } + h.HandlerFunc("POST", telegrafsIDOwnersPath, newPostMemberHandler(ownerBackend)) + h.HandlerFunc("GET", telegrafsIDOwnersPath, newGetMembersHandler(ownerBackend)) + h.HandlerFunc("DELETE", telegrafsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - h.HandlerFunc("GET", telegrafsIDLabelsPath, newGetLabelsHandler(h.LabelService)) - h.HandlerFunc("POST", telegrafsIDLabelsPath, newPostLabelHandler(h.LabelService)) - h.HandlerFunc("DELETE", telegrafsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService)) + labelBackend := &LabelBackend{ + Logger: b.Logger.With(zap.String("handler", "label")), + LabelService: b.LabelService, + } + h.HandlerFunc("GET", telegrafsIDLabelsPath, newGetLabelsHandler(labelBackend)) + h.HandlerFunc("POST", telegrafsIDLabelsPath, newPostLabelHandler(labelBackend)) + h.HandlerFunc("DELETE", telegrafsIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) + h.HandlerFunc("PATCH", telegrafsIDLabelsIDPath, newPatchLabelHandler(labelBackend)) return h } diff --git a/http/telegraf_test.go b/http/telegraf_test.go index afbe8c0462..81841de1e4 100644 --- a/http/telegraf_test.go +++ b/http/telegraf_test.go @@ -10,13 +10,27 @@ import ( "strings" "testing" + "go.uber.org/zap" + platform "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/mock" "github.com/influxdata/influxdb/telegraf/plugins/inputs" "github.com/influxdata/influxdb/telegraf/plugins/outputs" - "go.uber.org/zap/zaptest" ) +// NewMockTelegrafBackend returns a TelegrafBackend with mock services. 
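+// As with NewMockTaskBackend, tests replace individual services (typically
+// TelegrafService) on the returned backend before building the handler.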
+func NewMockTelegrafBackend() *TelegrafBackend { + return &TelegrafBackend{ + Logger: zap.NewNop().With(zap.String("handler", "telegraf")), + + TelegrafService: &mock.TelegrafConfigStore{}, + UserResourceMappingService: mock.NewUserResourceMappingService(), + LabelService: mock.NewLabelService(), + UserService: mock.NewUserService(), + OrganizationService: mock.NewOrganizationService(), + } +} + func TestTelegrafHandler_handleGetTelegrafs(t *testing.T) { type wants struct { statusCode int @@ -36,7 +50,7 @@ func TestTelegrafHandler_handleGetTelegrafs(t *testing.T) { FindTelegrafConfigsF: func(ctx context.Context, filter platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) { if filter.OrganizationID != nil && *filter.OrganizationID == platform.ID(2) { return []*platform.TelegrafConfig{ - &platform.TelegrafConfig{ + { ID: platform.ID(1), OrganizationID: platform.ID(2), Name: "tc1", @@ -86,7 +100,7 @@ func TestTelegrafHandler_handleGetTelegrafs(t *testing.T) { svc: &mock.TelegrafConfigStore{ FindTelegrafConfigsF: func(ctx context.Context, filter platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) { return []*platform.TelegrafConfig{ - &platform.TelegrafConfig{ + { ID: platform.ID(1), OrganizationID: platform.ID(2), Name: "my config", @@ -155,7 +169,9 @@ func TestTelegrafHandler_handleGetTelegrafs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { w := httptest.NewRecorder() - h := NewTelegrafHandler(zaptest.NewLogger(t), mock.NewUserResourceMappingService(), mock.NewLabelService(), tt.svc, mock.NewUserService(), &mock.OrganizationService{}) + telegrafBackend := NewMockTelegrafBackend() + telegrafBackend.TelegrafService = tt.svc + h := NewTelegrafHandler(telegrafBackend) h.ServeHTTP(w, tt.r) res := w.Result() @@ -674,15 +690,11 @@ func TestTelegrafHandler_handleGetTelegraf(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - logger := zaptest.NewLogger(t) - mapping := mock.NewUserResourceMappingService() - labels := mock.NewLabelService() - users := mock.NewUserService() - orgs := &mock.OrganizationService{} - tt.r.Header.Set("Accept", tt.acceptHeader) w := httptest.NewRecorder() - h := NewTelegrafHandler(logger, mapping, labels, tt.svc, users, orgs) + telegrafBackend := NewMockTelegrafBackend() + telegrafBackend.TelegrafService = tt.svc + h := NewTelegrafHandler(telegrafBackend) h.ServeHTTP(w, tt.r) @@ -722,7 +734,7 @@ func Test_newTelegrafResponses(t *testing.T) { { args: args{ tcs: []*platform.TelegrafConfig{ - &platform.TelegrafConfig{ + { ID: platform.ID(1), OrganizationID: platform.ID(2), Name: "my config", diff --git a/http/user_resource_mapping_service.go b/http/user_resource_mapping_service.go index b33caab4e6..67f8a503f1 100644 --- a/http/user_resource_mapping_service.go +++ b/http/user_resource_mapping_service.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "go.uber.org/zap" "net/http" "path" @@ -51,8 +52,20 @@ func newResourceUsersResponse(opts platform.FindOptions, f platform.UserResource return &rs } +// MemberBackend is all services and associated parameters required to construct +// member handler. 
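+// A resource handler constructs one MemberBackend per resource/role pair
+// (member and owner), which lets these generic member handlers be shared
+// across resource types.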
+type MemberBackend struct { + Logger *zap.Logger + + ResourceType platform.ResourceType + UserType platform.UserType + + UserResourceMappingService platform.UserResourceMappingService + UserService platform.UserService +} + // newPostMemberHandler returns a handler func for a POST to /members or /owners endpoints -func newPostMemberHandler(s platform.UserResourceMappingService, userService platform.UserService, resourceType platform.ResourceType, userType platform.UserType) http.HandlerFunc { +func newPostMemberHandler(b MemberBackend) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -62,7 +75,7 @@ func newPostMemberHandler(s platform.UserResourceMappingService, userService pla return } - user, err := userService.FindUserByID(ctx, req.MemberID) + user, err := b.UserService.FindUserByID(ctx, req.MemberID) if err != nil { EncodeError(ctx, err, w) return @@ -70,17 +83,17 @@ func newPostMemberHandler(s platform.UserResourceMappingService, userService pla mapping := &platform.UserResourceMapping{ ResourceID: req.ResourceID, - ResourceType: resourceType, + ResourceType: b.ResourceType, UserID: req.MemberID, - UserType: userType, + UserType: b.UserType, } - if err := s.CreateUserResourceMapping(ctx, mapping); err != nil { + if err := b.UserResourceMappingService.CreateUserResourceMapping(ctx, mapping); err != nil { EncodeError(ctx, err, w) return } - if err := encodeResponse(ctx, w, http.StatusCreated, newResourceUserResponse(user, userType)); err != nil { + if err := encodeResponse(ctx, w, http.StatusCreated, newResourceUserResponse(user, b.UserType)); err != nil { EncodeError(ctx, err, w) return } @@ -126,7 +139,7 @@ func decodePostMemberRequest(ctx context.Context, r *http.Request) (*postMemberR } // newGetMembersHandler returns a handler func for a GET to /members or /owners endpoints -func newGetMembersHandler(s platform.UserResourceMappingService, userService platform.UserService, resourceType platform.ResourceType, userType platform.UserType) http.HandlerFunc { +func newGetMembersHandler(b MemberBackend) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -138,12 +151,12 @@ func newGetMembersHandler(s platform.UserResourceMappingService, userService pla filter := platform.UserResourceMappingFilter{ ResourceID: req.ResourceID, - ResourceType: resourceType, - UserType: userType, + ResourceType: b.ResourceType, + UserType: b.UserType, } opts := platform.FindOptions{} - mappings, _, err := s.FindUserResourceMappings(ctx, filter) + mappings, _, err := b.UserResourceMappingService.FindUserResourceMappings(ctx, filter) if err != nil { EncodeError(ctx, err, w) return @@ -151,7 +164,7 @@ func newGetMembersHandler(s platform.UserResourceMappingService, userService pla users := make([]*platform.User, 0, len(mappings)) for _, m := range mappings { - user, err := userService.FindUserByID(ctx, m.UserID) + user, err := b.UserService.FindUserByID(ctx, m.UserID) if err != nil { EncodeError(ctx, err, w) return @@ -195,7 +208,7 @@ func decodeGetMembersRequest(ctx context.Context, r *http.Request) (*getMembersR } // newDeleteMemberHandler returns a handler func for a DELETE to /members or /owners endpoints -func newDeleteMemberHandler(s platform.UserResourceMappingService, userType platform.UserType) http.HandlerFunc { +func newDeleteMemberHandler(b MemberBackend) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -205,7 +218,7 @@ func newDeleteMemberHandler(s 
platform.UserResourceMappingService, userType plat
 			return
 		}
 
-		if err := s.DeleteUserResourceMapping(ctx, req.ResourceID, req.MemberID); err != nil {
+		if err := b.UserResourceMappingService.DeleteUserResourceMapping(ctx, req.ResourceID, req.MemberID); err != nil {
 			EncodeError(ctx, err, w)
 			return
 		}
diff --git a/http/user_resource_mapping_test.go b/http/user_resource_mapping_test.go
index 8a0e5a6d07..77071029cb 100644
--- a/http/user_resource_mapping_test.go
+++ b/http/user_resource_mapping_test.go
@@ -5,6 +5,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"go.uber.org/zap"
 	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
@@ -190,7 +191,14 @@ func TestUserResourceMappingService_GetMembersHandler(t *testing.T) {
 			}))
 			w := httptest.NewRecorder()
 
-			h := newGetMembersHandler(tt.fields.userResourceMappingService, tt.fields.userService, resourceType, tt.args.userType)
+			memberBackend := MemberBackend{
+				Logger:                     zap.NewNop().With(zap.String("handler", "member")),
+				ResourceType:               resourceType,
+				UserType:                   tt.args.userType,
+				UserResourceMappingService: tt.fields.userResourceMappingService,
+				UserService:                tt.fields.userService,
+			}
+			h := newGetMembersHandler(memberBackend)
 			h.ServeHTTP(w, r)
 
 			res := w.Result()
@@ -340,7 +348,14 @@ func TestUserResourceMappingService_PostMembersHandler(t *testing.T) {
 			}))
 			w := httptest.NewRecorder()
 
-			h := newPostMemberHandler(tt.fields.userResourceMappingService, tt.fields.userService, resourceType, tt.args.userType)
+			memberBackend := MemberBackend{
+				Logger:                     zap.NewNop().With(zap.String("handler", "member")),
+				ResourceType:               resourceType,
+				UserType:                   tt.args.userType,
+				UserResourceMappingService: tt.fields.userResourceMappingService,
+				UserService:                tt.fields.userService,
+			}
+			h := newPostMemberHandler(memberBackend)
 			h.ServeHTTP(w, r)
 
 			res := w.Result()
diff --git a/http/user_service.go b/http/user_service.go
index c42c8d0dea..e11f974a16 100644
--- a/http/user_service.go
+++ b/http/user_service.go
@@ -5,18 +5,40 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"go.uber.org/zap"
 	"net/http"
 	"path"
-	"strconv"
 
 	platform "github.com/influxdata/influxdb"
 	platcontext "github.com/influxdata/influxdb/context"
 	"github.com/julienschmidt/httprouter"
 )
 
+// UserBackend is all services and associated parameters required to construct
+// the UserHandler.
+type UserBackend struct {
+	Logger *zap.Logger
+
+	UserService             platform.UserService
+	UserOperationLogService platform.UserOperationLogService
+	BasicAuthService        platform.BasicAuthService
+}
+
+// NewUserBackend returns a new instance of UserBackend.
+func NewUserBackend(b *APIBackend) *UserBackend {
+	return &UserBackend{
+		Logger: b.Logger.With(zap.String("handler", "user")),
+
+		UserService:             b.UserService,
+		UserOperationLogService: b.UserOperationLogService,
+		BasicAuthService:        b.BasicAuthService,
+	}
+}
+
 // UserHandler represents an HTTP API handler for users.
 type UserHandler struct {
 	*httprouter.Router
+	Logger *zap.Logger
+
 	UserService             platform.UserService
 	UserOperationLogService platform.UserOperationLogService
 	BasicAuthService        platform.BasicAuthService
@@ -32,9 +54,14 @@ const (
 )
 
 // NewUserHandler returns a new instance of UserHandler.
-func NewUserHandler() *UserHandler {
+func NewUserHandler(b *UserBackend) *UserHandler {
 	h := &UserHandler{
 		Router: NewRouter(),
+		Logger: b.Logger,
+
+		UserService:             b.UserService,
+		UserOperationLogService: b.UserOperationLogService,
+		BasicAuthService:        b.BasicAuthService,
 	}
 
 	h.HandlerFunc("POST", usersPath, h.handlePostUser)
@@ -703,29 +730,14 @@ func decodeGetUserLogRequest(ctx context.Context, r *http.Request) (*getUserLogR
 		return nil, err
 	}
 
-	opts := platform.DefaultOperationLogFindOptions
-	qp := r.URL.Query()
-	if v := qp.Get("desc"); v == "false" {
-		opts.Descending = false
-	}
-	if v := qp.Get("limit"); v != "" {
-		i, err := strconv.Atoi(v)
-		if err != nil {
-			return nil, err
-		}
-		opts.Limit = i
-	}
-	if v := qp.Get("offset"); v != "" {
-		i, err := strconv.Atoi(v)
-		if err != nil {
-			return nil, err
-		}
-		opts.Offset = i
+	opts, err := decodeFindOptions(ctx, r)
+	if err != nil {
+		return nil, err
 	}
 
 	return &getUserLogRequest{
 		UserID: i,
-		opts:   opts,
+		opts:   *opts,
 	}, nil
 }
diff --git a/http/user_test.go b/http/user_test.go
index 5dc078f980..e872e7eeeb 100644
--- a/http/user_test.go
+++ b/http/user_test.go
@@ -7,9 +7,23 @@ import (
 	platform "github.com/influxdata/influxdb"
 	"github.com/influxdata/influxdb/inmem"
+	"github.com/influxdata/influxdb/mock"
 	platformtesting "github.com/influxdata/influxdb/testing"
+	"go.uber.org/zap"
 )
 
+// NewMockUserBackend returns a UserBackend with mock services.
+func NewMockUserBackend() *UserBackend {
+	return &UserBackend{
+		Logger: zap.NewNop().With(zap.String("handler", "user")),
+
+		UserService: mock.NewUserService(),
+
+		UserOperationLogService: mock.NewUserOperationLogService(),
+		BasicAuthService:        mock.NewBasicAuthService("", ""),
+	}
+}
+
 func initUserService(f platformtesting.UserFields, t *testing.T) (platform.UserService, string, func()) {
 	t.Helper()
 	svc := inmem.NewService()
@@ -22,8 +36,9 @@ func initUserService(f platformtesting.UserS
 		}
 	}
 
-	handler := NewUserHandler()
-	handler.UserService = svc
+	userBackend := NewMockUserBackend()
+	userBackend.UserService = svc
+	handler := NewUserHandler(userBackend)
 	server := httptest.NewServer(handler)
 	client := UserService{
 		Addr: server.URL,
diff --git a/http/view_service.go b/http/view_service.go
index 057e96b2ae..1f02884368 100644
--- a/http/view_service.go
+++ b/http/view_service.go
@@ -9,21 +9,43 @@ import (
 	"fmt"
 	"net/http"
 
-	platform "github.com/influxdata/influxdb"
+	"github.com/influxdata/influxdb"
 	"github.com/julienschmidt/httprouter"
 	"go.uber.org/zap"
 )
 
+// ViewBackend is all services and associated parameters required to construct
+// the ViewHandler.
+type ViewBackend struct {
+	Logger *zap.Logger
+
+	ViewService                influxdb.ViewService
+	UserService                influxdb.UserService
+	UserResourceMappingService influxdb.UserResourceMappingService
+	LabelService               influxdb.LabelService
+}
+
+// NewViewBackend returns a new instance of ViewBackend.
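+// Views reuse the generic member and label handlers, so the backend carries
+// UserResourceMappingService and LabelService alongside ViewService and
+// UserService.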
+func NewViewBackend(b *APIBackend) *ViewBackend {
+	return &ViewBackend{
+		Logger: b.Logger.With(zap.String("handler", "view")),
+
+		ViewService:                b.ViewService,
+		UserService:                b.UserService,
+		UserResourceMappingService: b.UserResourceMappingService,
+		LabelService:               b.LabelService,
+	}
+}
+
 // ViewHandler is the handler for the view service
 type ViewHandler struct {
 	*httprouter.Router
 
 	Logger *zap.Logger
 
-	ViewService                platform.ViewService
-	UserResourceMappingService platform.UserResourceMappingService
-	LabelService               platform.LabelService
-	UserService                platform.UserService
+	ViewService                influxdb.ViewService
+	UserService                influxdb.UserService
+	UserResourceMappingService influxdb.UserResourceMappingService
+	LabelService               influxdb.LabelService
 }
 
 const (
@@ -38,14 +60,15 @@ const (
 )
 
 // NewViewHandler returns a new instance of ViewHandler.
-func NewViewHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, userService platform.UserService) *ViewHandler {
+func NewViewHandler(b *ViewBackend) *ViewHandler {
 	h := &ViewHandler{
 		Router: NewRouter(),
-		Logger: zap.NewNop(),
+		Logger: b.Logger,
 
-		UserResourceMappingService: mappingService,
-		LabelService:               labelService,
-		UserService:                userService,
+		ViewService:                b.ViewService,
+		UserResourceMappingService: b.UserResourceMappingService,
+		LabelService:               b.LabelService,
+		UserService:                b.UserService,
 	}
 
 	h.HandlerFunc("POST", viewsPath, h.handlePostViews)
@@ -55,17 +78,35 @@ func NewViewHandler(mappingService platform.UserResourceMappingService, labelSer
 	h.HandlerFunc("DELETE", viewsIDPath, h.handleDeleteView)
 	h.HandlerFunc("PATCH", viewsIDPath, h.handlePatchView)
 
-	h.HandlerFunc("POST", viewsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Member))
-	h.HandlerFunc("GET", viewsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Member))
-	h.HandlerFunc("DELETE", viewsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member))
+	memberBackend := MemberBackend{
+		Logger:                     b.Logger.With(zap.String("handler", "member")),
+		ResourceType:               influxdb.ViewsResourceType,
+		UserType:                   influxdb.Member,
+		UserResourceMappingService: b.UserResourceMappingService,
+		UserService:                b.UserService,
+	}
+	h.HandlerFunc("POST", viewsIDMembersPath, newPostMemberHandler(memberBackend))
+	h.HandlerFunc("GET", viewsIDMembersPath, newGetMembersHandler(memberBackend))
+	h.HandlerFunc("DELETE", viewsIDMembersIDPath, newDeleteMemberHandler(memberBackend))
 
-	h.HandlerFunc("POST", viewsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Owner))
-	h.HandlerFunc("GET", viewsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Owner))
-	h.HandlerFunc("DELETE", viewsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner))
+	ownerBackend := MemberBackend{
+		Logger:                     b.Logger.With(zap.String("handler", "member")),
+		ResourceType:               influxdb.ViewsResourceType,
+		UserType:                   influxdb.Owner,
+		UserResourceMappingService: b.UserResourceMappingService,
+		UserService:                b.UserService,
+	}
+	h.HandlerFunc("POST", viewsIDOwnersPath, newPostMemberHandler(ownerBackend))
+	h.HandlerFunc("GET", viewsIDOwnersPath, newGetMembersHandler(ownerBackend))
+	h.HandlerFunc("DELETE", viewsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend))
 
-	h.HandlerFunc("GET", viewsIDLabelsPath,
newGetLabelsHandler(h.LabelService)) - h.HandlerFunc("POST", viewsIDLabelsPath, newPostLabelHandler(h.LabelService)) - h.HandlerFunc("DELETE", viewsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService)) + labelBackend := &LabelBackend{ + Logger: b.Logger.With(zap.String("handler", "label")), + LabelService: b.LabelService, + } + h.HandlerFunc("GET", viewsIDLabelsPath, newGetLabelsHandler(labelBackend)) + h.HandlerFunc("POST", viewsIDLabelsPath, newPostLabelHandler(labelBackend)) + h.HandlerFunc("DELETE", viewsIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) return h } @@ -76,18 +117,18 @@ type viewLinks struct { } type viewResponse struct { - platform.View + influxdb.View Links viewLinks `json:"links"` } func (r viewResponse) MarshalJSON() ([]byte, error) { - props, err := platform.MarshalViewPropertiesJSON(r.Properties) + props, err := influxdb.MarshalViewPropertiesJSON(r.Properties) if err != nil { return nil, err } return json.Marshal(struct { - platform.ViewContents + influxdb.ViewContents Links viewLinks `json:"links"` Properties json.RawMessage `json:"properties"` }{ @@ -97,7 +138,7 @@ func (r viewResponse) MarshalJSON() ([]byte, error) { }) } -func newViewResponse(c *platform.View) viewResponse { +func newViewResponse(c *influxdb.View) viewResponse { return viewResponse{ Links: viewLinks{ Self: fmt.Sprintf("/api/v2/views/%s", c.ID), @@ -126,14 +167,14 @@ func (h *ViewHandler) handleGetViews(w http.ResponseWriter, r *http.Request) { } type getViewsRequest struct { - filter platform.ViewFilter + filter influxdb.ViewFilter } func decodeGetViewsRequest(ctx context.Context, r *http.Request) *getViewsRequest { qp := r.URL.Query() return &getViewsRequest{ - filter: platform.ViewFilter{ + filter: influxdb.ViewFilter{ Types: qp["type"], }, } @@ -148,7 +189,7 @@ type getViewsResponse struct { Views []viewResponse `json:"views"` } -func newGetViewsResponse(views []*platform.View) getViewsResponse { +func newGetViewsResponse(views []*influxdb.View) getViewsResponse { res := getViewsResponse{ Links: getViewsLinks{ Self: "/api/v2/views", @@ -184,14 +225,14 @@ func (h *ViewHandler) handlePostViews(w http.ResponseWriter, r *http.Request) { } type postViewRequest struct { - View *platform.View + View *influxdb.View } func decodePostViewRequest(ctx context.Context, r *http.Request) (*postViewRequest, error) { - c := &platform.View{} + c := &influxdb.View{} if err := json.NewDecoder(r.Body).Decode(c); err != nil { - return nil, &platform.Error{ - Code: platform.EInvalid, + return nil, &influxdb.Error{ + Code: influxdb.EInvalid, Msg: err.Error(), } } @@ -223,20 +264,20 @@ func (h *ViewHandler) handleGetView(w http.ResponseWriter, r *http.Request) { } type getViewRequest struct { - ViewID platform.ID + ViewID influxdb.ID } func decodeGetViewRequest(ctx context.Context, r *http.Request) (*getViewRequest, error) { params := httprouter.ParamsFromContext(ctx) id := params.ByName("id") if id == "" { - return nil, &platform.Error{ - Code: platform.EInvalid, + return nil, &influxdb.Error{ + Code: influxdb.EInvalid, Msg: "url missing id", } } - var i platform.ID + var i influxdb.ID if err := i.DecodeFromString(id); err != nil { return nil, err } @@ -265,20 +306,20 @@ func (h *ViewHandler) handleDeleteView(w http.ResponseWriter, r *http.Request) { } type deleteViewRequest struct { - ViewID platform.ID + ViewID influxdb.ID } func decodeDeleteViewRequest(ctx context.Context, r *http.Request) (*deleteViewRequest, error) { params := httprouter.ParamsFromContext(ctx) id := params.ByName("id") if id == "" { - 
return nil, &platform.Error{ - Code: platform.EInvalid, + return nil, &influxdb.Error{ + Code: influxdb.EInvalid, Msg: "url missing id", } } - var i platform.ID + var i influxdb.ID if err := i.DecodeFromString(id); err != nil { return nil, err } @@ -310,16 +351,16 @@ func (h *ViewHandler) handlePatchView(w http.ResponseWriter, r *http.Request) { } type patchViewRequest struct { - ViewID platform.ID - Upd platform.ViewUpdate + ViewID influxdb.ID + Upd influxdb.ViewUpdate } -func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewRequest, *platform.Error) { +func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewRequest, *influxdb.Error) { req := &patchViewRequest{} - upd := platform.ViewUpdate{} + upd := influxdb.ViewUpdate{} if err := json.NewDecoder(r.Body).Decode(&upd); err != nil { - return nil, &platform.Error{ - Code: platform.EInvalid, + return nil, &influxdb.Error{ + Code: influxdb.EInvalid, Msg: err.Error(), } } @@ -329,15 +370,15 @@ func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewReq params := httprouter.ParamsFromContext(ctx) id := params.ByName("id") if id == "" { - return nil, &platform.Error{ - Code: platform.EInvalid, + return nil, &influxdb.Error{ + Code: influxdb.EInvalid, Msg: "url missing id", } } - var i platform.ID + var i influxdb.ID if err := i.DecodeFromString(id); err != nil { - return nil, &platform.Error{ - Code: platform.EInvalid, + return nil, &influxdb.Error{ + Code: influxdb.EInvalid, Err: err, } } @@ -345,7 +386,7 @@ func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewReq req.ViewID = i if err := req.Valid(); err != nil { - return nil, &platform.Error{ + return nil, &influxdb.Error{ Err: err, } } @@ -354,10 +395,10 @@ func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewReq } // Valid validates that the view ID is non zero valued and update has expected values set. -func (r *patchViewRequest) Valid() *platform.Error { +func (r *patchViewRequest) Valid() *influxdb.Error { if !r.ViewID.Valid() { - return &platform.Error{ - Code: platform.EInvalid, + return &influxdb.Error{ + Code: influxdb.EInvalid, Msg: "missing view ID", } } diff --git a/http/view_test.go b/http/view_test.go index ae7f1e99a5..b6e390b00a 100644 --- a/http/view_test.go +++ b/http/view_test.go @@ -11,17 +11,30 @@ import ( "testing" "github.com/google/go-cmp/cmp" - platform "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/mock" - platformtesting "github.com/influxdata/influxdb/testing" + influxdbtesting "github.com/influxdata/influxdb/testing" "github.com/julienschmidt/httprouter" "github.com/yudai/gojsondiff" "github.com/yudai/gojsondiff/formatter" + "go.uber.org/zap" ) +// NewMockViewBackend returns a ViewBackend with mock services. 
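+// Tests override ViewService per case, mirroring the other mock backends.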
+func NewMockViewBackend() *ViewBackend { + return &ViewBackend{ + Logger: zap.NewNop().With(zap.String("handler", "view")), + + ViewService: &mock.ViewService{}, + UserService: mock.NewUserService(), + UserResourceMappingService: &mock.UserResourceMappingService{}, + LabelService: mock.NewLabelService(), + } +} + func TestService_handleGetViews(t *testing.T) { type fields struct { - ViewService platform.ViewService + ViewService influxdb.ViewService } type args struct { queryParams map[string][]string @@ -42,20 +55,20 @@ func TestService_handleGetViews(t *testing.T) { name: "get all views", fields: fields{ &mock.ViewService{ - FindViewsF: func(ctx context.Context, filter platform.ViewFilter) ([]*platform.View, int, error) { - return []*platform.View{ + FindViewsF: func(ctx context.Context, filter influxdb.ViewFilter) ([]*influxdb.View, int, error) { + return []*influxdb.View{ { - ViewContents: platform.ViewContents{ - ID: platformtesting.MustIDBase16("7365637465747572"), + ViewContents: influxdb.ViewContents{ + ID: influxdbtesting.MustIDBase16("7365637465747572"), Name: "hello", }, - Properties: platform.XYViewProperties{ + Properties: influxdb.XYViewProperties{ Type: "xy", }, }, { - ViewContents: platform.ViewContents{ - ID: platformtesting.MustIDBase16("6167697474697320"), + ViewContents: influxdb.ViewContents{ + ID: influxdbtesting.MustIDBase16("6167697474697320"), Name: "example", }, }, @@ -111,8 +124,8 @@ func TestService_handleGetViews(t *testing.T) { name: "get all views when there are none", fields: fields{ &mock.ViewService{ - FindViewsF: func(ctx context.Context, filter platform.ViewFilter) ([]*platform.View, int, error) { - return []*platform.View{}, 0, nil + FindViewsF: func(ctx context.Context, filter influxdb.ViewFilter) ([]*influxdb.View, int, error) { + return []*influxdb.View{}, 0, nil }, }, }, @@ -133,8 +146,9 @@ func TestService_handleGetViews(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService()) - h.ViewService = tt.fields.ViewService + viewBackend := NewMockViewBackend() + viewBackend.ViewService = tt.fields.ViewService + h := NewViewHandler(viewBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -170,7 +184,7 @@ func TestService_handleGetViews(t *testing.T) { func TestService_handleGetView(t *testing.T) { type fields struct { - ViewService platform.ViewService + ViewService influxdb.ViewService } type args struct { id string @@ -191,10 +205,10 @@ func TestService_handleGetView(t *testing.T) { name: "get a view by id", fields: fields{ &mock.ViewService{ - FindViewByIDF: func(ctx context.Context, id platform.ID) (*platform.View, error) { - return &platform.View{ - ViewContents: platform.ViewContents{ - ID: platformtesting.MustIDBase16("020f755c3c082000"), + FindViewByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.View, error) { + return &influxdb.View{ + ViewContents: influxdb.ViewContents{ + ID: influxdbtesting.MustIDBase16("020f755c3c082000"), Name: "example", }, }, nil @@ -226,10 +240,10 @@ func TestService_handleGetView(t *testing.T) { name: "not found", fields: fields{ &mock.ViewService{ - FindViewByIDF: func(ctx context.Context, id platform.ID) (*platform.View, error) { - return nil, &platform.Error{ - Code: platform.ENotFound, - Msg: platform.ErrViewNotFound, + FindViewByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.View, error) { + return nil, &influxdb.Error{ + Code: influxdb.ENotFound, + Msg: 
influxdb.ErrViewNotFound, } }, }, @@ -245,8 +259,9 @@ func TestService_handleGetView(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService()) - h.ViewService = tt.fields.ViewService + viewBackend := NewMockViewBackend() + viewBackend.ViewService = tt.fields.ViewService + h := NewViewHandler(viewBackend) r := httptest.NewRequest("GET", "http://any.url", nil) @@ -283,10 +298,10 @@ func TestService_handleGetView(t *testing.T) { func TestService_handlePostViews(t *testing.T) { type fields struct { - ViewService platform.ViewService + ViewService influxdb.ViewService } type args struct { - view *platform.View + view *influxdb.View } type wants struct { statusCode int @@ -304,19 +319,19 @@ func TestService_handlePostViews(t *testing.T) { name: "create a new view", fields: fields{ &mock.ViewService{ - CreateViewF: func(ctx context.Context, c *platform.View) error { - c.ID = platformtesting.MustIDBase16("020f755c3c082000") + CreateViewF: func(ctx context.Context, c *influxdb.View) error { + c.ID = influxdbtesting.MustIDBase16("020f755c3c082000") return nil }, }, }, args: args{ - view: &platform.View{ - ViewContents: platform.ViewContents{ - ID: platformtesting.MustIDBase16("020f755c3c082000"), + view: &influxdb.View{ + ViewContents: influxdb.ViewContents{ + ID: influxdbtesting.MustIDBase16("020f755c3c082000"), Name: "hello", }, - Properties: platform.XYViewProperties{ + Properties: influxdb.XYViewProperties{ Type: "xy", }, }, @@ -351,8 +366,9 @@ func TestService_handlePostViews(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService()) - h.ViewService = tt.fields.ViewService + viewBackend := NewMockViewBackend() + viewBackend.ViewService = tt.fields.ViewService + h := NewViewHandler(viewBackend) b, err := json.Marshal(tt.args.view) if err != nil { @@ -383,7 +399,7 @@ func TestService_handlePostViews(t *testing.T) { func TestService_handleDeleteView(t *testing.T) { type fields struct { - ViewService platform.ViewService + ViewService influxdb.ViewService } type args struct { id string @@ -404,8 +420,8 @@ func TestService_handleDeleteView(t *testing.T) { name: "remove a view by id", fields: fields{ &mock.ViewService{ - DeleteViewF: func(ctx context.Context, id platform.ID) error { - if id == platformtesting.MustIDBase16("020f755c3c082000") { + DeleteViewF: func(ctx context.Context, id influxdb.ID) error { + if id == influxdbtesting.MustIDBase16("020f755c3c082000") { return nil } @@ -424,10 +440,10 @@ func TestService_handleDeleteView(t *testing.T) { name: "view not found", fields: fields{ &mock.ViewService{ - DeleteViewF: func(ctx context.Context, id platform.ID) error { - return &platform.Error{ - Code: platform.ENotFound, - Msg: platform.ErrViewNotFound, + DeleteViewF: func(ctx context.Context, id influxdb.ID) error { + return &influxdb.Error{ + Code: influxdb.ENotFound, + Msg: influxdb.ErrViewNotFound, } }, }, @@ -443,8 +459,9 @@ func TestService_handleDeleteView(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService()) - h.ViewService = tt.fields.ViewService + viewBackend := NewMockViewBackend() + viewBackend.ViewService = tt.fields.ViewService + h := NewViewHandler(viewBackend) r := httptest.NewRequest("GET", 
"http://any.url", nil) @@ -481,12 +498,12 @@ func TestService_handleDeleteView(t *testing.T) { func TestService_handlePatchView(t *testing.T) { type fields struct { - ViewService platform.ViewService + ViewService influxdb.ViewService } type args struct { id string name string - properties platform.ViewProperties + properties influxdb.ViewProperties } type wants struct { statusCode int @@ -504,14 +521,14 @@ func TestService_handlePatchView(t *testing.T) { name: "update a view", fields: fields{ &mock.ViewService{ - UpdateViewF: func(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) { - if id == platformtesting.MustIDBase16("020f755c3c082000") { - return &platform.View{ - ViewContents: platform.ViewContents{ - ID: platformtesting.MustIDBase16("020f755c3c082000"), + UpdateViewF: func(ctx context.Context, id influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { + if id == influxdbtesting.MustIDBase16("020f755c3c082000") { + return &influxdb.View{ + ViewContents: influxdb.ViewContents{ + ID: influxdbtesting.MustIDBase16("020f755c3c082000"), Name: "example", }, - Properties: platform.XYViewProperties{ + Properties: influxdb.XYViewProperties{ Type: "xy", }, }, nil @@ -555,14 +572,14 @@ func TestService_handlePatchView(t *testing.T) { name: "update a view with empty request body", fields: fields{ &mock.ViewService{ - UpdateViewF: func(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) { - if id == platformtesting.MustIDBase16("020f755c3c082000") { - return &platform.View{ - ViewContents: platform.ViewContents{ - ID: platformtesting.MustIDBase16("020f755c3c082000"), + UpdateViewF: func(ctx context.Context, id influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { + if id == influxdbtesting.MustIDBase16("020f755c3c082000") { + return &influxdb.View{ + ViewContents: influxdb.ViewContents{ + ID: influxdbtesting.MustIDBase16("020f755c3c082000"), Name: "example", }, - Properties: platform.XYViewProperties{ + Properties: influxdb.XYViewProperties{ Type: "xy", }, }, nil @@ -583,10 +600,10 @@ func TestService_handlePatchView(t *testing.T) { name: "view not found", fields: fields{ &mock.ViewService{ - UpdateViewF: func(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) { - return nil, &platform.Error{ - Code: platform.ENotFound, - Msg: platform.ErrViewNotFound, + UpdateViewF: func(ctx context.Context, id influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { + return nil, &influxdb.Error{ + Code: influxdb.ENotFound, + Msg: influxdb.ErrViewNotFound, } }, }, @@ -603,10 +620,11 @@ func TestService_handlePatchView(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService()) - h.ViewService = tt.fields.ViewService + viewBackend := NewMockViewBackend() + viewBackend.ViewService = tt.fields.ViewService + h := NewViewHandler(viewBackend) - upd := platform.ViewUpdate{} + upd := influxdb.ViewUpdate{} if tt.args.name != "" { upd.Name = &tt.args.name } @@ -688,9 +706,8 @@ func jsonEqual(s1, s2 string) (eq bool, diff string, err error) { return cmp.Equal(o1, o2), diff, err } -/* TODO: Add a go view service client - -func initViewService(f platformtesting.ViewFields, t *testing.T) (platform.ViewService, func()) { +/* todo +func initViewService(f influxdbtesting.ViewFields, t *testing.T) (influxdb.ViewService, func()) { t.Helper() svc := inmem.NewService() 
svc.IDGenerator = f.IDGenerator @@ -713,22 +730,7 @@ func initViewService(f platformtesting.ViewFields, t *testing.T) (platform.ViewS return &client, done } -func TestViewService_CreateView(t *testing.T) { - platformtesting.CreateView(initViewService, t) -} - -func TestViewService_FindViewByID(t *testing.T) { - platformtesting.FindViewByID(initViewService, t) -} -func TestViewService_FindViews(t *testing.T) { - platformtesting.FindViews(initViewService, t) -} - -func TestViewService_DeleteView(t *testing.T) { - platformtesting.DeleteView(initViewService, t) -} - -func TestViewService_UpdateView(t *testing.T) { - platformtesting.UpdateView(initViewService, t) +func TestViewService(t *testing.T) { + influxdbtesting.ViewService(initViewService, t) } */ diff --git a/http/write_handler.go b/http/write_handler.go index 3b058c2f79..c310bb6f86 100644 --- a/http/write_handler.go +++ b/http/write_handler.go @@ -18,6 +18,27 @@ import ( "go.uber.org/zap" ) +// WriteBackend is all services and associated parameters required to construct +// the WriteHandler. +type WriteBackend struct { + Logger *zap.Logger + + PointsWriter storage.PointsWriter + BucketService platform.BucketService + OrganizationService platform.OrganizationService +} + +// NewWriteBackend returns a new instance of WriteBackend. +func NewWriteBackend(b *APIBackend) *WriteBackend { + return &WriteBackend{ + Logger: b.Logger.With(zap.String("handler", "write")), + + PointsWriter: b.PointsWriter, + BucketService: b.BucketService, + OrganizationService: b.OrganizationService, + } +} + // WriteHandler receives line protocol and sends to a publish function. type WriteHandler struct { *httprouter.Router @@ -37,11 +58,14 @@ const ( ) // NewWriteHandler creates a new handler at /api/v2/write to receive line protocol. -func NewWriteHandler(writer storage.PointsWriter) *WriteHandler { +func NewWriteHandler(b *WriteBackend) *WriteHandler { h := &WriteHandler{ - Router: NewRouter(), - Logger: zap.NewNop(), - PointsWriter: writer, + Router: NewRouter(), + Logger: b.Logger, + + PointsWriter: b.PointsWriter, + BucketService: b.BucketService, + OrganizationService: b.OrganizationService, } h.HandlerFunc("POST", writePath, h.handleWrite) diff --git a/mock/dashboard_service.go b/mock/dashboard_service.go index 34c806c9b6..f80b0ac32c 100644 --- a/mock/dashboard_service.go +++ b/mock/dashboard_service.go @@ -24,6 +24,39 @@ type DashboardService struct { ReplaceDashboardCellsF func(ctx context.Context, id platform.ID, cs []*platform.Cell) error } +// NewDashboardService returns a mock of DashboardService where its methods will return zero values. 
+func NewDashboardService() *DashboardService { + return &DashboardService{ + CreateDashboardF: func(context.Context, *platform.Dashboard) error { return nil }, + FindDashboardByIDF: func(context.Context, platform.ID) (*platform.Dashboard, error) { return nil, nil }, + FindDashboardsF: func(context.Context, platform.DashboardFilter, platform.FindOptions) ([]*platform.Dashboard, int, error) { + return nil, 0, nil + }, + UpdateDashboardF: func(context.Context, platform.ID, platform.DashboardUpdate) (*platform.Dashboard, error) { + return nil, nil + }, + DeleteDashboardF: func(context.Context, platform.ID) error { return nil }, + + AddDashboardCellF: func(ctx context.Context, id platform.ID, c *platform.Cell, opts platform.AddDashboardCellOptions) error { + return nil + }, + RemoveDashboardCellF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID) error { return nil }, + GetDashboardCellViewF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID) (*platform.View, error) { + return nil, nil + }, + UpdateDashboardCellViewF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID, upd platform.ViewUpdate) (*platform.View, error) { + return nil, nil + }, + UpdateDashboardCellF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID, upd platform.CellUpdate) (*platform.Cell, error) { + return nil, nil + }, + CopyDashboardCellF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID) (*platform.Cell, error) { + return nil, nil + }, + ReplaceDashboardCellsF: func(ctx context.Context, id platform.ID, cs []*platform.Cell) error { return nil }, + } +} + func (s *DashboardService) FindDashboardByID(ctx context.Context, id platform.ID) (*platform.Dashboard, error) { return s.FindDashboardByIDF(ctx, id) } diff --git a/mock/lookup_service.go b/mock/lookup_service.go index ac85318c2c..d0e1724285 100644 --- a/mock/lookup_service.go +++ b/mock/lookup_service.go @@ -11,6 +11,15 @@ type LookupService struct { NameFn func(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error) } +// NewLookupService returns a mock of LookupService where its methods will return zero values. +func NewLookupService() *LookupService { + return &LookupService{ + NameFn: func(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error) { + return "", nil + }, + } +} + // Name returns the name for the resource and ID. func (s *LookupService) Name(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error) { return s.NameFn(ctx, resource, id) } diff --git a/mock/macro_service.go b/mock/macro_service.go index bd314a0f44..62e950d616 100644 --- a/mock/macro_service.go +++ b/mock/macro_service.go @@ -17,6 +17,22 @@ type MacroService struct { DeleteMacroF func(context.Context, platform.ID) error } +// NewMacroService returns a mock of MacroService where its methods will return zero values.
+func NewMacroService() *MacroService { + return &MacroService{ + FindMacrosF: func(context.Context, platform.MacroFilter, ...platform.FindOptions) ([]*platform.Macro, error) { + return nil, nil + }, + FindMacroByIDF: func(context.Context, platform.ID) (*platform.Macro, error) { return nil, nil }, + CreateMacroF: func(context.Context, *platform.Macro) error { return nil }, + UpdateMacroF: func(ctx context.Context, id platform.ID, update *platform.MacroUpdate) (*platform.Macro, error) { + return nil, nil + }, + ReplaceMacroF: func(context.Context, *platform.Macro) error { return nil }, + DeleteMacroF: func(context.Context, platform.ID) error { return nil }, + } +} + func (s *MacroService) CreateMacro(ctx context.Context, macro *platform.Macro) error { return s.CreateMacroF(ctx, macro) } diff --git a/mock/org_service.go b/mock/org_service.go index 48213da49b..3d568ea42a 100644 --- a/mock/org_service.go +++ b/mock/org_service.go @@ -2,7 +2,6 @@ package mock import ( "context" - platform "github.com/influxdata/influxdb" ) @@ -18,6 +17,25 @@ type OrganizationService struct { DeleteOrganizationF func(ctx context.Context, id platform.ID) error } +// NewOrganizationService returns a mock OrganizationService where its methods will return +// zero values. +func NewOrganizationService() *OrganizationService { + return &OrganizationService{ + FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) { return nil, nil }, + FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { + return nil, nil + }, + FindOrganizationsF: func(ctx context.Context, filter platform.OrganizationFilter, opt ...platform.FindOptions) ([]*platform.Organization, int, error) { + return nil, 0, nil + }, + CreateOrganizationF: func(ctx context.Context, b *platform.Organization) error { return nil }, + UpdateOrganizationF: func(ctx context.Context, id platform.ID, upd platform.OrganizationUpdate) (*platform.Organization, error) { + return nil, nil + }, + DeleteOrganizationF: func(ctx context.Context, id platform.ID) error { return nil }, + } +} + //FindOrganizationByID calls FindOrganizationByIDF. func (s *OrganizationService) FindOrganizationByID(ctx context.Context, id platform.ID) (*platform.Organization, error) { return s.FindOrganizationByIDF(ctx, id) diff --git a/mock/user_service.go b/mock/user_service.go index 99838cd7dd..5203554ed0 100644 --- a/mock/user_service.go +++ b/mock/user_service.go @@ -20,7 +20,7 @@ type UserService struct { UpdateUserFn func(context.Context, platform.ID, platform.UserUpdate) (*platform.User, error) } -// NewUserService returns a mock of NewUserService where its methods will return zero values. +// NewUserService returns a mock of UserService where its methods will return zero values. func NewUserService() *UserService { return &UserService{ FindUserByIDFn: func(context.Context, platform.ID) (*platform.User, error) { return nil, nil }, diff --git a/models/points.go b/models/points.go index a11b2df18e..dd985a6c4a 100644 --- a/models/points.go +++ b/models/points.go @@ -1353,6 +1353,15 @@ func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) }, nil } +// NewPointFromSeries returns a Point given the serialized key, some fields, and a time. 
+func NewPointFromSeries(key []byte, fields Fields, t time.Time) Point { + return &point{ + key: key, + time: t, + fields: fields.MarshalBinary(), + } +} + // pointKey checks some basic requirements for valid points, and returns the // key, along with an possible error. func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) { diff --git a/storage/engine.go b/storage/engine.go index c0c7429648..d7416bf4ac 100644 --- a/storage/engine.go +++ b/storage/engine.go @@ -12,9 +12,11 @@ import ( platform "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/storage/wal" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb/tsi1" "github.com/influxdata/influxdb/tsdb/tsm1" + "github.com/influxdata/influxdb/tsdb/value" "github.com/influxdata/influxql" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" @@ -38,7 +40,7 @@ type Engine struct { index *tsi1.Index sfile *tsdb.SeriesFile engine *tsm1.Engine - wal *tsm1.WAL + wal *wal.WAL retentionEnforcer *retentionEnforcer defaultMetricLabels prometheus.Labels @@ -119,27 +121,28 @@ func NewEngine(path string, c Config, options ...Option) *Engine { tsi1.WithPath(c.GetIndexPath(path))) // Initialize WAL - var wal tsm1.Log = new(tsm1.NopWAL) - if c.WAL.Enabled { - e.wal = tsm1.NewWAL(c.GetWALPath(path)) - e.wal.WithFsyncDelay(time.Duration(c.WAL.FsyncDelay)) - e.wal.EnableTraceLogging(c.TraceLoggingEnabled) - wal = e.wal - } + e.wal = wal.NewWAL(c.GetWALPath(path)) + e.wal.WithFsyncDelay(time.Duration(c.WAL.FsyncDelay)) + e.wal.EnableTraceLogging(c.TraceLoggingEnabled) + e.wal.SetEnabled(c.WAL.Enabled) // Initialise Engine e.engine = tsm1.NewEngine(c.GetEnginePath(path), e.index, c.Engine, - tsm1.WithWAL(wal), - tsm1.WithTraceLogging(c.TraceLoggingEnabled)) + tsm1.WithTraceLogging(c.TraceLoggingEnabled), + tsm1.WithSnapshotter(e)) // Apply options. for _, option := range options { option(e) } + // Set default metrics labels. e.engine.SetDefaultMetricLabels(e.defaultMetricLabels) e.sfile.SetDefaultMetricLabels(e.defaultMetricLabels) e.index.SetDefaultMetricLabels(e.defaultMetricLabels) + if e.wal != nil { + e.wal.SetDefaultMetricLabels(e.defaultMetricLabels) + } return e } @@ -160,6 +163,7 @@ func (e *Engine) WithLogger(log *zap.Logger) { e.sfile.WithLogger(e.logger) e.index.WithLogger(e.logger) e.engine.WithLogger(e.logger) + e.wal.WithLogger(e.logger) e.retentionEnforcer.WithLogger(e.logger) } @@ -170,13 +174,14 @@ func (e *Engine) PrometheusCollectors() []prometheus.Collector { metrics = append(metrics, tsdb.PrometheusCollectors()...) metrics = append(metrics, tsi1.PrometheusCollectors()...) metrics = append(metrics, tsm1.PrometheusCollectors()...) + metrics = append(metrics, wal.PrometheusCollectors()...) metrics = append(metrics, e.retentionEnforcer.PrometheusCollectors()...) return metrics } // Open opens the store and all underlying resources. It returns an error if // any of the underlying systems fail to open. -func (e *Engine) Open() error { +func (e *Engine) Open() (err error) { e.mu.Lock() defer e.mu.Unlock() @@ -184,19 +189,20 @@ func (e *Engine) Open() error { return nil // Already open } - if err := e.sfile.Open(); err != nil { + // Open the services in order and clean up if any fail. 
+ var oh openHelper + oh.Open(e.sfile) + oh.Open(e.index) + oh.Open(e.wal) + oh.Open(e.engine) + if err := oh.Done(); err != nil { return err } - if err := e.index.Open(); err != nil { + if err := e.replayWAL(); err != nil { return err } - if err := e.engine.Open(); err != nil { - return err - } - e.engine.SetCompactionsEnabled(true) // TODO(edd):is this needed? - e.closing = make(chan struct{}) // TODO(edd) background tasks will be run in priority order via a scheduler. @@ -207,6 +213,54 @@ func (e *Engine) Open() error { return nil } +// replayWAL reads the WAL segment files and replays them. +func (e *Engine) replayWAL() error { + if !e.config.WAL.Enabled { + return nil + } + now := time.Now() + + walPaths, err := wal.SegmentFileNames(e.wal.Path()) + if err != nil { + return err + } + + // TODO(jeff): we should just do snapshots and wait for them so that we don't hit + // OOM situations when reloading huge WALs. + + // Disable the max size during loading + limit := e.engine.Cache.MaxSize() + defer func() { e.engine.Cache.SetMaxSize(limit) }() + e.engine.Cache.SetMaxSize(0) + + // Execute all the entries in the WAL again + reader := wal.NewWALReader(walPaths) + reader.WithLogger(e.logger) + err = reader.Read(func(entry wal.WALEntry) error { + switch en := entry.(type) { + case *wal.WriteWALEntry: + points := tsm1.ValuesToPoints(en.Values) + err := e.writePointsLocked(tsdb.NewSeriesCollection(points), en.Values) + if _, ok := err.(tsdb.PartialWriteError); ok { + err = nil + } + return err + + case *wal.DeleteBucketRangeWALEntry: + return e.deleteBucketRangeLocked(en.OrgID, en.BucketID, en.Min, en.Max) + } + + return nil + }) + + e.logger.Info("Reloaded WAL", + zap.String("path", e.wal.Path()), + zap.Duration("duration", time.Since(now)), + zap.Error(err)) + + return err +} + // runRetentionEnforcer runs the retention enforcer in a separate goroutine. // // Currently this just runs on an interval, but in the future we will add the @@ -267,17 +321,15 @@ func (e *Engine) Close() error { defer e.mu.Unlock() e.closing = nil - if err := e.sfile.Close(); err != nil { - return err - } - - if err := e.index.Close(); err != nil { - return err - } - - return e.engine.Close() + var ch closeHelper + ch.Close(e.engine) + ch.Close(e.wal) + ch.Close(e.index) + ch.Close(e.sfile) + return ch.Done() } +// CreateSeriesCursor creates a SeriesCursor for usage with the read service. func (e *Engine) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (SeriesCursor, error) { e.mu.RLock() defer e.mu.RUnlock() @@ -287,6 +339,7 @@ func (e *Engine) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest return newSeriesCursor(req, e.index, cond) } +// CreateCursorIterator creates a CursorIterator for usage with the read service. func (e *Engine) CreateCursorIterator(ctx context.Context) (tsdb.CursorIterator, error) { e.mu.RLock() defer e.mu.RUnlock() @@ -302,9 +355,7 @@ func (e *Engine) CreateCursorIterator(ctx context.Context) (tsdb.CursorIterator, // WritePoints will however determine if there are any field type conflicts, and // return an appropriate error in that case. func (e *Engine) WritePoints(points []models.Point) error { - collection := tsdb.NewSeriesCollection(points) - - j := 0 + collection, j := tsdb.NewSeriesCollection(points), 0 for iter := collection.Iterator(); iter.Next(); { tags := iter.Tags() @@ -350,47 +401,110 @@ func (e *Engine) WritePoints(points []models.Point) error { return ErrEngineClosed } - // Add new series to the index and series file. 
Check for partial writes. + // Convert the points to values for adding to the WAL/Cache. + values, err := tsm1.PointsToValues(collection.Points) + if err != nil { + return err + } + + // Add the write to the WAL to be replayed if there is a crash or shutdown. + if _, err := e.wal.WriteMulti(values); err != nil { + return err + } + + return e.writePointsLocked(collection, values) +} + +// writePointsLocked does the work of writing points and must be called under some sort of lock. +func (e *Engine) writePointsLocked(collection *tsdb.SeriesCollection, values map[string][]value.Value) error { + // TODO(jeff): keep track of the values in the collection so that partial write + // errors get tracked all the way. Right now, the engine doesn't drop any values + // but if it ever did, the errors could end up missing some data. + + // Add new series to the index and series file. if err := e.index.CreateSeriesListIfNotExists(collection); err != nil { - // ignore PartialWriteErrors. The collection captures it. - // TODO(edd/jeff): should we just remove PartialWriteError from the index then? - if _, ok := err.(tsdb.PartialWriteError); !ok { + return err + } + + // If there was a PartialWriteError, that means the passed in values may contain + // more than the points so we need to recreate them. + if collection.PartialWriteError() != nil { + var err error + values, err = tsm1.PointsToValues(collection.Points) + if err != nil { return err } } - // Write the points to the cache and WAL. - if err := e.engine.WritePoints(collection.Points); err != nil { + // Write the values to the engine. + if err := e.engine.WriteValues(values); err != nil { return err } + return collection.PartialWriteError() } +// AcquireSegments closes the current WAL segment, gets the set of all the currently closed +// segments, and calls the callback. It does all of this under the lock on the engine. +func (e *Engine) AcquireSegments(fn func(segs []string) error) error { + e.mu.Lock() + defer e.mu.Unlock() + + if err := e.wal.CloseSegment(); err != nil { + return err + } + + segments, err := e.wal.ClosedSegments() + if err != nil { + return err + } + + return fn(segments) +} + +// CommitSegments calls the callback and if that does not return an error, removes the segment +// files from the WAL. It does all of this under the lock on the engine. +func (e *Engine) CommitSegments(segs []string, fn func() error) error { + e.mu.Lock() + defer e.mu.Unlock() + + if err := fn(); err != nil { + return err + } + + return e.wal.Remove(segs) +} + // DeleteBucket deletes an entire bucket from the storage engine. func (e *Engine) DeleteBucket(orgID, bucketID platform.ID) error { + return e.DeleteBucketRange(orgID, bucketID, math.MinInt64, math.MaxInt64) +} + +// DeleteBucketRange deletes data in the bucket with timestamps between min and max from the storage engine. +func (e *Engine) DeleteBucketRange(orgID, bucketID platform.ID, min, max int64) error { e.mu.RLock() defer e.mu.RUnlock() if e.closing == nil { return ErrEngineClosed } + // Add the delete to the WAL to be replayed if there is a crash or shutdown. + if _, err := e.wal.DeleteBucketRange(orgID, bucketID, min, max); err != nil { + return err + } + + return e.deleteBucketRangeLocked(orgID, bucketID, min, max) +} + +// deleteBucketRangeLocked does the work of deleting a bucket range and must be called under +// some sort of lock.
+func (e *Engine) deleteBucketRangeLocked(orgID, bucketID platform.ID, min, max int64) error { // TODO(edd): we need to clean up how we're encoding the prefix so that we // don't have to remember to get it right everywhere we need to touch TSM data. encoded := tsdb.EncodeName(orgID, bucketID) name := models.EscapeMeasurement(encoded[:]) - return e.engine.DeleteBucket(name, math.MinInt64, math.MaxInt64) -} - -// DeleteSeriesRangeWithPredicate deletes all series data iterated over if fn returns -// true for that series. -func (e *Engine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, fn func([]byte, models.Tags) (int64, int64, bool)) error { - e.mu.RLock() - defer e.mu.RUnlock() - if e.closing == nil { - return ErrEngineClosed - } - return e.engine.DeleteSeriesRangeWithPredicate(itr, fn) + return e.engine.DeleteBucketRange(name, min, max) } // SeriesCardinality returns the number of series in the engine. diff --git a/storage/metrics.go b/storage/metrics.go index 277386ab40..cf22be1acf 100644 --- a/storage/metrics.go +++ b/storage/metrics.go @@ -16,8 +16,6 @@ type retentionMetrics struct { labels prometheus.Labels Checks *prometheus.CounterVec CheckDuration *prometheus.HistogramVec - Unprocessable *prometheus.CounterVec - Series *prometheus.CounterVec } func newRetentionMetrics(labels prometheus.Labels) *retentionMetrics { @@ -25,17 +23,19 @@ func newRetentionMetrics(labels prometheus.Labels) *retentionMetrics { for k := range labels { names = append(names, k) } - names = append(names, "status") // All metrics include status sort.Strings(names) + checksNames := append(append([]string(nil), names...), "status", "org_id", "bucket_id") + sort.Strings(checksNames) + return &retentionMetrics{ labels: labels, Checks: prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: retentionSubsystem, Name: "checks_total", - Help: "Number of retention check operations performed.", - }, names), + Help: "Number of retention check operations performed by org/bucket id.", + }, checksNames), CheckDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: namespace, @@ -45,20 +45,6 @@ func newRetentionMetrics(labels prometheus.Labels) *retentionMetrics { // 25 buckets spaced exponentially between 10s and ~2h Buckets: prometheus.ExponentialBuckets(10, 1.32, 25), }, names), - - Unprocessable: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: retentionSubsystem, - Name: "unprocessable_buckets_total", - Help: "Number of buckets that could not be operated on.", - }, names), - - Series: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: retentionSubsystem, - Name: "series_total", - Help: "Number of series that a delete was applied to.", - }, names), } } @@ -76,7 +62,5 @@ func (rm *retentionMetrics) PrometheusCollectors() []prometheus.Collector { return []prometheus.Collector{ rm.Checks, rm.CheckDuration, - rm.Unprocessable, - rm.Series, } } diff --git a/storage/opener.go b/storage/opener.go new file mode 100644 index 0000000000..024d872f5a --- /dev/null +++ b/storage/opener.go @@ -0,0 +1,61 @@ +package storage + +import ( + "io" +) + +// opener is something that can be opened and closed. +type opener interface { + Open() error + io.Closer +} + +// openHelper is a helper to abstract the pattern of opening multiple things, +// exiting early if any open fails, and closing any of the opened things +// in the case of failure. 
+type openHelper struct { + opened []io.Closer + err error +} + +// Open attempts to open the opener. If an error has happened already +// then no calls are made to the opener. +func (o *openHelper) Open(op opener) { + if o.err != nil { + return + } + o.err = op.Open() + if o.err == nil { + o.opened = append(o.opened, op) + } +} + +// Done returns the error of the first open and closes in reverse +// order any opens that have already happened if there was an error. +func (o *openHelper) Done() error { + if o.err == nil { + return nil + } + for i := len(o.opened) - 1; i >= 0; i-- { + o.opened[i].Close() + } + return o.err +} + +// closeHelper is a helper to abstract the pattern of closing multiple +// things and keeping track of the first encountered error. +type closeHelper struct { + err error +} + +// Close closes the closer and keeps track of the first error. +func (c *closeHelper) Close(cl io.Closer) { + if err := cl.Close(); c.err == nil { + c.err = err + } +} + +// Done returns the first error. +func (c *closeHelper) Done() error { + return c.err +} diff --git a/storage/reads/array_cursor.gen.go.tmpl b/storage/reads/array_cursor.gen.go.tmpl index e56c4e0ad7..8bf15b0319 100644 --- a/storage/reads/array_cursor.gen.go.tmpl +++ b/storage/reads/array_cursor.gen.go.tmpl @@ -106,7 +106,7 @@ func (c *{{.name}}MultiShardArrayCursor) reset(cur cursors.{{.Name}}ArrayCursor, } -func (c *{{.name}}MultiShardArrayCursor) Err() error { return c.err } +func (c *{{.name}}MultiShardArrayCursor) Err() error { return c.err } func (c *{{.name}}MultiShardArrayCursor) Stats() cursors.CursorStats { return c.{{.Name}}ArrayCursor.Stats() @@ -200,17 +200,17 @@ func (c {{$type}}) Next() {{$arrayType}} { var acc {{.Type}} for { - for _, v := range a.Values { - acc += v - } + for _, v := range a.Values { + acc += v + } a = c.{{.Name}}ArrayCursor.Next() - if len(a.Timestamps) == 0 { + if len(a.Timestamps) == 0 { c.ts[0] = ts c.vs[0] = acc c.res.Timestamps = c.ts[:] c.res.Values = c.vs[:] return c.res - } + } } } @@ -230,11 +230,11 @@ func (c *integer{{.Name}}CountArrayCursor) Next() *cursors.IntegerArray { return &cursors.IntegerArray{} } - ts := a.Timestamps[0] - var acc int64 - for { - acc += int64(len(a.Timestamps)) - a = c.{{.Name}}ArrayCursor.Next() + ts := a.Timestamps[0] + var acc int64 + for { + acc += int64(len(a.Timestamps)) + a = c.{{.Name}}ArrayCursor.Next() if len(a.Timestamps) == 0 { res := cursors.NewIntegerArrayLen(1) res.Timestamps[0] = ts diff --git a/storage/retention.go b/storage/retention.go index ed3a08282a..323f8882b7 100644 --- a/storage/retention.go +++ b/storage/retention.go @@ -4,28 +4,21 @@ import ( "context" "errors" "math" - "sync" - "sync/atomic" "time" platform "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/logger" - "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/tsdb" - "github.com/influxdata/influxql" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" ) const ( bucketAPITimeout = 10 * time.Second - engineAPITimeout = time.Minute ) // A Deleter implementation is capable of deleting data from a storage engine. type Deleter interface { - CreateSeriesCursor(context.Context, SeriesCursorRequest, influxql.Expr) (SeriesCursor, error) - DeleteSeriesRangeWithPredicate(tsdb.SeriesIterator, func([]byte, models.Tags) (int64, int64, bool)) error + DeleteBucketRange(orgID, bucketID platform.ID, min, max int64) error } // A BucketFinder is responsible for providing access to buckets via a filter. 
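The openHelper/closeHelper pair above (storage/opener.go) replaces the repeated open-check-close-on-failure sequences in Engine.Open and Engine.Close. A minimal usage sketch under stated assumptions: openTwo/closeTwo are illustrative names, not part of this change, and the snippet assumes it sits in package storage alongside the helpers with "io" imported.

    // openTwo opens a then b, closing a again if b fails to open.
    func openTwo(a, b opener) error {
        var oh openHelper
        oh.Open(a) // records a for cleanup only if it opened successfully
        oh.Open(b) // no-op if opening a already failed
        // On failure, Done closes everything opened so far in reverse
        // order and returns the first error; otherwise it returns nil.
        return oh.Done()
    }

    // closeTwo closes both resources, reporting the first error seen.
    func closeTwo(a, b io.Closer) error {
        var ch closeHelper
        ch.Close(b) // close in reverse of the open order
        ch.Close(a) // still attempted even if closing b failed
        return ch.Done()
    }
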
@@ -78,169 +71,59 @@ func (s *retentionEnforcer) run() { log, logEnd := logger.NewOperation(s.logger, "Data retention check", "data_retention_check") defer logEnd() - rpByBucketID, err := s.getRetentionPeriodPerBucket() + buckets, err := s.getBucketInformation() if err != nil { - log.Error("Unable to determine bucket:RP mapping", zap.Error(err)) + log.Error("Unable to determine bucket information", zap.Error(err)) return } now := time.Now().UTC() - labels := s.metrics.Labels() - labels["status"] = "ok" - - if err := s.expireData(rpByBucketID, now); err != nil { - log.Error("Deletion not successful", zap.Error(err)) - labels["status"] = "error" - } - s.metrics.CheckDuration.With(labels).Observe(time.Since(now).Seconds()) - s.metrics.Checks.With(labels).Inc() + s.expireData(buckets, now) + s.metrics.CheckDuration.With(s.metrics.Labels()).Observe(time.Since(now).Seconds()) } // expireData runs a delete operation on the storage engine. // -// Any series data that (1) belongs to a bucket in the provided map and +// Any series data that (1) belongs to a bucket in the provided list and // (2) falls outside the bucket's indicated retention period will be deleted. -func (s *retentionEnforcer) expireData(rpByBucketID map[platform.ID]time.Duration, now time.Time) error { - _, logEnd := logger.NewOperation(s.logger, "Data deletion", "data_deletion") +func (s *retentionEnforcer) expireData(buckets []*platform.Bucket, now time.Time) { + logger, logEnd := logger.NewOperation(s.logger, "Data deletion", "data_deletion") defer logEnd() - ctx, cancel := context.WithTimeout(context.Background(), engineAPITimeout) - defer cancel() - cur, err := s.Engine.CreateSeriesCursor(ctx, SeriesCursorRequest{}, nil) - if err != nil { - return err - } - defer cur.Close() - - var mu sync.Mutex - badMSketch := make(map[string]struct{}) // Badly formatted measurements. - missingBSketch := make(map[platform.ID]struct{}) // Missing buckets. - - var seriesDeleted uint64 // Number of series where a delete is attempted. - var seriesSkipped uint64 // Number of series that were skipped from delete. 
- - fn := func(name []byte, tags models.Tags) (int64, int64, bool) { - if len(name) != platform.IDLength { - mu.Lock() - badMSketch[string(name)] = struct{}{} - mu.Unlock() - atomic.AddUint64(&seriesSkipped, 1) - return 0, 0, false - + labels := s.metrics.Labels() + for _, b := range buckets { + if b.RetentionPeriod == 0 { + continue } - var n [16]byte - copy(n[:], name) - _, bucketID := tsdb.DecodeName(n) - - retentionPeriod, ok := rpByBucketID[bucketID] - if !ok { - mu.Lock() - missingBSketch[bucketID] = struct{}{} - mu.Unlock() - atomic.AddUint64(&seriesSkipped, 1) - return 0, 0, false - } - if retentionPeriod == 0 { - return 0, 0, false - } - - atomic.AddUint64(&seriesDeleted, 1) - to := now.Add(-retentionPeriod).UnixNano() - return math.MinInt64, to, true - } - - defer func() { - if s.metrics == nil { - return - } - labels := s.metrics.Labels() - labels["status"] = "bad_measurement" - s.metrics.Unprocessable.With(labels).Add(float64(len(badMSketch))) - - labels["status"] = "missing_bucket" - s.metrics.Unprocessable.With(labels).Add(float64(len(missingBSketch))) - labels["status"] = "ok" - s.metrics.Series.With(labels).Add(float64(atomic.LoadUint64(&seriesDeleted))) + labels["org_id"] = b.OrganizationID.String() + labels["bucket_id"] = b.ID.String() + labels["status"] = "ok" - labels["status"] = "skipped" - s.metrics.Series.With(labels).Add(float64(atomic.LoadUint64(&seriesSkipped))) - }() + max := now.Add(-b.RetentionPeriod).UnixNano() + err := s.Engine.DeleteBucketRange(b.OrganizationID, b.ID, math.MinInt64, max) + if err != nil { + labels["status"] = "error" + logger.Info("unable to delete bucket range", + zap.String("bucket id", b.ID.String()), + zap.String("org id", b.OrganizationID.String()), + zap.Error(err)) + } - return s.Engine.DeleteSeriesRangeWithPredicate(newSeriesIteratorAdapter(cur), fn) + s.metrics.Checks.With(labels).Inc() + } } -// getRetentionPeriodPerBucket returns a map of (bucket ID -> retention period) -// for all buckets. -func (s *retentionEnforcer) getRetentionPeriodPerBucket() (map[platform.ID]time.Duration, error) { +// getBucketInformation returns a slice of buckets to run retention on. +func (s *retentionEnforcer) getBucketInformation() ([]*platform.Bucket, error) { ctx, cancel := context.WithTimeout(context.Background(), bucketAPITimeout) defer cancel() + buckets, _, err := s.BucketService.FindBuckets(ctx, platform.BucketFilter{}) - if err != nil { - return nil, err - } - rpByBucketID := make(map[platform.ID]time.Duration, len(buckets)) - for _, bucket := range buckets { - rpByBucketID[bucket.ID] = bucket.RetentionPeriod - } - return rpByBucketID, nil + return buckets, err } // PrometheusCollectors satisfies the prom.PrometheusCollector interface. func (s *retentionEnforcer) PrometheusCollectors() []prometheus.Collector { return s.metrics.PrometheusCollectors() } - -type seriesIteratorAdapter struct { - itr SeriesCursor - ea seriesElemAdapter - elem tsdb.SeriesElem -} - -func newSeriesIteratorAdapter(itr SeriesCursor) *seriesIteratorAdapter { - si := &seriesIteratorAdapter{itr: itr} - si.elem = &si.ea - return si -} - -// Next returns the next tsdb.SeriesElem. -// -// The returned tsdb.SeriesElem is valid for use until Next is called again.
-func (s *seriesIteratorAdapter) Next() (tsdb.SeriesElem, error) { - if s.itr == nil { - return nil, nil - } - - row, err := s.itr.Next() - if err != nil { - return nil, err - } - - if row == nil { - return nil, nil - } - - s.ea.name = row.Name - s.ea.tags = row.Tags - return s.elem, nil -} - -func (s *seriesIteratorAdapter) Close() error { - if s.itr != nil { - err := s.itr.Close() - s.itr = nil - return err - } - return nil -} - -type seriesElemAdapter struct { - name []byte - tags models.Tags -} - -func (e *seriesElemAdapter) Name() []byte { return e.name } -func (e *seriesElemAdapter) Tags() models.Tags { return e.tags } -func (e *seriesElemAdapter) Deleted() bool { return false } -func (e *seriesElemAdapter) Expr() influxql.Expr { return nil } diff --git a/storage/retention_test.go b/storage/retention_test.go index c4d2a8e9b2..988426706b 100644 --- a/storage/retention_test.go +++ b/storage/retention_test.go @@ -2,7 +2,6 @@ package storage import ( "context" - "fmt" "math" "math/rand" "reflect" @@ -10,103 +9,74 @@ import ( "time" platform "github.com/influxdata/influxdb" - "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/tsdb" - "github.com/influxdata/influxql" ) -func TestService_expireData(t *testing.T) { +func TestRetentionService(t *testing.T) { engine := NewTestEngine() service := newRetentionEnforcer(engine, NewTestBucketFinder()) now := time.Date(2018, 4, 10, 23, 12, 33, 0, time.UTC) - t.Run("no rpByBucketID", func(t *testing.T) { - if err := service.expireData(nil, now); err != nil { - t.Error(err) - } - - if err := service.expireData(map[platform.ID]time.Duration{}, now); err != nil { - t.Error(err) - } + t.Run("no buckets", func(t *testing.T) { + service.expireData(nil, now) + service.expireData([]*platform.Bucket{}, now) }) - // Generate some measurement names - var names [][]byte - rpByBucketID := map[platform.ID]time.Duration{} - expMatchedFrequencies := map[string]int{} // To be used for verifying test results. - expRejectedFrequencies := map[string]int{} // To be used for verifying test results. + // Generate some buckets to expire + buckets := []*platform.Bucket{} + expMatched := map[string]struct{}{} // To be used for verifying test results. + expRejected := map[string]struct{}{} // To be used for verifying test results. for i := 0; i < 15; i++ { - repeat := rand.Intn(10) + 1 // [1, 10] name := genMeasurementName() - for j := 0; j < repeat; j++ { - names = append(names, name) - } var n [16]byte copy(n[:], name) - _, bucketID := tsdb.DecodeName(n) + orgID, bucketID := tsdb.DecodeName(n) // Put 1/3rd in the rpByBucketID into the set to delete and 1/3rd into the set // to not delete because no rp, and 1/3rd into the set to not delete because 0 rp. if i%3 == 0 { - rpByBucketID[bucketID] = 3 * time.Hour - expMatchedFrequencies[string(name)] = repeat + buckets = append(buckets, &platform.Bucket{ + OrganizationID: orgID, + ID: bucketID, + RetentionPeriod: 3 * time.Hour, + }) + expMatched[string(name)] = struct{}{} } else if i%3 == 1 { - expRejectedFrequencies[string(name)] = repeat + expRejected[string(name)] = struct{}{} } else if i%3 == 2 { - rpByBucketID[bucketID] = 0 - expRejectedFrequencies[string(name)] = repeat + buckets = append(buckets, &platform.Bucket{ + OrganizationID: orgID, + ID: bucketID, + RetentionPeriod: 0, + }) + expRejected[string(name)] = struct{}{} } } - // Add a badly formatted measurement. 
- for i := 0; i < 5; i++ { - names = append(names, []byte("zyzwrong")) - } - expRejectedFrequencies["zyzwrong"] = 5 - - gotMatchedFrequencies := map[string]int{} - gotRejectedFrequencies := map[string]int{} - engine.DeleteSeriesRangeWithPredicateFn = func(_ tsdb.SeriesIterator, fn func([]byte, models.Tags) (int64, int64, bool)) error { - - // Iterate over the generated names updating the frequencies by which - // the predicate function in expireData matches or rejects them. - for _, name := range names { - from, to, shouldDelete := fn(name, nil) - if shouldDelete { - gotMatchedFrequencies[string(name)]++ - if from != math.MinInt64 { - return fmt.Errorf("got from %d, expected %d", from, math.MinInt64) - } - wantTo := now.Add(-3 * time.Hour).UnixNano() - if to != wantTo { - return fmt.Errorf("got to %d, expected %d", to, wantTo) - } - } else { - gotRejectedFrequencies[string(name)]++ - } + gotMatched := map[string]struct{}{} + engine.DeleteBucketRangeFn = func(orgID, bucketID platform.ID, from, to int64) error { + if from != math.MinInt64 { + t.Fatalf("got from %d, expected %d", from, math.MinInt64) } + wantTo := now.Add(-3 * time.Hour).UnixNano() + if to != wantTo { + t.Fatalf("got to %d, expected %d", to, wantTo) + } + + name := tsdb.EncodeName(orgID, bucketID) + if _, ok := expRejected[string(name[:])]; ok { + t.Fatalf("got a delete for %x", name) + } + gotMatched[string(name[:])] = struct{}{} return nil } - t.Run("multiple bucket", func(t *testing.T) { - if err := service.expireData(rpByBucketID, now); err != nil { - t.Error(err) + t.Run("multiple buckets", func(t *testing.T) { + service.expireData(buckets, now) + if !reflect.DeepEqual(gotMatched, expMatched) { + t.Fatalf("got\n%#v\nexpected\n%#v", gotMatched, expMatched) } - - // Verify that the correct series were marked to be deleted. - t.Run("matched", func(t *testing.T) { - if !reflect.DeepEqual(gotMatchedFrequencies, expMatchedFrequencies) { - t.Fatalf("got\n%#v\nexpected\n%#v", gotMatchedFrequencies, expMatchedFrequencies) - } - }) - - t.Run("rejected", func(t *testing.T) { - // Verify that badly formatted measurements were rejected. 
- if !reflect.DeepEqual(gotRejectedFrequencies, expRejectedFrequencies) { - t.Fatalf("got\n%#v\nexpected\n%#v", gotRejectedFrequencies, expRejectedFrequencies) - } - }) }) } @@ -120,40 +90,18 @@ func genMeasurementName() []byte { return b } -type TestSeriesCursor struct { - CloseFn func() error - NextFn func() (*SeriesCursorRow, error) -} - -func (f *TestSeriesCursor) Close() error { return f.CloseFn() } -func (f *TestSeriesCursor) Next() (*SeriesCursorRow, error) { return f.NextFn() } - type TestEngine struct { - CreateSeriesCursorFn func(context.Context, SeriesCursorRequest, influxql.Expr) (SeriesCursor, error) - DeleteSeriesRangeWithPredicateFn func(tsdb.SeriesIterator, func([]byte, models.Tags) (int64, int64, bool)) error - - SeriesCursor *TestSeriesCursor + DeleteBucketRangeFn func(platform.ID, platform.ID, int64, int64) error } func NewTestEngine() *TestEngine { - cursor := &TestSeriesCursor{ - CloseFn: func() error { return nil }, - NextFn: func() (*SeriesCursorRow, error) { return nil, nil }, - } - return &TestEngine{ - SeriesCursor: cursor, - CreateSeriesCursorFn: func(context.Context, SeriesCursorRequest, influxql.Expr) (SeriesCursor, error) { return cursor, nil }, - DeleteSeriesRangeWithPredicateFn: func(tsdb.SeriesIterator, func([]byte, models.Tags) (int64, int64, bool)) error { return nil }, + DeleteBucketRangeFn: func(platform.ID, platform.ID, int64, int64) error { return nil }, } } -func (e *TestEngine) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (SeriesCursor, error) { - return e.CreateSeriesCursorFn(ctx, req, cond) -} - -func (e *TestEngine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, fn func([]byte, models.Tags) (int64, int64, bool)) error { - return e.DeleteSeriesRangeWithPredicateFn(itr, fn) +func (e *TestEngine) DeleteBucketRange(orgID, bucketID platform.ID, min, max int64) error { + return e.DeleteBucketRangeFn(orgID, bucketID, min, max) } type TestBucketFinder struct { diff --git a/storage/wal/helpers_test.go b/storage/wal/helpers_test.go new file mode 100644 index 0000000000..e5a5278d1c --- /dev/null +++ b/storage/wal/helpers_test.go @@ -0,0 +1,28 @@ +package wal + +import ( + "fmt" + "io/ioutil" + "os" + "testing" +) + +func MustTempDir() string { + dir, err := ioutil.TempDir("", "tsm1-test") + if err != nil { + panic(fmt.Sprintf("failed to create temp dir: %v", err)) + } + return dir +} + +func MustTempFile(dir string) *os.File { + f, err := ioutil.TempFile(dir, "tsm1test") + if err != nil { + panic(fmt.Sprintf("failed to create temp file: %v", err)) + } + return f +} + +func fatal(t *testing.T, msg string, err error) { + t.Fatalf("unexpected error %v: %v", msg, err) +} diff --git a/storage/wal/metrics.go b/storage/wal/metrics.go new file mode 100644 index 0000000000..132a5f19a8 --- /dev/null +++ b/storage/wal/metrics.go @@ -0,0 +1,91 @@ +package wal + +import ( + "sort" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +// The following package variables act as singletons, to be shared by all +// storage.Engine instantiations. This allows multiple WALs to be monitored +// within the same process. +var ( + wms *walMetrics // main metrics + mmu sync.RWMutex +) + +// PrometheusCollectors returns all the metrics associated with the wal package. +func PrometheusCollectors() []prometheus.Collector { + mmu.RLock() + defer mmu.RUnlock() + + var collectors []prometheus.Collector + if wms != nil { + collectors = append(collectors, wms.PrometheusCollectors()...)
+ } + + return collectors +} + +// namespace is the leading part of all published metrics for the Storage service. +const namespace = "storage" + +const walSubsystem = "wal" // sub-system associated with metrics for the WAL. + +// walMetrics are a set of metrics concerned with tracking data about the WAL. +type walMetrics struct { + OldSegmentBytes *prometheus.GaugeVec + CurrentSegmentBytes *prometheus.GaugeVec + Segments *prometheus.GaugeVec + Writes *prometheus.CounterVec +} + +// newWALMetrics initialises the prometheus metrics for tracking the WAL. +func newWALMetrics(labels prometheus.Labels) *walMetrics { + var names []string + for k := range labels { + names = append(names, k) + } + sort.Strings(names) + + writeNames := append(append([]string(nil), names...), "status") + sort.Strings(writeNames) + + return &walMetrics{ + OldSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: walSubsystem, + Name: "old_segment_bytes", + Help: "Number of bytes old WAL segments are using on disk.", + }, names), + CurrentSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: walSubsystem, + Name: "current_segment_bytes", + Help: "Number of bytes the current WAL segment is using on disk.", + }, names), + Segments: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: walSubsystem, + Name: "segments_total", + Help: "Number of WAL segment files on disk.", + }, names), + Writes: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: walSubsystem, + Name: "writes_total", + Help: "Number of writes to the WAL.", + }, writeNames), + } +} + +// PrometheusCollectors satisfies the prom.PrometheusCollector interface. +func (m *walMetrics) PrometheusCollectors() []prometheus.Collector { + return []prometheus.Collector{ + m.OldSegmentBytes, + m.CurrentSegmentBytes, + m.Segments, + m.Writes, + } +} diff --git a/storage/wal/metrics_test.go b/storage/wal/metrics_test.go new file mode 100644 index 0000000000..12025892f7 --- /dev/null +++ b/storage/wal/metrics_test.go @@ -0,0 +1,75 @@ +package wal + +import ( + "testing" + + "github.com/influxdata/influxdb/kit/prom/promtest" + "github.com/prometheus/client_golang/prometheus" +) + +func TestMetrics_WAL(t *testing.T) { + // Metrics to be shared by multiple WALs. + metrics := newWALMetrics(prometheus.Labels{"engine_id": "", "node_id": ""}) + + t1 := newWALTracker(metrics, prometheus.Labels{"engine_id": "0", "node_id": "0"}) + t2 := newWALTracker(metrics, prometheus.Labels{"engine_id": "1", "node_id": "0"}) + + reg := prometheus.NewRegistry() + reg.MustRegister(metrics.PrometheusCollectors()...) + + base := namespace + "_" + walSubsystem + "_" + + // All the metric names + gauges := []string{ + base + "old_segment_bytes", + base + "current_segment_bytes", + base + "segments_total", + } + + counters := []string{ + base + "writes_total", + } + + // Generate some measurements. + for i, tracker := range []*walTracker{t1, t2} { + tracker.SetOldSegmentSize(uint64(i + len(gauges[0]))) + tracker.SetCurrentSegmentSize(uint64(i + len(gauges[1]))) + tracker.SetSegments(uint64(i + len(gauges[2]))) + + labels := tracker.Labels() + labels["status"] = "ok" + tracker.metrics.Writes.With(labels).Add(float64(i + len(counters[0]))) + } + + // Test that all the correct metrics are present. + mfs, err := reg.Gather() + if err != nil { + t.Fatal(err) + } + + // The label variants for the two WALs.
+ labelVariants := []prometheus.Labels{ + prometheus.Labels{"engine_id": "0", "node_id": "0"}, + prometheus.Labels{"engine_id": "1", "node_id": "0"}, + } + + for i, labels := range labelVariants { + for _, name := range gauges { + exp := float64(i + len(name)) + metric := promtest.MustFindMetric(t, mfs, name, labels) + if got := metric.GetGauge().GetValue(); got != exp { + t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp) + } + } + + for _, name := range counters { + exp := float64(i + len(name)) + + labels["status"] = "ok" + metric := promtest.MustFindMetric(t, mfs, name, labels) + if got := metric.GetCounter().GetValue(); got != exp { + t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp) + } + } + } +} diff --git a/tsdb/tsm1/pools.go b/storage/wal/pools.go similarity index 97% rename from tsdb/tsm1/pools.go rename to storage/wal/pools.go index 02d4d6231e..140102f528 100644 --- a/tsdb/tsm1/pools.go +++ b/storage/wal/pools.go @@ -1,4 +1,4 @@ -package tsm1 +package wal import "sync" diff --git a/storage/wal/reader.go b/storage/wal/reader.go new file mode 100644 index 0000000000..2ebf126b18 --- /dev/null +++ b/storage/wal/reader.go @@ -0,0 +1,86 @@ +package wal + +import ( + "os" + "sort" + + "go.uber.org/zap" +) + +// WALReader helps one read out the WAL into entries. +type WALReader struct { + files []string + logger *zap.Logger + r *WALSegmentReader +} + +// NewWALReader constructs a WALReader over the given set of files. +func NewWALReader(files []string) *WALReader { + sort.Strings(files) + return &WALReader{ + files: files, + logger: zap.NewNop(), + r: nil, + } +} + +// WithLogger sets the logger for the WALReader. +func (r *WALReader) WithLogger(logger *zap.Logger) { r.logger = logger } + +// Read calls the callback with every entry in the WAL files. If, during +// reading of a segment file, corruption is encountered, that segment file +// is truncated up to and including the last valid byte, and processing +// continues with the next segment file. +func (r *WALReader) Read(cb func(WALEntry) error) error { + for _, file := range r.files { + if err := r.readFile(file, cb); err != nil { + return err + } + } + return nil +} + +// readFile reads the file and calls the callback with each WAL entry. +// It uses the provided logger for information about progress and corruptions. 
+func (r *WALReader) readFile(file string, cb func(WALEntry) error) error { + f, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return err + } + defer f.Close() + + stat, err := f.Stat() + if err != nil { + return err + } + r.logger.Info("Reading file", zap.String("path", file), zap.Int64("size", stat.Size())) + + if stat.Size() == 0 { + return nil + } + + if r.r == nil { + r.r = NewWALSegmentReader(f) + } else { + r.r.Reset(f) + } + defer r.r.Close() + + for r.r.Next() { + entry, err := r.r.Read() + if err != nil { + n := r.r.Count() + r.logger.Info("File corrupt", zap.Error(err), zap.String("path", file), zap.Int64("pos", n)) + if err := f.Truncate(n); err != nil { + return err + } + break + } + + if err := cb(entry); err != nil { + return err + } + } + + return r.r.Close() +} diff --git a/tsdb/tsm1/wal.go b/storage/wal/wal.go similarity index 80% rename from tsdb/tsm1/wal.go rename to storage/wal/wal.go index 2200a2bacd..cb63303345 100644 --- a/tsdb/tsm1/wal.go +++ b/storage/wal/wal.go @@ -1,8 +1,7 @@ -package tsm1 +package wal import ( "bufio" - "bytes" "encoding/binary" "fmt" "io" @@ -18,29 +17,14 @@ import ( "time" "github.com/golang/snappy" + "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/pkg/limiter" "github.com/influxdata/influxdb/pkg/pool" + "github.com/influxdata/influxdb/tsdb/value" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" ) -// Log describes an interface for a durable disk-based log. -type Log interface { - Open() error - Close() error - Path() string - - LastWriteTime() time.Time - DiskSizeBytes() int64 - - WriteMulti(values map[string][]Value) (int, error) - DeleteRange(keys [][]byte, min, max int64) (int, error) - - CloseSegment() error - ClosedSegments() ([]string, error) - Remove(files []string) error -} - const ( // DefaultSegmentSize of 10MB is the size at which segment files will be rolled over. DefaultSegmentSize = 10 * 1024 * 1024 @@ -68,11 +52,14 @@ const ( // WriteWALEntryType indicates a write entry. WriteWALEntryType WalEntryType = 0x01 - // DeleteWALEntryType indicates a delete entry. - DeleteWALEntryType WalEntryType = 0x02 + // DeleteWALEntryType indicates a delete entry. Deprecated. + _ WalEntryType = 0x02 - // DeleteRangeWALEntryType indicates a delete range entry. - DeleteRangeWALEntryType WalEntryType = 0x03 + // DeleteRangeWALEntryType indicates a delete range entry. Deprecated. + _ WalEntryType = 0x03 + + // DeleteBucketRangeWALEntryType indicates a delete bucket range entry. + DeleteBucketRangeWALEntryType WalEntryType = 0x04 ) var ( @@ -97,7 +84,8 @@ type WAL struct { mu sync.RWMutex lastWriteTime time.Time - path string + path string + enabled bool // write variables currentSegmentID int @@ -120,7 +108,9 @@ type WAL struct { // SegmentSize is the file size at which a segment file will be rotated SegmentSize int - tracker *walTracker + tracker *walTracker + defaultMetricLabels prometheus.Labels // N.B this must not be mutated after Open is called. 
+ + limiter limiter.Fixed } @@ -128,7 +118,8 @@ func NewWAL(path string) *WAL { logger := zap.NewNop() return &WAL{ - path: path, + path: path, + enabled: true, // these options should be overridden by any options in the config SegmentSize: DefaultSegmentSize, @@ -137,7 +128,6 @@ func NewWAL(path string) *WAL { limiter: limiter.NewFixed(defaultWaitingWALWrites), logger: logger, traceLogger: logger, - tracker: newWALTracker(newWALMetrics(nil), nil), } } @@ -154,6 +144,11 @@ func (l *WAL) WithFsyncDelay(delay time.Duration) { l.syncDelay = delay } +// SetEnabled sets whether the WAL is enabled and should be called before the WAL is opened. +func (l *WAL) SetEnabled(enabled bool) { + l.enabled = enabled +} + // WithLogger sets the WAL's logger. func (l *WAL) WithLogger(log *zap.Logger) { l.logger = log.With(zap.String("service", "wal")) @@ -163,6 +158,15 @@ func (l *WAL) WithLogger(log *zap.Logger) { } } +// SetDefaultMetricLabels sets the default labels for metrics on the WAL. +// It must be called before the WAL is opened. +func (l *WAL) SetDefaultMetricLabels(labels prometheus.Labels) { + l.defaultMetricLabels = make(prometheus.Labels, len(labels)) + for k, v := range labels { + l.defaultMetricLabels[k] = v + } +} + // Path returns the directory the log was initialized with. func (l *WAL) Path() string { l.mu.RLock() @@ -175,6 +179,20 @@ func (l *WAL) Open() error { l.mu.Lock() defer l.mu.Unlock() + if !l.enabled { + return nil + } + + // Initialise metrics for trackers. + mmu.Lock() + if wms == nil { + wms = newWALMetrics(l.defaultMetricLabels) + } + mmu.Unlock() + + // Set the shared metrics for the tracker + l.tracker = newWALTracker(wms, l.defaultMetricLabels) + l.traceLogger.Info("tsm1 WAL starting", zap.Int("segment_size", l.SegmentSize)) l.traceLogger.Info("tsm1 WAL writing", zap.String("path", l.path)) @@ -182,7 +200,7 @@ func (l *WAL) Open() error { return err } - segments, err := segmentFileNames(l.path) + segments, err := SegmentFileNames(l.path) if err != nil { return err } @@ -299,8 +317,12 @@ func (l *WAL) sync() { // WriteMulti writes the given values to the WAL. It returns the WAL segment ID to // which the points were written. If an error is returned the segment ID should -// be ignored. -func (l *WAL) WriteMulti(values map[string][]Value) (int, error) { +// be ignored. If the WAL is disabled, -1 and nil are returned. +func (l *WAL) WriteMulti(values map[string][]value.Value) (int, error) { + if !l.enabled { + return -1, nil + } + entry := &WriteWALEntry{ Values: values, } @@ -317,8 +339,13 @@ func (l *WAL) WriteMulti(values map[string][]Value) (int, error) { // ClosedSegments returns a slice of the names of the closed segment files. func (l *WAL) ClosedSegments() ([]string, error) { + if !l.enabled { + return nil, nil + } + l.mu.RLock() defer l.mu.RUnlock() + // Not loading files from disk so nothing to do if l.path == "" { return nil, nil @@ -329,7 +356,7 @@ func (l *WAL) ClosedSegments() ([]string, error) { currentFile = l.currentSegmentWriter.path() } - files, err := segmentFileNames(l.path) + files, err := SegmentFileNames(l.path) if err != nil { return nil, err } @@ -349,15 +376,20 @@ func (l *WAL) ClosedSegments() ([]string, error) { // Remove deletes the given segment file paths from disk and cleans up any associated objects.
func (l *WAL) Remove(files []string) error { + if !l.enabled { + return nil + } + l.mu.Lock() defer l.mu.Unlock() + for _, fn := range files { l.traceLogger.Info("Removing WAL file", zap.String("path", fn)) os.RemoveAll(fn) } // Refresh the on-disk size stats - segments, err := segmentFileNames(l.path) + segments, err := SegmentFileNames(l.path) if err != nil { return err } @@ -470,8 +502,13 @@ func (l *WAL) rollSegment() error { // CloseSegment closes the current segment if it is non-empty and opens a new one. func (l *WAL) CloseSegment() error { + if !l.enabled { + return nil + } + l.mu.Lock() defer l.mu.Unlock() + if l.currentSegmentWriter == nil || l.currentSegmentWriter.size > 0 { if err := l.newSegmentFile(); err != nil { // A drop database or RP call could trigger this error if writes were in-flight @@ -483,32 +520,18 @@ func (l *WAL) CloseSegment() error { return nil } -// Delete deletes the given keys, returning the segment ID for the operation. -func (l *WAL) Delete(keys [][]byte) (int, error) { - if len(keys) == 0 { - return 0, nil - } - entry := &DeleteWALEntry{ - Keys: keys, +// DeleteBucketRange deletes the data inside of the bucket between the two times, returning +// the segment ID for the operation. +func (l *WAL) DeleteBucketRange(orgID, bucketID influxdb.ID, min, max int64) (int, error) { + if !l.enabled { + return -1, nil } - id, err := l.writeToLog(entry) - if err != nil { - return -1, err - } - return id, nil -} - -// DeleteRange deletes the given keys within the given time range, -// returning the segment ID for the operation. -func (l *WAL) DeleteRange(keys [][]byte, min, max int64) (int, error) { - if len(keys) == 0 { - return 0, nil - } - entry := &DeleteRangeWALEntry{ - Keys: keys, - Min: min, - Max: max, + entry := &DeleteBucketRangeWALEntry{ + OrgID: orgID, + BucketID: bucketID, + Min: min, + Max: max, } id, err := l.writeToLog(entry) @@ -523,6 +546,10 @@ func (l *WAL) Close() error { l.mu.Lock() defer l.mu.Unlock() + if !l.enabled { + return nil + } + l.once.Do(func() { // Close, but don't set to nil so future goroutines can still be signaled l.traceLogger.Info("Closing WAL file", zap.String("path", l.path)) @@ -538,8 +565,8 @@ func (l *WAL) Close() error { return nil } -// segmentFileNames will return all files that are WAL segment files in sorted order by ascending ID. -func segmentFileNames(dir string) ([]string, error) { +// SegmentFileNames will return all files that are WAL segment files in sorted order by ascending ID. +func SegmentFileNames(dir string) ([]string, error) { names, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("%s*.%s", WALFilePrefix, WALFileExtension))) if err != nil { return nil, err @@ -666,10 +693,11 @@ type WALEntry interface { // WriteWALEntry represents a write of points. type WriteWALEntry struct { - Values map[string][]Value + Values map[string][]value.Value sz int } +// MarshalSize returns the number of bytes the entry takes when marshaled. 
func (w *WriteWALEntry) MarshalSize() int { if w.sz > 0 || len(w.Values) == 0 { return w.sz @@ -687,17 +715,17 @@ func (w *WriteWALEntry) MarshalSize() int { encLen += 8 * len(v) // timestamps (8) switch v[0].(type) { - case FloatValue, IntegerValue, UnsignedValue: + case value.FloatValue, value.IntegerValue, value.UnsignedValue: encLen += 8 * len(v) - case BooleanValue: + case value.BooleanValue: encLen += 1 * len(v) - case StringValue: + case value.StringValue: for _, vv := range v { - str, ok := vv.(StringValue) + str, ok := vv.(value.StringValue) if !ok { return 0 } - encLen += 4 + len(str.value) + encLen += 4 + len(str.RawValue()) } default: return 0 @@ -746,15 +774,15 @@ func (w *WriteWALEntry) Encode(dst []byte) ([]byte, error) { for k, v := range w.Values { switch v[0].(type) { - case FloatValue: + case value.FloatValue: curType = float64EntryType - case IntegerValue: + case value.IntegerValue: curType = integerEntryType - case UnsignedValue: + case value.UnsignedValue: curType = unsignedEntryType - case BooleanValue: + case value.BooleanValue: curType = booleanEntryType - case StringValue: + case value.StringValue: curType = stringEntryType default: return nil, fmt.Errorf("unsupported value type: %T", v[0]) @@ -774,41 +802,41 @@ func (w *WriteWALEntry) Encode(dst []byte) ([]byte, error) { n += 8 switch vv := vv.(type) { - case FloatValue: + case value.FloatValue: if curType != float64EntryType { return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) } - binary.BigEndian.PutUint64(dst[n:n+8], math.Float64bits(vv.value)) + binary.BigEndian.PutUint64(dst[n:n+8], math.Float64bits(vv.RawValue())) n += 8 - case IntegerValue: + case value.IntegerValue: if curType != integerEntryType { return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) } - binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.value)) + binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.RawValue())) n += 8 - case UnsignedValue: + case value.UnsignedValue: if curType != unsignedEntryType { return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) } - binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.value)) + binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.RawValue())) n += 8 - case BooleanValue: + case value.BooleanValue: if curType != booleanEntryType { return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) } - if vv.value { + if vv.RawValue() { dst[n] = 1 } else { dst[n] = 0 } n++ - case StringValue: + case value.StringValue: if curType != stringEntryType { return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) } - binary.BigEndian.PutUint32(dst[n:n+4], uint32(len(vv.value))) + binary.BigEndian.PutUint32(dst[n:n+4], uint32(len(vv.RawValue()))) n += 4 - n += copy(dst[n:], vv.value) + n += copy(dst[n:], vv.RawValue()) default: return nil, fmt.Errorf("unsupported value found in %T slice: %T", v[0].Value(), vv) } @@ -863,13 +891,13 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error { return ErrWALCorrupt } - values := make([]Value, 0, nvals) + values := make([]value.Value, 0, nvals) for j := 0; j < nvals; j++ { un := int64(binary.BigEndian.Uint64(b[i : i+8])) i += 8 v := math.Float64frombits((binary.BigEndian.Uint64(b[i : i+8]))) i += 8 - values = append(values, NewFloatValue(un, v)) + values = append(values, value.NewFloatValue(un, v)) } w.Values[k] = values case integerEntryType: @@ -877,13 +905,13 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error { return ErrWALCorrupt } - 
values := make([]Value, 0, nvals) + values := make([]value.Value, 0, nvals) for j := 0; j < nvals; j++ { un := int64(binary.BigEndian.Uint64(b[i : i+8])) i += 8 v := int64(binary.BigEndian.Uint64(b[i : i+8])) i += 8 - values = append(values, NewIntegerValue(un, v)) + values = append(values, value.NewIntegerValue(un, v)) } w.Values[k] = values @@ -892,13 +920,13 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error { return ErrWALCorrupt } - values := make([]Value, 0, nvals) + values := make([]value.Value, 0, nvals) for j := 0; j < nvals; j++ { un := int64(binary.BigEndian.Uint64(b[i : i+8])) i += 8 v := binary.BigEndian.Uint64(b[i : i+8]) i += 8 - values = append(values, NewUnsignedValue(un, v)) + values = append(values, value.NewUnsignedValue(un, v)) } w.Values[k] = values @@ -907,7 +935,7 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error { return ErrWALCorrupt } - values := make([]Value, 0, nvals) + values := make([]value.Value, 0, nvals) for j := 0; j < nvals; j++ { un := int64(binary.BigEndian.Uint64(b[i : i+8])) i += 8 @@ -915,15 +943,15 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error { v := b[i] i += 1 if v == 1 { - values = append(values, NewBooleanValue(un, true)) + values = append(values, value.NewBooleanValue(un, true)) } else { - values = append(values, NewBooleanValue(un, false)) + values = append(values, value.NewBooleanValue(un, false)) } } w.Values[k] = values case stringEntryType: - values := make([]Value, 0, nvals) + values := make([]value.Value, 0, nvals) for j := 0; j < nvals; j++ { if i+12 > len(b) { return ErrWALCorrupt @@ -945,7 +973,7 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error { v := string(b[i : i+length]) i += length - values = append(values, NewStringValue(un, v)) + values = append(values, value.NewStringValue(un, v)) } w.Values[k] = values @@ -961,152 +989,70 @@ func (w *WriteWALEntry) Type() WalEntryType { return WriteWALEntryType } -// DeleteWALEntry represents the deletion of multiple series. -type DeleteWALEntry struct { - Keys [][]byte - sz int -} - -// MarshalBinary returns a binary representation of the entry in a new byte slice. -func (w *DeleteWALEntry) MarshalBinary() ([]byte, error) { - b := make([]byte, w.MarshalSize()) - return w.Encode(b) -} - -// UnmarshalBinary deserializes the byte slice into w. -func (w *DeleteWALEntry) UnmarshalBinary(b []byte) error { - if len(b) == 0 { - return nil - } - - // b originates from a pool. Copy what needs to be retained. - buf := make([]byte, len(b)) - copy(buf, b) - w.Keys = bytes.Split(buf, []byte("\n")) - return nil -} - -func (w *DeleteWALEntry) MarshalSize() int { - if w.sz > 0 || len(w.Keys) == 0 { - return w.sz - } - - encLen := len(w.Keys) // newlines - for _, k := range w.Keys { - encLen += len(k) - } - - w.sz = encLen - - return encLen -} - -// Encode converts the DeleteWALEntry into a byte slice, appending to dst. -func (w *DeleteWALEntry) Encode(dst []byte) ([]byte, error) { - sz := w.MarshalSize() - - if len(dst) < sz { - dst = make([]byte, sz) - } - - var n int - for _, k := range w.Keys { - n += copy(dst[n:], k) - n += copy(dst[n:], "\n") - } - - // We return n-1 to strip off the last newline so that unmarshalling the value - // does not produce an empty string - return []byte(dst[:n-1]), nil -} - -// Type returns DeleteWALEntryType. -func (w *DeleteWALEntry) Type() WalEntryType { - return DeleteWALEntryType -} - -// DeleteRangeWALEntry represents the deletion of multiple series. 
-type DeleteRangeWALEntry struct { - Keys [][]byte +// DeleteBucketRangeWALEntry represents the deletion of data in a bucket. +type DeleteBucketRangeWALEntry struct { + OrgID influxdb.ID + BucketID influxdb.ID Min, Max int64 - sz int } // MarshalBinary returns a binary representation of the entry in a new byte slice. -func (w *DeleteRangeWALEntry) MarshalBinary() ([]byte, error) { +func (w *DeleteBucketRangeWALEntry) MarshalBinary() ([]byte, error) { b := make([]byte, w.MarshalSize()) return w.Encode(b) } // UnmarshalBinary deserializes the byte slice into w. -func (w *DeleteRangeWALEntry) UnmarshalBinary(b []byte) error { - if len(b) < 16 { +func (w *DeleteBucketRangeWALEntry) UnmarshalBinary(b []byte) error { + if len(b) != 2*influxdb.IDLength+16 { return ErrWALCorrupt } - w.Min = int64(binary.BigEndian.Uint64(b[:8])) - w.Max = int64(binary.BigEndian.Uint64(b[8:16])) - - i := 16 - for i < len(b) { - if i+4 > len(b) { - return ErrWALCorrupt - } - sz := int(binary.BigEndian.Uint32(b[i : i+4])) - i += 4 - - if i+sz > len(b) { - return ErrWALCorrupt - } - - // b originates from a pool. Copy what needs to be retained. - buf := make([]byte, sz) - copy(buf, b[i:i+sz]) - w.Keys = append(w.Keys, buf) - i += sz + if err := w.OrgID.Decode(b[0:influxdb.IDLength]); err != nil { + return err } + if err := w.BucketID.Decode(b[influxdb.IDLength : 2*influxdb.IDLength]); err != nil { + return err + } + w.Min = int64(binary.BigEndian.Uint64(b[2*influxdb.IDLength : 2*influxdb.IDLength+8])) + w.Max = int64(binary.BigEndian.Uint64(b[2*influxdb.IDLength+8 : 2*influxdb.IDLength+16])) + return nil } -func (w *DeleteRangeWALEntry) MarshalSize() int { - if w.sz > 0 { - return w.sz - } - - sz := 16 + len(w.Keys)*4 - for _, k := range w.Keys { - sz += len(k) - } - - w.sz = sz - - return sz +// MarshalSize returns the number of bytes the entry takes when marshaled. +func (w *DeleteBucketRangeWALEntry) MarshalSize() int { + return 2*influxdb.IDLength + 16 } -// Encode converts the DeleteRangeWALEntry into a byte slice, appending to b. -func (w *DeleteRangeWALEntry) Encode(b []byte) ([]byte, error) { +// Encode converts the entry into a byte stream using b if it is large enough. +// If b is too small, a newly allocated slice is returned. +func (w *DeleteBucketRangeWALEntry) Encode(b []byte) ([]byte, error) { sz := w.MarshalSize() - if len(b) < sz { b = make([]byte, sz) } - binary.BigEndian.PutUint64(b[:8], uint64(w.Min)) - binary.BigEndian.PutUint64(b[8:16], uint64(w.Max)) - - i := 16 - for _, k := range w.Keys { - binary.BigEndian.PutUint32(b[i:i+4], uint32(len(k))) - i += 4 - i += copy(b[i:], k) + orgID, err := w.OrgID.Encode() + if err != nil { + return nil, err + } + bucketID, err := w.BucketID.Encode() + if err != nil { + return nil, err } - return b[:i], nil + copy(b, orgID) + copy(b[influxdb.IDLength:], bucketID) + binary.BigEndian.PutUint64(b[2*influxdb.IDLength:], uint64(w.Min)) + binary.BigEndian.PutUint64(b[2*influxdb.IDLength+8:], uint64(w.Max)) + + return b[:sz], nil } -// Type returns DeleteRangeWALEntryType. -func (w *DeleteRangeWALEntry) Type() WalEntryType { - return DeleteRangeWALEntryType +// Type returns DeleteBucketRangeWALEntryType. +func (w *DeleteBucketRangeWALEntry) Type() WalEntryType { + return DeleteBucketRangeWALEntryType } // WALSegmentWriter writes WAL segments. 
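The replacement entry above marshals to a fixed-size record: the two encoded IDs followed by the big-endian `Min` and `Max` timestamps, `2*influxdb.IDLength+16` bytes in total. A minimal round-trip sketch against the new `storage/wal` package (the ID literals and the delete-everything time range are arbitrary illustration values, not from this diff):

```go
package main

import (
	"fmt"
	"log"
	"math"

	"github.com/influxdata/influxdb"
	"github.com/influxdata/influxdb/storage/wal"
)

func main() {
	// Stage a deletion of all data in one bucket; the IDs are arbitrary here.
	entry := &wal.DeleteBucketRangeWALEntry{
		OrgID:    influxdb.ID(0xa),
		BucketID: influxdb.ID(0xb),
		Min:      math.MinInt64,
		Max:      math.MaxInt64,
	}

	b, err := entry.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(b) == 2*influxdb.IDLength+16) // true: the record is fixed-size

	// UnmarshalBinary checks for exactly that length; anything else is ErrWALCorrupt.
	var out wal.DeleteBucketRangeWALEntry
	if err := out.UnmarshalBinary(b); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}
```

Because the record is fixed-size, corruption detection reduces to a single length check, which the truncation test later in this diff (`TestWriteWALSegment_UnmarshalBinary_DeleteBucketRangeWALCorrupt`) exercises byte by byte.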
@@ -1250,12 +1196,10 @@ func (r *WALSegmentReader) Next() bool { switch WalEntryType(entryType) { case WriteWALEntryType: r.entry = &WriteWALEntry{ - Values: make(map[string][]Value), + Values: make(map[string][]value.Value), } - case DeleteWALEntryType: - r.entry = &DeleteWALEntry{} - case DeleteRangeWALEntryType: - r.entry = &DeleteRangeWALEntry{} + case DeleteBucketRangeWALEntryType: + r.entry = &DeleteBucketRangeWALEntry{} default: r.err = fmt.Errorf("unknown wal entry type: %v", entryType) return true @@ -1310,20 +1254,3 @@ func idFromFileName(name string) (int, error) { return int(id), err } - -// NopWAL implements the Log interface and provides a no-op WAL implementation. -type NopWAL struct{} - -func (w NopWAL) Open() error { return nil } -func (w NopWAL) Close() error { return nil } -func (w NopWAL) Path() string { return "" } - -func (w NopWAL) LastWriteTime() time.Time { return time.Time{} } -func (w NopWAL) DiskSizeBytes() int64 { return 0 } - -func (w NopWAL) WriteMulti(values map[string][]Value) (int, error) { return 0, nil } -func (w NopWAL) DeleteRange(keys [][]byte, min, max int64) (int, error) { return 0, nil } - -func (w NopWAL) CloseSegment() error { return nil } -func (w NopWAL) ClosedSegments() ([]string, error) { return nil, nil } -func (w NopWAL) Remove(files []string) error { return nil } diff --git a/storage/wal/wal_test.go b/storage/wal/wal_test.go new file mode 100644 index 0000000000..8347510375 --- /dev/null +++ b/storage/wal/wal_test.go @@ -0,0 +1,580 @@ +package wal + +import ( + "fmt" + "io" + "math/rand" + "os" + "reflect" + "testing" + + "github.com/golang/snappy" + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/tsdb/value" +) + +func TestWALWriter_WriteMulti_Single(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + f := MustTempFile(dir) + w := NewWALSegmentWriter(f) + + p1 := value.NewValue(1, 1.1) + p2 := value.NewValue(1, int64(1)) + p3 := value.NewValue(1, true) + p4 := value.NewValue(1, "string") + p5 := value.NewValue(1, ^uint64(0)) + + values := map[string][]value.Value{ + "cpu,host=A#!~#float": []value.Value{p1}, + "cpu,host=A#!~#int": []value.Value{p2}, + "cpu,host=A#!~#bool": []value.Value{p3}, + "cpu,host=A#!~#string": []value.Value{p4}, + "cpu,host=A#!~#unsigned": []value.Value{p5}, + } + + entry := &WriteWALEntry{ + Values: values, + } + + if err := w.Write(mustMarshalEntry(entry)); err != nil { + fatal(t, "write points", err) + } + + if err := w.Flush(); err != nil { + fatal(t, "flush", err) + } + + if _, err := f.Seek(0, io.SeekStart); err != nil { + fatal(t, "seek", err) + } + + r := NewWALSegmentReader(f) + + if !r.Next() { + t.Fatalf("expected next, got false") + } + + we, err := r.Read() + if err != nil { + fatal(t, "read entry", err) + } + + e, ok := we.(*WriteWALEntry) + if !ok { + t.Fatalf("expected WriteWALEntry: got %#v", e) + } + + for k, v := range e.Values { + for i, vv := range v { + if got, exp := vv.String(), values[k][i].String(); got != exp { + t.Fatalf("points mismatch: got %v, exp %v", got, exp) + } + } + } + + if n := r.Count(); n != MustReadFileSize(f) { + t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f)) + } +} + +func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + f := MustTempFile(dir) + w := NewWALSegmentWriter(f) + + var points []value.Value + for i := 0; i < 100000; i++ { + points = append(points, value.NewValue(int64(i), int64(1))) + } + + values := map[string][]value.Value{ + 
"cpu,host=A,server=01,foo=bar,tag=really-long#!~#float": points, + "mem,host=A,server=01,foo=bar,tag=really-long#!~#float": points, + } + + entry := &WriteWALEntry{ + Values: values, + } + + if err := w.Write(mustMarshalEntry(entry)); err != nil { + fatal(t, "write points", err) + } + + if err := w.Flush(); err != nil { + fatal(t, "flush", err) + } + + if _, err := f.Seek(0, io.SeekStart); err != nil { + fatal(t, "seek", err) + } + + r := NewWALSegmentReader(f) + + if !r.Next() { + t.Fatalf("expected next, got false") + } + + we, err := r.Read() + if err != nil { + fatal(t, "read entry", err) + } + + e, ok := we.(*WriteWALEntry) + if !ok { + t.Fatalf("expected WriteWALEntry: got %#v", e) + } + + for k, v := range e.Values { + for i, vv := range v { + if got, exp := vv.String(), values[k][i].String(); got != exp { + t.Fatalf("points mismatch: got %v, exp %v", got, exp) + } + } + } + + if n := r.Count(); n != MustReadFileSize(f) { + t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f)) + } +} + +func TestWALWriter_WriteMulti_Multiple(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + f := MustTempFile(dir) + w := NewWALSegmentWriter(f) + + p1 := value.NewValue(1, int64(1)) + p2 := value.NewValue(1, int64(2)) + + exp := []struct { + key string + values []value.Value + }{ + {"cpu,host=A#!~#value", []value.Value{p1}}, + {"cpu,host=B#!~#value", []value.Value{p2}}, + } + + for _, v := range exp { + entry := &WriteWALEntry{ + Values: map[string][]value.Value{v.key: v.values}, + } + + if err := w.Write(mustMarshalEntry(entry)); err != nil { + fatal(t, "write points", err) + } + if err := w.Flush(); err != nil { + fatal(t, "flush", err) + } + } + + // Seek back to the beinning of the file for reading + if _, err := f.Seek(0, io.SeekStart); err != nil { + fatal(t, "seek", err) + } + + r := NewWALSegmentReader(f) + + for _, ep := range exp { + if !r.Next() { + t.Fatalf("expected next, got false") + } + + we, err := r.Read() + if err != nil { + fatal(t, "read entry", err) + } + + e, ok := we.(*WriteWALEntry) + if !ok { + t.Fatalf("expected WriteWALEntry: got %#v", e) + } + + for k, v := range e.Values { + if got, exp := k, ep.key; got != exp { + t.Fatalf("key mismatch. 
got %v, exp %v", got, exp) + } + + if got, exp := len(v), len(ep.values); got != exp { + t.Fatalf("values length mismatch: got %v, exp %v", got, exp) + } + + for i, vv := range v { + if got, exp := vv.String(), ep.values[i].String(); got != exp { + t.Fatalf("points mismatch: got %v, exp %v", got, exp) + } + } + } + } + + if n := r.Count(); n != MustReadFileSize(f) { + t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f)) + } +} + +func TestWALWriter_DeleteBucketRange(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + f := MustTempFile(dir) + w := NewWALSegmentWriter(f) + + entry := &DeleteBucketRangeWALEntry{ + OrgID: influxdb.ID(1), + BucketID: influxdb.ID(2), + Min: 3, + Max: 4, + } + + if err := w.Write(mustMarshalEntry(entry)); err != nil { + fatal(t, "write points", err) + } + + if err := w.Flush(); err != nil { + fatal(t, "flush", err) + } + + if _, err := f.Seek(0, io.SeekStart); err != nil { + fatal(t, "seek", err) + } + + r := NewWALSegmentReader(f) + + if !r.Next() { + t.Fatalf("expected next, got false") + } + + we, err := r.Read() + if err != nil { + fatal(t, "read entry", err) + } + + e, ok := we.(*DeleteBucketRangeWALEntry) + if !ok { + t.Fatalf("expected DeleteBucketRangeWALEntry: got %#v", e) + } + + if !reflect.DeepEqual(entry, e) { + t.Fatalf("expected %+v but got %+v", entry, e) + } +} + +func TestWAL_ClosedSegments(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + + w := NewWAL(dir) + if err := w.Open(); err != nil { + t.Fatalf("error opening WAL: %v", err) + } + + files, err := w.ClosedSegments() + if err != nil { + t.Fatalf("error getting closed segments: %v", err) + } + + if got, exp := len(files), 0; got != exp { + t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp) + } + + if _, err := w.WriteMulti(map[string][]value.Value{ + "cpu,host=A#!~#value": []value.Value{ + value.NewValue(1, 1.1), + }, + }); err != nil { + t.Fatalf("error writing points: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatalf("error closing wal: %v", err) + } + + // Re-open the WAL + w = NewWAL(dir) + defer w.Close() + if err := w.Open(); err != nil { + t.Fatalf("error opening WAL: %v", err) + } + + files, err = w.ClosedSegments() + if err != nil { + t.Fatalf("error getting closed segments: %v", err) + } + if got, exp := len(files), 0; got != exp { + t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp) + } +} + +func TestWALWriter_Corrupt(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + f := MustTempFile(dir) + w := NewWALSegmentWriter(f) + corruption := []byte{1, 4, 0, 0, 0} + + p1 := value.NewValue(1, 1.1) + values := map[string][]value.Value{ + "cpu,host=A#!~#float": []value.Value{p1}, + } + + entry := &WriteWALEntry{ + Values: values, + } + if err := w.Write(mustMarshalEntry(entry)); err != nil { + fatal(t, "write points", err) + } + + if err := w.Flush(); err != nil { + fatal(t, "flush", err) + } + + // Write some garbage bytes to the file to simulate corruption. + if _, err := f.Write(corruption); err != nil { + fatal(t, "corrupt WAL segment", err) + } + + // Create the WAL segment reader. + if _, err := f.Seek(0, io.SeekStart); err != nil { + fatal(t, "seek", err) + } + r := NewWALSegmentReader(f) + + // Try to decode two entries.
+ + if !r.Next() { + t.Fatalf("expected next, got false") + } + if _, err := r.Read(); err != nil { + fatal(t, "read entry", err) + } + + if !r.Next() { + t.Fatalf("expected next, got false") + } + if _, err := r.Read(); err == nil { + fatal(t, "read entry did not return err", nil) + } + + // Count should only return size of valid data. + expCount := MustReadFileSize(f) - int64(len(corruption)) + if n := r.Count(); n != expCount { + t.Fatalf("wrong count of bytes read, got %d, exp %d", n, expCount) + } +} + +// Reproduces a `panic: runtime error: makeslice: cap out of range` when run with +// GOARCH=386 go test -run TestWALSegmentReader_Corrupt -v ./tsdb/engine/tsm1/ +func TestWALSegmentReader_Corrupt(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + f := MustTempFile(dir) + w := NewWALSegmentWriter(f) + + p4 := value.NewValue(1, "string") + + values := map[string][]value.Value{ + "cpu,host=A#!~#string": []value.Value{p4, p4}, + } + + entry := &WriteWALEntry{ + Values: values, + } + + typ, b := mustMarshalEntry(entry) + + // This causes the nvals field to overflow on 32 bit systems which produces a + // negative count and a panic when reading the segment. + b[25] = 255 + + if err := w.Write(typ, b); err != nil { + fatal(t, "write points", err) + } + + if err := w.Flush(); err != nil { + fatal(t, "flush", err) + } + + // Create the WAL segment reader. + if _, err := f.Seek(0, io.SeekStart); err != nil { + fatal(t, "seek", err) + } + + r := NewWALSegmentReader(f) + defer r.Close() + + // Try to decode two entries. + for r.Next() { + r.Read() + } +} + +func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) { + p1 := value.NewValue(1, 1.1) + p2 := value.NewValue(1, int64(1)) + p3 := value.NewValue(1, true) + p4 := value.NewValue(1, "string") + p5 := value.NewValue(1, uint64(1)) + + values := map[string][]value.Value{ + "cpu,host=A#!~#float": []value.Value{p1, p1}, + "cpu,host=A#!~#int": []value.Value{p2, p2}, + "cpu,host=A#!~#bool": []value.Value{p3, p3}, + "cpu,host=A#!~#string": []value.Value{p4, p4}, + "cpu,host=A#!~#unsigned": []value.Value{p5, p5}, + } + + w := &WriteWALEntry{ + Values: values, + } + + b, err := w.MarshalBinary() + if err != nil { + t.Fatalf("unexpected error, got %v", err) + } + + // Test every possible truncation of a write WAL entry + for i := 0; i < len(b); i++ { + // re-allocated to ensure capacity would be exceeded if slicing + truncated := make([]byte, i) + copy(truncated, b[:i]) + err := w.UnmarshalBinary(truncated) + if err != nil && err != ErrWALCorrupt { + t.Fatalf("unexpected error: %v", err) + } + } +} + +func TestDeleteBucketRangeWALEntry_UnmarshalBinary(t *testing.T) { + for i := 0; i < 1000; i++ { + in := &DeleteBucketRangeWALEntry{ + OrgID: influxdb.ID(rand.Int63()) + 1, + BucketID: influxdb.ID(rand.Int63()) + 1, + Min: rand.Int63(), + Max: rand.Int63(), + } + + b, err := in.MarshalBinary() + if err != nil { + t.Fatalf("unexpected error, got %v", err) + } + + out := &DeleteBucketRangeWALEntry{} + if err := out.UnmarshalBinary(b); err != nil { + t.Fatalf("%v", err) + } + + if !reflect.DeepEqual(in, out) { + t.Errorf("got %+v, expected %+v", out, in) + } + } +} + +func TestWriteWALSegment_UnmarshalBinary_DeleteBucketRangeWALCorrupt(t *testing.T) { + w := &DeleteBucketRangeWALEntry{ + OrgID: influxdb.ID(1), + BucketID: influxdb.ID(2), + Min: 3, + Max: 4, + } + + b, err := w.MarshalBinary() + if err != nil { + t.Fatalf("unexpected error, got %v", err) + } + + // Test every possible truncation of a delete bucket range WAL entry + for i := 0; i
< len(b); i++ { + // re-allocated to ensure capacity would be exceeded if slicing + truncated := make([]byte, i) + copy(truncated, b[:i]) + err := w.UnmarshalBinary(truncated) + if err != nil && err != ErrWALCorrupt { + t.Fatalf("unexpected error: %v", err) + } + } +} + +func BenchmarkWALSegmentWriter(b *testing.B) { + points := map[string][]value.Value{} + for i := 0; i < 5000; i++ { + k := "cpu,host=A#!~#value" + points[k] = append(points[k], value.NewValue(int64(i), 1.1)) + } + + dir := MustTempDir() + defer os.RemoveAll(dir) + + f := MustTempFile(dir) + w := NewWALSegmentWriter(f) + + write := &WriteWALEntry{ + Values: points, + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := w.Write(mustMarshalEntry(write)); err != nil { + b.Fatalf("unexpected error writing entry: %v", err) + } + } +} + +func BenchmarkWALSegmentReader(b *testing.B) { + points := map[string][]value.Value{} + for i := 0; i < 5000; i++ { + k := "cpu,host=A#!~#value" + points[k] = append(points[k], value.NewValue(int64(i), 1.1)) + } + + dir := MustTempDir() + defer os.RemoveAll(dir) + + f := MustTempFile(dir) + w := NewWALSegmentWriter(f) + + write := &WriteWALEntry{ + Values: points, + } + + for i := 0; i < 100; i++ { + if err := w.Write(mustMarshalEntry(write)); err != nil { + b.Fatalf("unexpected error writing entry: %v", err) + } + } + + r := NewWALSegmentReader(f) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + b.StopTimer() + f.Seek(0, io.SeekStart) + b.StartTimer() + + for r.Next() { + _, err := r.Read() + if err != nil { + b.Fatalf("unexpected error reading entry: %v", err) + } + } + } +} + +// MustReadFileSize returns the size of the file, or panics. +func MustReadFileSize(f *os.File) int64 { + stat, err := os.Stat(f.Name()) + if err != nil { + panic(fmt.Sprintf("failed to get size of file at %s: %s", f.Name(), err.Error())) + } + return stat.Size() +} + +func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) { + bytes := make([]byte, 1024<<2) + + b, err := entry.Encode(bytes) + if err != nil { + panic(fmt.Sprintf("error encoding: %v", err)) + } + + return entry.Type(), snappy.Encode(b, b) +} diff --git a/task/backend/query_logreader.go b/task/backend/query_logreader.go index f0a4241b3e..abb6555d9a 100644 --- a/task/backend/query_logreader.go +++ b/task/backend/query_logreader.go @@ -179,7 +179,10 @@ from(bucketID: "000000000000000a") if err != nil { return nil, err } - if len(runs) != 1 { + if len(runs) == 0 { + return nil, ErrRunNotFound + } + if len(runs) > 1 { return nil, fmt.Errorf("expected one run, got %d", len(runs)) } diff --git a/task/backend/scheduler.go b/task/backend/scheduler.go index dfb3deb4f7..1198b4c15d 100644 --- a/task/backend/scheduler.go +++ b/task/backend/scheduler.go @@ -10,7 +10,7 @@ import ( "time" platform "github.com/influxdata/influxdb" - "github.com/opentracing/opentracing-go" + opentracing "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" ) @@ -659,8 +659,8 @@ func (r *runner) executeAndWait(ctx context.Context, qr QueuedRun, runLogger *za } }() - // TODO(mr): handle res.IsRetryable(). - _, err = rp.Wait() + // TODO(mr): handle rr.IsRetryable(). + rr, err := rp.Wait() close(ready) if err != nil { if err == ErrRunCanceled { @@ -678,6 +678,13 @@ func (r *runner) executeAndWait(ctx context.Context, qr QueuedRun, runLogger *za atomic.StoreUint32(r.state, runnerIdle) return } + if err := rr.Err(); err != nil { + runLogger.Info("Run failed to execute", zap.Error(err)) + // TODO(mr): retry?
+ r.updateRunState(qr, RunFail, runLogger) + atomic.StoreUint32(r.state, runnerIdle) + return + } if err := r.desiredState.FinishRun(r.ctx, qr.TaskID, qr.RunID); err != nil { runLogger.Info("Failed to finish run", zap.Error(err)) diff --git a/task/backend/scheduler_test.go b/task/backend/scheduler_test.go index 8d488193fe..2dbcb18ecc 100644 --- a/task/backend/scheduler_test.go +++ b/task/backend/scheduler_test.go @@ -483,7 +483,7 @@ func TestScheduler_RunLog(t *testing.T) { pollForRunStatus(t, rl, task.ID, 2, 1, backend.RunStarted.String()) - // Finish with failure. + // Finish with failure to create the run. promises[0].Finish(nil, errors.New("forced failure")) if _, err := e.PollForNumberRunning(task.ID, 0); err != nil { t.Fatal(err) @@ -491,7 +491,7 @@ func TestScheduler_RunLog(t *testing.T) { pollForRunStatus(t, rl, task.ID, 2, 1, backend.RunFail.String()) - // One more run, but cancel this time. + // Create a new run that starts but fails. s.Tick(8) promises, err = e.PollForNumberRunning(task.ID, 1) if err != nil { @@ -499,6 +499,20 @@ func TestScheduler_RunLog(t *testing.T) { } pollForRunStatus(t, rl, task.ID, 3, 2, backend.RunStarted.String()) + promises[0].Finish(mock.NewRunResult(errors.New("started but failed to finish properly"), false), nil) + if _, err := e.PollForNumberRunning(task.ID, 0); err != nil { + t.Fatal(err) + } + pollForRunStatus(t, rl, task.ID, 3, 2, backend.RunFail.String()) + + // One more run, but cancel this time. + s.Tick(9) + promises, err = e.PollForNumberRunning(task.ID, 1) + if err != nil { + t.Fatal(err) + } + + pollForRunStatus(t, rl, task.ID, 4, 3, backend.RunStarted.String()) // Finish with failure. promises[0].Cancel() @@ -506,7 +520,7 @@ func TestScheduler_RunLog(t *testing.T) { t.Fatal(err) } - pollForRunStatus(t, rl, task.ID, 3, 2, backend.RunCanceled.String()) + pollForRunStatus(t, rl, task.ID, 4, 3, backend.RunCanceled.String()) } func TestScheduler_Metrics(t *testing.T) { diff --git a/task/backend/storetest/logstoretest.go b/task/backend/storetest/logstoretest.go index 628e92666a..388f7b40cd 100644 --- a/task/backend/storetest/logstoretest.go +++ b/task/backend/storetest/logstoretest.go @@ -330,6 +330,11 @@ func findRunByIDTest(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFu if reflect.DeepEqual(returnedRun, rr2) { t.Fatalf("updateing returned run modified RunStore data") } + + _, err = reader.FindRunByID(ctx, task.Org, 0xccc) + if err != backend.ErrRunNotFound { + t.Fatalf("expected finding run with invalid ID to return %v, got %v", backend.ErrRunNotFound, err) + } } func listLogsTest(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFunc) { diff --git a/tsdb/cursor.go b/tsdb/cursor.go index 849835d446..f63461c5f9 100644 --- a/tsdb/cursor.go +++ b/tsdb/cursor.go @@ -2,7 +2,7 @@ package tsdb import "github.com/influxdata/influxdb/tsdb/cursors" -// These aliases exist to maintain api compatability when they were moved +// These aliases exist to maintain api compatibility when they were moved // into their own package to avoid having a heavy dependency in order to // talk about consuming data. 
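The scheduler change earlier in this diff separates two failure modes: `rp.Wait()` returning an error means the run never executed (for example `ErrRunCanceled`), while a nil error paired with a non-nil `rr.Err()` means the run executed and failed, which now also lands in `RunFail`. A self-contained sketch of that two-channel contract, using toy stand-ins rather than the real `task/backend` interfaces:

```go
package main

import (
	"errors"
	"fmt"
)

// runResult mirrors the shape the runner now inspects: Wait can fail outright,
// or succeed while the run itself recorded an error.
type runResult struct{ runErr error }

func (r runResult) Err() error { return r.runErr }

// wait stands in for RunPromise.Wait in this sketch.
func wait() (runResult, error) {
	return runResult{runErr: errors.New("started but failed to finish properly")}, nil
}

func main() {
	rr, err := wait()
	if err != nil {
		fmt.Println("run never executed:", err) // e.g. canceled before starting
		return
	}
	if err := rr.Err(); err != nil {
		fmt.Println("run executed but failed:", err) // the new RunFail branch
		return
	}
	fmt.Println("clean run: proceed to FinishRun")
}
```

This is also why the updated `TestScheduler_RunLog` finishes one promise with `(nil, error)` and another with `(mock.NewRunResult(error, false), nil)`: both end in `RunFail`, but through different branches.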
diff --git a/tsdb/tsi1/DESIGN.md b/tsdb/tsi1/DESIGN.md new file mode 100644 index 0000000000..9935a2bac3 --- /dev/null +++ b/tsdb/tsi1/DESIGN.md @@ -0,0 +1,83 @@ +# Time-Series Index + +## Introduction + +## Architecture + +### Index Structures and Access Patterns +### Series ID Sets +### Partitioning and File Types +### Compactions + +## File Format + +## Access Times + +### Insertion + +TODO + +### Retrieval + +This section provides a general idea of the typical timings one can expect when accessing the index. + +#### Measurement Retrieval + +Approximate times for retrieving _all_ measurements, equivalent to executing `SHOW MEASUREMENTS`, follow. These types of query only involve materialising data held in the index. + + - Retrieve 1 measurement from TSI index: `~100µs` + - Retrieve 100 measurements from TSI index: `~200µs` + - Retrieve 10,000 measurements from TSI index: `~8ms` + + +Note: as the number of measurements gets larger, much of the time will be spent allocating and materialising the measurements into a `[][]byte` to be returned to the caller. + + +#### Tag Keys Retrieval + +Approximate times for retrieving _all_ tag keys, equivalent to executing `SHOW TAG KEYS`, follow. These types of query only involve materialising data held in the index. + + - Retrieve 1 tag key from TSI index: `~65µs` + - Retrieve 100 tag keys from TSI index: `~90µs` + - Retrieve 1,000 tag keys from TSI index: `~1.3ms` + +Note: the times here show only the TSI index access for retrieving the tag keys. In practice, the measurement retrieval times need to be added on top, since you need a measurement name to access the tag keys. + + +#### Tag Value Retrieval + +Approximate times for retrieving _all_ tag values for a _specific_ tag key, equivalent to `SHOW TAG VALUES WITH KEY = "region"`, follow. These types of query only involve materialising data held in the index. + + - Retrieve 1 tag value from TSI index: `~20µs` + - Retrieve 100 tag values from TSI index: `~240µs` + - Retrieve 10,000 tag values from TSI index: `~13ms` + + +#### Series ID Retrieval + +Approximate times for retrieving a set of matching series ids for different total cardinalities follow. + + - Retrieve 1 series id for db with cardinality 1: `~50µs` (`10µs`) + - Retrieve 10 series ids for db with cardinality 100: `~50µs` (`10µs`) + - Retrieve 100 series ids for db with cardinality 10,000: `~80µs` (`10µs`) + - Retrieve 10,000 series ids for db with cardinality 1,000,000: `~600µs` (`10µs`) + - Retrieve 100,000 series ids for db with cardinality 10,000,000: `~22ms` (`10µs`) + + +Note: the initial time is for the first observation. The second (parenthesised) time is for subsequent observations. Subsequent observations make use of the TSI bitset cache introduced in [#10234](https://github.com/influxdata/influxdb/pull/10234). + + +#### Complex Series ID Retrieval + +Approximate times for retrieving a set of matching series ids for different total cardinalities follow. In these cases, each retrieval is based on two tag key/value predicates, e.g., `SHOW SERIES WHERE "region" = 'west' AND "zone" = 'a'`. + + - Retrieve 1,000 series ids for db with cardinality 1,000,000: `~8ms` (`15µs`) + - Retrieve 10,000 series ids for db with cardinality 10,000,000: `~7ms` (`25µs`) + + +Note: the initial time is for the first observation. The second (parenthesised) time is for subsequent observations. Subsequent observations make use of the TSI bitset cache introduced in [#10234](https://github.com/influxdata/influxdb/pull/10234).
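As the next paragraph explains, the complex case retrieves one series ID set per predicate and intersects them. A minimal sketch of that intersection over sorted ID slices (illustrative only; the index holds these sets as bitsets, per the cache note above):

```go
package main

import "fmt"

// intersect returns the IDs present in both sorted slices: the operation
// performed once one series ID set per predicate has been retrieved.
func intersect(a, b []uint64) []uint64 {
	var out []uint64
	for i, j := 0, 0; i < len(a) && j < len(b); {
		switch {
		case a[i] == b[j]:
			out = append(out, a[i])
			i++
			j++
		case a[i] < b[j]:
			i++
		default:
			j++
		}
	}
	return out
}

func main() {
	region := []uint64{1, 3, 5, 8} // series matching region = 'west'
	zone := []uint64{2, 3, 8, 9}   // series matching zone = 'a'
	fmt.Println(intersect(region, zone)) // [3 8]
}
```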
+In these more complex cases, a series ID set is retrieved for each of the predicates. The sets are then intersected to identify the final set. Cache times, then, are typically doubled since each series id set for each predicate is stored separately. +There will be some additional overhead for the intersection operation. + + + diff --git a/tsdb/tsm1/array_cursor.gen.go b/tsdb/tsm1/array_cursor.gen.go index a18b183784..a6e753fe48 100644 --- a/tsdb/tsm1/array_cursor.gen.go +++ b/tsdb/tsm1/array_cursor.gen.go @@ -83,12 +83,12 @@ func (c *floatArrayAscendingCursor) Next() *tsdb.FloatArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value + c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue() c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value + c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue() c.cache.pos++ } else { c.res.Timestamps[pos] = tkey @@ -126,7 +126,7 @@ func (c *floatArrayAscendingCursor) Next() *tsdb.FloatArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value + c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue() pos++ c.cache.pos++ } @@ -247,12 +247,12 @@ func (c *floatArrayDescendingCursor) Next() *tsdb.FloatArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value + c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue() c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value + c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue() c.cache.pos-- } else { c.res.Timestamps[pos] = tkey @@ -285,7 +285,7 @@ func (c *floatArrayDescendingCursor) Next() *tsdb.FloatArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value + c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue() pos++ c.cache.pos-- } @@ -391,12 +391,12 @@ func (c *integerArrayAscendingCursor) Next() *tsdb.IntegerArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value + c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue() c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value + c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue() c.cache.pos++ } else { c.res.Timestamps[pos] = tkey @@ -434,7 +434,7 @@ func (c *integerArrayAscendingCursor) Next() *tsdb.IntegerArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value + c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue() pos++ c.cache.pos++ } @@ -555,12 +555,12 @@ func (c *integerArrayDescendingCursor) Next() *tsdb.IntegerArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value + c.res.Values[pos] = 
cvals[c.cache.pos].(IntegerValue).RawValue() c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value + c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue() c.cache.pos-- } else { c.res.Timestamps[pos] = tkey @@ -593,7 +593,7 @@ func (c *integerArrayDescendingCursor) Next() *tsdb.IntegerArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value + c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue() pos++ c.cache.pos-- } @@ -699,12 +699,12 @@ func (c *unsignedArrayAscendingCursor) Next() *tsdb.UnsignedArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value + c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue() c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value + c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue() c.cache.pos++ } else { c.res.Timestamps[pos] = tkey @@ -742,7 +742,7 @@ func (c *unsignedArrayAscendingCursor) Next() *tsdb.UnsignedArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value + c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue() pos++ c.cache.pos++ } @@ -863,12 +863,12 @@ func (c *unsignedArrayDescendingCursor) Next() *tsdb.UnsignedArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value + c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue() c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value + c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue() c.cache.pos-- } else { c.res.Timestamps[pos] = tkey @@ -901,7 +901,7 @@ func (c *unsignedArrayDescendingCursor) Next() *tsdb.UnsignedArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value + c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue() pos++ c.cache.pos-- } @@ -1007,12 +1007,12 @@ func (c *stringArrayAscendingCursor) Next() *tsdb.StringArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value + c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue() c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value + c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue() c.cache.pos++ } else { c.res.Timestamps[pos] = tkey @@ -1050,7 +1050,7 @@ func (c *stringArrayAscendingCursor) Next() *tsdb.StringArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value + c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue() pos++ c.cache.pos++ } @@ -1173,12 +1173,12 @@ func (c *stringArrayDescendingCursor) Next() 
*tsdb.StringArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value + c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue() c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value + c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue() c.cache.pos-- } else { c.res.Timestamps[pos] = tkey @@ -1211,7 +1211,7 @@ func (c *stringArrayDescendingCursor) Next() *tsdb.StringArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value + c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue() pos++ c.cache.pos-- } @@ -1319,12 +1319,12 @@ func (c *booleanArrayAscendingCursor) Next() *tsdb.BooleanArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value + c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue() c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value + c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue() c.cache.pos++ } else { c.res.Timestamps[pos] = tkey @@ -1362,7 +1362,7 @@ func (c *booleanArrayAscendingCursor) Next() *tsdb.BooleanArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value + c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue() pos++ c.cache.pos++ } @@ -1483,12 +1483,12 @@ func (c *booleanArrayDescendingCursor) Next() *tsdb.BooleanArray { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value + c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue() c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value + c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue() c.cache.pos-- } else { c.res.Timestamps[pos] = tkey @@ -1521,7 +1521,7 @@ func (c *booleanArrayDescendingCursor) Next() *tsdb.BooleanArray { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value + c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue() pos++ c.cache.pos-- } diff --git a/tsdb/tsm1/array_cursor.gen.go.tmpl b/tsdb/tsm1/array_cursor.gen.go.tmpl index 3ec768e28a..e7b27dd765 100644 --- a/tsdb/tsm1/array_cursor.gen.go.tmpl +++ b/tsdb/tsm1/array_cursor.gen.go.tmpl @@ -54,7 +54,7 @@ c.end = end }) } -func (c *{{$type}}) Err() error { return nil } +func (c *{{$type}}) Err() error { return nil } // close closes the cursor and any dependent cursors. 
func (c *{{$type}}) Close() { @@ -82,12 +82,12 @@ func (c *{{$type}}) Next() {{$arrayType}} { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value + c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue() c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value + c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue() c.cache.pos++ } else { c.res.Timestamps[pos] = tkey @@ -125,7 +125,7 @@ func (c *{{$type}}) Next() {{$arrayType}} { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value + c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue() pos++ c.cache.pos++ } @@ -228,7 +228,7 @@ func (c *{{$type}}) reset(seek, end int64, cacheValues Values, tsmKeyCursor *Key } } -func (c *{{$type}}) Err() error { return nil } +func (c *{{$type}}) Err() error { return nil } func (c *{{$type}}) Close() { if c.tsm.keyCursor != nil { @@ -254,12 +254,12 @@ func (c *{{$type}}) Next() {{$arrayType}} { tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value + c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue() c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value + c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue() c.cache.pos-- } else { c.res.Timestamps[pos] = tkey @@ -292,7 +292,7 @@ func (c *{{$type}}) Next() {{$arrayType}} { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value + c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue() pos++ c.cache.pos-- } diff --git a/tsdb/tsm1/array_cursor.gen.go.tmpldata b/tsdb/tsm1/array_cursor.gen.go.tmpldata new file mode 100644 index 0000000000..648898fbdb --- /dev/null +++ b/tsdb/tsm1/array_cursor.gen.go.tmpldata @@ -0,0 +1,42 @@ +[ + { + "Name":"Float", + "name":"float", + "Type":"float64", + "ValueType":"FloatValue", + "Nil":"0", + "Size":"8" + }, + { + "Name":"Integer", + "name":"integer", + "Type":"int64", + "ValueType":"IntegerValue", + "Nil":"0", + "Size":"8" + }, + { + "Name":"Unsigned", + "name":"unsigned", + "Type":"uint64", + "ValueType":"UnsignedValue", + "Nil":"0", + "Size":"8" + }, + { + "Name":"String", + "name":"string", + "Type":"string", + "ValueType":"StringValue", + "Nil":"\"\"", + "Size":"0" + }, + { + "Name":"Boolean", + "name":"boolean", + "Type":"bool", + "ValueType":"BooleanValue", + "Nil":"false", + "Size":"1" + } +] diff --git a/tsdb/tsm1/cache.go b/tsdb/tsm1/cache.go index 25f973d00b..ea5868a97d 100644 --- a/tsdb/tsm1/cache.go +++ b/tsdb/tsm1/cache.go @@ -1,14 +1,15 @@ package tsm1 import ( + "bytes" "fmt" "math" - "os" "sync" "sync/atomic" "time" "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/storage/wal" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" "github.com/prometheus/client_golang/prometheus" @@ -545,50 +546,50 @@ func (c *Cache) Values(key []byte) Values { return values } -// Delete removes all values for the given keys from the cache. 
-func (c *Cache) Delete(keys [][]byte) { - c.DeleteRange(keys, math.MinInt64, math.MaxInt64) -} - -// DeleteRange removes the values for all keys containing points -// with timestamps between between min and max from the cache. -// -// TODO(edd): Lock usage could possibly be optimised if necessary. -func (c *Cache) DeleteRange(keys [][]byte, min, max int64) { +// DeleteBucketRange removes values for all keys containing points +// with timestamps between min and max contained in the bucket identified +// by name from the cache. +func (c *Cache) DeleteBucketRange(name []byte, min, max int64) { c.init() + // TODO(edd/jeff): find a way to optimize lock usage c.mu.Lock() defer c.mu.Unlock() + var toDelete [][]byte var total uint64 - for _, k := range keys { - // Make sure key exist in the cache, skip if it does not - e := c.store.entry(k) - if e == nil { - continue - } + // applySerial only errors if the closure returns an error. + _ = c.store.applySerial(func(k []byte, e *entry) error { + if !bytes.HasPrefix(k, name) { + return nil + } total += uint64(e.size()) - // Everything is being deleted. + + // if everything is being deleted, just stage it to be deleted and move on. if min == math.MinInt64 && max == math.MaxInt64 { - total += uint64(len(k)) // all entries and the key. - c.store.remove(k) - continue + toDelete = append(toDelete, k) + return nil } - // Filter what to delete by time range. + // filter the values and subtract out the remaining bytes from the reduction. e.filter(min, max) + total -= uint64(e.size()) + + // if it has no entries left, flag it to be deleted. if e.count() == 0 { - // Nothing left in cache for that key - total += uint64(len(k)) // all entries and the key. - c.store.remove(k) - continue + toDelete = append(toDelete, k) } - // Just update what is being deleted by the size of the filtered entries. - total -= uint64(e.size()) + return nil + }) + + for _, k := range toDelete { + total += uint64(len(k)) + c.store.remove(k) } - c.tracker.DecCacheSize(total) // Decrease the live cache size. + + c.tracker.DecCacheSize(total) c.tracker.SetMemBytes(uint64(c.Size())) } @@ -624,92 +625,45 @@ func (c *Cache) ApplyEntryFn(f func(key []byte, entry *entry) error) error { } // CacheLoader processes a set of WAL segment files, and loads a cache with the data -// contained within those files. Processing of the supplied files take place in the -// order they exist in the files slice. +// contained within those files. type CacheLoader struct { - files []string - - Logger *zap.Logger + reader *wal.WALReader } // NewCacheLoader returns a new instance of a CacheLoader. func NewCacheLoader(files []string) *CacheLoader { return &CacheLoader{ - files: files, - Logger: zap.NewNop(), + reader: wal.NewWALReader(files), } } // Load returns a cache loaded with the data contained within the segment files. -// If, during reading of a segment file, corruption is encountered, that segment -// file is truncated up to and including the last valid byte, and processing -// continues with the next segment file. 
func (cl *CacheLoader) Load(cache *Cache) error { + return cl.reader.Read(func(entry wal.WALEntry) error { + switch en := entry.(type) { + case *wal.WriteWALEntry: + return cache.WriteMulti(en.Values) - var r *WALSegmentReader - for _, fn := range cl.files { - if err := func() error { - f, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0666) - if err != nil { - return err - } - defer f.Close() + case *wal.DeleteBucketRangeWALEntry: + // TODO(edd): we need to clean up how we're encoding the prefix so that we + // don't have to remember to get it right everywhere we need to touch TSM data. + encoded := tsdb.EncodeName(en.OrgID, en.BucketID) + name := models.EscapeMeasurement(encoded[:]) - // Log some information about the segments. - stat, err := os.Stat(f.Name()) - if err != nil { - return err - } - cl.Logger.Info("Reading file", zap.String("path", f.Name()), zap.Int64("size", stat.Size())) - - // Nothing to read, skip it - if stat.Size() == 0 { - return nil - } - - if r == nil { - r = NewWALSegmentReader(f) - defer r.Close() - } else { - r.Reset(f) - } - - for r.Next() { - entry, err := r.Read() - if err != nil { - n := r.Count() - cl.Logger.Info("File corrupt", zap.Error(err), zap.String("path", f.Name()), zap.Int64("pos", n)) - if err := f.Truncate(n); err != nil { - return err - } - break - } - - switch t := entry.(type) { - case *WriteWALEntry: - if err := cache.WriteMulti(t.Values); err != nil { - return err - } - case *DeleteRangeWALEntry: - cache.DeleteRange(t.Keys, t.Min, t.Max) - case *DeleteWALEntry: - cache.Delete(t.Keys) - } - } - - return r.Close() - }(); err != nil { - return err + cache.DeleteBucketRange(name, en.Min, en.Max) + return nil } - } - return nil + + return nil + }) } // WithLogger sets the logger on the CacheLoader. -func (cl *CacheLoader) WithLogger(log *zap.Logger) { - cl.Logger = log.With(zap.String("service", "cacheloader")) +func (cl *CacheLoader) WithLogger(logger *zap.Logger) { + cl.reader.WithLogger(logger.With(zap.String("service", "cacheloader"))) } +// LastWriteTime returns the time that the cache was last written to. 
func (c *Cache) LastWriteTime() time.Time { c.mu.RLock() defer c.mu.RUnlock() diff --git a/tsdb/tsm1/cache_test.go b/tsdb/tsm1/cache_test.go index a5f107529c..4626ce2ed9 100644 --- a/tsdb/tsm1/cache_test.go +++ b/tsdb/tsm1/cache_test.go @@ -15,6 +15,8 @@ import ( "sync/atomic" "testing" + "github.com/influxdata/influxdb/storage/wal" + "github.com/golang/snappy" ) @@ -167,7 +169,7 @@ func TestCache_CacheWriteMulti_TypeConflict(t *testing.T) { } } -func TestCache_Cache_DeleteRange(t *testing.T) { +func TestCache_Cache_DeleteBucketRange(t *testing.T) { v0 := NewValue(1, 1.0) v1 := NewValue(2, 2.0) v2 := NewValue(3, 3.0) @@ -187,7 +189,7 @@ func TestCache_Cache_DeleteRange(t *testing.T) { t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) } - c.DeleteRange([][]byte{[]byte("bar")}, 2, math.MaxInt64) + c.DeleteBucketRange([]byte("bar"), 2, math.MaxInt64) if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { t.Fatalf("cache keys incorrect after delete, exp %v, got %v", exp, keys) @@ -206,7 +208,7 @@ func TestCache_Cache_DeleteRange(t *testing.T) { } } -func TestCache_DeleteRange_NoValues(t *testing.T) { +func TestCache_DeleteBucketRange_NoValues(t *testing.T) { v0 := NewValue(1, 1.0) v1 := NewValue(2, 2.0) v2 := NewValue(3, 3.0) @@ -226,7 +228,7 @@ func TestCache_DeleteRange_NoValues(t *testing.T) { t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) } - c.DeleteRange([][]byte{[]byte("foo")}, math.MinInt64, math.MaxInt64) + c.DeleteBucketRange([]byte("foo"), math.MinInt64, math.MaxInt64) if exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) { t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) @@ -241,7 +243,7 @@ func TestCache_DeleteRange_NoValues(t *testing.T) { } } -func TestCache_DeleteRange_NotSorted(t *testing.T) { +func TestCache_DeleteBucketRange_NotSorted(t *testing.T) { v0 := NewValue(1, 1.0) v1 := NewValue(3, 3.0) v2 := NewValue(2, 2.0) @@ -261,7 +263,7 @@ func TestCache_DeleteRange_NotSorted(t *testing.T) { t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) } - c.DeleteRange([][]byte{[]byte("foo")}, 1, 3) + c.DeleteBucketRange([]byte("foo"), 1, 3) if exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) { t.Fatalf("cache keys incorrect after delete, exp %v, got %v", exp, keys) @@ -276,49 +278,10 @@ func TestCache_DeleteRange_NotSorted(t *testing.T) { } } -func TestCache_Cache_Delete(t *testing.T) { - v0 := NewValue(1, 1.0) - v1 := NewValue(2, 2.0) - v2 := NewValue(3, 3.0) - values := Values{v0, v1, v2} - valuesSize := uint64(v0.Size() + v1.Size() + v2.Size()) - - c := NewCache(30 * valuesSize) - - if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - if n := c.Size(); n != 2*valuesSize+6 { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n) - } - - if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - c.Delete([][]byte{[]byte("bar")}) - - if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - if got, exp := c.Size(), valuesSize+3; exp != got { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got) - } - - if got, exp := 
len(c.Values([]byte("bar"))), 0; got != exp { - t.Fatalf("cache values mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(c.Values([]byte("foo"))), 3; got != exp { - t.Fatalf("cache values mismatch: got %v, exp %v", got, exp) - } -} - -func TestCache_Cache_Delete_NonExistent(t *testing.T) { +func TestCache_DeleteBucketRange_NonExistent(t *testing.T) { c := NewCache(1024) - c.Delete([][]byte{[]byte("bar")}) + c.DeleteBucketRange([]byte("bar"), math.MinInt64, math.MaxInt64) if got, exp := c.Size(), uint64(0); exp != got { t.Fatalf("cache size incorrect exp %d, got %d", exp, got) @@ -604,7 +567,7 @@ func TestCacheLoader_LoadSingle(t *testing.T) { dir := mustTempDir() defer os.RemoveAll(dir) f := mustTempFile(dir) - w := NewWALSegmentWriter(f) + w := wal.NewWALSegmentWriter(f) p1 := NewValue(1, 1.1) p2 := NewValue(1, int64(1)) @@ -616,7 +579,7 @@ func TestCacheLoader_LoadSingle(t *testing.T) { "baz": {p3}, } - entry := &WriteWALEntry{ + entry := &wal.WriteWALEntry{ Values: values, } @@ -676,7 +639,7 @@ func TestCacheLoader_LoadDouble(t *testing.T) { dir := mustTempDir() defer os.RemoveAll(dir) f1, f2 := mustTempFile(dir), mustTempFile(dir) - w1, w2 := NewWALSegmentWriter(f1), NewWALSegmentWriter(f2) + w1, w2 := wal.NewWALSegmentWriter(f1), wal.NewWALSegmentWriter(f2) p1 := NewValue(1, 1.1) p2 := NewValue(1, int64(1)) @@ -685,8 +648,8 @@ func TestCacheLoader_LoadDouble(t *testing.T) { // Write first and second segment. - segmentWrite := func(w *WALSegmentWriter, values map[string][]Value) { - entry := &WriteWALEntry{ + segmentWrite := func(w *wal.WALSegmentWriter, values map[string][]Value) { + entry := &wal.WriteWALEntry{ Values: values, } if err := w1.Write(mustMarshalEntry(entry)); err != nil { @@ -735,73 +698,6 @@ func TestCacheLoader_LoadDouble(t *testing.T) { } } -// Ensure the CacheLoader can load deleted series -func TestCacheLoader_LoadDeleted(t *testing.T) { - // Create a WAL segment. - dir := mustTempDir() - defer os.RemoveAll(dir) - f := mustTempFile(dir) - w := NewWALSegmentWriter(f) - - p1 := NewValue(1, 1.0) - p2 := NewValue(2, 2.0) - p3 := NewValue(3, 3.0) - - values := map[string][]Value{ - "foo": {p1, p2, p3}, - } - - entry := &WriteWALEntry{ - Values: values, - } - - if err := w.Write(mustMarshalEntry(entry)); err != nil { - t.Fatal("write points", err) - } - - if err := w.Flush(); err != nil { - t.Fatalf("flush error: %v", err) - } - - dentry := &DeleteRangeWALEntry{ - Keys: [][]byte{[]byte("foo")}, - Min: 2, - Max: 3, - } - - if err := w.Write(mustMarshalEntry(dentry)); err != nil { - t.Fatal("write points", err) - } - - if err := w.Flush(); err != nil { - t.Fatalf("flush error: %v", err) - } - - // Load the cache using the segment. - cache := NewCache(1024) - loader := NewCacheLoader([]string{f.Name()}) - if err := loader.Load(cache); err != nil { - t.Fatalf("failed to load cache: %s", err.Error()) - } - - // Check the cache. - if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) { - t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1}) - } - - // Reload the cache using the segment. - cache = NewCache(1024) - loader = NewCacheLoader([]string{f.Name()}) - if err := loader.Load(cache); err != nil { - t.Fatalf("failed to load cache: %s", err.Error()) - } - - // Check the cache. 
- if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) { - t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1}) - } -} - func TestCache_Split(t *testing.T) { v0 := NewValue(1, 1.0) v1 := NewValue(2, 2.0) @@ -857,7 +753,7 @@ func mustTempFile(dir string) *os.File { return f } -func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) { +func mustMarshalEntry(entry wal.WALEntry) (wal.WalEntryType, []byte) { bytes := make([]byte, 1024<<2) b, err := entry.Encode(bytes) diff --git a/tsdb/tsm1/encoding.gen.go b/tsdb/tsm1/encoding.gen.go index 274e35b3c8..3ff8c8db24 100644 --- a/tsdb/tsm1/encoding.gen.go +++ b/tsdb/tsm1/encoding.gen.go @@ -212,8 +212,8 @@ type FloatValues []FloatValue func NewFloatArrayFromValues(v FloatValues) *tsdb.FloatArray { a := tsdb.NewFloatArrayLen(len(v)) for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value + a.Timestamps[i] = val.UnixNano() + a.Values[i] = val.RawValue() } return a } @@ -442,8 +442,8 @@ func encodeFloatValuesBlock(buf []byte, values []FloatValue) ([]byte, error) { var b []byte err := func() error { for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(v.value) + tsenc.Write(v.UnixNano()) + venc.Write(v.RawValue()) } venc.Flush() @@ -482,8 +482,8 @@ type IntegerValues []IntegerValue func NewIntegerArrayFromValues(v IntegerValues) *tsdb.IntegerArray { a := tsdb.NewIntegerArrayLen(len(v)) for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value + a.Timestamps[i] = val.UnixNano() + a.Values[i] = val.RawValue() } return a } @@ -712,8 +712,8 @@ func encodeIntegerValuesBlock(buf []byte, values []IntegerValue) ([]byte, error) var b []byte err := func() error { for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(v.value) + tsenc.Write(v.UnixNano()) + venc.Write(v.RawValue()) } venc.Flush() @@ -752,8 +752,8 @@ type UnsignedValues []UnsignedValue func NewUnsignedArrayFromValues(v UnsignedValues) *tsdb.UnsignedArray { a := tsdb.NewUnsignedArrayLen(len(v)) for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value + a.Timestamps[i] = val.UnixNano() + a.Values[i] = val.RawValue() } return a } @@ -982,8 +982,8 @@ func encodeUnsignedValuesBlock(buf []byte, values []UnsignedValue) ([]byte, erro var b []byte err := func() error { for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(int64(v.value)) + tsenc.Write(v.UnixNano()) + venc.Write(int64(v.RawValue())) } venc.Flush() @@ -1022,8 +1022,8 @@ type StringValues []StringValue func NewStringArrayFromValues(v StringValues) *tsdb.StringArray { a := tsdb.NewStringArrayLen(len(v)) for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value + a.Timestamps[i] = val.UnixNano() + a.Values[i] = val.RawValue() } return a } @@ -1252,8 +1252,8 @@ func encodeStringValuesBlock(buf []byte, values []StringValue) ([]byte, error) { var b []byte err := func() error { for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(v.value) + tsenc.Write(v.UnixNano()) + venc.Write(v.RawValue()) } venc.Flush() @@ -1292,8 +1292,8 @@ type BooleanValues []BooleanValue func NewBooleanArrayFromValues(v BooleanValues) *tsdb.BooleanArray { a := tsdb.NewBooleanArrayLen(len(v)) for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value + a.Timestamps[i] = val.UnixNano() + a.Values[i] = val.RawValue() } return a } @@ -1522,8 +1522,8 @@ func encodeBooleanValuesBlock(buf []byte, values []BooleanValue) ([]byte, error) var b []byte err 
:= func() error { for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(v.value) + tsenc.Write(v.UnixNano()) + venc.Write(v.RawValue()) } venc.Flush() diff --git a/tsdb/tsm1/encoding.gen.go.tmpl b/tsdb/tsm1/encoding.gen.go.tmpl index 4f1114459c..9956985700 100644 --- a/tsdb/tsm1/encoding.gen.go.tmpl +++ b/tsdb/tsm1/encoding.gen.go.tmpl @@ -15,8 +15,8 @@ type {{.Name}}Values []{{.Name}}Value func New{{.Name}}ArrayFromValues(v {{.Name}}Values) *tsdb.{{.Name}}Array { a := tsdb.New{{.Name}}ArrayLen(len(v)) for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value + a.Timestamps[i] = val.UnixNano() + a.Values[i] = val.RawValue() } return a } @@ -247,8 +247,8 @@ func encode{{ .Name }}ValuesBlock(buf []byte, values []{{.Name}}Value) ([]byte, var b []byte err := func() error { for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write({{if .CastType}}{{.CastType}}(v.value){{else}}v.value{{end}}) + tsenc.Write(v.UnixNano()) + venc.Write({{if .CastType}}{{.CastType}}(v.RawValue()){{else}}v.RawValue(){{end}}) } venc.Flush() diff --git a/tsdb/tsm1/encoding.gen_test.go b/tsdb/tsm1/encoding.gen_test.go index 9fb01893f0..a14c15d71c 100644 --- a/tsdb/tsm1/encoding.gen_test.go +++ b/tsdb/tsm1/encoding.gen_test.go @@ -14,7 +14,7 @@ func makeIntegerValues(count int, min, max int64) IntegerValues { inc := (max - min) / int64(count) for i := 0; i < count; i++ { - vals[i].unixnano = ts + vals[i] = NewRawIntegerValue(ts, 0) ts += inc } @@ -24,7 +24,7 @@ func makeIntegerValues(count int, min, max int64) IntegerValues { func makeIntegerValuesFromSlice(t []int64) IntegerValues { iv := make(IntegerValues, len(t)) for i, v := range t { - iv[i].unixnano = v + iv[i] = NewRawIntegerValue(v, 0) } return iv } @@ -91,7 +91,7 @@ func TestIntegerValues_Exclude(t *testing.T) { vals = vals.Exclude(tc.min, tc.max) var got []int64 for _, v := range vals { - got = append(got, v.unixnano) + got = append(got, v.UnixNano()) } opt := cmp.AllowUnexported(IntegerValue{}) if !cmp.Equal(tc.exp, got, opt) { @@ -122,7 +122,7 @@ func TestIntegerValues_Include(t *testing.T) { vals = vals.Include(tc.min, tc.max) var got []int64 for _, v := range vals { - got = append(got, v.unixnano) + got = append(got, v.UnixNano()) } opt := cmp.AllowUnexported(IntegerValue{}) if !cmp.Equal(tc.exp, got, opt) { diff --git a/tsdb/tsm1/encoding.go b/tsdb/tsm1/encoding.go index 2f5e48d493..a176f42856 100644 --- a/tsdb/tsm1/encoding.go +++ b/tsdb/tsm1/encoding.go @@ -4,10 +4,8 @@ import ( "encoding/binary" "fmt" "runtime" - "time" "github.com/influxdata/influxdb/pkg/pool" - "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" ) @@ -93,89 +91,6 @@ var ( }) ) -// Value represents a TSM-encoded value. -type Value interface { - // UnixNano returns the timestamp of the value in nanoseconds since unix epoch. - UnixNano() int64 - - // Value returns the underlying value. - Value() interface{} - - // Size returns the number of bytes necessary to represent the value and its timestamp. - Size() int - - // String returns the string representation of the value and its timestamp. - String() string - - // internalOnly is unexported to ensure implementations of Value - // can only originate in this package. - internalOnly() -} - -// NewValue returns a new Value with the underlying type dependent on value. 
-func NewValue(t int64, value interface{}) Value { - switch v := value.(type) { - case int64: - return IntegerValue{unixnano: t, value: v} - case uint64: - return UnsignedValue{unixnano: t, value: v} - case float64: - return FloatValue{unixnano: t, value: v} - case bool: - return BooleanValue{unixnano: t, value: v} - case string: - return StringValue{unixnano: t, value: v} - } - return EmptyValue{} -} - -// NewIntegerValue returns a new integer value. -func NewIntegerValue(t int64, v int64) Value { - return IntegerValue{unixnano: t, value: v} -} - -// NewUnsignedValue returns a new unsigned integer value. -func NewUnsignedValue(t int64, v uint64) Value { - return UnsignedValue{unixnano: t, value: v} -} - -// NewFloatValue returns a new float value. -func NewFloatValue(t int64, v float64) Value { - return FloatValue{unixnano: t, value: v} -} - -// NewBooleanValue returns a new boolean value. -func NewBooleanValue(t int64, v bool) Value { - return BooleanValue{unixnano: t, value: v} -} - -// NewStringValue returns a new string value. -func NewStringValue(t int64, v string) Value { - return StringValue{unixnano: t, value: v} -} - -// EmptyValue is used when there is no appropriate other value. -type EmptyValue struct{} - -// UnixNano returns tsdb.EOF. -func (e EmptyValue) UnixNano() int64 { return tsdb.EOF } - -// Value returns nil. -func (e EmptyValue) Value() interface{} { return nil } - -// Size returns 0. -func (e EmptyValue) Size() int { return 0 } - -// String returns the empty string. -func (e EmptyValue) String() string { return "" } - -func (EmptyValue) internalOnly() {} -func (StringValue) internalOnly() {} -func (IntegerValue) internalOnly() {} -func (UnsignedValue) internalOnly() {} -func (BooleanValue) internalOnly() {} -func (FloatValue) internalOnly() {} - // Encode converts the values to a byte slice. If there are no values, // this function panics. func (a Values) Encode(buf []byte) ([]byte, error) { @@ -318,32 +233,6 @@ func DecodeBlock(block []byte, vals []Value) ([]Value, error) { } } -// FloatValue represents a float64 value. -type FloatValue struct { - unixnano int64 - value float64 -} - -// UnixNano returns the timestamp of the value. -func (v FloatValue) UnixNano() int64 { - return v.unixnano -} - -// Value returns the underlying float64 value. -func (v FloatValue) Value() interface{} { - return v.value -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v FloatValue) Size() int { - return 16 -} - -// String returns the string representation of the value and its timestamp. 
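Note that the Value interface, its constructors, and the concrete types are deleted from encoding.go but not from the package: call sites rewritten throughout this diff still use NewValue, switch to NewRawFloatValue and friends, and read through the UnixNano()/RawValue() accessors, so the definitions have presumably moved to their own file. A sketch of the surface those call sites assume:

```go
// Constructors return the concrete types (DecodeFloatBlock below assigns
// NewRawFloatValue results into a []FloatValue), and reads go through exported
// accessors instead of the removed unixnano/value struct fields.
fv := NewRawFloatValue(1000000000, 1.1)
iv := NewRawIntegerValue(2000000000, 7)
fmt.Println(fv.UnixNano(), fv.RawValue()) // 1000000000 1.1
fmt.Println(iv.UnixNano(), iv.RawValue()) // 2000000000 7

var v Value = NewValue(3000000000, "s") // interface form, still used by the tests
_ = v.UnixNano()
```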
-func (v FloatValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.value) -} - func encodeFloatBlock(buf []byte, values []Value) ([]byte, error) { if len(values) == 0 { return nil, nil @@ -373,8 +262,8 @@ func encodeFloatBlockUsing(buf []byte, values []Value, tsenc TimeEncoder, venc * for _, v := range values { vv := v.(FloatValue) - tsenc.Write(vv.unixnano) - venc.Write(vv.value) + tsenc.Write(vv.UnixNano()) + venc.Write(vv.RawValue()) } venc.Flush() @@ -432,7 +321,7 @@ func DecodeFloatBlock(block []byte, a *[]FloatValue) ([]FloatValue, error) { // Decode both a timestamp and value j := 0 for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = FloatValue{unixnano: tdec.Read(), value: vdec.Values()} + a[j] = NewRawFloatValue(tdec.Read(), vdec.Values()) j++ } i = j @@ -453,32 +342,6 @@ func DecodeFloatBlock(block []byte, a *[]FloatValue) ([]FloatValue, error) { return (*a)[:i], err } -// BooleanValue represents a boolean value. -type BooleanValue struct { - unixnano int64 - value bool -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v BooleanValue) Size() int { - return 9 -} - -// UnixNano returns the timestamp of the value in nanoseconds since unix epoch. -func (v BooleanValue) UnixNano() int64 { - return v.unixnano -} - -// Value returns the underlying boolean value. -func (v BooleanValue) Value() interface{} { - return v.value -} - -// String returns the string representation of the value and its timestamp. -func (v BooleanValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) -} - func encodeBooleanBlock(buf []byte, values []Value) ([]byte, error) { if len(values) == 0 { return nil, nil @@ -505,8 +368,8 @@ func encodeBooleanBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc for _, v := range values { vv := v.(BooleanValue) - tenc.Write(vv.unixnano) - venc.Write(vv.value) + tenc.Write(vv.UnixNano()) + venc.Write(vv.RawValue()) } // Encoded timestamp values @@ -560,7 +423,7 @@ func DecodeBooleanBlock(block []byte, a *[]BooleanValue) ([]BooleanValue, error) // Decode both a timestamp and value j := 0 for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = BooleanValue{unixnano: tdec.Read(), value: vdec.Read()} + a[j] = NewRawBooleanValue(tdec.Read(), vdec.Read()) j++ } i = j @@ -580,32 +443,6 @@ func DecodeBooleanBlock(block []byte, a *[]BooleanValue) ([]BooleanValue, error) return (*a)[:i], err } -// IntegerValue represents an int64 value. -type IntegerValue struct { - unixnano int64 - value int64 -} - -// Value returns the underlying int64 value. -func (v IntegerValue) Value() interface{} { - return v.value -} - -// UnixNano returns the timestamp of the value. -func (v IntegerValue) UnixNano() int64 { - return v.unixnano -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v IntegerValue) Size() int { - return 16 -} - -// String returns the string representation of the value and its timestamp. 
-func (v IntegerValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) -} - func encodeIntegerBlock(buf []byte, values []Value) ([]byte, error) { tenc := getTimeEncoder(len(values)) venc := getIntegerEncoder(len(values)) @@ -624,8 +461,8 @@ func encodeIntegerBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc for _, v := range values { vv := v.(IntegerValue) - tenc.Write(vv.unixnano) - venc.Write(vv.value) + tenc.Write(vv.UnixNano()) + venc.Write(vv.RawValue()) } // Encoded timestamp values @@ -679,7 +516,7 @@ func DecodeIntegerBlock(block []byte, a *[]IntegerValue) ([]IntegerValue, error) // Decode both a timestamp and value j := 0 for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = IntegerValue{unixnano: tdec.Read(), value: vdec.Read()} + a[j] = NewRawIntegerValue(tdec.Read(), vdec.Read()) j++ } i = j @@ -699,32 +536,6 @@ func DecodeIntegerBlock(block []byte, a *[]IntegerValue) ([]IntegerValue, error) return (*a)[:i], err } -// UnsignedValue represents an int64 value. -type UnsignedValue struct { - unixnano int64 - value uint64 -} - -// Value returns the underlying int64 value. -func (v UnsignedValue) Value() interface{} { - return v.value -} - -// UnixNano returns the timestamp of the value. -func (v UnsignedValue) UnixNano() int64 { - return v.unixnano -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v UnsignedValue) Size() int { - return 16 -} - -// String returns the string representation of the value and its timestamp. -func (v UnsignedValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) -} - func encodeUnsignedBlock(buf []byte, values []Value) ([]byte, error) { tenc := getTimeEncoder(len(values)) venc := getUnsignedEncoder(len(values)) @@ -743,8 +554,8 @@ func encodeUnsignedBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc for _, v := range values { vv := v.(UnsignedValue) - tenc.Write(vv.unixnano) - venc.Write(int64(vv.value)) + tenc.Write(vv.UnixNano()) + venc.Write(int64(vv.RawValue())) } // Encoded timestamp values @@ -798,7 +609,7 @@ func DecodeUnsignedBlock(block []byte, a *[]UnsignedValue) ([]UnsignedValue, err // Decode both a timestamp and value j := 0 for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = UnsignedValue{unixnano: tdec.Read(), value: uint64(vdec.Read())} + a[j] = NewRawUnsignedValue(tdec.Read(), uint64(vdec.Read())) j++ } i = j @@ -818,35 +629,9 @@ func DecodeUnsignedBlock(block []byte, a *[]UnsignedValue) ([]UnsignedValue, err return (*a)[:i], err } -// StringValue represents a string value. -type StringValue struct { - unixnano int64 - value string -} - -// Value returns the underlying string value. -func (v StringValue) Value() interface{} { - return v.value -} - -// UnixNano returns the timestamp of the value. -func (v StringValue) UnixNano() int64 { - return v.unixnano -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v StringValue) Size() int { - return 8 + len(v.value) -} - -// String returns the string representation of the value and its timestamp. 
-func (v StringValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) -} - func encodeStringBlock(buf []byte, values []Value) ([]byte, error) { tenc := getTimeEncoder(len(values)) - venc := getStringEncoder(len(values) * len(values[0].(StringValue).value)) + venc := getStringEncoder(len(values) * len(values[0].(StringValue).RawValue())) b, err := encodeStringBlockUsing(buf, values, tenc, venc) @@ -862,8 +647,8 @@ func encodeStringBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc S for _, v := range values { vv := v.(StringValue) - tenc.Write(vv.unixnano) - venc.Write(vv.value) + tenc.Write(vv.UnixNano()) + venc.Write(vv.RawValue()) } // Encoded timestamp values @@ -920,7 +705,7 @@ func DecodeStringBlock(block []byte, a *[]StringValue) ([]StringValue, error) { // Decode both a timestamp and value j := 0 for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = StringValue{unixnano: tdec.Read(), value: vdec.Read()} + a[j] = NewRawStringValue(tdec.Read(), vdec.Read()) j++ } i = j diff --git a/tsdb/tsm1/engine.go b/tsdb/tsm1/engine.go index 71b68edea1..fbe751397b 100644 --- a/tsdb/tsm1/engine.go +++ b/tsdb/tsm1/engine.go @@ -4,11 +4,8 @@ package tsm1 // import "github.com/influxdata/influxdb/tsdb/tsm1" import ( "bytes" "context" - "errors" "fmt" - "io" "io/ioutil" - "math" "os" "path/filepath" "regexp" @@ -20,7 +17,6 @@ import ( "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/limiter" "github.com/influxdata/influxdb/pkg/metrics" "github.com/influxdata/influxdb/query" @@ -31,6 +27,7 @@ import ( "go.uber.org/zap" ) +//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@array_cursor.gen.go.tmpldata array_cursor.gen.go.tmpl array_cursor_iterator.gen.go.tmpl //go:generate env GO111MODULE=on go run github.com/influxdata/influxdb/tools/tmpl -i -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store.gen.go //go:generate env GO111MODULE=on go run github.com/influxdata/influxdb/tools/tmpl -i -d isArray=y -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store_array.gen.go //go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@encoding.gen.go.tmpldata encoding.gen.go.tmpl @@ -40,7 +37,6 @@ import ( var ( // Static objects to prevent small allocs. keyFieldSeparatorBytes = []byte(keyFieldSeparator) - emptyBytes = []byte{} ) var ( @@ -66,9 +62,6 @@ const ( // that identifies a specific field in series keyFieldSeparator = "#!~#" - // deleteFlushThreshold is the size in bytes of a batch of series keys to delete. - deleteFlushThreshold = 50 * 1024 * 1024 - // MaxPointsPerBlock is the maximum number of points in an encoded block in a TSM file MaxPointsPerBlock = 1000 ) @@ -77,34 +70,41 @@ const ( // an Engine. type EngineOption func(i *Engine) -// WithWAL sets the WAL for the Engine -var WithWAL = func(wal Log) EngineOption { - // be defensive: it's very easy to pass in a nil WAL here - // which will panic. Set any nil WALs to the NopWAL. - if pwal, _ := wal.(*WAL); pwal == nil { - wal = NopWAL{} - } - - return func(e *Engine) { - e.WAL = wal - } -} - // WithTraceLogging sets if trace logging is enabled for the engine. -var WithTraceLogging = func(logging bool) EngineOption { +func WithTraceLogging(logging bool) EngineOption { return func(e *Engine) { e.FileStore.enableTraceLogging(logging) } } // WithCompactionPlanner sets the compaction planner for the engine. 
-var WithCompactionPlanner = func(planner CompactionPlanner) EngineOption { +func WithCompactionPlanner(planner CompactionPlanner) EngineOption { return func(e *Engine) { planner.SetFileStore(e.FileStore) e.CompactionPlan = planner } } +// Snapshotter allows upward signaling of the tsm1 engine to the storage engine. Hopefully +// it can be removed one day. The weird interface is due to the weird inversion of locking +// that has to happen. +type Snapshotter interface { + AcquireSegments(func(segments []string) error) error + CommitSegments(segments []string, fn func() error) error +} + +type noSnapshotter struct{} + +func (noSnapshotter) AcquireSegments(fn func([]string) error) error { return fn(nil) } +func (noSnapshotter) CommitSegments(_ []string, fn func() error) error { return fn() } + +// WithSnapshotter sets the callbacks for the engine to use when creating snapshots. +func WithSnapshotter(snapshotter Snapshotter) EngineOption { + return func(e *Engine) { + e.snapshotter = snapshotter + } +} + // Engine represents a storage engine with compressed blocks. type Engine struct { mu sync.RWMutex @@ -132,7 +132,6 @@ type Engine struct { traceLogger *zap.Logger // Logger to be used when trace-logging is on. traceLogging bool - WAL Log Cache *Cache Compactor *Compactor CompactionPlan CompactionPlanner @@ -161,7 +160,8 @@ type Engine struct { // Limiter for concurrent compactions. compactionLimiter limiter.Fixed - scheduler *scheduler + scheduler *scheduler + snapshotter Snapshotter } // NewEngine returns a new instance of Engine. @@ -207,7 +207,6 @@ func NewEngine(path string, idx *tsi1.Index, config Config, options ...EngineOpt logger: logger, traceLogger: logger, - WAL: NopWAL{}, Cache: cache, FileStore: fs, @@ -221,6 +220,7 @@ func NewEngine(path string, idx *tsi1.Index, config Config, options ...EngineOpt formatFileName: DefaultFormatFileName, compactionLimiter: limiter.NewFixed(maxCompactions), scheduler: newScheduler(maxCompactions), + snapshotter: new(noSnapshotter), } for _, option := range options { @@ -477,27 +477,11 @@ func (e *Engine) SeriesN() int64 { return e.index.SeriesN() } -// LastModified returns the time when this shard was last modified. -func (e *Engine) LastModified() time.Time { - fsTime := e.FileStore.LastModified() - - if e.WAL.LastWriteTime().After(fsTime) { - return e.WAL.LastWriteTime() - } - return fsTime -} - // MeasurementStats returns the current measurement stats for the engine. func (e *Engine) MeasurementStats() (MeasurementStats, error) { return e.FileStore.MeasurementStats() } -// DiskSize returns the total size in bytes of all TSM and WAL segments on disk. -func (e *Engine) DiskSize() int64 { - walDiskSizeBytes := e.WAL.DiskSizeBytes() - return e.FileStore.DiskSizeBytes() + walDiskSizeBytes -} - func (e *Engine) initTrackers() { mmu.Lock() defer mmu.Unlock() @@ -512,10 +496,6 @@ func (e *Engine) initTrackers() { e.FileStore.tracker = newFileTracker(bms.fileMetrics, e.defaultMetricLabels) e.Cache.tracker = newCacheTracker(bms.cacheMetrics, e.defaultMetricLabels) - // Set default metrics on WAL if enabled. 
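The Snapshotter indirection introduced above replaces the engine's direct WAL calls in WriteSnapshot (removed later in this diff). A hypothetical WAL-backed implementation, assuming the relocated wal package keeps the CloseSegment/ClosedSegments/Remove methods the deleted engine code called; walSnapshotter is illustrative, not part of this change:

```go
// walSnapshotter pairs the engine's snapshot with WAL segment rotation: close
// and collect segments before the cache snapshot is taken, and delete them
// only once the snapshot is durably in the file store.
type walSnapshotter struct{ wal *wal.WAL }

func (s *walSnapshotter) AcquireSegments(fn func(segments []string) error) error {
	if err := s.wal.CloseSegment(); err != nil {
		return err
	}
	segments, err := s.wal.ClosedSegments()
	if err != nil {
		return err
	}
	return fn(segments)
}

func (s *walSnapshotter) CommitSegments(segments []string, fn func() error) error {
	if err := fn(); err != nil {
		return err // snapshot failed; keep the segments for replay
	}
	return s.wal.Remove(segments)
}
```

It would be wired in through the new option, e.g. NewEngine(path, idx, config, WithSnapshotter(&walSnapshotter{wal: w})); engines built without the option get noSnapshotter, which supplies no segments and commits unconditionally.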
- if wal, ok := e.WAL.(*WAL); ok { - wal.tracker = newWALTracker(bms.walMetrics, e.defaultMetricLabels) - } e.scheduler.setCompactionTracker(e.compactionTracker) } @@ -531,18 +511,10 @@ func (e *Engine) Open() error { return err } - if err := e.WAL.Open(); err != nil { - return err - } - if err := e.FileStore.Open(); err != nil { return err } - if err := e.reloadCache(); err != nil { - return err - } - e.Compactor.Open() if e.enableCompactionsOnOpen { @@ -564,7 +536,8 @@ func (e *Engine) Close() error { if err := e.FileStore.Close(); err != nil { return err } - return e.WAL.Close() + + return nil } // WithLogger sets the logger for the engine. @@ -575,10 +548,6 @@ func (e *Engine) WithLogger(log *zap.Logger) { e.traceLogger = e.logger } - if wal, ok := e.WAL.(*WAL); ok { - wal.WithLogger(e.logger) - } - e.FileStore.WithLogger(e.logger) } @@ -595,435 +564,33 @@ func (e *Engine) Free() error { return e.FileStore.Free() } -// WritePoints writes metadata and point data into the engine. -// It returns an error if new points are added to an existing key. +// WritePoints saves the set of points in the engine. func (e *Engine) WritePoints(points []models.Point) error { - values := make(map[string][]Value, len(points)) - var ( - keyBuf []byte - baseLen int - ) - - for _, p := range points { - keyBuf = append(keyBuf[:0], p.Key()...) - keyBuf = append(keyBuf, keyFieldSeparator...) - baseLen = len(keyBuf) - iter := p.FieldIterator() - t := p.Time().UnixNano() - for iter.Next() { - keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...) - - var v Value - switch iter.Type() { - case models.Float: - fv, err := iter.FloatValue() - if err != nil { - return err - } - v = NewFloatValue(t, fv) - case models.Integer: - iv, err := iter.IntegerValue() - if err != nil { - return err - } - v = NewIntegerValue(t, iv) - case models.Unsigned: - iv, err := iter.UnsignedValue() - if err != nil { - return err - } - v = NewUnsignedValue(t, iv) - case models.String: - v = NewStringValue(t, iter.StringValue()) - case models.Boolean: - bv, err := iter.BooleanValue() - if err != nil { - return err - } - v = NewBooleanValue(t, bv) - default: - return fmt.Errorf("unknown field type for %s: %s", string(iter.FieldKey()), p.String()) - } - values[string(keyBuf)] = append(values[string(keyBuf)], v) - } + values, err := PointsToValues(points) + if err != nil { + return err } + return e.WriteValues(values) +} + +// WriteValues saves the set of values in the engine. +func (e *Engine) WriteValues(values map[string][]Value) error { e.mu.RLock() defer e.mu.RUnlock() - // first try to write to the cache if err := e.Cache.WriteMulti(values); err != nil { return err } - // Then make the write durable in the cache. - if _, err := e.WAL.WriteMulti(values); err != nil { - return err - } - return nil } -// DeleteSeriesRange removes the values between min and max (inclusive) from all series -func (e *Engine) DeleteSeriesRange(itr tsdb.SeriesIterator, min, max int64) error { - return e.DeleteSeriesRangeWithPredicate(itr, func(name []byte, tags models.Tags) (int64, int64, bool) { - return min, max, true - }) -} - -// DeleteSeriesRangeWithPredicate removes the values between min and max (inclusive) from all series -// for which predicate() returns true. If predicate() is nil, then all values in range are removed. 
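Back on the write path above: WritePoints is now a thin wrapper over two smaller pieces, which lets callers that already hold decoded values, a WAL replay path for instance, skip point parsing entirely. A sketch of the long-hand form; PointsToValues is the factored-out conversion named in the new WritePoints body, and the literal point is illustrative:

```go
// Long-hand equivalent of e.WritePoints(points): decode each point's fields
// into per-series values, then stage them in the cache. Durability is no
// longer the engine's concern; the WAL write now happens upstream.
points, err := models.ParsePointsString(`cpu,host=A value=1.1 1000000000`)
if err != nil {
	return err
}
values, err := PointsToValues(points) // map[string][]Value keyed by series key + field
if err != nil {
	return err
}
return e.WriteValues(values)
```

The same split is what makes the removal of reloadCache (later in this diff) workable: whichever component owns the WAL can replay its segments through WriteValues without the engine knowing segment file names.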
-func (e *Engine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, predicate func(name []byte, tags models.Tags) (int64, int64, bool)) error { - var disableOnce bool - - // Ensure that the index does not compact away the measurement or series we're - // going to delete before we're done with them. - e.index.DisableCompactions() - defer e.index.EnableCompactions() - e.index.Wait() - - fs, err := e.index.RetainFileSet() - if err != nil { - return err - } - defer fs.Release() - - var ( - sz int - min, max int64 = math.MinInt64, math.MaxInt64 - - // Indicator that the min/max time for the current batch has changed and - // we need to flush the current batch before appending to it. - flushBatch bool - ) - - // These are reversed from min/max to ensure they are different the first time through. - newMin, newMax := int64(math.MaxInt64), int64(math.MinInt64) - - // There is no predicate, so setup newMin/newMax to delete the full time range. - if predicate == nil { - newMin = min - newMax = max - } - - batch := make([][]byte, 0, 10000) - for { - elem, err := itr.Next() - if err != nil { - return err - } else if elem == nil { - break - } - - // See if the series should be deleted and if so, what range of time. - if predicate != nil { - var shouldDelete bool - newMin, newMax, shouldDelete = predicate(elem.Name(), elem.Tags()) - if !shouldDelete { - continue - } - - // If the min/max happens to change for the batch, we need to flush - // the current batch and start a new one. - flushBatch = (min != newMin || max != newMax) && len(batch) > 0 - } - - if elem.Expr() != nil { - if v, ok := elem.Expr().(*influxql.BooleanLiteral); !ok || !v.Val { - return errors.New("fields not supported in WHERE clause during deletion") - } - } - - if !disableOnce { - // Disable and abort running compactions so that tombstones added existing tsm - // files don't get removed. This would cause deleted measurements/series to - // re-appear once the compaction completed. We only disable the level compactions - // so that snapshotting does not stop while writing out tombstones. If it is stopped, - // and writing tombstones takes a long time, writes can get rejected due to the cache - // filling up. - e.disableLevelCompactions(true) - defer e.enableLevelCompactions(true) - - e.sfile.DisableCompactions() - defer e.sfile.EnableCompactions() - e.sfile.Wait() - - disableOnce = true - } - - if sz >= deleteFlushThreshold || flushBatch { - // Delete all matching batch. - if err := e.deleteSeriesRange(batch, min, max); err != nil { - return err - } - batch = batch[:0] - sz = 0 - flushBatch = false - } - - // Use the new min/max time for the next iteration - min = newMin - max = newMax - - key := models.MakeKey(elem.Name(), elem.Tags()) - sz += len(key) - batch = append(batch, key) - } - - if len(batch) > 0 { - // Delete all matching batch. - if err := e.deleteSeriesRange(batch, min, max); err != nil { - return err - } - } - - e.index.Rebuild() - return nil -} - -// deleteSeriesRange removes the values between min and max (inclusive) from all series. This -// does not update the index or disable compactions. This should mainly be called by DeleteSeriesRange -// and not directly. -func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64) error { - if len(seriesKeys) == 0 { - return nil - } - - // Ensure keys are sorted since lower layers require them to be. 
- if !bytesutil.IsSorted(seriesKeys) { - bytesutil.Sort(seriesKeys) - } - - // Min and max time in the engine are slightly different from the query language values. - if min == influxql.MinTime { - min = math.MinInt64 - } - if max == influxql.MaxTime { - max = math.MaxInt64 - } - - // Run the delete on each TSM file in parallel - if err := e.FileStore.Apply(func(r TSMFile) error { - // See if this TSM file contains the keys and time range - minKey, maxKey := seriesKeys[0], seriesKeys[len(seriesKeys)-1] - tsmMin, tsmMax := r.KeyRange() - - tsmMin, _ = SeriesAndFieldFromCompositeKey(tsmMin) - tsmMax, _ = SeriesAndFieldFromCompositeKey(tsmMax) - - overlaps := bytes.Compare(tsmMin, maxKey) <= 0 && bytes.Compare(tsmMax, minKey) >= 0 - if !overlaps || !r.OverlapsTimeRange(min, max) { - return nil - } - - // Delete each key we find in the file. We seek to the min key and walk from there. - batch := r.BatchDelete() - iter := r.Iterator(minKey) - var j int - for iter.Next() { - indexKey := iter.Key() - seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey) - - for j < len(seriesKeys) && bytes.Compare(seriesKeys[j], seriesKey) < 0 { - j++ - } - - if j >= len(seriesKeys) { - break - } - if bytes.Equal(seriesKeys[j], seriesKey) { - if err := batch.DeleteRange([][]byte{indexKey}, min, max); err != nil { - batch.Rollback() - return err - } - } - } - if err := iter.Err(); err != nil { - batch.Rollback() - return err - } - - return batch.Commit() - }); err != nil { - return err - } - - // find the keys in the cache and remove them - deleteKeys := make([][]byte, 0, len(seriesKeys)) - - // ApplySerialEntryFn cannot return an error in this invocation. - _ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error { - seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k)) - - // Cache does not walk keys in sorted order, so search the sorted - // series we need to delete to see if any of the cache keys match. - i := bytesutil.SearchBytes(seriesKeys, seriesKey) - if i < len(seriesKeys) && bytes.Equal(seriesKey, seriesKeys[i]) { - // k is the measurement + tags + sep + field - deleteKeys = append(deleteKeys, k) - } - return nil - }) - - // Sort the series keys because ApplyEntryFn iterates over the keys randomly. - bytesutil.Sort(deleteKeys) - - e.Cache.DeleteRange(deleteKeys, min, max) - - // delete from the WAL - if _, err := e.WAL.DeleteRange(deleteKeys, min, max); err != nil { - return err - } - - // The series are deleted on disk, but the index may still say they exist. - // Depending on the the min,max time passed in, the series may or not actually - // exists now. To reconcile the index, we walk the series keys that still exists - // on disk and cross out any keys that match the passed in series. Any series - // left in the slice at the end do not exist and can be deleted from the index. - // Note: this is inherently racy if writes are occurring to the same measurement/series are - // being removed. A write could occur and exist in the cache at this point, but we - // would delete it from the index. - minKey := seriesKeys[0] - - // Apply runs this func concurrently. The seriesKeys slice is mutated concurrently - // by different goroutines setting positions to nil. - if err := e.FileStore.Apply(func(r TSMFile) error { - var j int - - // Start from the min deleted key that exists in this file. 
- iter := r.Iterator(minKey) - for iter.Next() { - if j >= len(seriesKeys) { - return nil - } - - indexKey := iter.Key() - seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey) - - // Skip over any deleted keys that are less than our tsm key - cmp := bytes.Compare(seriesKeys[j], seriesKey) - for j < len(seriesKeys) && cmp < 0 { - j++ - if j >= len(seriesKeys) { - return nil - } - cmp = bytes.Compare(seriesKeys[j], seriesKey) - } - - // We've found a matching key, cross it out so we do not remove it from the index. - if j < len(seriesKeys) && cmp == 0 { - seriesKeys[j] = emptyBytes - j++ - } - } - - return iter.Err() - }); err != nil { - return err - } - - // Have we deleted all values for the series? If so, we need to remove - // the series from the index. - if len(seriesKeys) > 0 { - buf := make([]byte, 1024) // For use when accessing series file. - ids := tsdb.NewSeriesIDSet() - measurements := make(map[string]struct{}, 1) - - for _, k := range seriesKeys { - if len(k) == 0 { - continue // This key was wiped because it shouldn't be removed from index. - } - - name, tags := models.ParseKeyBytes(k) - sid := e.sfile.SeriesID(name, tags, buf) - if sid.IsZero() { - continue - } - - // See if this series was found in the cache earlier - i := bytesutil.SearchBytes(deleteKeys, k) - - var hasCacheValues bool - // If there are multiple fields, they will have the same prefix. If any field - // has values, then we can't delete it from the index. - for i < len(deleteKeys) && bytes.HasPrefix(deleteKeys[i], k) { - if e.Cache.Values(deleteKeys[i]).Len() > 0 { - hasCacheValues = true - break - } - i++ - } - - if hasCacheValues { - continue - } - - measurements[string(name)] = struct{}{} - // Remove the series from the local index. - if err := e.index.DropSeries(sid, k, false); err != nil { - return err - } - - // Add the id to the set of delete ids. - ids.Add(sid) - } - - for k := range measurements { - if err := e.index.DropMeasurementIfSeriesNotExist([]byte(k)); err != nil { - return err - } - } - - // Remove the remaining ids from the series file as they no longer exist - // in any shard. - var err error - ids.ForEach(func(id tsdb.SeriesID) { - if err1 := e.sfile.DeleteSeriesID(id); err1 != nil { - err = err1 - } - }) - if err != nil { - return err - } - } - - return nil -} - -// DeleteMeasurement deletes a measurement and all related series. -func (e *Engine) DeleteMeasurement(name []byte) error { - // Delete the bulk of data outside of the fields lock. - if err := e.deleteMeasurement(name); err != nil { - return err - } - return nil -} - -// DeleteMeasurement deletes a measurement and all related series. -func (e *Engine) deleteMeasurement(name []byte) error { - // Attempt to find the series keys. - itr, err := e.index.MeasurementSeriesIDIterator(name) - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - return e.DeleteSeriesRange(tsdb.NewSeriesIteratorAdapter(e.sfile, itr), math.MinInt64, math.MaxInt64) -} - // ForEachMeasurementName iterates over each measurement name in the engine. func (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error { return e.index.ForEachMeasurementName(fn) } -func (e *Engine) CreateSeriesListIfNotExists(collection *tsdb.SeriesCollection) error { - return e.index.CreateSeriesListIfNotExists(collection) -} - -// WriteTo is not implemented. -func (e *Engine) WriteTo(w io.Writer) (n int64, err error) { panic("not implemented") } - // compactionLevel describes a snapshot or levelled compaction. 
type compactionLevel int @@ -1194,24 +761,18 @@ func (e *Engine) WriteSnapshot() error { logEnd() }() - closedFiles, snapshot, err := func() (segments []string, snapshot *Cache, err error) { + var ( + snapshot *Cache + segments []string + ) + if err := e.snapshotter.AcquireSegments(func(segs []string) (err error) { + segments = segs + e.mu.Lock() - defer e.mu.Unlock() - - if err = e.WAL.CloseSegment(); err != nil { - return nil, nil, err - } - - segments, err = e.WAL.ClosedSegments() - if err != nil { - return nil, nil, err - } - snapshot, err = e.Cache.Snapshot() - return segments, snapshot, err - }() - - if err != nil { + e.mu.Unlock() + return err + }); err != nil { return err } @@ -1229,11 +790,11 @@ func (e *Engine) WriteSnapshot() error { zap.String("path", e.path), zap.Duration("duration", time.Since(dedup))) - return e.writeSnapshotAndCommit(log, closedFiles, snapshot) + return e.writeSnapshotAndCommit(log, snapshot, segments) } // writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments. -func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, closedFiles []string, snapshot *Cache) (err error) { +func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, snapshot *Cache, segments []string) (err error) { defer func() { if err != nil { e.Cache.ClearSnapshot(false) @@ -1247,23 +808,20 @@ func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, closedFiles []string, s return err } - e.mu.RLock() - defer e.mu.RUnlock() + return e.snapshotter.CommitSegments(segments, func() error { + e.mu.RLock() + defer e.mu.RUnlock() - // update the file store with these new files - if err := e.FileStore.Replace(nil, newFiles); err != nil { - log.Info("Error adding new TSM files from snapshot", zap.Error(err)) - return err - } + // update the file store with these new files + if err := e.FileStore.Replace(nil, newFiles); err != nil { + log.Info("Error adding new TSM files from snapshot", zap.Error(err)) + return err + } - // clear the snapshot from the in-memory cache, then the old WAL files - e.Cache.ClearSnapshot(true) - - if err := e.WAL.Remove(closedFiles); err != nil { - log.Info("Error removing closed WAL segments", zap.Error(err)) - } - - return nil + // clear the snapshot from the in-memory cache + e.Cache.ClearSnapshot(true) + return nil + }) } // compactCache continually checks if the WAL cache should be written to disk. @@ -1568,32 +1126,6 @@ func (e *Engine) fullCompactionStrategy(group CompactionGroup, optimize bool) *c return s } -// reloadCache reads the WAL segment files and loads them into the cache. -func (e *Engine) reloadCache() error { - now := time.Now() - files, err := segmentFileNames(e.WAL.Path()) - if err != nil { - return err - } - - limit := e.Cache.MaxSize() - defer func() { - e.Cache.SetMaxSize(limit) - }() - - // Disable the max size during loading - e.Cache.SetMaxSize(0) - - loader := NewCacheLoader(files) - loader.WithLogger(e.logger) - if err := loader.Load(e.Cache); err != nil { - return err - } - - e.traceLogger.Info("Reloaded WAL cache", zap.String("path", e.WAL.Path()), zap.Duration("duration", time.Since(now))) - return nil -} - // cleanup removes all temp files and dirs that exist on disk. This should only be run at startup to avoid // removing tmp files that are still in use.
func (e *Engine) cleanup() error { diff --git a/tsdb/tsm1/engine_delete_measurement.go b/tsdb/tsm1/engine_delete_bucket.go similarity index 92% rename from tsdb/tsm1/engine_delete_measurement.go rename to tsdb/tsm1/engine_delete_bucket.go index 9a32886617..ee379a1673 100644 --- a/tsdb/tsm1/engine_delete_measurement.go +++ b/tsdb/tsm1/engine_delete_bucket.go @@ -11,10 +11,10 @@ import ( "github.com/influxdata/influxql" ) -// DeleteBucket removes all TSM data belonging to a bucket, and removes all index +// DeleteBucketRange removes all TSM data belonging to a bucket, and removes all index // and series file data associated with the bucket. The provided time range ensures // that only bucket data for that range is removed. -func (e *Engine) DeleteBucket(name []byte, min, max int64) error { +func (e *Engine) DeleteBucketRange(name []byte, min, max int64) error { // TODO(jeff): we need to block writes to this prefix while deletes are in progress // otherwise we can end up in a situation where we have staged data in the cache or // WAL that was deleted from the index, or worse. This needs to happen at a higher @@ -76,9 +76,6 @@ func (e *Engine) DeleteBucket(name []byte, min, max int64) error { return err } - // TODO(jeff): add a DeletePrefix to the Cache and WAL. - // TODO(jeff): add a Tombstone entry into the WAL for deletes. - var deleteKeys [][]byte // ApplyEntryFn cannot return an error in this invocation. @@ -99,11 +96,8 @@ func (e *Engine) DeleteBucket(name []byte, min, max int64) error { // Sort the series keys because ApplyEntryFn iterates over the keys randomly. bytesutil.Sort(deleteKeys) - // Delete from the cache and WAL. - e.Cache.DeleteRange(deleteKeys, min, max) - if _, err := e.WAL.DeleteRange(deleteKeys, min, max); err != nil { - return err - } + // Delete from the cache. + e.Cache.DeleteBucketRange(name, min, max) // Now that all of the data is purged, we need to find if some keys are fully deleted // and if so, remove them from the index. @@ -138,6 +132,14 @@ func (e *Engine) DeleteBucket(name []byte, min, max int64) error { return err } + // ApplyEntryFn cannot return an error in this invocation. + _ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error { + if bytes.HasPrefix(k, name) { + delete(possiblyDead.keys, string(k)) + } + return nil + }) + if len(possiblyDead.keys) > 0 { buf := make([]byte, 1024) diff --git a/tsdb/tsm1/engine_delete_measurement_test.go b/tsdb/tsm1/engine_delete_bucket_test.go similarity index 94% rename from tsdb/tsm1/engine_delete_measurement_test.go rename to tsdb/tsm1/engine_delete_bucket_test.go index c0b21bb378..755d116fd9 100644 --- a/tsdb/tsm1/engine_delete_measurement_test.go +++ b/tsdb/tsm1/engine_delete_bucket_test.go @@ -8,7 +8,7 @@ import ( "github.com/influxdata/influxdb/models" ) -func TestEngine_DeletePrefix(t *testing.T) { +func TestEngine_DeleteBucket(t *testing.T) { // Create a few points. p1 := MustParsePointString("cpu,host=0 value=1.1 6") p2 := MustParsePointString("cpu,host=A value=1.2 2") @@ -44,7 +44,7 @@ func TestEngine_DeletePrefix(t *testing.T) { t.Fatalf("series count mismatch: exp %v, got %v", exp, got) } - if err := e.DeleteBucket([]byte("cpu"), 0, 3); err != nil { + if err := e.DeleteBucketRange([]byte("cpu"), 0, 3); err != nil { t.Fatalf("failed to delete series: %v", err) } @@ -90,7 +90,7 @@ func TestEngine_DeletePrefix(t *testing.T) { iter.Close() // Deleting remaining series should remove them from the index.
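Because DeleteBucketRange matches series keys by the bucket-name prefix, one entry point covers both full-bucket drops and retention-style trims; the rewritten tests below exercise both shapes. A hedged usage sketch (engine and bucketName are stand-ins):

```go
// Drop everything the bucket ever wrote: full time range, prefix match.
if err := engine.DeleteBucketRange(bucketName, math.MinInt64, math.MaxInt64); err != nil {
	return err
}

// Retention-style trim: points at or before the cutoff are removed, and
// series that still have newer data survive in the cache and the index.
cutoff := time.Now().Add(-30 * 24 * time.Hour).UnixNano()
if err := engine.DeleteBucketRange(bucketName, math.MinInt64, cutoff); err != nil {
	return err
}
```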
- if err := e.DeleteBucket([]byte("cpu"), 0, 9); err != nil { + if err := e.DeleteBucketRange([]byte("cpu"), 0, 9); err != nil { t.Fatalf("failed to delete series: %v", err) } diff --git a/tsdb/tsm1/engine_test.go b/tsdb/tsm1/engine_test.go index 5b6e4c0149..26e94b5e8e 100644 --- a/tsdb/tsm1/engine_test.go +++ b/tsdb/tsm1/engine_test.go @@ -1,7 +1,6 @@ package tsm1_test import ( - "bytes" "fmt" "io/ioutil" "math" @@ -59,8 +58,9 @@ func TestIndex_SeriesIDSet(t *testing.T) { } // Drop all the series for the gpu measurement and they should no longer - // be in the series ID set. - if err := engine.DeleteMeasurement([]byte("gpu")); err != nil { + // be in the series ID set. This relies on the fact that DeleteBucketRange is really + // operating on prefixes. + if err := engine.DeleteBucketRange([]byte("gpu"), math.MinInt64, math.MaxInt64); err != nil { t.Fatal(err) } @@ -72,17 +72,6 @@ func TestIndex_SeriesIDSet(t *testing.T) { delete(seriesIDMap, "gpu") delete(seriesIDMap, "gpu,host=b") - // Drop the specific mem series - ditr := &seriesIterator{keys: [][]byte{[]byte("mem,host=z")}} - if err := engine.DeleteSeriesRange(ditr, math.MinInt64, math.MaxInt64); err != nil { - t.Fatal(err) - } - - if engine.SeriesIDSet().Contains(seriesIDMap["mem,host=z"]) { - t.Fatalf("bitmap does not contain ID: %d for key %s, but should", seriesIDMap["mem,host=z"], "mem,host=z") - } - delete(seriesIDMap, "mem,host=z") - // The rest of the keys should still be in the set. for key, id := range seriesIDMap { if !engine.SeriesIDSet().Contains(id) { @@ -106,589 +95,6 @@ func TestIndex_SeriesIDSet(t *testing.T) { } } -// Ensures that deleting series from TSM files with multiple fields removes all the -/// series -func TestEngine_DeleteSeries(t *testing.T) { - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") - p2 := MustParsePointString("cpu,host=B value=1.2 2000000000") - p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000") - - e, err := NewEngine() - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(); err != nil { - t.Fatal(err) - } - defer e.Close() - - if err := e.writePoints(p1, p2, p3); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 3, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} - if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 1, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exp := "cpu,host=B#!~#value" - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys) - } -} - -func TestEngine_DeleteSeriesRange(t *testing.T) { - // Create a few points. 
- p1 := MustParsePointString("cpu,host=0 value=1.1 6000000000") // Should not be deleted - p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") - p3 := MustParsePointString("cpu,host=A value=1.3 3000000000") - p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") // Should not be deleted - p5 := MustParsePointString("cpu,host=B value=1.3 5000000000") // Should not be deleted - p6 := MustParsePointString("cpu,host=C value=1.3 1000000000") - p7 := MustParsePointString("mem,host=C value=1.3 1000000000") // Should not be deleted - p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted - - e, err := NewEngine() - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(); err != nil { - t.Fatal(err) - } - defer e.Close() - - if err := e.writePoints(p1, p2, p3, p4, p5, p6, p7, p8); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 6, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=0"), []byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C")}} - if err := e.DeleteSeriesRange(itr, 0, 3000000000); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 4, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exp := "cpu,host=B#!~#value" - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys) - } - - // Check that the series still exists in the index - iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } - defer iter.Close() - - elem, err := iter.Next() - if err != nil { - t.Fatal(err) - } - if elem.SeriesID.IsZero() { - t.Fatalf("series index mismatch: EOF, exp 2 series") - } - - // Lookup series. - name, tags := e.sfile.Series(elem.SeriesID) - if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } - - if !tags.Equal(models.NewTags(map[string]string{"host": "0"})) && !tags.Equal(models.NewTags(map[string]string{"host": "B"})) { - t.Fatalf(`series mismatch: got %s, exp either "host=0" or "host=B"`, tags) - } - iter.Close() - - // Deleting remaining series should remove them from the series. - itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=0"), []byte("cpu,host=B")}} - if err := e.DeleteSeriesRange(itr, 0, 9000000000); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - if iter, err = e.index.MeasurementSeriesIDIterator([]byte("cpu")); err != nil { - t.Fatalf("iterator error: %v", err) - } - if iter == nil { - return - } - - defer iter.Close() - if elem, err = iter.Next(); err != nil { - t.Fatal(err) - } - if !elem.SeriesID.IsZero() { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } -} - -func TestEngine_DeleteSeriesRangeWithPredicate(t *testing.T) { - // Create a few points. 
- p1 := MustParsePointString("cpu,host=A value=1.1 6000000000") // Should not be deleted - p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") // Should not be deleted - p3 := MustParsePointString("cpu,host=B value=1.3 3000000000") - p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") - p5 := MustParsePointString("cpu,host=C value=1.3 5000000000") // Should not be deleted - p6 := MustParsePointString("mem,host=B value=1.3 1000000000") - p7 := MustParsePointString("mem,host=C value=1.3 1000000000") - p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted - - e, err := NewEngine() - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(); err != nil { - t.Fatal(err) - } - defer e.Close() - - if err := e.writePoints(p1, p2, p3, p4, p5, p6, p7, p8); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 6, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C"), []byte("mem,host=B"), []byte("mem,host=C")}} - predicate := func(name []byte, tags models.Tags) (int64, int64, bool) { - if bytes.Equal(name, []byte("mem")) { - return math.MinInt64, math.MaxInt64, true - } - if bytes.Equal(name, []byte("cpu")) { - for _, tag := range tags { - if bytes.Equal(tag.Key, []byte("host")) && bytes.Equal(tag.Value, []byte("B")) { - return math.MinInt64, math.MaxInt64, true - } - } - } - return math.MinInt64, math.MaxInt64, false - } - if err := e.DeleteSeriesRangeWithPredicate(itr, predicate); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 3, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exps := []string{"cpu,host=A#!~#value", "cpu,host=C#!~#value", "disk,host=C#!~#value"} - for _, exp := range exps { - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exps, keys) - } - } - - // Check that the series still exists in the index - iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } - defer iter.Close() - - elem, err := iter.Next() - if err != nil { - t.Fatal(err) - } - if elem.SeriesID.IsZero() { - t.Fatalf("series index mismatch: EOF, exp 2 series") - } - - // Lookup series. - name, tags := e.sfile.Series(elem.SeriesID) - if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } - - if !tags.Equal(models.NewTags(map[string]string{"host": "A"})) && !tags.Equal(models.NewTags(map[string]string{"host": "C"})) { - t.Fatalf(`series mismatch: got %s, exp either "host=A" or "host=C"`, tags) - } - iter.Close() - - // Deleting remaining series should remove them from the series. 
- itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=C")}} - if err := e.DeleteSeriesRange(itr, 0, 9000000000); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - if iter, err = e.index.MeasurementSeriesIDIterator([]byte("cpu")); err != nil { - t.Fatalf("iterator error: %v", err) - } - if iter == nil { - return - } - - defer iter.Close() - if elem, err = iter.Next(); err != nil { - t.Fatal(err) - } - if !elem.SeriesID.IsZero() { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } -} - -// Tests that a nil predicate deletes all values returned from the series iterator. -func TestEngine_DeleteSeriesRangeWithPredicate_Nil(t *testing.T) { - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 6000000000") // Should not be deleted - p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") // Should not be deleted - p3 := MustParsePointString("cpu,host=B value=1.3 3000000000") - p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") - p5 := MustParsePointString("cpu,host=C value=1.3 5000000000") // Should not be deleted - p6 := MustParsePointString("mem,host=B value=1.3 1000000000") - p7 := MustParsePointString("mem,host=C value=1.3 1000000000") - p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted - - e, err := NewEngine() - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(); err != nil { - t.Fatal(err) - } - defer e.Close() - - if err := e.writePoints(p1, p2, p3, p4, p5, p6, p7, p8); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 6, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C"), []byte("mem,host=B"), []byte("mem,host=C")}} - if err := e.DeleteSeriesRangeWithPredicate(itr, nil); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 1, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - // Check that the series still exists in the index - iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } else if iter == nil { - return - } - defer iter.Close() - - if elem, err := iter.Next(); err != nil { - t.Fatal(err) - } else if !elem.SeriesID.IsZero() { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } - - // Check that disk series still exists - iter, err = e.index.MeasurementSeriesIDIterator([]byte("disk")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } else if iter == nil { - return - } - defer iter.Close() - - if elem, err := iter.Next(); err != nil { - t.Fatal(err) - } else if elem.SeriesID.IsZero() { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } -} - -func TestEngine_DeleteSeriesRangeWithPredicate_FlushBatch(t *testing.T) { - // Create a few points. 
- p1 := MustParsePointString("cpu,host=A value=1.1 6000000000") // Should not be deleted - p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") // Should not be deleted - p3 := MustParsePointString("cpu,host=B value=1.3 3000000000") - p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") - p5 := MustParsePointString("cpu,host=C value=1.3 5000000000") // Should not be deleted - p6 := MustParsePointString("mem,host=B value=1.3 1000000000") - p7 := MustParsePointString("mem,host=C value=1.3 1000000000") - p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted - - e, err := NewEngine() - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(); err != nil { - t.Fatal(err) - } - defer e.Close() - - if err := e.writePoints(p1, p2, p3, p4, p5, p6, p7, p8); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 6, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C"), []byte("mem,host=B"), []byte("mem,host=C")}} - predicate := func(name []byte, tags models.Tags) (int64, int64, bool) { - if bytes.Equal(name, []byte("mem")) { - return 1000000000, 1000000000, true - } - - if bytes.Equal(name, []byte("cpu")) { - for _, tag := range tags { - if bytes.Equal(tag.Key, []byte("host")) && bytes.Equal(tag.Value, []byte("B")) { - return 3000000000, 4000000000, true - } - } - } - return math.MinInt64, math.MaxInt64, false - } - if err := e.DeleteSeriesRangeWithPredicate(itr, predicate); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 3, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exps := []string{"cpu,host=A#!~#value", "cpu,host=C#!~#value", "disk,host=C#!~#value"} - for _, exp := range exps { - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exps, keys) - } - } - - // Check that the series still exists in the index - iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } - defer iter.Close() - - elem, err := iter.Next() - if err != nil { - t.Fatal(err) - } - if elem.SeriesID.IsZero() { - t.Fatalf("series index mismatch: EOF, exp 2 series") - } - - // Lookup series. - name, tags := e.sfile.Series(elem.SeriesID) - if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } - - if !tags.Equal(models.NewTags(map[string]string{"host": "A"})) && !tags.Equal(models.NewTags(map[string]string{"host": "C"})) { - t.Fatalf(`series mismatch: got %s, exp either "host=A" or "host=C"`, tags) - } - iter.Close() - - // Deleting remaining series should remove them from the series. 
- itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=C")}} - if err := e.DeleteSeriesRange(itr, 0, 9000000000); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - if iter, err = e.index.MeasurementSeriesIDIterator([]byte("cpu")); err != nil { - t.Fatalf("iterator error: %v", err) - } - if iter == nil { - return - } - - defer iter.Close() - if elem, err = iter.Next(); err != nil { - t.Fatal(err) - } - if !elem.SeriesID.IsZero() { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } -} - -func TestEngine_DeleteSeriesRange_OutsideTime(t *testing.T) { - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") // Should not be deleted - - e, err := NewEngine() - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(); err != nil { - t.Fatal(err) - } - defer e.Close() - - if err := e.writePoints(p1); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 1, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} - if err := e.DeleteSeriesRange(itr, 0, 0); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 1, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exp := "cpu,host=A#!~#value" - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys) - } - - // Check that the series still exists in the index - iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } - defer iter.Close() - - elem, err := iter.Next() - if err != nil { - t.Fatal(err) - } - if elem.SeriesID.IsZero() { - t.Fatalf("series index mismatch: EOF, exp 1 series") - } - - // Lookup series. - name, tags := e.sfile.Series(elem.SeriesID) - if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } - - if got, exp := tags, models.NewTags(map[string]string{"host": "A"}); !got.Equal(exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } -} - -func TestEngine_LastModified(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") - p2 := MustParsePointString("cpu,host=B value=1.2 2000000000") - p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000") - - e, err := NewEngine() - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - e.SetEnabled(false) - if err := e.Open(); err != nil { - t.Fatal(err) - } - defer e.Close() - - if err := e.writePoints(p1, p2, p3); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - lm := e.LastModified() - if lm.IsZero() { - t.Fatalf("expected non-zero time, got %v", lm.UTC()) - } - e.SetEnabled(true) - - // Artificial sleep added due to filesystems caching the mod time - // of files. This prevents the WAL last modified time from being - // returned and newer than the filestore's mod time. - time.Sleep(2 * time.Second) // Covers most filesystems. 
- - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - lm2 := e.LastModified() - - if got, exp := lm.Equal(lm2), false; exp != got { - t.Fatalf("expected time change, got %v, exp %v: %s == %s", got, exp, lm.String(), lm2.String()) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} - if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - lm3 := e.LastModified() - if got, exp := lm2.Equal(lm3), false; exp != got { - t.Fatalf("expected time change, got %v, exp %v", got, exp) - } -} - func TestEngine_SnapshotsDisabled(t *testing.T) { sfile := MustOpenSeriesFile() defer sfile.Close() @@ -1027,7 +433,7 @@ func (e *Engine) WritePointsString(ptstr ...string) error { func (e *Engine) writePoints(points ...models.Point) error { // Write into the index. collection := tsdb.NewSeriesCollection(points) - if err := e.CreateSeriesListIfNotExists(collection); err != nil { + if err := e.index.CreateSeriesListIfNotExists(collection); err != nil { return err } // Write the points into the cache/wal. @@ -1112,30 +518,3 @@ func (m *mockPlanner) Release(groups []tsm1.CompactionGroup) {} func (m *mockPlanner) FullyCompacted() bool { return false } func (m *mockPlanner) ForceFull() {} func (m *mockPlanner) SetFileStore(fs *tsm1.FileStore) {} - -type seriesIterator struct { - keys [][]byte -} - -type series struct { - name []byte - tags models.Tags - deleted bool -} - -func (s series) Name() []byte { return s.name } -func (s series) Tags() models.Tags { return s.tags } -func (s series) Deleted() bool { return s.deleted } -func (s series) Expr() influxql.Expr { return nil } - -func (itr *seriesIterator) Close() error { return nil } - -func (itr *seriesIterator) Next() (tsdb.SeriesElem, error) { - if len(itr.keys) == 0 { - return nil, nil - } - name, tags := models.ParseKeyBytes(itr.keys[0]) - s := series{name: name, tags: tags} - itr.keys = itr.keys[1:] - return s, nil -} diff --git a/tsdb/tsm1/metrics.go b/tsdb/tsm1/metrics.go index 0d2299ffd6..7677f4aaf4 100644 --- a/tsdb/tsm1/metrics.go +++ b/tsdb/tsm1/metrics.go @@ -25,7 +25,6 @@ func PrometheusCollectors() []prometheus.Collector { collectors = append(collectors, bms.compactionMetrics.PrometheusCollectors()...) collectors = append(collectors, bms.fileMetrics.PrometheusCollectors()...) collectors = append(collectors, bms.cacheMetrics.PrometheusCollectors()...) - collectors = append(collectors, bms.walMetrics.PrometheusCollectors()...) } return collectors } @@ -36,7 +35,6 @@ const namespace = "storage" const compactionSubsystem = "compactions" // sub-system associated with metrics for compactions. const fileStoreSubsystem = "tsm_files" // sub-system associated with metrics for TSM files. const cacheSubsystem = "cache" // sub-system associated with metrics for the cache. -const walSubsystem = "wal" // sub-system associated with metrics for the WAL. // blockMetrics are a set of metrics concerned with tracking data about block storage. type blockMetrics struct { @@ -44,7 +42,6 @@ type blockMetrics struct { *compactionMetrics *fileMetrics *cacheMetrics - *walMetrics } // newBlockMetrics initialises the prometheus metrics for the block subsystem. 
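With this change the WAL collectors leave the tsm1 block metrics, so the package-level PrometheusCollectors slice carries only the compaction, TSM-file, and cache collectors. A minimal sketch of how a caller might expose them over HTTP; the server wiring here is illustrative and not part of this diff:

```go
package main

import (
	"log"
	"net/http"

	"github.com/influxdata/influxdb/tsdb/tsm1"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// After this change the slice holds compaction, TSM-file, and cache
	// collectors only; the WAL collectors are no longer registered here.
	reg.MustRegister(tsm1.PrometheusCollectors()...)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```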
@@ -54,7 +51,6 @@ func newBlockMetrics(labels prometheus.Labels) *blockMetrics { compactionMetrics: newCompactionMetrics(labels), fileMetrics: newFileMetrics(labels), cacheMetrics: newCacheMetrics(labels), - walMetrics: newWALMetrics(labels), } } @@ -64,7 +60,6 @@ func (m *blockMetrics) PrometheusCollectors() []prometheus.Collector { metrics = append(metrics, m.compactionMetrics.PrometheusCollectors()...) metrics = append(metrics, m.fileMetrics.PrometheusCollectors()...) metrics = append(metrics, m.cacheMetrics.PrometheusCollectors()...) - metrics = append(metrics, m.walMetrics.PrometheusCollectors()...) return metrics } @@ -249,60 +244,3 @@ func (m *cacheMetrics) PrometheusCollectors() []prometheus.Collector { m.Writes, } } - -// walMetrics are a set of metrics concerned with tracking data about compactions. -type walMetrics struct { - OldSegmentBytes *prometheus.GaugeVec - CurrentSegmentBytes *prometheus.GaugeVec - Segments *prometheus.GaugeVec - Writes *prometheus.CounterVec -} - -// newWALMetrics initialises the prometheus metrics for tracking the WAL. -func newWALMetrics(labels prometheus.Labels) *walMetrics { - var names []string - for k := range labels { - names = append(names, k) - } - sort.Strings(names) - - writeNames := append(append([]string(nil), names...), "status") - sort.Strings(writeNames) - - return &walMetrics{ - OldSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: walSubsystem, - Name: "old_segment_bytes", - Help: "Number of bytes old WAL segments using on disk.", - }, names), - CurrentSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: walSubsystem, - Name: "current_segment_bytes", - Help: "Number of bytes TSM files using on disk.", - }, names), - Segments: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: walSubsystem, - Name: "segments_total", - Help: "Number of WAL segment files on disk.", - }, names), - Writes: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: walSubsystem, - Name: "writes_total", - Help: "Number of writes to the WAL.", - }, writeNames), - } -} - -// PrometheusCollectors satisfies the prom.PrometheusCollector interface. -func (m *walMetrics) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - m.OldSegmentBytes, - m.CurrentSegmentBytes, - m.Segments, - m.Writes, - } -} diff --git a/tsdb/tsm1/metrics_test.go b/tsdb/tsm1/metrics_test.go index 4b8dfb2a64..3372adf3f0 100644 --- a/tsdb/tsm1/metrics_test.go +++ b/tsdb/tsm1/metrics_test.go @@ -130,73 +130,6 @@ func TestMetrics_Cache(t *testing.T) { } } -func TestMetrics_WAL(t *testing.T) { - // metrics to be shared by multiple file stores. - metrics := newWALMetrics(prometheus.Labels{"engine_id": "", "node_id": ""}) - - t1 := newWALTracker(metrics, prometheus.Labels{"engine_id": "0", "node_id": "0"}) - t2 := newWALTracker(metrics, prometheus.Labels{"engine_id": "1", "node_id": "0"}) - - reg := prometheus.NewRegistry() - reg.MustRegister(metrics.PrometheusCollectors()...) - - base := namespace + "_" + walSubsystem + "_" - - // All the metric names - gauges := []string{ - base + "old_segment_bytes", - base + "current_segment_bytes", - base + "segments_total", - } - - counters := []string{ - base + "writes_total", - } - - // Generate some measurements. 
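The TestMetrics_WAL test being removed here (its body continues below) exercised a pattern the surviving metrics tests still use: one collector set is shared, and each tracker selects its own child series through labels. A generic sketch of that shared-GaugeVec pattern, with names copied from the deleted metrics purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One GaugeVec is shared; each engine selects a child via its labels.
	segments := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "storage",
		Subsystem: "wal",
		Name:      "segments_total",
		Help:      "Number of WAL segment files on disk.",
	}, []string{"engine_id", "node_id"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(segments)

	segments.With(prometheus.Labels{"engine_id": "0", "node_id": "0"}).Set(3)
	segments.With(prometheus.Labels{"engine_id": "1", "node_id": "0"}).Set(5)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	// One metric family with two label variants, mirroring t1/t2 above.
	fmt.Println(len(mfs), "family;", len(mfs[0].GetMetric()), "series")
}
```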
- for i, tracker := range []*walTracker{t1, t2} { - tracker.SetOldSegmentSize(uint64(i + len(gauges[0]))) - tracker.SetCurrentSegmentSize(uint64(i + len(gauges[1]))) - tracker.SetSegments(uint64(i + len(gauges[2]))) - - labels := tracker.Labels() - labels["status"] = "ok" - tracker.metrics.Writes.With(labels).Add(float64(i + len(counters[0]))) - } - - // Test that all the correct metrics are present. - mfs, err := reg.Gather() - if err != nil { - t.Fatal(err) - } - - // The label variants for the two caches. - labelVariants := []prometheus.Labels{ - prometheus.Labels{"engine_id": "0", "node_id": "0"}, - prometheus.Labels{"engine_id": "1", "node_id": "0"}, - } - - for i, labels := range labelVariants { - for _, name := range gauges { - exp := float64(i + len(name)) - metric := promtest.MustFindMetric(t, mfs, name, labels) - if got := metric.GetGauge().GetValue(); got != exp { - t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp) - } - } - - for _, name := range counters { - exp := float64(i + len(name)) - - labels["status"] = "ok" - metric := promtest.MustFindMetric(t, mfs, name, labels) - if got := metric.GetCounter().GetValue(); got != exp { - t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp) - } - } - } -} - func TestMetrics_Compactions(t *testing.T) { // metrics to be shared by multiple file stores. metrics := newCompactionMetrics(prometheus.Labels{"engine_id": "", "node_id": ""}) diff --git a/tsdb/tsm1/value.go b/tsdb/tsm1/value.go new file mode 100644 index 0000000000..652dcdfa1d --- /dev/null +++ b/tsdb/tsm1/value.go @@ -0,0 +1,123 @@ +package tsm1 + +import ( + "fmt" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb/value" +) + +type ( + Value = value.Value + IntegerValue = value.IntegerValue + UnsignedValue = value.UnsignedValue + FloatValue = value.FloatValue + BooleanValue = value.BooleanValue + StringValue = value.StringValue +) + +// NewValue returns a new Value with the underlying type dependent on value. +func NewValue(t int64, v interface{}) Value { return value.NewValue(t, v) } + +// NewRawIntegerValue returns a new integer value. +func NewRawIntegerValue(t int64, v int64) IntegerValue { return value.NewRawIntegerValue(t, v) } + +// NewRawUnsignedValue returns a new unsigned integer value. +func NewRawUnsignedValue(t int64, v uint64) UnsignedValue { return value.NewRawUnsignedValue(t, v) } + +// NewRawFloatValue returns a new float value. +func NewRawFloatValue(t int64, v float64) FloatValue { return value.NewRawFloatValue(t, v) } + +// NewRawBooleanValue returns a new boolean value. +func NewRawBooleanValue(t int64, v bool) BooleanValue { return value.NewRawBooleanValue(t, v) } + +// NewRawStringValue returns a new string value. +func NewRawStringValue(t int64, v string) StringValue { return value.NewRawStringValue(t, v) } + +// NewIntegerValue returns a new integer value. +func NewIntegerValue(t int64, v int64) Value { return value.NewIntegerValue(t, v) } + +// NewUnsignedValue returns a new unsigned integer value. +func NewUnsignedValue(t int64, v uint64) Value { return value.NewUnsignedValue(t, v) } + +// NewFloatValue returns a new float value. +func NewFloatValue(t int64, v float64) Value { return value.NewFloatValue(t, v) } + +// NewBooleanValue returns a new boolean value. +func NewBooleanValue(t int64, v bool) Value { return value.NewBooleanValue(t, v) } + +// NewStringValue returns a new string value. 
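The new tsm1/value.go shim (its definitions continue directly below) re-exports the value types and adds PointsToValues/ValuesToPoints, which convert between parsed points and the series-key-to-values map keyed with the #!~# field separator. A hedged round-trip sketch; the line-protocol sample is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/tsdb/tsm1"
)

func main() {
	pts, err := models.ParsePointsString("cpu,host=A value=1.1 1000000000")
	if err != nil {
		panic(err)
	}

	// Explode each point into per-field values, keyed by
	// "<series key>#!~#<field name>".
	values, err := tsm1.PointsToValues(pts)
	if err != nil {
		panic(err)
	}
	for key, vals := range values {
		fmt.Println(key, vals)
	}

	// And back again: one point per value.
	for _, p := range tsm1.ValuesToPoints(values) {
		fmt.Println(p.String())
	}
}
```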
+func NewStringValue(t int64, v string) Value { return value.NewStringValue(t, v) } + +// PointsToValues takes in a slice of points and returns it as a map of series key to +// values. It returns an error if any of the points could not be converted. +func PointsToValues(points []models.Point) (map[string][]Value, error) { + values := make(map[string][]Value, len(points)) + var ( + keyBuf []byte + baseLen int + ) + + for _, p := range points { + keyBuf = append(keyBuf[:0], p.Key()...) + keyBuf = append(keyBuf, keyFieldSeparator...) + baseLen = len(keyBuf) + iter := p.FieldIterator() + t := p.Time().UnixNano() + for iter.Next() { + keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...) + + var v Value + switch iter.Type() { + case models.Float: + fv, err := iter.FloatValue() + if err != nil { + return nil, err + } + v = NewFloatValue(t, fv) + case models.Integer: + iv, err := iter.IntegerValue() + if err != nil { + return nil, err + } + v = NewIntegerValue(t, iv) + case models.Unsigned: + iv, err := iter.UnsignedValue() + if err != nil { + return nil, err + } + v = NewUnsignedValue(t, iv) + case models.String: + v = NewStringValue(t, iter.StringValue()) + case models.Boolean: + bv, err := iter.BooleanValue() + if err != nil { + return nil, err + } + v = NewBooleanValue(t, bv) + default: + return nil, fmt.Errorf("unknown field type for %s: %s", + string(iter.FieldKey()), p.String()) + } + values[string(keyBuf)] = append(values[string(keyBuf)], v) + } + } + + return values, nil +} + +// ValuesToPoints takes in a map of values and returns a slice of models.Point. +func ValuesToPoints(values map[string][]Value) []models.Point { + points := make([]models.Point, 0, len(values)) + for composite, vals := range values { + series, field := SeriesAndFieldFromCompositeKey([]byte(composite)) + strField := string(field) + for _, val := range vals { + t := time.Unix(0, val.UnixNano()) + fields := models.Fields{strField: val.Value()} + points = append(points, models.NewPointFromSeries(series, fields, t)) + } + } + return points +} diff --git a/tsdb/tsm1/wal_test.go b/tsdb/tsm1/wal_test.go deleted file mode 100644 index de78110fd7..0000000000 --- a/tsdb/tsm1/wal_test.go +++ /dev/null @@ -1,864 +0,0 @@ -package tsm1_test - -import ( - "fmt" - "io" - "os" - "reflect" - "testing" - - "github.com/golang/snappy" - "github.com/influxdata/influxdb/pkg/slices" - "github.com/influxdata/influxdb/tsdb/tsm1" -) - -func TestWALWriter_WriteMulti_Single(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - - p1 := tsm1.NewValue(1, 1.1) - p2 := tsm1.NewValue(1, int64(1)) - p3 := tsm1.NewValue(1, true) - p4 := tsm1.NewValue(1, "string") - p5 := tsm1.NewValue(1, ^uint64(0)) - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": []tsm1.Value{p1}, - "cpu,host=A#!~#int": []tsm1.Value{p2}, - "cpu,host=A#!~#bool": []tsm1.Value{p3}, - "cpu,host=A#!~#string": []tsm1.Value{p4}, - "cpu,host=A#!~#unsigned": []tsm1.Value{p5}, - } - - entry := &tsm1.WriteWALEntry{ - Values: values, - } - - if err := w.Write(mustMarshalEntry(entry)); err != nil { - fatal(t, "write points", err) - } - - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - - if _, err := f.Seek(0, io.SeekStart); err != nil { - fatal(t, "seek", err) - } - - r := tsm1.NewWALSegmentReader(f) - - if !r.Next() { - t.Fatalf("expected next, got false") - } - - we, err := r.Read() - if err != nil { - fatal(t, "read entry", err) - } - - e, ok := we.(*tsm1.WriteWALEntry) - if !ok { - 
t.Fatalf("expected WriteWALEntry: got %#v", e) - } - - for k, v := range e.Values { - for i, vv := range v { - if got, exp := vv.String(), values[k][i].String(); got != exp { - t.Fatalf("points mismatch: got %v, exp %v", got, exp) - } - } - } - - if n := r.Count(); n != MustReadFileSize(f) { - t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f)) - } -} - -func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - - var points []tsm1.Value - for i := 0; i < 100000; i++ { - points = append(points, tsm1.NewValue(int64(i), int64(1))) - } - - values := map[string][]tsm1.Value{ - "cpu,host=A,server=01,foo=bar,tag=really-long#!~#float": points, - "mem,host=A,server=01,foo=bar,tag=really-long#!~#float": points, - } - - entry := &tsm1.WriteWALEntry{ - Values: values, - } - - if err := w.Write(mustMarshalEntry(entry)); err != nil { - fatal(t, "write points", err) - } - - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - - if _, err := f.Seek(0, io.SeekStart); err != nil { - fatal(t, "seek", err) - } - - r := tsm1.NewWALSegmentReader(f) - - if !r.Next() { - t.Fatalf("expected next, got false") - } - - we, err := r.Read() - if err != nil { - fatal(t, "read entry", err) - } - - e, ok := we.(*tsm1.WriteWALEntry) - if !ok { - t.Fatalf("expected WriteWALEntry: got %#v", e) - } - - for k, v := range e.Values { - for i, vv := range v { - if got, exp := vv.String(), values[k][i].String(); got != exp { - t.Fatalf("points mismatch: got %v, exp %v", got, exp) - } - } - } - - if n := r.Count(); n != MustReadFileSize(f) { - t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f)) - } -} -func TestWALWriter_WriteMulti_Multiple(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - - p1 := tsm1.NewValue(1, int64(1)) - p2 := tsm1.NewValue(1, int64(2)) - - exp := []struct { - key string - values []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{p1}}, - {"cpu,host=B#!~#value", []tsm1.Value{p2}}, - } - - for _, v := range exp { - entry := &tsm1.WriteWALEntry{ - Values: map[string][]tsm1.Value{v.key: v.values}, - } - - if err := w.Write(mustMarshalEntry(entry)); err != nil { - fatal(t, "write points", err) - } - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - } - - // Seek back to the beinning of the file for reading - if _, err := f.Seek(0, io.SeekStart); err != nil { - fatal(t, "seek", err) - } - - r := tsm1.NewWALSegmentReader(f) - - for _, ep := range exp { - if !r.Next() { - t.Fatalf("expected next, got false") - } - - we, err := r.Read() - if err != nil { - fatal(t, "read entry", err) - } - - e, ok := we.(*tsm1.WriteWALEntry) - if !ok { - t.Fatalf("expected WriteWALEntry: got %#v", e) - } - - for k, v := range e.Values { - if got, exp := k, ep.key; got != exp { - t.Fatalf("key mismatch. 
got %v, exp %v", got, exp) - } - - if got, exp := len(v), len(ep.values); got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - for i, vv := range v { - if got, exp := vv.String(), ep.values[i].String(); got != exp { - t.Fatalf("points mismatch: got %v, exp %v", got, exp) - } - } - } - } - - if n := r.Count(); n != MustReadFileSize(f) { - t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f)) - } -} - -func TestWALWriter_WriteDelete_Single(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - - entry := &tsm1.DeleteWALEntry{ - Keys: [][]byte{[]byte("cpu")}, - } - - if err := w.Write(mustMarshalEntry(entry)); err != nil { - fatal(t, "write points", err) - } - - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - - if _, err := f.Seek(0, io.SeekStart); err != nil { - fatal(t, "seek", err) - } - - r := tsm1.NewWALSegmentReader(f) - - if !r.Next() { - t.Fatalf("expected next, got false") - } - - we, err := r.Read() - if err != nil { - fatal(t, "read entry", err) - } - - e, ok := we.(*tsm1.DeleteWALEntry) - if !ok { - t.Fatalf("expected WriteWALEntry: got %#v", e) - } - - if got, exp := len(e.Keys), len(entry.Keys); got != exp { - t.Fatalf("key length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(e.Keys[0]), string(entry.Keys[0]); got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } -} - -func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - - p1 := tsm1.NewValue(1, true) - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": []tsm1.Value{p1}, - } - - writeEntry := &tsm1.WriteWALEntry{ - Values: values, - } - - if err := w.Write(mustMarshalEntry(writeEntry)); err != nil { - fatal(t, "write points", err) - } - - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - - // Write the delete entry - deleteEntry := &tsm1.DeleteWALEntry{ - Keys: [][]byte{[]byte("cpu,host=A#!~value")}, - } - - if err := w.Write(mustMarshalEntry(deleteEntry)); err != nil { - fatal(t, "write points", err) - } - - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - - // Seek back to the beinning of the file for reading - if _, err := f.Seek(0, io.SeekStart); err != nil { - fatal(t, "seek", err) - } - - r := tsm1.NewWALSegmentReader(f) - - // Read the write points first - if !r.Next() { - t.Fatalf("expected next, got false") - } - - we, err := r.Read() - if err != nil { - fatal(t, "read entry", err) - } - - e, ok := we.(*tsm1.WriteWALEntry) - if !ok { - t.Fatalf("expected WriteWALEntry: got %#v", e) - } - - for k, v := range e.Values { - if got, exp := len(v), len(values[k]); got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - for i, vv := range v { - if got, exp := vv.String(), values[k][i].String(); got != exp { - t.Fatalf("points mismatch: got %v, exp %v", got, exp) - } - } - } - - // Read the delete second - if !r.Next() { - t.Fatalf("expected next, got false") - } - - we, err = r.Read() - if err != nil { - fatal(t, "read entry", err) - } - - de, ok := we.(*tsm1.DeleteWALEntry) - if !ok { - t.Fatalf("expected DeleteWALEntry: got %#v", e) - } - - if got, exp := len(de.Keys), len(deleteEntry.Keys); got != exp { - t.Fatalf("key length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(de.Keys[0]), string(deleteEntry.Keys[0]); got != exp { - t.Fatalf("key 
mismatch: got %v, exp %v", got, exp) - } -} - -func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - - p1 := tsm1.NewValue(1, 1.0) - p2 := tsm1.NewValue(2, 2.0) - p3 := tsm1.NewValue(3, 3.0) - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": []tsm1.Value{p1, p2, p3}, - } - - writeEntry := &tsm1.WriteWALEntry{ - Values: values, - } - - if err := w.Write(mustMarshalEntry(writeEntry)); err != nil { - fatal(t, "write points", err) - } - - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - - // Write the delete entry - deleteEntry := &tsm1.DeleteRangeWALEntry{ - Keys: [][]byte{[]byte("cpu,host=A#!~value")}, - Min: 2, - Max: 3, - } - - if err := w.Write(mustMarshalEntry(deleteEntry)); err != nil { - fatal(t, "write points", err) - } - - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - - // Seek back to the beinning of the file for reading - if _, err := f.Seek(0, io.SeekStart); err != nil { - fatal(t, "seek", err) - } - - r := tsm1.NewWALSegmentReader(f) - - // Read the write points first - if !r.Next() { - t.Fatalf("expected next, got false") - } - - we, err := r.Read() - if err != nil { - fatal(t, "read entry", err) - } - - e, ok := we.(*tsm1.WriteWALEntry) - if !ok { - t.Fatalf("expected WriteWALEntry: got %#v", e) - } - - for k, v := range e.Values { - if got, exp := len(v), len(values[k]); got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - for i, vv := range v { - if got, exp := vv.String(), values[k][i].String(); got != exp { - t.Fatalf("points mismatch: got %v, exp %v", got, exp) - } - } - } - - // Read the delete second - if !r.Next() { - t.Fatalf("expected next, got false") - } - - we, err = r.Read() - if err != nil { - fatal(t, "read entry", err) - } - - de, ok := we.(*tsm1.DeleteRangeWALEntry) - if !ok { - t.Fatalf("expected DeleteWALEntry: got %#v", e) - } - - if got, exp := len(de.Keys), len(deleteEntry.Keys); got != exp { - t.Fatalf("key length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(de.Keys[0]), string(deleteEntry.Keys[0]); got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := de.Min, int64(2); got != exp { - t.Fatalf("min time mismatch: got %v, exp %v", got, exp) - } - - if got, exp := de.Max, int64(3); got != exp { - t.Fatalf("min time mismatch: got %v, exp %v", got, exp) - } - -} - -func TestWAL_ClosedSegments(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - - w := tsm1.NewWAL(dir) - if err := w.Open(); err != nil { - t.Fatalf("error opening WAL: %v", err) - } - - files, err := w.ClosedSegments() - if err != nil { - t.Fatalf("error getting closed segments: %v", err) - } - - if got, exp := len(files), 0; got != exp { - t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp) - } - - if _, err := w.WriteMulti(map[string][]tsm1.Value{ - "cpu,host=A#!~#value": []tsm1.Value{ - tsm1.NewValue(1, 1.1), - }, - }); err != nil { - t.Fatalf("error writing points: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("error closing wal: %v", err) - } - - // Re-open the WAL - w = tsm1.NewWAL(dir) - defer w.Close() - if err := w.Open(); err != nil { - t.Fatalf("error opening WAL: %v", err) - } - - files, err = w.ClosedSegments() - if err != nil { - t.Fatalf("error getting closed segments: %v", err) - } - if got, exp := len(files), 0; got != exp { - t.Fatalf("close segment length mismatch: got %v, 
exp %v", got, exp) - } -} - -func TestWAL_Delete(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - - w := tsm1.NewWAL(dir) - if err := w.Open(); err != nil { - t.Fatalf("error opening WAL: %v", err) - } - - files, err := w.ClosedSegments() - if err != nil { - t.Fatalf("error getting closed segments: %v", err) - } - - if got, exp := len(files), 0; got != exp { - t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp) - } - - if _, err := w.Delete([][]byte{[]byte("cpu")}); err != nil { - t.Fatalf("error writing points: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("error closing wal: %v", err) - } - - // Re-open the WAL - w = tsm1.NewWAL(dir) - defer w.Close() - if err := w.Open(); err != nil { - t.Fatalf("error opening WAL: %v", err) - } - - files, err = w.ClosedSegments() - if err != nil { - t.Fatalf("error getting closed segments: %v", err) - } - if got, exp := len(files), 0; got != exp { - t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp) - } -} - -func TestWALWriter_Corrupt(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - corruption := []byte{1, 4, 0, 0, 0} - - p1 := tsm1.NewValue(1, 1.1) - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": []tsm1.Value{p1}, - } - - entry := &tsm1.WriteWALEntry{ - Values: values, - } - if err := w.Write(mustMarshalEntry(entry)); err != nil { - fatal(t, "write points", err) - } - - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - - // Write some random bytes to the file to simulate corruption. - if _, err := f.Write(corruption); err != nil { - fatal(t, "corrupt WAL segment", err) - } - - // Create the WAL segment reader. - if _, err := f.Seek(0, io.SeekStart); err != nil { - fatal(t, "seek", err) - } - r := tsm1.NewWALSegmentReader(f) - - // Try to decode two entries. - - if !r.Next() { - t.Fatalf("expected next, got false") - } - if _, err := r.Read(); err != nil { - fatal(t, "read entry", err) - } - - if !r.Next() { - t.Fatalf("expected next, got false") - } - if _, err := r.Read(); err == nil { - fatal(t, "read entry did not return err", nil) - } - - // Count should only return size of valid data. - expCount := MustReadFileSize(f) - int64(len(corruption)) - if n := r.Count(); n != expCount { - t.Fatalf("wrong count of bytes read, got %d, exp %d", n, expCount) - } -} - -// Reproduces a `panic: runtime error: makeslice: cap out of range` when run with -// GOARCH=386 go test -run TestWALSegmentReader_Corrupt -v ./tsdb/engine/tsm1/ -func TestWALSegmentReader_Corrupt(t *testing.T) { - dir := MustTempDir() - defer os.RemoveAll(dir) - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - - p4 := tsm1.NewValue(1, "string") - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#string": []tsm1.Value{p4, p4}, - } - - entry := &tsm1.WriteWALEntry{ - Values: values, - } - - typ, b := mustMarshalEntry(entry) - - // This causes the nvals field to overflow on 32 bit systems which produces a - // negative count and a panic when reading the segment. - b[25] = 255 - - if err := w.Write(typ, b); err != nil { - fatal(t, "write points", err) - } - - if err := w.Flush(); err != nil { - fatal(t, "flush", err) - } - - // Create the WAL segment reader. - if _, err := f.Seek(0, io.SeekStart); err != nil { - fatal(t, "seek", err) - } - - r := tsm1.NewWALSegmentReader(f) - defer r.Close() - - // Try to decode two entries. 
- for r.Next() { - r.Read() - } -} - -func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) { - p1 := tsm1.NewValue(1, 1.1) - p2 := tsm1.NewValue(1, int64(1)) - p3 := tsm1.NewValue(1, true) - p4 := tsm1.NewValue(1, "string") - p5 := tsm1.NewValue(1, uint64(1)) - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": []tsm1.Value{p1, p1}, - "cpu,host=A#!~#int": []tsm1.Value{p2, p2}, - "cpu,host=A#!~#bool": []tsm1.Value{p3, p3}, - "cpu,host=A#!~#string": []tsm1.Value{p4, p4}, - "cpu,host=A#!~#unsigned": []tsm1.Value{p5, p5}, - } - - w := &tsm1.WriteWALEntry{ - Values: values, - } - - b, err := w.MarshalBinary() - if err != nil { - t.Fatalf("unexpected error, got %v", err) - } - - // Test every possible truncation of a write WAL entry - for i := 0; i < len(b); i++ { - // re-allocated to ensure capacity would be exceed if slicing - truncated := make([]byte, i) - copy(truncated, b[:i]) - err := w.UnmarshalBinary(truncated) - if err != nil && err != tsm1.ErrWALCorrupt { - t.Fatalf("unexpected error: %v", err) - } - } -} - -func TestDeleteWALEntry_UnmarshalBinary(t *testing.T) { - examples := []struct { - In []string - Out [][]byte - }{ - { - In: []string{""}, - Out: nil, - }, - { - In: []string{"foo"}, - Out: [][]byte{[]byte("foo")}, - }, - { - In: []string{"foo", "bar"}, - Out: [][]byte{[]byte("foo"), []byte("bar")}, - }, - { - In: []string{"foo", "bar", "z", "abc"}, - Out: [][]byte{[]byte("foo"), []byte("bar"), []byte("z"), []byte("abc")}, - }, - { - In: []string{"foo", "bar", "z", "a"}, - Out: [][]byte{[]byte("foo"), []byte("bar"), []byte("z"), []byte("a")}, - }, - } - - for i, example := range examples { - w := &tsm1.DeleteWALEntry{Keys: slices.StringsToBytes(example.In...)} - b, err := w.MarshalBinary() - if err != nil { - t.Fatalf("[example %d] unexpected error, got %v", i, err) - } - - out := &tsm1.DeleteWALEntry{} - if err := out.UnmarshalBinary(b); err != nil { - t.Fatalf("[example %d] %v", i, err) - } - - if !reflect.DeepEqual(example.Out, out.Keys) { - t.Errorf("[example %d] got %v, expected %v", i, out.Keys, example.Out) - } - } -} - -func TestWriteWALSegment_UnmarshalBinary_DeleteWALCorrupt(t *testing.T) { - w := &tsm1.DeleteWALEntry{ - Keys: [][]byte{[]byte("foo"), []byte("bar")}, - } - - b, err := w.MarshalBinary() - if err != nil { - t.Fatalf("unexpected error, got %v", err) - } - - // Test every possible truncation of a write WAL entry - for i := 0; i < len(b); i++ { - // re-allocated to ensure capacity would be exceed if slicing - truncated := make([]byte, i) - copy(truncated, b[:i]) - err := w.UnmarshalBinary(truncated) - if err != nil && err != tsm1.ErrWALCorrupt { - t.Fatalf("unexpected error: %v", err) - } - } -} - -func TestWriteWALSegment_UnmarshalBinary_DeleteRangeWALCorrupt(t *testing.T) { - w := &tsm1.DeleteRangeWALEntry{ - Keys: [][]byte{[]byte("foo"), []byte("bar")}, - Min: 1, - Max: 2, - } - - b, err := w.MarshalBinary() - if err != nil { - t.Fatalf("unexpected error, got %v", err) - } - - // Test every possible truncation of a write WAL entry - for i := 0; i < len(b); i++ { - // re-allocated to ensure capacity would be exceed if slicing - truncated := make([]byte, i) - copy(truncated, b[:i]) - err := w.UnmarshalBinary(truncated) - if err != nil && err != tsm1.ErrWALCorrupt { - t.Fatalf("unexpected error: %v", err) - } - } -} - -func BenchmarkWALSegmentWriter(b *testing.B) { - points := map[string][]tsm1.Value{} - for i := 0; i < 5000; i++ { - k := "cpu,host=A#!~#value" - points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1)) - } - - dir 
:= MustTempDir() - defer os.RemoveAll(dir) - - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - - write := &tsm1.WriteWALEntry{ - Values: points, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - if err := w.Write(mustMarshalEntry(write)); err != nil { - b.Fatalf("unexpected error writing entry: %v", err) - } - } -} - -func BenchmarkWALSegmentReader(b *testing.B) { - points := map[string][]tsm1.Value{} - for i := 0; i < 5000; i++ { - k := "cpu,host=A#!~#value" - points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1)) - } - - dir := MustTempDir() - defer os.RemoveAll(dir) - - f := MustTempFile(dir) - w := tsm1.NewWALSegmentWriter(f) - - write := &tsm1.WriteWALEntry{ - Values: points, - } - - for i := 0; i < 100; i++ { - if err := w.Write(mustMarshalEntry(write)); err != nil { - b.Fatalf("unexpected error writing entry: %v", err) - } - } - - r := tsm1.NewWALSegmentReader(f) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - f.Seek(0, io.SeekStart) - b.StartTimer() - - for r.Next() { - _, err := r.Read() - if err != nil { - b.Fatalf("unexpected error reading entry: %v", err) - } - } - } -} - -// MustReadFileSize returns the size of the file, or panics. -func MustReadFileSize(f *os.File) int64 { - stat, err := os.Stat(f.Name()) - if err != nil { - panic(fmt.Sprintf("failed to get size of file at %s: %s", f.Name(), err.Error())) - } - return stat.Size() -} - -func mustMarshalEntry(entry tsm1.WALEntry) (tsm1.WalEntryType, []byte) { - bytes := make([]byte, 1024<<2) - - b, err := entry.Encode(bytes) - if err != nil { - panic(fmt.Sprintf("error encoding: %v", err)) - } - - return entry.Type(), snappy.Encode(b, b) -} diff --git a/tsdb/value/value.go b/tsdb/value/value.go new file mode 100644 index 0000000000..41320278c6 --- /dev/null +++ b/tsdb/value/value.go @@ -0,0 +1,236 @@ +package value + +import ( + "fmt" + "time" + + "github.com/influxdata/influxdb/tsdb" +) + +// Value represents a TSM-encoded value. +type Value interface { + // UnixNano returns the timestamp of the value in nanoseconds since unix epoch. + UnixNano() int64 + + // Value returns the underlying value. + Value() interface{} + + // Size returns the number of bytes necessary to represent the value and its timestamp. + Size() int + + // String returns the string representation of the value and its timestamp. + String() string + + // internalOnly is unexported to ensure implementations of Value + // can only originate in this package. + internalOnly() +} + +// NewValue returns a new Value with the underlying type dependent on value. +func NewValue(t int64, value interface{}) Value { + switch v := value.(type) { + case int64: + return IntegerValue{unixnano: t, value: v} + case uint64: + return UnsignedValue{unixnano: t, value: v} + case float64: + return FloatValue{unixnano: t, value: v} + case bool: + return BooleanValue{unixnano: t, value: v} + case string: + return StringValue{unixnano: t, value: v} + } + return EmptyValue{} +} + +// NewRawIntegerValue returns a new integer value. +func NewRawIntegerValue(t int64, v int64) IntegerValue { return IntegerValue{unixnano: t, value: v} } + +// NewRawUnsignedValue returns a new unsigned integer value. +func NewRawUnsignedValue(t int64, v uint64) UnsignedValue { return UnsignedValue{unixnano: t, value: v} } + +// NewRawFloatValue returns a new float value. +func NewRawFloatValue(t int64, v float64) FloatValue { return FloatValue{unixnano: t, value: v} } + +// NewRawBooleanValue returns a new boolean value. 
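An aside on the internalOnly() method declared in the Value interface above (the constructor definitions continue immediately below): because the method is unexported, no type outside the package can satisfy the interface, keeping the set of Value implementations closed. A tiny illustration of the sealing pattern with hypothetical names:

```go
package sealed

// Sealed cannot be implemented outside this package: external code has
// no way to define the unexported method sealed().
type Sealed interface {
	Size() int
	sealed()
}

type intValue struct{ v int64 }

func (intValue) Size() int { return 8 }
func (intValue) sealed()   {}
```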
+func NewRawBooleanValue(t int64, v bool) BooleanValue { return BooleanValue{unixnano: t, value: v} } + +// NewRawStringValue returns a new string value. +func NewRawStringValue(t int64, v string) StringValue { return StringValue{unixnano: t, value: v} } + +// NewIntegerValue returns a new integer value. +func NewIntegerValue(t int64, v int64) Value { return NewRawIntegerValue(t, v) } + +// NewUnsignedValue returns a new unsigned integer value. +func NewUnsignedValue(t int64, v uint64) Value { return NewRawUnsignedValue(t, v) } + +// NewFloatValue returns a new float value. +func NewFloatValue(t int64, v float64) Value { return NewRawFloatValue(t, v) } + +// NewBooleanValue returns a new boolean value. +func NewBooleanValue(t int64, v bool) Value { return NewRawBooleanValue(t, v) } + +// NewStringValue returns a new string value. +func NewStringValue(t int64, v string) Value { return NewRawStringValue(t, v) } + +// EmptyValue is used when there is no appropriate other value. +type EmptyValue struct{} + +// UnixNano returns tsdb.EOF. +func (e EmptyValue) UnixNano() int64 { return tsdb.EOF } + +// Value returns nil. +func (e EmptyValue) Value() interface{} { return nil } + +// Size returns 0. +func (e EmptyValue) Size() int { return 0 } + +// String returns the empty string. +func (e EmptyValue) String() string { return "" } + +func (EmptyValue) internalOnly() {} +func (StringValue) internalOnly() {} +func (IntegerValue) internalOnly() {} +func (UnsignedValue) internalOnly() {} +func (BooleanValue) internalOnly() {} +func (FloatValue) internalOnly() {} + +// IntegerValue represents an int64 value. +type IntegerValue struct { + unixnano int64 + value int64 +} + +// Value returns the underlying int64 value. +func (v IntegerValue) Value() interface{} { + return v.value +} + +// UnixNano returns the timestamp of the value. +func (v IntegerValue) UnixNano() int64 { + return v.unixnano +} + +// Size returns the number of bytes necessary to represent the value and its timestamp. +func (v IntegerValue) Size() int { + return 16 +} + +// String returns the string representation of the value and its timestamp. +func (v IntegerValue) String() string { + return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) +} + +func (v IntegerValue) RawValue() int64 { return v.value } + +// UnsignedValue represents a uint64 value. +type UnsignedValue struct { + unixnano int64 + value uint64 +} + +// Value returns the underlying uint64 value. +func (v UnsignedValue) Value() interface{} { + return v.value +} + +// UnixNano returns the timestamp of the value. +func (v UnsignedValue) UnixNano() int64 { + return v.unixnano +} + +// Size returns the number of bytes necessary to represent the value and its timestamp. +func (v UnsignedValue) Size() int { + return 16 +} + +// String returns the string representation of the value and its timestamp. +func (v UnsignedValue) String() string { + return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) +} + +func (v UnsignedValue) RawValue() uint64 { return v.value } + +// FloatValue represents a float64 value. +type FloatValue struct { + unixnano int64 + value float64 +} + +// UnixNano returns the timestamp of the value. +func (v FloatValue) UnixNano() int64 { + return v.unixnano +} + +// Value returns the underlying float64 value. +func (v FloatValue) Value() interface{} { + return v.value +} + +// Size returns the number of bytes necessary to represent the value and its timestamp.
+func (v FloatValue) Size() int { + return 16 +} + +// String returns the string representation of the value and its timestamp. +func (v FloatValue) String() string { + return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.value) +} + +func (v FloatValue) RawValue() float64 { return v.value } + +// BooleanValue represents a boolean value. +type BooleanValue struct { + unixnano int64 + value bool +} + +// Size returns the number of bytes necessary to represent the value and its timestamp. +func (v BooleanValue) Size() int { + return 9 +} + +// UnixNano returns the timestamp of the value in nanoseconds since unix epoch. +func (v BooleanValue) UnixNano() int64 { + return v.unixnano +} + +// Value returns the underlying boolean value. +func (v BooleanValue) Value() interface{} { + return v.value +} + +// String returns the string representation of the value and its timestamp. +func (v BooleanValue) String() string { + return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) +} + +func (v BooleanValue) RawValue() bool { return v.value } + +// StringValue represents a string value. +type StringValue struct { + unixnano int64 + value string +} + +// Value returns the underlying string value. +func (v StringValue) Value() interface{} { + return v.value +} + +// UnixNano returns the timestamp of the value. +func (v StringValue) UnixNano() int64 { + return v.unixnano +} + +// Size returns the number of bytes necessary to represent the value and its timestamp. +func (v StringValue) Size() int { + return 8 + len(v.value) +} + +// String returns the string representation of the value and its timestamp. +func (v StringValue) String() string { + return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) +} + +func (v StringValue) RawValue() string { return v.value } diff --git a/ui/jestSetup.ts b/ui/jestSetup.ts new file mode 100644 index 0000000000..fb260ffb9f --- /dev/null +++ b/ui/jestSetup.ts @@ -0,0 +1,6 @@ +import {cleanup} from 'react-testing-library' + +// cleans up state between react-testing-library tests +afterEach(() => { + cleanup() +}) diff --git a/ui/mocks/dummyData.ts b/ui/mocks/dummyData.ts index f902b53bc3..7c181d4f9d 100644 --- a/ui/mocks/dummyData.ts +++ b/ui/mocks/dummyData.ts @@ -1,11 +1,5 @@ -import { - Source, - SourceAuthenticationMethod, - Template, - SourceLinks, - TemplateType, - TemplateValueType, -} from 'src/types' +import {Template, SourceLinks, TemplateType, TemplateValueType} from 'src/types' +import {Source} from '@influxdata/influx' import {Cell, Dashboard, Label} from 'src/types/v2' import {Links} from 'src/types/v2/links' import {Task} from 'src/types/v2/tasks' @@ -23,9 +17,10 @@ import { TelegrafPluginInputNet, TelegrafPluginInputProcstat, TelegrafPluginInputDocker, + TelegrafPluginInputSwap, Task as TaskApi, Organization, -} from 'src/api' +} from '@influxdata/influx' export const links: Links = { authorizations: '/api/v2/authorizations', @@ -123,14 +118,12 @@ export const sourceLinks: SourceLinks = { export const source: Source = { id: '16', name: 'ssl', - type: 'influx', + type: Source.TypeEnum.Self, username: 'admin', url: 'https://localhost:9086', insecureSkipVerify: true, - default: false, telegraf: 'telegraf', links: sourceLinks, - authentication: SourceAuthenticationMethod.Basic, } export const timeRange = { @@ -408,6 +401,12 @@ export const redisTelegrafPlugin = { name: TelegrafPluginInputRedis.NameEnum.Redis, } +export const swapTelegrafPlugin = { + ...telegrafPlugin, + name: TelegrafPluginInputSwap.NameEnum.Swap, + configured: 
ConfigurationState.Configured, +} + export const redisPlugin = { name: TelegrafPluginInputRedis.NameEnum.Redis, type: TelegrafPluginInputRedis.TypeEnum.Input, @@ -583,7 +582,11 @@ export const setSetupParamsResponse = { userID: '033bc62520fe3000', user: 'iris', permissions: [ - {action: 'read', resource: 'authorizations', orgID: '033bc62534be3000'}, + { + action: 'read', + resource: 'authorizations', + orgID: '033bc62534be3000', + }, { action: 'write', resource: 'authorizations', diff --git a/ui/package-lock.json b/ui/package-lock.json index ccd47ce491..c414f6fe22 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -829,13 +829,23 @@ "dev": true }, "@influxdata/influx": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@influxdata/influx/-/influx-0.1.3.tgz", - "integrity": "sha512-5RkmG8lABKNmorMq4s20WYXkoleG55bPjzor/NzPDiqvEF1Rrcp6TJMDuvsolO1/624CBLcieZ55nlImhCqNsw==", + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/@influxdata/influx/-/influx-0.2.2.tgz", + "integrity": "sha512-ozz53ZrhWodRb72Dp0LBxd6mU/c++qVyKbd0l59VTTQ1BGHMKTJEZEDtgrnX+IkWaq4Ry4JUxJVkDAJ/QBem8A==", "requires": { "axios": "^0.18.0" } }, + "@influxdata/react-custom-scrollbars": { + "version": "4.3.8", + "resolved": "https://registry.npmjs.org/@influxdata/react-custom-scrollbars/-/react-custom-scrollbars-4.3.8.tgz", + "integrity": "sha512-tfBCP+L69nCqq/HgFDBmkvo0l6++6NnVZ2vj7vP5gHLenbTgODGNa9FLoaA3x5QQ8+OzvTt98UkVCAjKuY6aRg==", + "requires": { + "dom-css": "^2.0.0", + "prop-types": "^15.5.10", + "raf": "^3.1.0" + } + }, "@mrmlnc/readdir-enhanced": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz", @@ -902,6 +912,12 @@ "physical-cpu-count": "^2.0.0" } }, + "@sheerun/mutationobserver-shim": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@sheerun/mutationobserver-shim/-/mutationobserver-shim-0.3.2.tgz", + "integrity": "sha512-vTCdPp/T/Q3oSqwHmZ5Kpa9oI7iLtGl3RQaA/NyLHikvcrPxACkkKVr/XzkSPJWXHRhKGzVvb0urJsbMlRxi1Q==", + "dev": true + }, "@types/abstract-leveldown": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/@types/abstract-leveldown/-/abstract-leveldown-5.0.1.tgz", @@ -1057,9 +1073,9 @@ "dev": true }, "@types/react": { - "version": "16.4.16", - "resolved": "https://registry.npmjs.org/@types/react/-/react-16.4.16.tgz", - "integrity": "sha512-lxyoipLWweAnLnSsV4Ho2NAZTKKmxeYgkTQ6PaDiPDU9JJBUY2zJVVGiK1smzYv8+ZgbqEmcm5xM74GCpunSEA==", + "version": "16.8.2", + "resolved": "https://registry.npmjs.org/@types/react/-/react-16.8.2.tgz", + "integrity": "sha512-6mcKsqlqkN9xADrwiUz2gm9Wg4iGnlVGciwBRYFQSMWG6MQjhOZ/AVnxn+6v8nslFgfYTV8fNdE6XwKu6va5PA==", "dev": true, "requires": { "@types/prop-types": "*", @@ -3354,9 +3370,9 @@ } }, "csstype": { - "version": "2.5.7", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-2.5.7.tgz", - "integrity": "sha512-Nt5VDyOTIIV4/nRFswoCKps1R5CD1hkiyjBE9/thNaNZILLEviVw9yWQw15+O+CpNjQKB/uvdcxFFOrSflY3Yw==", + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-2.6.2.tgz", + "integrity": "sha512-Rl7PvTae0pflc1YtxtKbiSqq20Ts6vpIYOD5WBafl4y123DyHUeLrRdQP66sQW8/6gmX8jrYJLXwNeMqYVJcow==", "dev": true }, "custom-event": { @@ -3738,6 +3754,18 @@ } } }, + "dom-testing-library": { + "version": "3.16.4", + "resolved": "https://registry.npmjs.org/dom-testing-library/-/dom-testing-library-3.16.4.tgz", + "integrity": "sha512-D8tFLGe0xInL8F/KxZM7gc4r/vOCTgFGM93zXLB/AjFPz2O86y0UaruXl45K6xhqyclJFHHxUtgwaRddRyqxFw==", + "dev": true, + 
"requires": { + "@babel/runtime": "^7.1.5", + "@sheerun/mutationobserver-shim": "^0.3.2", + "pretty-format": "^23.6.0", + "wait-for-expect": "^1.1.0" + } + }, "domain-browser": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", @@ -10445,14 +10473,14 @@ } }, "react": { - "version": "16.5.2", - "resolved": "https://registry.npmjs.org/react/-/react-16.5.2.tgz", - "integrity": "sha512-FDCSVd3DjVTmbEAjUNX6FgfAmQ+ypJfHUsqUJOYNCBUp1h8lqmtC+0mXJ+JjsWx4KAVTkk1vKd1hLQPvEviSuw==", + "version": "16.8.0", + "resolved": "https://registry.npmjs.org/react/-/react-16.8.0.tgz", + "integrity": "sha512-g+nikW2D48kqgWSPwNo0NH9tIGG3DsQFlrtrQ1kj6W77z5ahyIHG0w8kPpz4Sdj6gyLnz0lEd/xsjOoGge2MYQ==", "requires": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1", "prop-types": "^15.6.2", - "schedule": "^0.5.0" + "scheduler": "^0.13.0" } }, "react-codemirror2": { @@ -10469,16 +10497,6 @@ "prop-types": "^15.5.8" } }, - "react-custom-scrollbars": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/react-custom-scrollbars/-/react-custom-scrollbars-4.2.1.tgz", - "integrity": "sha1-gw/ZUCkn6X6KeMIIaBOJmyqLZts=", - "requires": { - "dom-css": "^2.0.0", - "prop-types": "^15.5.10", - "raf": "^3.1.0" - } - }, "react-dimensions": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/react-dimensions/-/react-dimensions-1.3.1.tgz", @@ -10509,14 +10527,14 @@ } }, "react-dom": { - "version": "16.5.2", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.5.2.tgz", - "integrity": "sha512-RC8LDw8feuZOHVgzEf7f+cxBr/DnKdqp56VU0lAs1f4UfKc4cU8wU4fTq/mgnvynLQo8OtlPC19NUFh/zjZPuA==", + "version": "16.8.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.8.0.tgz", + "integrity": "sha512-dBzoAGYZpW9Yggp+CzBPC7q1HmWSeRc93DWrwbskmG1eHJWznZB/p0l/Sm+69leIGUS91AXPB/qB3WcPnKx8Sw==", "requires": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1", "prop-types": "^15.6.2", - "schedule": "^0.5.0" + "scheduler": "^0.13.0" } }, "react-draggable": { @@ -10629,6 +10647,15 @@ "schedule": "^0.5.0" } }, + "react-testing-library": { + "version": "5.4.4", + "resolved": "https://registry.npmjs.org/react-testing-library/-/react-testing-library-5.4.4.tgz", + "integrity": "sha512-/TiERZ+URSNhZQfjrUXh0VLsiLSmhqP1WP+2e2wWqWqrRIWpcAxrfuBxzlT75LYMDNmicEikaXJqRDi/pqCEDg==", + "dev": true, + "requires": { + "dom-testing-library": "^3.13.1" + } + }, "react-tooltip": { "version": "3.8.4", "resolved": "https://registry.npmjs.org/react-tooltip/-/react-tooltip-3.8.4.tgz", @@ -11602,10 +11629,20 @@ "version": "0.5.0", "resolved": "https://registry.npmjs.org/schedule/-/schedule-0.5.0.tgz", "integrity": "sha512-HUcJicG5Ou8xfR//c2rPT0lPIRR09vVvN81T9fqfVgBmhERUbDEQoYKjpBxbueJnCPpSu2ujXzOnRQt6x9o/jw==", + "dev": true, "requires": { "object-assign": "^4.1.1" } }, + "scheduler": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.13.0.tgz", + "integrity": "sha512-w7aJnV30jc7OsiZQNPVmBc+HooZuvQZIZIShKutC3tnMFMkcwVN9CZRRSSNw03OnSCKmEkK8usmwcw6dqBaLzw==", + "requires": { + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1" + } + }, "seleccion": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/seleccion/-/seleccion-2.0.0.tgz", @@ -13000,6 +13037,12 @@ "browser-process-hrtime": "^0.1.2" } }, + "wait-for-expect": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/wait-for-expect/-/wait-for-expect-1.1.0.tgz", + "integrity": 
"sha512-vQDokqxyMyknfX3luCDn16bSaRcOyH6gGuUXMIbxBLeTo6nWuEWYqMTT9a+44FmW8c2m6TRWBdNvBBjA1hwEKg==", + "dev": true + }, "walker": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.7.tgz", diff --git a/ui/package.json b/ui/package.json index 1d634145eb..51b9b9a7d6 100644 --- a/ui/package.json +++ b/ui/package.json @@ -30,6 +30,7 @@ "tsc:watch": "tsc -p ./tsconfig.json --noEmit --pretty -w" }, "jest": { + "setupTestFrameworkScriptFile": "./jestSetup.ts", "displayName": "test", "testURL": "http://localhost", "testPathIgnorePatterns": [ @@ -82,7 +83,7 @@ "@types/papaparse": "^4.1.34", "@types/prop-types": "^15.5.2", "@types/qs": "^6.5.1", - "@types/react": "^16.4.14", + "@types/react": "^16.8.0", "@types/react-dnd": "^2.0.36", "@types/react-dnd-html5-backend": "^2.1.9", "@types/react-grid-layout": "^0.16.5", @@ -104,6 +105,7 @@ "jsdom": "^9.0.0", "parcel": "^1.11.0", "prettier": "^1.14.3", + "react-testing-library": "^5.4.4", "sass": "^1.15.3", "ts-jest": "^23.10.3", "tslib": "^1.9.0", @@ -114,7 +116,8 @@ "typescript": "^3.1.3" }, "dependencies": { - "@influxdata/influx": "^0.1.3", + "@influxdata/react-custom-scrollbars": "4.3.8", + "@influxdata/influx": "^0.2.2", "axios": "^0.18.0", "babel-polyfill": "^6.26.0", "bignumber.js": "^4.0.2", @@ -138,14 +141,13 @@ "papaparse": "^4.4.0", "prop-types": "^15.6.1", "qs": "^6.5.2", - "react": "^15.0.0 || ^16.0.0", + "react": "^16.8.0", "react-codemirror2": "^4.2.1", "react-copy-to-clipboard": "^5.0.1", - "react-custom-scrollbars": "^4.1.1", "react-dimensions": "^1.2.0", "react-dnd": "^2.6.0", "react-dnd-html5-backend": "^2.6.0", - "react-dom": "^16.3.1", + "react-dom": "^16.8.0", "react-grid-layout": "^0.16.6", "react-markdown": "^4.0.3", "react-redux": "^5.0.7", diff --git a/ui/src/Setup.tsx b/ui/src/Setup.tsx index 42c541aec9..f76e10ba9e 100644 --- a/ui/src/Setup.tsx +++ b/ui/src/Setup.tsx @@ -4,13 +4,14 @@ import {connect} from 'react-redux' import {InjectedRouter} from 'react-router' // APIs -import {getSetupStatus} from 'src/onboarding/apis' +import {client} from 'src/utils/api' // Actions import {notify as notifyAction} from 'src/shared/actions/notifications' // Components import {ErrorHandling} from 'src/shared/decorators/errors' +import {SpinnerContainer, TechnoSpinner} from 'src/clockface' // Utils import {isOnboardingURL} from 'src/onboarding/utils' @@ -50,12 +51,12 @@ export class Setup extends PureComponent { return } - const isSetupAllowed = await getSetupStatus() + const {allowed} = await client.setup.status() this.setState({ loading: RemoteDataState.Done, }) - if (!isSetupAllowed) { + if (!allowed) { return } @@ -63,18 +64,12 @@ export class Setup extends PureComponent { } public render() { - if (this.isLoading) { - return
- } else { - return this.props.children && React.cloneElement(this.props.children) - } - } - - private get isLoading(): boolean { const {loading} = this.state + return ( - loading === RemoteDataState.Loading || - loading === RemoteDataState.NotStarted + }> + {this.props.children && React.cloneElement(this.props.children)} + ) } } diff --git a/ui/src/Signin.tsx b/ui/src/Signin.tsx index a901e6e5c8..25b641d079 100644 --- a/ui/src/Signin.tsx +++ b/ui/src/Signin.tsx @@ -7,6 +7,7 @@ import {client} from 'src/utils/api' // Components import {ErrorHandling} from 'src/shared/decorators/errors' +import {SpinnerContainer, TechnoSpinner} from 'src/clockface' // Actions import {notify as notifyAction} from 'src/shared/actions/notifications' @@ -55,18 +56,12 @@ export class Signin extends PureComponent { } public render() { - if (this.isLoading) { - return
- } - - return this.props.children && React.cloneElement(this.props.children) - } - - private get isLoading(): boolean { const {loading} = this.state + return ( - loading === RemoteDataState.Loading || - loading === RemoteDataState.NotStarted + <SpinnerContainer loading={loading} spinnerComponent={<TechnoSpinner />}> + {this.props.children && React.cloneElement(this.props.children)} + </SpinnerContainer> ) }

diff --git a/ui/src/api/api.ts deleted file mode 100644 index 87c888e8ea..0000000000 --- a/ui/src/api/api.ts +++ /dev/null @@ -1,20524 +0,0 @@ -// tslint:disable -/// -/** - * Influx API Service - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * OpenAPI spec version: 0.1.0 - * - * - * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - * https://openapi-generator.tech - * Do not edit the class manually. - */ - - -import * as url from "url"; -import { Configuration } from "./configuration"; -import globalAxios, { AxiosPromise, AxiosInstance } from 'axios'; - -const BASE_PATH = "http://localhost:9999/api/v2".replace(/\/+$/, ""); - -/** - * - * @export - */ -export const COLLECTION_FORMATS = { - csv: ",", - ssv: " ", - tsv: "\t", - pipes: "|", -}; - -/** - * - * @export - * @interface RequestArgs - */ -export interface RequestArgs { - url: string; - options: any; -} - -/** - * - * @export - * @class BaseAPI - */ -export class BaseAPI { - protected configuration: Configuration | undefined; - - constructor(configuration?: Configuration, protected basePath: string = BASE_PATH, protected axios: AxiosInstance = globalAxios) { - if (configuration) { - this.configuration = configuration; - this.basePath = configuration.basePath || this.basePath; - } - } -}; - -/** - * - * @export - * @class RequiredError - * @extends {Error} - */ -export class RequiredError extends Error { - name: "RequiredError" = "RequiredError"; - constructor(public field: string, msg?: string) { - super(msg); - } -} - -/** - * contains the AST for the supplied Flux query - * @export - * @interface ASTResponse - */ -export interface ASTResponse { - /** - * the AST of the supplied Flux query - * @type {any} - * @memberof ASTResponse - */ - ast?: any; -} - -/** - * - * @export - * @interface AnalyzeQueryResponse - */ -export interface AnalyzeQueryResponse { - /** - * - * @type {Array} - * @memberof AnalyzeQueryResponse - */ - errors?: Array; -} - -/** - * - * @export - * @interface AnalyzeQueryResponseErrors - */ -export interface AnalyzeQueryResponseErrors { - /** - * - * @type {number} - * @memberof AnalyzeQueryResponseErrors - */ - line?: number; - /** - * - * @type {number} - * @memberof AnalyzeQueryResponseErrors - */ - column?: number; - /** - * - * @type {number} - * @memberof AnalyzeQueryResponseErrors - */ - character?: number; - /** - * - * @type {string} - * @memberof AnalyzeQueryResponseErrors - */ - message?: string; -} - -/** - * - * @export - * @interface Authorization - */ -export interface Authorization { - /** - * ID of org that authorization is scoped to. - * @type {string} - * @memberof Authorization - */ - orgID: string; - /** - * if inactive the token is inactive and requests using the token will be rejected. - * @type {string} - * @memberof Authorization - */ - status?: Authorization.StatusEnum; - /** - * A description of the token. - * @type {string} - * @memberof Authorization - */ - description?: string; - /** - * List of permissions for an auth. An auth must have at least one Permission.