Merge branch 'master' into flux-staging

pull/11751/head
Jonathan A. Sternberg 2019-02-07 10:26:21 -06:00
commit e0e3a17ed9
327 changed files with 8608 additions and 44724 deletions

View File

@@ -5,9 +5,41 @@ jobs:
steps:
- run: docker login -u=$QUAY_USER -p=$QUAY_PASS quay.io
- run: docker run --entrypoint "./run_litmus_tests_oss.sh" -e TEST_LIST=tests_lists/gateway_api_tests.list --net host -v /var/run/docker.sock:/var/run/docker.sock -v ~/project:/Litmus/result quay.io/influxdb/litmus:latest
- run:
name: Litmus Nightly Tests Success
when: on_success
command: |
curl -X POST https://slack.com/api/chat.postMessage \
-H "Authorization: Bearer $SLACK_TOKEN" \
-H "Content-type: application/json; charset=utf-8" \
--data @<(cat <<EOF
{
"channel":"#testing",
"text":"SUCCESSFUL: Branch: $CIRCLE_BRANCH, Job: $CIRCLE_JOB, Build: $CIRCLE_BUILD_NUM, Build URL: $CIRCLE_BUILD_URL",
"username":"CircleCI Nightly Litmus OSS Litmus Tests",
"icon_emoji":":circleci-pass:"
}
EOF
)
- run:
name: Litmus Nightly Tests Fail
when: on_fail
command: |
curl -X POST https://slack.com/api/chat.postMessage \
-H "Authorization: Bearer $SLACK_TOKEN" \
-H "Content-type: application/json; charset=utf-8" \
--data @<(cat <<EOF
{
"channel":"#testing",
"text":"FAIL: Branch: $CIRCLE_BRANCH, Job: $CIRCLE_JOB, Build: $CIRCLE_BUILD_NUM, Build URL: $CIRCLE_BUILD_URL",
"username":"CircleCI Nightly Litmus OSS Litmus Tests",
"icon_emoji":":circleci-fail:"
}
EOF
)
- store_artifacts:
path: ~/project
jstest:
docker:
- image: circleci/golang:1.11-node-browsers
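The two notification steps above differ only in their "when:" condition and message text. For debugging the payload outside CI, here is a minimal Go sketch of the same chat.postMessage call; it is illustrative only, with the token, channel, and username values standing in for the ones the config reads from the environment.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// postSlackMessage mirrors the curl call in the CI steps above: an
// authenticated POST to Slack's chat.postMessage endpoint.
func postSlackMessage(token, channel, text string) error {
	payload, err := json.Marshal(map[string]string{
		"channel":    channel,
		"text":       text,
		"username":   "CircleCI Nightly OSS Litmus Tests",
		"icon_emoji": ":circleci-pass:",
	})
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", "https://slack.com/api/chat.postMessage", bytes.NewReader(payload))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// NOTE: Slack reports API-level failures in the JSON body ("ok": false)
	// even with HTTP 200; a fuller client would decode the response body.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("slack returned status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := postSlackMessage(os.Getenv("SLACK_TOKEN"), "#testing", "test notification"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}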

View File

@@ -1,3 +1,24 @@
## v2.0.0-alpha.2 [unreleased]
### Features
1. [11677](https://github.com/influxdata/influxdb/pull/11677): Add instructions button to view `$INFLUX_TOKEN` setup for telegraf configs
1. [11693](https://github.com/influxdata/influxdb/pull/11693): Save the $INFLUX_TOKEN environment variable in telegraf configs
1. [11700](https://github.com/influxdata/influxdb/pull/11700): Update Tasks tab on Org page to look like Tasks Page
1. [11740](https://github.com/influxdata/influxdb/pull/11740): Add view button to view the telegraf config toml
1. [11522](https://github.com/influxdata/influxdb/pull/11522): Add plugin information step to allow for config naming and configuring one plugin at a time
### Bug Fixes
1. [11678](https://github.com/influxdata/influxdb/pull/11678): Update the System Telegraf Plugin bundle to include the swap plugin
1. [11722](https://github.com/influxdata/influxdb/pull/11722): Revert behavior allowing users to create authorizations on behalf of another user
### UI Improvements
1. [11683](https://github.com/influxdata/influxdb/pull/11683): Change the wording for the plugin config form button to Done
1. [11689](https://github.com/influxdata/influxdb/pull/11689): Change the wording for the Collectors configure step button to Create and Verify
1. [11697](https://github.com/influxdata/influxdb/pull/11697): Standardize page loading spinner styles
1. [11711](https://github.com/influxdata/influxdb/pull/11711): Show checkbox on Save As button in data explorer
1. [11705](https://github.com/influxdata/influxdb/pull/11705): Make the collector plugins sidebar visible only in the configure step
1. [11745](https://github.com/influxdata/influxdb/pull/11745): Swap retention policies on Create bucket page
## v2.0.0-alpha.1 [2019-01-23]
### Release Notes

View File

@@ -135,9 +135,6 @@ clean:
$(RM) -r bin
$(RM) -r dist
generate-typescript-client:
make -C http
define CHRONOGIRAFFE
._ o o
\_`-)|_

View File

@@ -119,6 +119,8 @@ const (
SecretsResourceType = ResourceType("secrets") // 10
// LabelsResourceType gives permission to one or more labels.
LabelsResourceType = ResourceType("labels") // 11
// ViewsResourceType gives permission to one or more views.
ViewsResourceType = ResourceType("views") // 12
)
// AllResourceTypes is the list of all known resource types.
@@ -135,6 +137,7 @@ var AllResourceTypes = []ResourceType{
ScraperResourceType, // 9
SecretsResourceType, // 10
LabelsResourceType, // 11
ViewsResourceType, // 12
}
// OrgResourceTypes is the list of all known resource types that belong to an organization.
@@ -169,6 +172,7 @@ func (t ResourceType) Valid() (err error) {
case ScraperResourceType: // 9
case SecretsResourceType: // 10
case LabelsResourceType: // 11
case ViewsResourceType: // 12
default:
err = ErrInvalidResourceType
}
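Adding a resource type here means touching three places in sync: the typed constant, the AllResourceTypes slice, and the Valid switch. Below is a self-contained sketch of that registration pattern with only two resource types kept for brevity; the names mirror the diff, but the wrapper program is illustrative only.

package main

import (
	"errors"
	"fmt"
)

type ResourceType string

const (
	LabelsResourceType = ResourceType("labels") // 11
	ViewsResourceType  = ResourceType("views")  // 12
)

var ErrInvalidResourceType = errors.New("invalid resource type")

// AllResourceTypes must be kept in sync with the constants and with Valid.
var AllResourceTypes = []ResourceType{
	LabelsResourceType, // 11
	ViewsResourceType,  // 12
}

// Valid reports whether t is a registered resource type.
func (t ResourceType) Valid() error {
	switch t {
	case LabelsResourceType, ViewsResourceType:
		return nil
	default:
		return ErrInvalidResourceType
	}
}

func main() {
	fmt.Println(ResourceType("views").Valid()) // <nil>
	fmt.Println(ResourceType("bogus").Valid()) // invalid resource type
}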

View File

@@ -228,23 +228,6 @@ func authorizationCreateF(cmd *cobra.Command, args []string) error {
OrgID: o.ID,
}
if authorizationCreateFlags.user != "" {
// if the user flag is supplied, then set the user ID explicitly on the request
userSvc, err := newUserService(flags)
if err != nil {
return err
}
userFilter := platform.UserFilter{
Name: &authorizationCreateFlags.user,
}
user, err := userSvc.FindUser(context.Background(), userFilter)
if err != nil {
return err
}
authorization.UserID = user.ID
}
s, err := newAuthorizationService(flags)
if err != nil {
return err

View File

@@ -17,6 +17,7 @@ import (
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/storage"
"github.com/influxdata/influxdb/storage/wal"
"github.com/influxdata/influxdb/toml"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/tsi1"
@@ -422,7 +423,7 @@ func collectWALFiles(path string) ([]string, error) {
var paths []string
for _, fi := range fis {
if filepath.Ext(fi.Name()) != "."+tsm1.WALFileExtension {
if filepath.Ext(fi.Name()) != "."+wal.WALFileExtension {
continue
}
paths = append(paths, filepath.Join(path, fi.Name()))

View File

@@ -252,6 +252,18 @@ func (l *Launcher) FluxService() *http.FluxService {
return &http.FluxService{Addr: l.URL(), Token: l.Auth.Token}
}
func (l *Launcher) BucketService() *http.BucketService {
return &http.BucketService{Addr: l.URL(), Token: l.Auth.Token}
}
func (l *Launcher) AuthorizationService() *http.AuthorizationService {
return &http.AuthorizationService{Addr: l.URL(), Token: l.Auth.Token}
}
func (l *Launcher) TaskService() *http.TaskService {
return &http.TaskService{Addr: l.URL(), Token: l.Auth.Token}
}
// MustNewHTTPRequest returns a new nethttp.Request with base URL and auth attached. Fail on error.
func (l *Launcher) MustNewHTTPRequest(method, rawurl, body string) *nethttp.Request {
req, err := nethttp.NewRequest(method, l.URL()+rawurl, strings.NewReader(body))

View File

@@ -10,7 +10,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute/executetest"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb"
pctx "github.com/influxdata/influxdb/context"
"github.com/influxdata/influxdb/task/backend"
)
@@ -23,40 +23,40 @@ func TestLauncher_Task(t *testing.T) {
now := time.Now().Unix() // Need to track now at the start of the test, for a query later.
org := be.Org
bIn := &platform.Bucket{OrganizationID: org.ID, Organization: org.Name, Name: "my_bucket_in"}
bIn := &influxdb.Bucket{OrganizationID: org.ID, Organization: org.Name, Name: "my_bucket_in"}
if err := be.BucketService().CreateBucket(context.Background(), bIn); err != nil {
t.Fatal(err)
}
bOut := &platform.Bucket{OrganizationID: org.ID, Organization: org.Name, Name: "my_bucket_out"}
bOut := &influxdb.Bucket{OrganizationID: org.ID, Organization: org.Name, Name: "my_bucket_out"}
if err := be.BucketService().CreateBucket(context.Background(), bOut); err != nil {
t.Fatal(err)
}
u := be.User
writeBIn, err := platform.NewPermissionAtID(bIn.ID, platform.WriteAction, platform.BucketsResourceType, org.ID)
writeBIn, err := influxdb.NewPermissionAtID(bIn.ID, influxdb.WriteAction, influxdb.BucketsResourceType, org.ID)
if err != nil {
t.Fatal(err)
}
writeBOut, err := platform.NewPermissionAtID(bOut.ID, platform.WriteAction, platform.BucketsResourceType, org.ID)
writeBOut, err := influxdb.NewPermissionAtID(bOut.ID, influxdb.WriteAction, influxdb.BucketsResourceType, org.ID)
if err != nil {
t.Fatal(err)
}
writeT, err := platform.NewPermission(platform.WriteAction, platform.TasksResourceType, org.ID)
writeT, err := influxdb.NewPermission(influxdb.WriteAction, influxdb.TasksResourceType, org.ID)
if err != nil {
t.Fatal(err)
}
readT, err := platform.NewPermission(platform.ReadAction, platform.TasksResourceType, org.ID)
readT, err := influxdb.NewPermission(influxdb.ReadAction, influxdb.TasksResourceType, org.ID)
if err != nil {
t.Fatal(err)
}
ctx = pctx.SetAuthorizer(context.Background(), be.Auth)
be.Auth = &platform.Authorization{UserID: u.ID, OrgID: org.ID, Permissions: []platform.Permission{*writeBIn, *writeBOut, *writeT, *readT}}
if err := be.AuthorizationService().CreateAuthorization(context.Background(), be.Auth); err != nil {
a := &influxdb.Authorization{UserID: u.ID, OrgID: org.ID, Permissions: []influxdb.Permission{*writeBIn, *writeBOut, *writeT, *readT}}
if err := be.AuthorizationService().CreateAuthorization(context.Background(), a); err != nil {
t.Fatal(err)
}
if !be.Org.ID.Valid() {
@@ -85,7 +85,7 @@ stuff f=-123.456,b=true,s="hello"
t.Fatalf("exp status %d; got %d", nethttp.StatusNoContent, resp.StatusCode)
}
created := &platform.Task{
created := &influxdb.Task{
OrganizationID: org.ID,
Owner: *be.User,
Flux: fmt.Sprintf(`option task = {
@@ -113,7 +113,7 @@ from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, b
// Poll for the task to have started and finished.
deadline := time.Now().Add(10 * time.Second) // Arbitrary deadline; 10s seems safe for -race on a resource-constrained system.
ndrString := time.Unix(ndr, 0).UTC().Format(time.RFC3339)
var targetRun platform.Run
var targetRun influxdb.Run
i := 0
for {
t.Logf("Looking for created run...")
@@ -122,7 +122,7 @@ from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, b
}
time.Sleep(5 * time.Millisecond)
runs, _, err := be.TaskService().FindRuns(ctx, platform.RunFilter{Org: &org.ID, Task: &created.ID, Limit: 1})
runs, _, err := be.TaskService().FindRuns(ctx, influxdb.RunFilter{Org: &org.ID, Task: &created.ID, Limit: 1})
if err != nil {
t.Fatal(err)
}
@@ -219,7 +219,7 @@ from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, b
})
// now let's see the logs
logs, _, err := be.TaskService().FindLogs(ctx, platform.LogFilter{Org: &org.ID, Task: &created.ID, Run: &targetRun.ID})
logs, _, err := be.TaskService().FindLogs(ctx, influxdb.LogFilter{Org: &org.ID, Task: &created.ID, Run: &targetRun.ID})
if err != nil {
t.Fatal(err)
}
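The run-lookup section of this test is a poll-until-deadline loop: sleep a few milliseconds, call FindRuns, and give up once the deadline passes. Below is a generic sketch of that idiom as a standalone helper; the real test inlines the loop, so the helper name and shape here are illustrative.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries check on a fixed interval until it reports success,
// returns an error, or the deadline passes.
func pollUntil(deadline time.Time, interval time.Duration, check func() (bool, error)) error {
	for {
		ok, err := check()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("deadline exceeded while polling")
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	// Condition flips to true after 50ms, well inside the 1s deadline.
	err := pollUntil(time.Now().Add(time.Second), 5*time.Millisecond, func() (bool, error) {
		return time.Since(start) > 50*time.Millisecond, nil
	})
	fmt.Println(err) // <nil>
}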

View File

@@ -1,13 +1,19 @@
FROM debian:stable-slim
COPY influxd /usr/bin/influxd
COPY influx /usr/bin/influx
COPY influx influxd /usr/bin/
EXPOSE 9999
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
ENV DEBIAN_FRONTEND noninteractive
COPY docker/influxd/entrypoint.sh /entrypoint.sh
RUN apt-get update \
&& apt-get install -y \
ca-certificates \
tzdata \
&& apt-get clean autoclean \
&& apt-get autoremove --yes \
&& rm -rf /var/lib/{apt,dpkg,cache,log}
ENTRYPOINT ["/entrypoint.sh"]
CMD ["influxd"]

View File

@@ -1,12 +1,8 @@
# List any generated files here
TARGETS = ../ui/src/api/api.ts
# List any source files used to generate the targets here
SOURCES = cur_swagger.yml
# List any directories that have their own Makefile here
SUBDIRS =
# Default target
all: $(SUBDIRS) $(TARGETS) swagger_gen.go
all: $(SUBDIRS) swagger_gen.go
# Recurse into subdirs for same make goal
$(SUBDIRS):
@@ -14,7 +10,6 @@ $(SUBDIRS):
# Clean all targets recursively
clean: $(SUBDIRS)
rm -f $(TARGETS)
rm -f swagger_gen.go
swagger_gen.go: swagger.go redoc.go swagger.yml
@@ -23,9 +18,4 @@ swagger_gen.go: swagger.go redoc.go swagger.yml
GO_RUN := env GO111MODULE=on go run
ifndef SKIP_CLIENT
../ui/src/api/api.ts: $(SOURCES)
openapi-generator generate -g typescript-axios -o ../ui/src/api -i cur_swagger.yml
endif
.PHONY: all clean $(SUBDIRS)

View File

@@ -70,89 +70,69 @@ type APIBackend struct {
ChronografService *server.Service
ProtoService influxdb.ProtoService
OrgLookupService authorizer.OrganizationService
ViewService influxdb.ViewService
}
// NewAPIHandler constructs all api handlers beneath it and returns an APIHandler
func NewAPIHandler(b *APIBackend) *APIHandler {
h := &APIHandler{}
sessionBackend := NewSessionBackend(b)
h.SessionHandler = NewSessionHandler(sessionBackend)
internalURM := b.UserResourceMappingService
b.UserResourceMappingService = authorizer.NewURMService(b.OrgLookupService, b.UserResourceMappingService)
h.BucketHandler = NewBucketHandler(b.UserResourceMappingService, b.LabelService, b.UserService)
h.BucketHandler.BucketService = authorizer.NewBucketService(b.BucketService)
h.BucketHandler.OrganizationService = b.OrganizationService
h.BucketHandler.BucketOperationLogService = b.BucketOperationLogService
sessionBackend := NewSessionBackend(b)
h.SessionHandler = NewSessionHandler(sessionBackend)
h.LabelHandler = NewLabelHandler()
h.LabelHandler.LabelService = b.LabelService
bucketBackend := NewBucketBackend(b)
bucketBackend.BucketService = authorizer.NewBucketService(b.BucketService)
h.BucketHandler = NewBucketHandler(bucketBackend)
h.OrgHandler = NewOrgHandler(b.UserResourceMappingService, b.LabelService, b.UserService)
h.OrgHandler.OrganizationService = authorizer.NewOrgService(b.OrganizationService)
h.OrgHandler.OrganizationOperationLogService = b.OrganizationOperationLogService
h.OrgHandler.SecretService = b.SecretService
orgBackend := NewOrgBackend(b)
orgBackend.OrganizationService = authorizer.NewOrgService(b.OrganizationService)
h.OrgHandler = NewOrgHandler(orgBackend)
h.UserHandler = NewUserHandler()
h.UserHandler.UserService = authorizer.NewUserService(b.UserService)
h.UserHandler.BasicAuthService = b.BasicAuthService
h.UserHandler.UserOperationLogService = b.UserOperationLogService
userBackend := NewUserBackend(b)
userBackend.UserService = authorizer.NewUserService(b.UserService)
h.UserHandler = NewUserHandler(userBackend)
h.DashboardHandler = NewDashboardHandler(b.UserResourceMappingService, b.LabelService, b.UserService)
h.DashboardHandler.DashboardService = authorizer.NewDashboardService(b.DashboardService)
h.DashboardHandler.DashboardOperationLogService = b.DashboardOperationLogService
dashboardBackend := NewDashboardBackend(b)
dashboardBackend.DashboardService = authorizer.NewDashboardService(b.DashboardService)
h.DashboardHandler = NewDashboardHandler(dashboardBackend)
h.MacroHandler = NewMacroHandler()
h.MacroHandler.MacroService = authorizer.NewMacroService(b.MacroService)
macroBackend := NewMacroBackend(b)
macroBackend.MacroService = authorizer.NewMacroService(b.MacroService)
h.MacroHandler = NewMacroHandler(macroBackend)
h.AuthorizationHandler = NewAuthorizationHandler(b.UserService)
h.AuthorizationHandler.OrganizationService = b.OrganizationService
h.AuthorizationHandler.AuthorizationService = authorizer.NewAuthorizationService(b.AuthorizationService)
h.AuthorizationHandler.LookupService = b.LookupService
h.AuthorizationHandler.Logger = b.Logger.With(zap.String("handler", "auth"))
authorizationBackend := NewAuthorizationBackend(b)
authorizationBackend.AuthorizationService = authorizer.NewAuthorizationService(b.AuthorizationService)
h.AuthorizationHandler = NewAuthorizationHandler(authorizationBackend)
h.ScraperHandler = NewScraperHandler(
b.Logger.With(zap.String("handler", "scraper")),
b.UserService,
b.UserResourceMappingService,
b.LabelService,
authorizer.NewScraperTargetStoreService(b.ScraperTargetStoreService, b.UserResourceMappingService),
b.BucketService,
b.OrganizationService,
)
scraperBackend := NewScraperBackend(b)
scraperBackend.ScraperStorageService = authorizer.NewScraperTargetStoreService(b.ScraperTargetStoreService, b.UserResourceMappingService)
h.ScraperHandler = NewScraperHandler(scraperBackend)
h.SourceHandler = NewSourceHandler()
h.SourceHandler.SourceService = authorizer.NewSourceService(b.SourceService)
h.SourceHandler.NewBucketService = b.NewBucketService
h.SourceHandler.NewQueryService = b.NewQueryService
sourceBackend := NewSourceBackend(b)
sourceBackend.SourceService = authorizer.NewSourceService(b.SourceService)
sourceBackend.NewBucketService = b.NewBucketService
sourceBackend.NewQueryService = b.NewQueryService
h.SourceHandler = NewSourceHandler(sourceBackend)
h.SetupHandler = NewSetupHandler()
h.SetupHandler.OnboardingService = b.OnboardingService
setupBackend := NewSetupBackend(b)
h.SetupHandler = NewSetupHandler(setupBackend)
h.TaskHandler = NewTaskHandler(b.UserResourceMappingService, b.LabelService, b.Logger, b.UserService)
h.TaskHandler.TaskService = b.TaskService
h.TaskHandler.AuthorizationService = b.AuthorizationService
h.TaskHandler.OrganizationService = b.OrganizationService
h.TaskHandler.UserResourceMappingService = b.UserResourceMappingService
taskBackend := NewTaskBackend(b)
h.TaskHandler = NewTaskHandler(taskBackend)
h.TaskHandler.UserResourceMappingService = internalURM
h.TelegrafHandler = NewTelegrafHandler(
b.Logger.With(zap.String("handler", "telegraf")),
b.UserResourceMappingService,
b.LabelService,
authorizer.NewTelegrafConfigService(b.TelegrafService, b.UserResourceMappingService),
b.UserService,
b.OrganizationService,
)
telegrafBackend := NewTelegrafBackend(b)
telegrafBackend.TelegrafService = authorizer.NewTelegrafConfigService(b.TelegrafService, b.UserResourceMappingService)
h.TelegrafHandler = NewTelegrafHandler(telegrafBackend)
h.WriteHandler = NewWriteHandler(b.PointsWriter)
h.WriteHandler.OrganizationService = b.OrganizationService
h.WriteHandler.BucketService = b.BucketService
h.WriteHandler.Logger = b.Logger.With(zap.String("handler", "write"))
writeBackend := NewWriteBackend(b)
h.WriteHandler = NewWriteHandler(writeBackend)
h.QueryHandler = NewFluxHandler()
h.QueryHandler.OrganizationService = b.OrganizationService
h.QueryHandler.Logger = b.Logger.With(zap.String("handler", "query"))
h.QueryHandler.ProxyQueryService = b.ProxyQueryService
fluxBackend := NewFluxBackend(b)
h.QueryHandler = NewFluxHandler(fluxBackend)
h.ProtoHandler = NewProtoHandler(NewProtoBackend(b))
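The shape of this refactor is the same for every handler: instead of NewXHandler taking a grab-bag of positional services, a small XBackend struct is carved out of APIBackend, decorated (authorizer wrappers, tagged loggers), and passed as the single constructor argument. A condensed sketch of the pattern with placeholder types; only the structure is taken from the diff.

package main

import "go.uber.org/zap"

// ThingService stands in for any of the platform services; the real code
// uses concrete service interfaces from the influxdb package.
type ThingService interface{ Name() string }

type APIBackend struct {
	Logger       *zap.Logger
	ThingService ThingService
}

// ThingBackend holds exactly the dependencies the thing handler needs.
type ThingBackend struct {
	Logger       *zap.Logger
	ThingService ThingService
}

func NewThingBackend(b *APIBackend) *ThingBackend {
	return &ThingBackend{
		// Each backend tags its logger with the handler name up front,
		// replacing the ad-hoc b.Logger.With(...) calls in NewAPIHandler.
		Logger:       b.Logger.With(zap.String("handler", "thing")),
		ThingService: b.ThingService,
	}
}

type ThingHandler struct {
	Logger       *zap.Logger
	ThingService ThingService
}

// NewThingHandler takes one argument, so adding a dependency later changes
// the backend struct instead of every call site.
func NewThingHandler(b *ThingBackend) *ThingHandler {
	return &ThingHandler{Logger: b.Logger, ThingService: b.ThingService}
}

func main() {
	api := &APIBackend{Logger: zap.NewNop()}
	_ = NewThingHandler(NewThingBackend(api))
}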

View File

@@ -16,6 +16,29 @@ import (
"github.com/julienschmidt/httprouter"
)
// AuthorizationBackend is all services and associated parameters required to construct
// the AuthorizationHandler.
type AuthorizationBackend struct {
Logger *zap.Logger
AuthorizationService platform.AuthorizationService
OrganizationService platform.OrganizationService
UserService platform.UserService
LookupService platform.LookupService
}
// NewAuthorizationBackend returns a new instance of AuthorizationBackend.
func NewAuthorizationBackend(b *APIBackend) *AuthorizationBackend {
return &AuthorizationBackend{
Logger: b.Logger.With(zap.String("handler", "authorization")),
AuthorizationService: b.AuthorizationService,
OrganizationService: b.OrganizationService,
UserService: b.UserService,
LookupService: b.LookupService,
}
}
// AuthorizationHandler represents an HTTP API handler for authorizations.
type AuthorizationHandler struct {
*httprouter.Router
@@ -28,11 +51,15 @@ type AuthorizationHandler struct {
}
// NewAuthorizationHandler returns a new instance of AuthorizationHandler.
func NewAuthorizationHandler(userService platform.UserService) *AuthorizationHandler {
func NewAuthorizationHandler(b *AuthorizationBackend) *AuthorizationHandler {
h := &AuthorizationHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
UserService: userService,
Router: NewRouter(),
Logger: b.Logger,
AuthorizationService: b.AuthorizationService,
OrganizationService: b.OrganizationService,
UserService: b.UserService,
LookupService: b.LookupService,
}
h.HandlerFunc("POST", "/api/v2/authorizations", h.handlePostAuthorization)
@@ -161,25 +188,10 @@ func (h *AuthorizationHandler) handlePostAuthorization(w http.ResponseWriter, r
return
}
var user *platform.User
// allow the user id to be specified optionally, if it is not set
// we use the id from the authorizer
if req.UserID == nil {
u, err := getAuthorizedUser(r, h.UserService)
if err != nil {
EncodeError(ctx, platform.ErrUnableToCreateToken, w)
return
}
user = u
} else {
u, err := h.UserService.FindUserByID(ctx, *req.UserID)
if err != nil {
EncodeError(ctx, platform.ErrUnableToCreateToken, w)
return
}
user = u
user, err := getAuthorizedUser(r, h.UserService)
if err != nil {
EncodeError(ctx, platform.ErrUnableToCreateToken, w)
return
}
auth := req.toPlatform(user.ID)

View File

@@ -10,6 +10,8 @@ import (
"net/http/httptest"
"testing"
"go.uber.org/zap"
platform "github.com/influxdata/influxdb"
pcontext "github.com/influxdata/influxdb/context"
"github.com/influxdata/influxdb/inmem"
@@ -18,6 +20,18 @@ import (
"github.com/julienschmidt/httprouter"
)
// NewMockAuthorizationBackend returns a AuthorizationBackend with mock services.
func NewMockAuthorizationBackend() *AuthorizationBackend {
return &AuthorizationBackend{
Logger: zap.NewNop().With(zap.String("handler", "authorization")),
AuthorizationService: mock.NewAuthorizationService(),
OrganizationService: mock.NewOrganizationService(),
UserService: mock.NewUserService(),
LookupService: mock.NewLookupService(),
}
}
func TestService_handleGetAuthorizations(t *testing.T) {
type fields struct {
AuthorizationService platform.AuthorizationService
@@ -156,10 +170,11 @@ func TestService_handleGetAuthorizations(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewAuthorizationHandler(mock.NewUserService())
h.AuthorizationService = tt.fields.AuthorizationService
h.UserService = tt.fields.UserService
h.OrganizationService = tt.fields.OrganizationService
authorizationBackend := NewMockAuthorizationBackend()
authorizationBackend.AuthorizationService = tt.fields.AuthorizationService
authorizationBackend.UserService = tt.fields.UserService
authorizationBackend.OrganizationService = tt.fields.OrganizationService
h := NewAuthorizationHandler(authorizationBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -335,11 +350,12 @@ func TestService_handleGetAuthorization(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewAuthorizationHandler(mock.NewUserService())
h.AuthorizationService = tt.fields.AuthorizationService
h.UserService = tt.fields.UserService
h.OrganizationService = tt.fields.OrganizationService
h.LookupService = tt.fields.LookupService
authorizationBackend := NewMockAuthorizationBackend()
authorizationBackend.AuthorizationService = tt.fields.AuthorizationService
authorizationBackend.UserService = tt.fields.UserService
authorizationBackend.OrganizationService = tt.fields.OrganizationService
authorizationBackend.LookupService = tt.fields.LookupService
h := NewAuthorizationHandler(authorizationBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -507,128 +523,16 @@ func TestService_handlePostAuthorization(t *testing.T) {
`,
},
},
{
name: "create a new authorization with user id set explicitly",
fields: fields{
AuthorizationService: &mock.AuthorizationService{
CreateAuthorizationFn: func(ctx context.Context, c *platform.Authorization) error {
c.ID = platformtesting.MustIDBase16("020f755c3c082000")
c.Token = "new-test-token"
return nil
},
},
UserService: &mock.UserService{
FindUserByIDFn: func(ctx context.Context, id platform.ID) (*platform.User, error) {
if !id.Valid() {
return nil, &platform.Error{
Code: platform.EInvalid,
Msg: "invalid user id",
}
}
return &platform.User{
ID: id,
Name: "u1",
}, nil
},
},
OrganizationService: &mock.OrganizationService{
FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) {
if !id.Valid() {
return nil, &platform.Error{
Code: platform.EInvalid,
Msg: "invalid org ID",
}
}
return &platform.Organization{
ID: id,
Name: "o1",
}, nil
},
},
LookupService: &mock.LookupService{
NameFn: func(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error) {
switch resource {
case platform.BucketsResourceType:
return "b1", nil
case platform.OrgsResourceType:
return "o1", nil
}
return "", fmt.Errorf("bad resource type %s", resource)
},
},
},
args: args{
session: &platform.Authorization{
Token: "session-token",
ID: platformtesting.MustIDBase16("020f755c3c082000"),
UserID: platformtesting.MustIDBase16("aaaaaaaaaaaaaaaa"),
OrgID: platformtesting.MustIDBase16("020f755c3c083000"),
Description: "can write to authorization resource",
Permissions: []platform.Permission{
{
Action: platform.WriteAction,
Resource: platform.Resource{
Type: platform.AuthorizationsResourceType,
},
},
},
},
authorization: &platform.Authorization{
ID: platformtesting.MustIDBase16("020f755c3c082000"),
UserID: platformtesting.MustIDBase16("bbbbbbbbbbbbbbbb"),
OrgID: platformtesting.MustIDBase16("020f755c3c083000"),
Description: "only read dashboards sucka",
Permissions: []platform.Permission{
{
Action: platform.ReadAction,
Resource: platform.Resource{
Type: platform.DashboardsResourceType,
OrgID: platformtesting.IDPtr(platformtesting.MustIDBase16("020f755c3c083000")),
},
},
},
},
},
wants: wants{
statusCode: http.StatusCreated,
contentType: "application/json; charset=utf-8",
body: `
{
"links": {
"user": "/api/v2/users/bbbbbbbbbbbbbbbb",
"self": "/api/v2/authorizations/020f755c3c082000"
},
"id": "020f755c3c082000",
"user": "u1",
"userID": "bbbbbbbbbbbbbbbb",
"orgID": "020f755c3c083000",
"org": "o1",
"token": "new-test-token",
"status": "active",
"description": "only read dashboards sucka",
"permissions": [
{
"action": "read",
"resource": {
"type": "dashboards",
"orgID": "020f755c3c083000",
"org": "o1"
}
}
]
}
`,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewAuthorizationHandler(tt.fields.UserService)
h.AuthorizationService = tt.fields.AuthorizationService
h.UserService = tt.fields.UserService
h.OrganizationService = tt.fields.OrganizationService
h.LookupService = tt.fields.LookupService
authorizationBackend := NewMockAuthorizationBackend()
authorizationBackend.AuthorizationService = tt.fields.AuthorizationService
authorizationBackend.UserService = tt.fields.UserService
authorizationBackend.OrganizationService = tt.fields.OrganizationService
authorizationBackend.LookupService = tt.fields.LookupService
h := NewAuthorizationHandler(authorizationBackend)
req, err := newPostAuthorizationRequest(tt.args.authorization)
if err != nil {
@@ -734,10 +638,11 @@ func TestService_handleDeleteAuthorization(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewAuthorizationHandler(mock.NewUserService())
h.AuthorizationService = tt.fields.AuthorizationService
h.UserService = tt.fields.UserService
h.OrganizationService = tt.fields.OrganizationService
authorizationBackend := NewMockAuthorizationBackend()
authorizationBackend.AuthorizationService = tt.fields.AuthorizationService
authorizationBackend.UserService = tt.fields.UserService
authorizationBackend.OrganizationService = tt.fields.OrganizationService
h := NewAuthorizationHandler(authorizationBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -815,11 +720,11 @@ func initAuthorizationService(f platformtesting.AuthorizationFields, t *testing.
token = a.Token
}
authZ := NewAuthorizationHandler(mock.NewUserService())
authZ.AuthorizationService = svc
authZ.UserService = svc
authZ.OrganizationService = svc
authZ.LookupService = &mock.LookupService{
authorizationBackend := NewMockAuthorizationBackend()
authorizationBackend.AuthorizationService = svc
authorizationBackend.UserService = svc
authorizationBackend.OrganizationService = svc
authorizationBackend.LookupService = &mock.LookupService{
NameFn: func(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error) {
switch resource {
case platform.BucketsResourceType:
@@ -831,6 +736,7 @@ func initAuthorizationService(f platformtesting.AuthorizationFields, t *testing.
},
}
authZ := NewAuthorizationHandler(authorizationBackend)
authN := NewAuthenticationHandler()
authN.AuthorizationService = svc
authN.Handler = authZ

View File

@@ -7,7 +7,6 @@ import (
"fmt"
"net/http"
"path"
"strconv"
"time"
platform "github.com/influxdata/influxdb"
@@ -15,6 +14,33 @@ import (
"go.uber.org/zap"
)
// BucketBackend is all services and associated parameters required to construct
// the BucketHandler.
type BucketBackend struct {
Logger *zap.Logger
BucketService platform.BucketService
BucketOperationLogService platform.BucketOperationLogService
UserResourceMappingService platform.UserResourceMappingService
LabelService platform.LabelService
UserService platform.UserService
OrganizationService platform.OrganizationService
}
// NewBucketBackend returns a new instance of BucketBackend.
func NewBucketBackend(b *APIBackend) *BucketBackend {
return &BucketBackend{
Logger: b.Logger.With(zap.String("handler", "bucket")),
BucketService: b.BucketService,
BucketOperationLogService: b.BucketOperationLogService,
UserResourceMappingService: b.UserResourceMappingService,
LabelService: b.LabelService,
UserService: b.UserService,
OrganizationService: b.OrganizationService,
}
}
// BucketHandler represents an HTTP API handler for buckets.
type BucketHandler struct {
*httprouter.Router
@@ -42,14 +68,17 @@ const (
)
// NewBucketHandler returns a new instance of BucketHandler.
func NewBucketHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, userService platform.UserService) *BucketHandler {
func NewBucketHandler(b *BucketBackend) *BucketHandler {
h := &BucketHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
Logger: b.Logger,
UserResourceMappingService: mappingService,
LabelService: labelService,
UserService: userService,
BucketService: b.BucketService,
BucketOperationLogService: b.BucketOperationLogService,
UserResourceMappingService: b.UserResourceMappingService,
LabelService: b.LabelService,
UserService: b.UserService,
OrganizationService: b.OrganizationService,
}
h.HandlerFunc("POST", bucketsPath, h.handlePostBucket)
@@ -59,17 +88,36 @@ func NewBucketHandler(mappingService platform.UserResourceMappingService, labelS
h.HandlerFunc("PATCH", bucketsIDPath, h.handlePatchBucket)
h.HandlerFunc("DELETE", bucketsIDPath, h.handleDeleteBucket)
h.HandlerFunc("POST", bucketsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.BucketsResourceType, platform.Member))
h.HandlerFunc("GET", bucketsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.BucketsResourceType, platform.Member))
h.HandlerFunc("DELETE", bucketsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member))
memberBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.BucketsResourceType,
UserType: platform.Member,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", bucketsIDMembersPath, newPostMemberHandler(memberBackend))
h.HandlerFunc("GET", bucketsIDMembersPath, newGetMembersHandler(memberBackend))
h.HandlerFunc("DELETE", bucketsIDMembersIDPath, newDeleteMemberHandler(memberBackend))
h.HandlerFunc("POST", bucketsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.BucketsResourceType, platform.Owner))
h.HandlerFunc("GET", bucketsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.BucketsResourceType, platform.Owner))
h.HandlerFunc("DELETE", bucketsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner))
ownerBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.BucketsResourceType,
UserType: platform.Owner,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", bucketsIDOwnersPath, newPostMemberHandler(ownerBackend))
h.HandlerFunc("GET", bucketsIDOwnersPath, newGetMembersHandler(ownerBackend))
h.HandlerFunc("DELETE", bucketsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend))
h.HandlerFunc("GET", bucketsIDLabelsPath, newGetLabelsHandler(h.LabelService))
h.HandlerFunc("POST", bucketsIDLabelsPath, newPostLabelHandler(h.LabelService))
h.HandlerFunc("DELETE", bucketsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService))
labelBackend := &LabelBackend{
Logger: b.Logger.With(zap.String("handler", "label")),
LabelService: b.LabelService,
}
h.HandlerFunc("GET", bucketsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", bucketsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", bucketsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", bucketsIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}
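Member and owner routes now share one closure factory parameterized by a MemberBackend value, so each resource handler (buckets, dashboards, and so on) registers both roles with the same three HandlerFunc lines. A condensed, self-contained sketch of that factory pattern; the types here are simplified to strings, while the real code uses platform.ResourceType and platform.UserType and a full mapping service.

package main

import (
	"fmt"
	"net/http"

	"go.uber.org/zap"
)

// MemberBackend parameterizes a family of handler closures for one
// resource type and one user role.
type MemberBackend struct {
	Logger       *zap.Logger
	ResourceType string
	UserType     string
}

// newPostMemberHandler returns a closure bound to the backend, standing in
// for the real handler that writes a user-resource mapping.
func newPostMemberHandler(b MemberBackend) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		b.Logger.Info("adding user to resource",
			zap.String("resource", b.ResourceType),
			zap.String("role", b.UserType))
		w.WriteHeader(http.StatusCreated)
	}
}

func main() {
	member := MemberBackend{Logger: zap.NewExample(), ResourceType: "buckets", UserType: "member"}
	owner := MemberBackend{Logger: zap.NewExample(), ResourceType: "buckets", UserType: "owner"}
	mux := http.NewServeMux()
	mux.Handle("/api/v2/buckets/members", newPostMemberHandler(member))
	mux.Handle("/api/v2/buckets/owners", newPostMemberHandler(owner))
	fmt.Println("routes registered")
}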
@@ -810,29 +858,14 @@ func decodeGetBucketLogRequest(ctx context.Context, r *http.Request) (*getBucket
return nil, err
}
opts := platform.DefaultOperationLogFindOptions
qp := r.URL.Query()
if v := qp.Get("desc"); v == "false" {
opts.Descending = false
}
if v := qp.Get("limit"); v != "" {
i, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
opts.Limit = i
}
if v := qp.Get("offset"); v != "" {
i, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
opts.Offset = i
opts, err := decodeFindOptions(ctx, r)
if err != nil {
return nil, err
}
return &getBucketLogRequest{
BucketID: i,
opts: opts,
opts: *opts,
}, nil
}
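The removed lines show exactly what got centralized: ad-hoc parsing of the desc, limit, and offset query parameters. Below is a plausible reconstruction of a decodeFindOptions helper built from that removed inline logic; the real helper in this package may differ in signature details and validation.

package main

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
)

type FindOptions struct {
	Descending bool
	Limit      int
	Offset     int
}

// decodeFindOptions parses the desc/limit/offset query parameters,
// mirroring the inline parsing the diff removes.
func decodeFindOptions(ctx context.Context, r *http.Request) (*FindOptions, error) {
	_ = ctx // the real helper takes a context; unused in this sketch
	opts := &FindOptions{Descending: true} // mirrors DefaultOperationLogFindOptions
	qp := r.URL.Query()
	if qp.Get("desc") == "false" {
		opts.Descending = false
	}
	if v := qp.Get("limit"); v != "" {
		i, err := strconv.Atoi(v)
		if err != nil {
			return nil, err
		}
		opts.Limit = i
	}
	if v := qp.Get("offset"); v != "" {
		i, err := strconv.Atoi(v)
		if err != nil {
			return nil, err
		}
		opts.Offset = i
	}
	return opts, nil
}

func main() {
	r, _ := http.NewRequest("GET", "http://example.com/logs?limit=10&desc=false", nil)
	opts, err := decodeFindOptions(context.Background(), r)
	fmt.Printf("%+v %v\n", *opts, err) // {Descending:false Limit:10 Offset:0} <nil>
}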

View File

@@ -16,8 +16,23 @@ import (
"github.com/influxdata/influxdb/mock"
platformtesting "github.com/influxdata/influxdb/testing"
"github.com/julienschmidt/httprouter"
"go.uber.org/zap"
)
// NewMockBucketBackend returns a BucketBackend with mock services.
func NewMockBucketBackend() *BucketBackend {
return &BucketBackend{
Logger: zap.NewNop().With(zap.String("handler", "bucket")),
BucketService: mock.NewBucketService(),
BucketOperationLogService: mock.NewBucketOperationLogService(),
UserResourceMappingService: mock.NewUserResourceMappingService(),
LabelService: mock.NewLabelService(),
UserService: mock.NewUserService(),
OrganizationService: mock.NewOrganizationService(),
}
}
func TestService_handleGetBuckets(t *testing.T) {
type fields struct {
BucketService platform.BucketService
@@ -167,11 +182,10 @@ func TestService_handleGetBuckets(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := tt.fields.LabelService
userService := mock.NewUserService()
h := NewBucketHandler(mappingService, labelService, userService)
h.BucketService = tt.fields.BucketService
bucketBackend := NewMockBucketBackend()
bucketBackend.BucketService = tt.fields.BucketService
bucketBackend.LabelService = tt.fields.LabelService
h := NewBucketHandler(bucketBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -287,11 +301,9 @@ func TestService_handleGetBucket(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewBucketHandler(mappingService, labelService, userService)
h.BucketService = tt.fields.BucketService
bucketBackend := NewMockBucketBackend()
bucketBackend.BucketService = tt.fields.BucketService
h := NewBucketHandler(bucketBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -392,12 +404,10 @@ func TestService_handlePostBucket(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewBucketHandler(mappingService, labelService, userService)
h.BucketService = tt.fields.BucketService
h.OrganizationService = tt.fields.OrganizationService
bucketBackend := NewMockBucketBackend()
bucketBackend.BucketService = tt.fields.BucketService
bucketBackend.OrganizationService = tt.fields.OrganizationService
h := NewBucketHandler(bucketBackend)
b, err := json.Marshal(newBucket(tt.args.bucket))
if err != nil {
@@ -488,11 +498,9 @@ func TestService_handleDeleteBucket(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewBucketHandler(mappingService, labelService, userService)
h.BucketService = tt.fields.BucketService
bucketBackend := NewMockBucketBackend()
bucketBackend.BucketService = tt.fields.BucketService
h := NewBucketHandler(bucketBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -770,11 +778,9 @@ func TestService_handlePatchBucket(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewBucketHandler(mappingService, labelService, userService)
h.BucketService = tt.fields.BucketService
bucketBackend := NewMockBucketBackend()
bucketBackend.BucketService = tt.fields.BucketService
h := NewBucketHandler(bucketBackend)
upd := platform.BucketUpdate{}
if tt.args.name != "" {
@@ -881,7 +887,9 @@ func TestService_handlePostBucketMember(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewBucketHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), tt.fields.UserService)
bucketBackend := NewMockBucketBackend()
bucketBackend.UserService = tt.fields.UserService
h := NewBucketHandler(bucketBackend)
b, err := json.Marshal(tt.args.user)
if err != nil {
@@ -969,7 +977,9 @@ func TestService_handlePostBucketOwner(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewBucketHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), tt.fields.UserService)
bucketBackend := NewMockBucketBackend()
bucketBackend.UserService = tt.fields.UserService
h := NewBucketHandler(bucketBackend)
b, err := json.Marshal(tt.args.user)
if err != nil {
@@ -1015,13 +1025,10 @@ func initBucketService(f platformtesting.BucketFields, t *testing.T) (platform.B
}
}
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
handler := NewBucketHandler(mappingService, labelService, userService)
handler.BucketService = svc
handler.OrganizationService = svc
bucketBackend := NewMockBucketBackend()
bucketBackend.BucketService = svc
bucketBackend.OrganizationService = svc
handler := NewBucketHandler(bucketBackend)
server := httptest.NewServer(handler)
client := BucketService{
Addr: server.URL,

File diff suppressed because it is too large

View File

@@ -8,13 +8,36 @@ import (
"io/ioutil"
"net/http"
"path"
"strconv"
platform "github.com/influxdata/influxdb"
"github.com/julienschmidt/httprouter"
"go.uber.org/zap"
)
// DashboardBackend is all services and associated parameters required to construct
// the DashboardHandler.
type DashboardBackend struct {
Logger *zap.Logger
DashboardService platform.DashboardService
DashboardOperationLogService platform.DashboardOperationLogService
UserResourceMappingService platform.UserResourceMappingService
LabelService platform.LabelService
UserService platform.UserService
}
func NewDashboardBackend(b *APIBackend) *DashboardBackend {
return &DashboardBackend{
Logger: b.Logger.With(zap.String("handler", "dashboard")),
DashboardService: b.DashboardService,
DashboardOperationLogService: b.DashboardOperationLogService,
UserResourceMappingService: b.UserResourceMappingService,
LabelService: b.LabelService,
UserService: b.UserService,
}
}
// DashboardHandler is the handler for the dashboard service
type DashboardHandler struct {
*httprouter.Router
@@ -44,14 +67,16 @@ const (
)
// NewDashboardHandler returns a new instance of DashboardHandler.
func NewDashboardHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, userService platform.UserService) *DashboardHandler {
func NewDashboardHandler(b *DashboardBackend) *DashboardHandler {
h := &DashboardHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
Logger: b.Logger,
UserResourceMappingService: mappingService,
LabelService: labelService,
UserService: userService,
DashboardService: b.DashboardService,
DashboardOperationLogService: b.DashboardOperationLogService,
UserResourceMappingService: b.UserResourceMappingService,
LabelService: b.LabelService,
UserService: b.UserService,
}
h.HandlerFunc("POST", dashboardsPath, h.handlePostDashboard)
@@ -69,17 +94,36 @@ func NewDashboardHandler(mappingService platform.UserResourceMappingService, lab
h.HandlerFunc("GET", dashboardsIDCellsIDViewPath, h.handleGetDashboardCellView)
h.HandlerFunc("PATCH", dashboardsIDCellsIDViewPath, h.handlePatchDashboardCellView)
h.HandlerFunc("POST", dashboardsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Member))
h.HandlerFunc("GET", dashboardsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Member))
h.HandlerFunc("DELETE", dashboardsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member))
memberBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.DashboardsResourceType,
UserType: platform.Member,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", dashboardsIDMembersPath, newPostMemberHandler(memberBackend))
h.HandlerFunc("GET", dashboardsIDMembersPath, newGetMembersHandler(memberBackend))
h.HandlerFunc("DELETE", dashboardsIDMembersIDPath, newDeleteMemberHandler(memberBackend))
h.HandlerFunc("POST", dashboardsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Owner))
h.HandlerFunc("GET", dashboardsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Owner))
h.HandlerFunc("DELETE", dashboardsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner))
ownerBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.DashboardsResourceType,
UserType: platform.Owner,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", dashboardsIDOwnersPath, newPostMemberHandler(ownerBackend))
h.HandlerFunc("GET", dashboardsIDOwnersPath, newGetMembersHandler(ownerBackend))
h.HandlerFunc("DELETE", dashboardsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend))
h.HandlerFunc("GET", dashboardsIDLabelsPath, newGetLabelsHandler(h.LabelService))
h.HandlerFunc("POST", dashboardsIDLabelsPath, newPostLabelHandler(h.LabelService))
h.HandlerFunc("DELETE", dashboardsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService))
labelBackend := &LabelBackend{
Logger: b.Logger.With(zap.String("handler", "label")),
LabelService: b.LabelService,
}
h.HandlerFunc("GET", dashboardsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", dashboardsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", dashboardsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", dashboardsIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}
@@ -474,29 +518,14 @@ func decodeGetDashboardLogRequest(ctx context.Context, r *http.Request) (*getDas
return nil, err
}
opts := platform.DefaultOperationLogFindOptions
qp := r.URL.Query()
if v := qp.Get("desc"); v == "false" {
opts.Descending = false
}
if v := qp.Get("limit"); v != "" {
i, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
opts.Limit = i
}
if v := qp.Get("offset"); v != "" {
i, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
opts.Offset = i
opts, err := decodeFindOptions(ctx, r)
if err != nil {
return nil, err
}
return &getDashboardLogRequest{
DashboardID: i,
opts: opts,
opts: *opts,
}, nil
}

View File

@@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"go.uber.org/zap"
"io/ioutil"
"net/http"
"net/http/httptest"
@@ -18,6 +19,19 @@ import (
"github.com/julienschmidt/httprouter"
)
// NewMockDashboardBackend returns a DashboardBackend with mock services.
func NewMockDashboardBackend() *DashboardBackend {
return &DashboardBackend{
Logger: zap.NewNop().With(zap.String("handler", "dashboard")),
DashboardService: mock.NewDashboardService(),
DashboardOperationLogService: mock.NewDashboardOperationLogService(),
UserResourceMappingService: mock.NewUserResourceMappingService(),
LabelService: mock.NewLabelService(),
UserService: mock.NewUserService(),
}
}
func TestService_handleGetDashboards(t *testing.T) {
type fields struct {
DashboardService platform.DashboardService
@@ -247,7 +261,7 @@ func TestService_handleGetDashboards(t *testing.T) {
},
args: args{
map[string][]string{
"orgID": []string{"0000000000000001"},
"orgID": {"0000000000000001"},
},
},
wants: wants{
@@ -309,11 +323,10 @@ func TestService_handleGetDashboards(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := tt.fields.LabelService
userService := mock.NewUserService()
h := NewDashboardHandler(mappingService, labelService, userService)
h.DashboardService = tt.fields.DashboardService
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.LabelService = tt.fields.LabelService
dashboardBackend.DashboardService = tt.fields.DashboardService
h := NewDashboardHandler(dashboardBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -461,11 +474,9 @@ func TestService_handleGetDashboard(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewDashboardHandler(mappingService, labelService, userService)
h.DashboardService = tt.fields.DashboardService
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.DashboardService = tt.fields.DashboardService
h := NewDashboardHandler(dashboardBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -594,11 +605,9 @@ func TestService_handlePostDashboard(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewDashboardHandler(mappingService, labelService, userService)
h.DashboardService = tt.fields.DashboardService
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.DashboardService = tt.fields.DashboardService
h := NewDashboardHandler(dashboardBackend)
b, err := json.Marshal(tt.args.dashboard)
if err != nil {
@@ -689,11 +698,9 @@ func TestService_handleDeleteDashboard(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewDashboardHandler(mappingService, labelService, userService)
h.DashboardService = tt.fields.DashboardService
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.DashboardService = tt.fields.DashboardService
h := NewDashboardHandler(dashboardBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -868,11 +875,9 @@ func TestService_handlePatchDashboard(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewDashboardHandler(mappingService, labelService, userService)
h.DashboardService = tt.fields.DashboardService
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.DashboardService = tt.fields.DashboardService
h := NewDashboardHandler(dashboardBackend)
upd := platform.DashboardUpdate{}
if tt.args.name != "" {
@@ -977,11 +982,9 @@ func TestService_handlePostDashboardCell(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewDashboardHandler(mappingService, labelService, userService)
h.DashboardService = tt.fields.DashboardService
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.DashboardService = tt.fields.DashboardService
h := NewDashboardHandler(dashboardBackend)
b, err := json.Marshal(tt.args.cell)
if err != nil {
@@ -1062,11 +1065,9 @@ func TestService_handleDeleteDashboardCell(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewDashboardHandler(mappingService, labelService, userService)
h.DashboardService = tt.fields.DashboardService
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.DashboardService = tt.fields.DashboardService
h := NewDashboardHandler(dashboardBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@@ -1174,11 +1175,9 @@ func TestService_handlePatchDashboardCell(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewDashboardHandler(mappingService, labelService, userService)
h.DashboardService = tt.fields.DashboardService
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.DashboardService = tt.fields.DashboardService
h := NewDashboardHandler(dashboardBackend)
upd := platform.CellUpdate{}
if tt.args.x != 0 {
@@ -1271,11 +1270,9 @@ func initDashboardService(f platformtesting.DashboardFields, t *testing.T) (plat
}
}
mappingService := mock.NewUserResourceMappingService()
labelService := mock.NewLabelService()
userService := mock.NewUserService()
h := NewDashboardHandler(mappingService, labelService, userService)
h.DashboardService = svc
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.DashboardService = svc
h := NewDashboardHandler(dashboardBackend)
server := httptest.NewServer(h)
client := DashboardService{
Addr: server.URL,

View File

@@ -5,12 +5,12 @@ import (
"context"
"encoding/json"
"fmt"
"go.uber.org/zap"
"net/http"
"path"
platform "github.com/influxdata/influxdb"
"github.com/julienschmidt/httprouter"
"go.uber.org/zap"
)
// LabelHandler represents an HTTP API handler for labels
@@ -293,8 +293,15 @@ func newLabelsResponse(ls []*platform.Label) *labelsResponse {
}
}
// LabelBackend is all services and associated parameters required to construct
// label handlers.
type LabelBackend struct {
Logger *zap.Logger
LabelService platform.LabelService
}
// newGetLabelsHandler returns a handler func for a GET to /labels endpoints
func newGetLabelsHandler(s platform.LabelService) http.HandlerFunc {
func newGetLabelsHandler(b *LabelBackend) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -304,17 +311,14 @@ func newGetLabelsHandler(s platform.LabelService) http.HandlerFunc {
return
}
labels, err := s.FindResourceLabels(ctx, req.filter)
labels, err := b.LabelService.FindResourceLabels(ctx, req.filter)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newLabelsResponse(labels)); err != nil {
// TODO: this can potentially result in calling w.WriteHeader multiple times, we need to pass a logger in here
// somehow. This isn't as simple as passing in a logger to this function, since the time that this function
// is called is distinct from the time that a potential logger is set.
EncodeError(ctx, err, w)
logEncodingError(b.Logger, r, err)
return
}
}
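The deleted TODO explains the hazard: once encodeResponse has begun writing, calling EncodeError again can invoke w.WriteHeader a second time. logEncodingError itself is not shown in this diff; the sketch below is a guess at its shape from the call sites, assuming it only logs and writes nothing to the response.

package main

import (
	"errors"
	"net/http"

	"go.uber.org/zap"
)

// logEncodingError records a response-encoding failure. Writing another
// error body here could emit a second status line, which is exactly the
// w.WriteHeader hazard the removed TODO describes.
func logEncodingError(log *zap.Logger, r *http.Request, err error) {
	log.Info("error encoding response",
		zap.String("path", r.URL.Path),
		zap.String("method", r.Method),
		zap.Error(err))
}

func main() {
	r, _ := http.NewRequest("GET", "http://any.url/api/v2/labels", nil)
	logEncodingError(zap.NewExample(), r, errors.New("write failed"))
}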
@@ -346,7 +350,7 @@ func decodeGetLabelsRequest(ctx context.Context, r *http.Request) (*getLabelsReq
}
// newPostLabelHandler returns a handler func for a POST to /labels endpoints
func newPostLabelHandler(s platform.LabelService) http.HandlerFunc {
func newPostLabelHandler(b *LabelBackend) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -361,22 +365,19 @@ func newPostLabelHandler(s platform.LabelService) http.HandlerFunc {
return
}
if err := s.CreateLabelMapping(ctx, &req.Mapping); err != nil {
if err := b.LabelService.CreateLabelMapping(ctx, &req.Mapping); err != nil {
EncodeError(ctx, err, w)
return
}
label, err := s.FindLabelByID(ctx, req.Mapping.LabelID)
label, err := b.LabelService.FindLabelByID(ctx, req.Mapping.LabelID)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusCreated, newLabelResponse(label)); err != nil {
// TODO: this can potentially result in calling w.WriteHeader multiple times, we need to pass a logger in here
// somehow. This isn't as simple as passing in a logger to this function, since the time that this function
// is called is distinct from the time that a potential logger is set.
EncodeError(ctx, err, w)
logEncodingError(b.Logger, r, err)
return
}
}
@@ -419,8 +420,32 @@ func decodePostLabelMappingRequest(ctx context.Context, r *http.Request) (*postL
return req, nil
}
// newPatchLabelHandler returns a handler func for a PATCH to /labels endpoints
func newPatchLabelHandler(b *LabelBackend) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := decodePatchLabelRequest(ctx, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
label, err := b.LabelService.UpdateLabel(ctx, req.LabelID, req.Update)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newLabelResponse(label)); err != nil {
logEncodingError(b.Logger, r, err)
return
}
}
}
// newDeleteLabelHandler returns a handler func for a DELETE to /labels endpoints
func newDeleteLabelHandler(s platform.LabelService) http.HandlerFunc {
func newDeleteLabelHandler(b *LabelBackend) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -435,7 +460,7 @@ func newDeleteLabelHandler(s platform.LabelService) http.HandlerFunc {
ResourceID: req.ResourceID,
}
if err := s.DeleteLabelMapping(ctx, mapping); err != nil {
if err := b.LabelService.DeleteLabelMapping(ctx, mapping); err != nil {
EncodeError(ctx, err, w)
return
}

View File

@@ -17,6 +17,20 @@ const (
macroPath = "/api/v2/macros"
)
// MacroBackend is all services and associated parameters required to construct
// the MacroHandler.
type MacroBackend struct {
Logger *zap.Logger
MacroService platform.MacroService
}
func NewMacroBackend(b *APIBackend) *MacroBackend {
return &MacroBackend{
Logger: b.Logger.With(zap.String("handler", "macro")),
MacroService: b.MacroService,
}
}
// MacroHandler is the handler for the macro service
type MacroHandler struct {
*httprouter.Router
@@ -27,10 +41,12 @@ type MacroHandler struct {
}
// NewMacroHandler creates a new MacroHandler
func NewMacroHandler() *MacroHandler {
func NewMacroHandler(b *MacroBackend) *MacroHandler {
h := &MacroHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
Logger: b.Logger,
MacroService: b.MacroService,
}
entityPath := fmt.Sprintf("%s/:id", macroPath)

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"go.uber.org/zap"
"io/ioutil"
"net/http"
"net/http/httptest"
@@ -16,6 +17,14 @@ import (
"github.com/julienschmidt/httprouter"
)
// NewMockMacroBackend returns a MacroBackend with mock services.
func NewMockMacroBackend() *MacroBackend {
return &MacroBackend{
Logger: zap.NewNop().With(zap.String("handler", "macro")),
MacroService: mock.NewMacroService(),
}
}
func TestMacroService_handleGetMacros(t *testing.T) {
type fields struct {
MacroService platform.MacroService
@@ -82,7 +91,7 @@ func TestMacroService_handleGetMacros(t *testing.T) {
},
args: args{
map[string][]string{
"limit": []string{"1"},
"limit": {"1"},
},
},
wants: wants{
@@ -113,7 +122,7 @@ func TestMacroService_handleGetMacros(t *testing.T) {
},
args: args{
map[string][]string{
"orgID": []string{"0000000000000001"},
"orgID": {"0000000000000001"},
},
},
wants: wants{
@@ -126,8 +135,9 @@ func TestMacroService_handleGetMacros(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewMacroHandler()
h.MacroService = tt.fields.MacroService
macroBackend := NewMockMacroBackend()
macroBackend.MacroService = tt.fields.MacroService
h := NewMacroHandler(macroBackend)
r := httptest.NewRequest("GET", "http://howdy.tld", nil)
qp := r.URL.Query()
@@ -249,8 +259,9 @@ func TestMacroService_handleGetMacro(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewMacroHandler()
h.MacroService = tt.fields.MacroService
macroBackend := NewMockMacroBackend()
macroBackend.MacroService = tt.fields.MacroService
h := NewMacroHandler(macroBackend)
r := httptest.NewRequest("GET", "http://howdy.tld", nil)
r = r.WithContext(context.WithValue(
context.TODO(),
@@ -381,8 +392,9 @@ func TestMacroService_handlePostMacro(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewMacroHandler()
h.MacroService = tt.fields.MacroService
macroBackend := NewMockMacroBackend()
macroBackend.MacroService = tt.fields.MacroService
h := NewMacroHandler(macroBackend)
r := httptest.NewRequest("GET", "http://howdy.tld", bytes.NewReader([]byte(tt.args.macro)))
w := httptest.NewRecorder()
@ -474,8 +486,9 @@ func TestMacroService_handlePatchMacro(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewMacroHandler()
h.MacroService = tt.fields.MacroService
macroBackend := NewMockMacroBackend()
macroBackend.MacroService = tt.fields.MacroService
h := NewMacroHandler(macroBackend)
r := httptest.NewRequest("GET", "http://howdy.tld", bytes.NewReader([]byte(tt.args.update)))
r = r.WithContext(context.WithValue(
context.TODO(),
@ -564,8 +577,9 @@ func TestMacroService_handleDeleteMacro(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewMacroHandler()
h.MacroService = tt.fields.MacroService
macroBackend := NewMockMacroBackend()
macroBackend.MacroService = tt.fields.MacroService
h := NewMacroHandler(macroBackend)
r := httptest.NewRequest("GET", "http://howdy.tld", nil)
r = r.WithContext(context.WithValue(
context.TODO(),
@ -601,8 +615,9 @@ func initMacroService(f platformtesting.MacroFields, t *testing.T) (platform.Mac
}
}
handler := NewMacroHandler()
handler.MacroService = svc
macroBackend := NewMockMacroBackend()
macroBackend.MacroService = svc
handler := NewMacroHandler(macroBackend)
server := httptest.NewServer(handler)
client := MacroService{
Addr: server.URL,

View File

@ -11,6 +11,21 @@ import (
"go.uber.org/zap"
)
// SetupBackend is all services and associated parameters required to construct
// the SetupHandler.
type SetupBackend struct {
Logger *zap.Logger
OnboardingService platform.OnboardingService
}
// NewSetupBackend returns a new instance of SetupBackend.
func NewSetupBackend(b *APIBackend) *SetupBackend {
return &SetupBackend{
Logger: b.Logger.With(zap.String("handler", "setup")),
OnboardingService: b.OnboardingService,
}
}
// SetupHandler represents an HTTP API handler for onboarding setup.
type SetupHandler struct {
*httprouter.Router
@ -25,10 +40,11 @@ const (
)
// NewSetupHandler returns a new instance of SetupHandler.
func NewSetupHandler() *SetupHandler {
func NewSetupHandler(b *SetupBackend) *SetupHandler {
h := &SetupHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
Router: NewRouter(),
Logger: b.Logger,
OnboardingService: b.OnboardingService,
}
h.HandlerFunc("POST", setupPath, h.handlePostSetup)
h.HandlerFunc("GET", setupPath, h.isOnboarding)

View File

@ -5,11 +5,22 @@ import (
"net/http/httptest"
"testing"
"github.com/influxdata/influxdb/mock"
"go.uber.org/zap"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/inmem"
platformtesting "github.com/influxdata/influxdb/testing"
)
// NewMockSetupBackend returns a SetupBackend with mock services.
func NewMockSetupBackend() *SetupBackend {
return &SetupBackend{
Logger: zap.NewNop().With(zap.String("handler", "setup")),
OnboardingService: mock.NewOnboardingService(),
}
}
func initOnboardingService(f platformtesting.OnboardingFields, t *testing.T) (platform.OnboardingService, func()) {
t.Helper()
svc := inmem.NewService()
@ -21,8 +32,9 @@ func initOnboardingService(f platformtesting.OnboardingFields, t *testing.T) (pl
t.Fatalf("failed to set new onboarding finished: %v", err)
}
handler := NewSetupHandler()
handler.OnboardingService = svc
setupBackend := NewMockSetupBackend()
setupBackend.OnboardingService = svc
handler := NewSetupHandler(setupBackend)
server := httptest.NewServer(handler)
client := struct {
*SetupService

View File

@ -5,15 +5,39 @@ import (
"context"
"encoding/json"
"fmt"
"net/http"
"path"
"strconv"
platform "github.com/influxdata/influxdb"
"github.com/julienschmidt/httprouter"
"go.uber.org/zap"
"net/http"
"path"
)
// OrgBackend is all services and associated parameters required to construct
// the OrgHandler.
type OrgBackend struct {
Logger *zap.Logger
OrganizationService platform.OrganizationService
OrganizationOperationLogService platform.OrganizationOperationLogService
UserResourceMappingService platform.UserResourceMappingService
SecretService platform.SecretService
LabelService platform.LabelService
UserService platform.UserService
}
// NewOrgBackend returns a new instance of OrgBackend.
func NewOrgBackend(b *APIBackend) *OrgBackend {
return &OrgBackend{
Logger: b.Logger.With(zap.String("handler", "org")),
OrganizationService: b.OrganizationService,
OrganizationOperationLogService: b.OrganizationOperationLogService,
UserResourceMappingService: b.UserResourceMappingService,
SecretService: b.SecretService,
LabelService: b.LabelService,
UserService: b.UserService,
}
}
// OrgHandler represents an HTTP API handler for orgs.
type OrgHandler struct {
*httprouter.Router
@ -44,15 +68,17 @@ const (
)
// NewOrgHandler returns a new instance of OrgHandler.
func NewOrgHandler(mappingService platform.UserResourceMappingService,
labelService platform.LabelService, userService platform.UserService) *OrgHandler {
func NewOrgHandler(b *OrgBackend) *OrgHandler {
h := &OrgHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
UserResourceMappingService: mappingService,
LabelService: labelService,
UserService: userService,
OrganizationService: b.OrganizationService,
OrganizationOperationLogService: b.OrganizationOperationLogService,
UserResourceMappingService: b.UserResourceMappingService,
SecretService: b.SecretService,
LabelService: b.LabelService,
UserService: b.UserService,
}
h.HandlerFunc("POST", organizationsPath, h.handlePostOrg)
@ -62,22 +88,41 @@ func NewOrgHandler(mappingService platform.UserResourceMappingService,
h.HandlerFunc("PATCH", organizationsIDPath, h.handlePatchOrg)
h.HandlerFunc("DELETE", organizationsIDPath, h.handleDeleteOrg)
h.HandlerFunc("POST", organizationsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.OrgsResourceType, platform.Member))
h.HandlerFunc("GET", organizationsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.OrgsResourceType, platform.Member))
h.HandlerFunc("DELETE", organizationsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member))
memberBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.OrgsResourceType,
UserType: platform.Member,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", organizationsIDMembersPath, newPostMemberHandler(memberBackend))
h.HandlerFunc("GET", organizationsIDMembersPath, newGetMembersHandler(memberBackend))
h.HandlerFunc("DELETE", organizationsIDMembersIDPath, newDeleteMemberHandler(memberBackend))
h.HandlerFunc("POST", organizationsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.OrgsResourceType, platform.Owner))
h.HandlerFunc("GET", organizationsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.OrgsResourceType, platform.Owner))
h.HandlerFunc("DELETE", organizationsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner))
ownerBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.OrgsResourceType,
UserType: platform.Owner,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", organizationsIDOwnersPath, newPostMemberHandler(ownerBackend))
h.HandlerFunc("GET", organizationsIDOwnersPath, newGetMembersHandler(ownerBackend))
h.HandlerFunc("DELETE", organizationsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend))
h.HandlerFunc("GET", organizationsIDSecretsPath, h.handleGetSecrets)
h.HandlerFunc("PATCH", organizationsIDSecretsPath, h.handlePatchSecrets)
// TODO(desa): need a way to specify which secrets to delete. this should work for now
h.HandlerFunc("POST", organizationsIDSecretsDeletePath, h.handleDeleteSecrets)
h.HandlerFunc("GET", organizationsIDLabelsPath, newGetLabelsHandler(h.LabelService))
h.HandlerFunc("POST", organizationsIDLabelsPath, newPostLabelHandler(h.LabelService))
h.HandlerFunc("DELETE", organizationsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService))
labelBackend := &LabelBackend{
Logger: b.Logger.With(zap.String("handler", "label")),
LabelService: b.LabelService,
}
h.HandlerFunc("GET", organizationsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", organizationsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", organizationsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", organizationsIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}
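With the member and owner routes expressed as data on MemberBackend, wiring the same endpoints for another resource type is a copy of this block with a different ResourceType. An illustrative fragment only, assuming a BucketsResourceType constant and a bucketsIDMembersPath route exist in the same style as the org constants used above:

// Hypothetical wiring for a buckets handler, following the org pattern.
memberBackend := MemberBackend{
	Logger:                     b.Logger.With(zap.String("handler", "member")),
	ResourceType:               platform.BucketsResourceType, // assumed constant
	UserType:                   platform.Member,
	UserResourceMappingService: b.UserResourceMappingService,
	UserService:                b.UserService,
}
h.HandlerFunc("POST", bucketsIDMembersPath, newPostMemberHandler(memberBackend)) // assumed path constant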
@ -758,29 +803,14 @@ func decodeGetOrganizationLogRequest(ctx context.Context, r *http.Request) (*get
return nil, err
}
opts := platform.DefaultOperationLogFindOptions
qp := r.URL.Query()
if v := qp.Get("desc"); v == "false" {
opts.Descending = false
}
if v := qp.Get("limit"); v != "" {
i, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
opts.Limit = i
}
if v := qp.Get("offset"); v != "" {
i, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
opts.Offset = i
opts, err := decodeFindOptions(ctx, r)
if err != nil {
return nil, err
}
return &getOrganizationLogRequest{
OrganizationID: i,
opts: opts,
opts: *opts,
}, nil
}
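decodeFindOptions itself is not shown in this diff. A self-contained sketch consistent with its call site here and with the query parameters the deleted inline code parsed (desc, limit, offset); FindOptions is a stand-in for platform.FindOptions:

package main

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
)

// FindOptions is a stand-in for platform.FindOptions.
type FindOptions struct {
	Descending bool
	Limit      int
	Offset     int
}

func decodeFindOptions(ctx context.Context, r *http.Request) (*FindOptions, error) {
	opts := FindOptions{Descending: true} // mirrors the default the old code started from
	qp := r.URL.Query()
	if qp.Get("desc") == "false" {
		opts.Descending = false
	}
	if v := qp.Get("limit"); v != "" {
		i, err := strconv.Atoi(v)
		if err != nil {
			return nil, err
		}
		opts.Limit = i
	}
	if v := qp.Get("offset"); v != "" {
		i, err := strconv.Atoi(v)
		if err != nil {
			return nil, err
		}
		opts.Offset = i
	}
	return &opts, nil
}

func main() {
	r, _ := http.NewRequest("GET", "http://example.com/logs?limit=10&desc=false", nil)
	opts, err := decodeFindOptions(context.Background(), r)
	fmt.Printf("%+v %v\n", opts, err)
}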

View File

@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"go.uber.org/zap"
"io/ioutil"
"net/http"
"net/http/httptest"
@ -16,6 +17,20 @@ import (
platformtesting "github.com/influxdata/influxdb/testing"
)
// NewMockOrgBackend returns an OrgBackend with mock services.
func NewMockOrgBackend() *OrgBackend {
return &OrgBackend{
Logger: zap.NewNop().With(zap.String("handler", "org")),
OrganizationService: mock.NewOrganizationService(),
OrganizationOperationLogService: mock.NewOrganizationOperationLogService(),
UserResourceMappingService: mock.NewUserResourceMappingService(),
SecretService: mock.NewSecretService(),
LabelService: mock.NewLabelService(),
UserService: mock.NewUserService(),
}
}
func initOrganizationService(f platformtesting.OrganizationFields, t *testing.T) (platform.OrganizationService, string, func()) {
t.Helper()
svc := inmem.NewService()
@ -28,8 +43,9 @@ func initOrganizationService(f platformtesting.OrganizationFields, t *testing.T)
}
}
handler := NewOrgHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService())
handler.OrganizationService = svc
orgBackend := NewMockOrgBackend()
orgBackend.OrganizationService = svc
handler := NewOrgHandler(orgBackend)
server := httptest.NewServer(handler)
client := OrganizationService{
Addr: server.URL,
@ -122,8 +138,9 @@ func TestSecretService_handleGetSecrets(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewOrgHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService())
h.SecretService = tt.fields.SecretService
orgBackend := NewMockOrgBackend()
orgBackend.SecretService = tt.fields.SecretService
h := NewOrgHandler(orgBackend)
u := fmt.Sprintf("http://any.url/api/v2/orgs/%s/secrets", tt.args.orgID)
r := httptest.NewRequest("GET", u, nil)
@ -192,8 +209,9 @@ func TestSecretService_handlePatchSecrets(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewOrgHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService())
h.SecretService = tt.fields.SecretService
orgBackend := NewMockOrgBackend()
orgBackend.SecretService = tt.fields.SecretService
h := NewOrgHandler(orgBackend)
b, err := json.Marshal(tt.args.secrets)
if err != nil {
@ -268,8 +286,9 @@ func TestSecretService_handleDeleteSecrets(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewOrgHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService())
h.SecretService = tt.fields.SecretService
orgBackend := NewMockOrgBackend()
orgBackend.SecretService = tt.fields.SecretService
h := NewOrgHandler(orgBackend)
b, err := json.Marshal(tt.args.secrets)
if err != nil {

View File

@ -27,6 +27,25 @@ const (
fluxPath = "/api/v2/query"
)
// FluxBackend is all services and associated parameters required to construct
// the FluxHandler.
type FluxBackend struct {
Logger *zap.Logger
OrganizationService platform.OrganizationService
ProxyQueryService query.ProxyQueryService
}
// NewFluxBackend returns a new instance of FluxBackend.
func NewFluxBackend(b *APIBackend) *FluxBackend {
return &FluxBackend{
Logger: b.Logger.With(zap.String("handler", "query")),
ProxyQueryService: b.ProxyQueryService,
OrganizationService: b.OrganizationService,
}
}
// FluxHandler implements handling flux queries.
type FluxHandler struct {
*httprouter.Router
@ -39,11 +58,14 @@ type FluxHandler struct {
}
// NewFluxHandler returns a new handler at /api/v2/query for flux queries.
func NewFluxHandler() *FluxHandler {
func NewFluxHandler(b *FluxBackend) *FluxHandler {
h := &FluxHandler{
Router: NewRouter(),
Now: time.Now,
Logger: zap.NewNop(),
Logger: b.Logger,
ProxyQueryService: b.ProxyQueryService,
OrganizationService: b.OrganizationService,
}
h.HandlerFunc("POST", fluxPath, h.handleQuery)

View File

@ -13,6 +13,30 @@ import (
"go.uber.org/zap"
)
// ScraperBackend is all services and associated parameters required to construct
// the ScraperHandler.
type ScraperBackend struct {
Logger *zap.Logger
ScraperStorageService influxdb.ScraperTargetStoreService
BucketService influxdb.BucketService
OrganizationService influxdb.OrganizationService
UserService influxdb.UserService
UserResourceMappingService influxdb.UserResourceMappingService
LabelService influxdb.LabelService
}
// NewScraperBackend returns a new instance of ScraperBackend.
func NewScraperBackend(b *APIBackend) *ScraperBackend {
return &ScraperBackend{
Logger: b.Logger.With(zap.String("handler", "scraper")),
ScraperStorageService: b.ScraperTargetStoreService,
BucketService: b.BucketService,
OrganizationService: b.OrganizationService,
}
}
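Note that ScraperBackend also declares UserService, UserResourceMappingService, and LabelService, but NewScraperBackend does not populate them from APIBackend even though NewScraperHandler below copies all three; within this diff only the test helper NewMockScraperBackend fills them in.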
// ScraperHandler represents an HTTP API handler for scraper targets.
type ScraperHandler struct {
*httprouter.Router
@ -36,24 +60,16 @@ const (
)
// NewScraperHandler returns a new instance of ScraperHandler.
func NewScraperHandler(
logger *zap.Logger,
userService influxdb.UserService,
userResourceMappingService influxdb.UserResourceMappingService,
labelService influxdb.LabelService,
scraperStorageService influxdb.ScraperTargetStoreService,
bucketService influxdb.BucketService,
organizationService influxdb.OrganizationService,
) *ScraperHandler {
func NewScraperHandler(b *ScraperBackend) *ScraperHandler {
h := &ScraperHandler{
Router: NewRouter(),
Logger: logger,
UserService: userService,
UserResourceMappingService: userResourceMappingService,
LabelService: labelService,
ScraperStorageService: scraperStorageService,
BucketService: bucketService,
OrganizationService: organizationService,
Logger: b.Logger,
UserService: b.UserService,
UserResourceMappingService: b.UserResourceMappingService,
LabelService: b.LabelService,
ScraperStorageService: b.ScraperStorageService,
BucketService: b.BucketService,
OrganizationService: b.OrganizationService,
}
h.HandlerFunc("POST", targetsPath, h.handlePostScraperTarget)
h.HandlerFunc("GET", targetsPath, h.handleGetScraperTargets)
@ -61,17 +77,35 @@ func NewScraperHandler(
h.HandlerFunc("PATCH", targetsPath+"/:id", h.handlePatchScraperTarget)
h.HandlerFunc("DELETE", targetsPath+"/:id", h.handleDeleteScraperTarget)
h.HandlerFunc("POST", targetsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, influxdb.ScraperResourceType, influxdb.Member))
h.HandlerFunc("GET", targetsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, influxdb.ScraperResourceType, influxdb.Member))
h.HandlerFunc("DELETE", targetsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, influxdb.Member))
memberBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: influxdb.ScraperResourceType,
UserType: influxdb.Member,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", targetsIDMembersPath, newPostMemberHandler(memberBackend))
h.HandlerFunc("GET", targetsIDMembersPath, newGetMembersHandler(memberBackend))
h.HandlerFunc("DELETE", targetsIDMembersIDPath, newDeleteMemberHandler(memberBackend))
h.HandlerFunc("POST", targetsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, influxdb.ScraperResourceType, influxdb.Owner))
h.HandlerFunc("GET", targetsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, influxdb.ScraperResourceType, influxdb.Owner))
h.HandlerFunc("DELETE", targetsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, influxdb.Owner))
ownerBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: influxdb.ScraperResourceType,
UserType: influxdb.Owner,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", targetsIDOwnersPath, newPostMemberHandler(ownerBackend))
h.HandlerFunc("GET", targetsIDOwnersPath, newGetMembersHandler(ownerBackend))
h.HandlerFunc("DELETE", targetsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend))
h.HandlerFunc("GET", targetsIDLabelsPath, newGetLabelsHandler(h.LabelService))
h.HandlerFunc("POST", targetsIDLabelsPath, newPostLabelHandler(h.LabelService))
h.HandlerFunc("DELETE", targetsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService))
labelBackend := &LabelBackend{
Logger: b.Logger.With(zap.String("handler", "label")),
LabelService: b.LabelService,
}
h.HandlerFunc("GET", targetsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", targetsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", targetsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
return h
}

View File

@ -5,17 +5,16 @@ import (
"context"
"encoding/json"
"fmt"
"go.uber.org/zap"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
platform "github.com/influxdata/influxdb"
platcontext "github.com/influxdata/influxdb/context"
httpMock "github.com/influxdata/influxdb/http/mock"
"github.com/influxdata/influxdb/inmem"
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/mock"
platformtesting "github.com/influxdata/influxdb/testing"
"github.com/julienschmidt/httprouter"
@ -31,6 +30,20 @@ var (
targetTwoID = platformtesting.MustIDBase16(targetTwoIDString)
)
// NewMockScraperBackend returns a ScraperBackend with mock services.
func NewMockScraperBackend() *ScraperBackend {
return &ScraperBackend{
Logger: zap.NewNop().With(zap.String("handler", "scraper")),
ScraperStorageService: &mock.ScraperTargetStoreService{},
BucketService: mock.NewBucketService(),
OrganizationService: mock.NewOrganizationService(),
UserService: mock.NewUserService(),
UserResourceMappingService: &mock.UserResourceMappingService{},
LabelService: mock.NewLabelService(),
}
}
func TestService_handleGetScraperTargets(t *testing.T) {
type fields struct {
ScraperTargetStoreService platform.ScraperTargetStoreService
@ -188,15 +201,11 @@ func TestService_handleGetScraperTargets(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewScraperHandler(
logger.New(os.Stdout),
mock.NewUserService(),
&mock.UserResourceMappingService{},
mock.NewLabelService(),
tt.fields.ScraperTargetStoreService,
tt.fields.BucketService,
tt.fields.OrganizationService,
)
scraperBackend := NewMockScraperBackend()
scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService
scraperBackend.OrganizationService = tt.fields.OrganizationService
scraperBackend.BucketService = tt.fields.BucketService
h := NewScraperHandler(scraperBackend)
r := httptest.NewRequest("GET", "http://any.tld", nil)
@ -322,15 +331,11 @@ func TestService_handleGetScraperTarget(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewScraperHandler(
logger.New(os.Stdout),
mock.NewUserService(),
&mock.UserResourceMappingService{},
mock.NewLabelService(),
tt.fields.ScraperTargetStoreService,
tt.fields.BucketService,
tt.fields.OrganizationService,
)
scraperBackend := NewMockScraperBackend()
scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService
scraperBackend.OrganizationService = tt.fields.OrganizationService
scraperBackend.BucketService = tt.fields.BucketService
h := NewScraperHandler(scraperBackend)
r := httptest.NewRequest("GET", "http://any.tld", nil)
@ -429,15 +434,10 @@ func TestService_handleDeleteScraperTarget(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewScraperHandler(
logger.New(os.Stdout),
mock.NewUserService(),
&mock.UserResourceMappingService{},
mock.NewLabelService(),
tt.fields.Service,
mock.NewBucketService(),
&mock.OrganizationService{},
)
scraperBackend := NewMockScraperBackend()
scraperBackend.ScraperStorageService = tt.fields.Service
h := NewScraperHandler(scraperBackend)
r := httptest.NewRequest("GET", "http://any.tld", nil)
r = r.WithContext(context.WithValue(
@ -558,15 +558,11 @@ func TestService_handlePostScraperTarget(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewScraperHandler(
logger.New(os.Stdout),
mock.NewUserService(),
&mock.UserResourceMappingService{},
mock.NewLabelService(),
tt.fields.ScraperTargetStoreService,
tt.fields.BucketService,
tt.fields.OrganizationService,
)
scraperBackend := NewMockScraperBackend()
scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService
scraperBackend.OrganizationService = tt.fields.OrganizationService
scraperBackend.BucketService = tt.fields.BucketService
h := NewScraperHandler(scraperBackend)
st, err := json.Marshal(tt.args.target)
if err != nil {
@ -733,15 +729,11 @@ func TestService_handlePatchScraperTarget(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewScraperHandler(
logger.New(os.Stdout),
mock.NewUserService(),
&mock.UserResourceMappingService{},
mock.NewLabelService(),
tt.fields.ScraperTargetStoreService,
tt.fields.BucketService,
tt.fields.OrganizationService,
)
scraperBackend := NewMockScraperBackend()
scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService
scraperBackend.OrganizationService = tt.fields.OrganizationService
scraperBackend.BucketService = tt.fields.BucketService
h := NewScraperHandler(scraperBackend)
var err error
st := make([]byte, 0)
@ -802,33 +794,30 @@ func initScraperService(f platformtesting.TargetFields, t *testing.T) (platform.
}
}
handler := NewScraperHandler(
logger.New(os.Stdout),
mock.NewUserService(),
&mock.UserResourceMappingService{},
mock.NewLabelService(),
svc,
&mock.BucketService{
FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*platform.Bucket, error) {
return &platform.Bucket{
ID: id,
Name: "bucket1",
}, nil
},
scraperBackend := NewMockScraperBackend()
scraperBackend.ScraperStorageService = svc
scraperBackend.OrganizationService = &mock.OrganizationService{
FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) {
return &platform.Organization{
ID: id,
Name: "org1",
}, nil
},
&mock.OrganizationService{
FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) {
return &platform.Organization{
ID: id,
Name: "org1",
}, nil
},
}
scraperBackend.BucketService = &mock.BucketService{
FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*platform.Bucket, error) {
return &platform.Bucket{
ID: id,
Name: "bucket1",
}, nil
},
)
userID, _ := platform.IDFromString("020f755c3c082002")
}
handler := NewScraperHandler(scraperBackend)
server := httptest.NewServer(httpMock.NewAuthMiddlewareHandler(
handler, &platform.Authorization{
UserID: *userID,
handler,
&platform.Authorization{
UserID: platformtesting.MustIDBase16("020f755c3c082002"),
Token: "tok",
},
))

View File

@ -74,6 +74,27 @@ func newSourcesResponse(srcs []*platform.Source) *sourcesResponse {
return res
}
// SourceBackend is all services and associated parameters required to construct
// the SourceHandler.
type SourceBackend struct {
Logger *zap.Logger
SourceService platform.SourceService
NewBucketService func(s *platform.Source) (platform.BucketService, error)
NewQueryService func(s *platform.Source) (query.ProxyQueryService, error)
}
// NewSourceBackend returns a new instance of SourceBackend.
func NewSourceBackend(b *APIBackend) *SourceBackend {
return &SourceBackend{
Logger: b.Logger.With(zap.String("handler", "source")),
SourceService: b.SourceService,
NewBucketService: b.NewBucketService,
NewQueryService: b.NewQueryService,
}
}
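Unlike the other backends, SourceBackend carries factory functions rather than service instances, since a bucket or query service must be constructed per source. A minimal sketch of that factory shape with stand-in types; the error mirrors the default stub the old constructor installed:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for platform.Source and platform.BucketService.
type Source struct{ URL string }

type BucketService interface{ Ping() error }

type remoteBuckets struct{ url string }

func (remoteBuckets) Ping() error { return nil }

// newBucketService builds a service scoped to one source, the shape
// SourceBackend.NewBucketService has in the diff above.
func newBucketService(s *Source) (BucketService, error) {
	if s.URL == "" {
		return nil, errors.New("bucket service not set")
	}
	return remoteBuckets{url: s.URL}, nil
}

func main() {
	bs, err := newBucketService(&Source{URL: "http://example.com"})
	fmt.Println(bs, err)
}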
// SourceHandler is a handler for sources
type SourceHandler struct {
*httprouter.Router
@ -87,16 +108,14 @@ type SourceHandler struct {
}
// NewSourceHandler returns a new instance of SourceHandler.
func NewSourceHandler() *SourceHandler {
func NewSourceHandler(b *SourceBackend) *SourceHandler {
h := &SourceHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
NewBucketService: func(s *platform.Source) (platform.BucketService, error) {
return nil, fmt.Errorf("bucket service not set")
},
NewQueryService: func(s *platform.Source) (query.ProxyQueryService, error) {
return nil, fmt.Errorf("query service not set")
},
Logger: b.Logger,
SourceService: b.SourceService,
NewBucketService: b.NewBucketService,
NewQueryService: b.NewQueryService,
}
h.HandlerFunc("POST", "/api/v2/sources", h.handlePostSource)

View File

@ -308,12 +308,7 @@ paths:
content:
application/json:
schema:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
$ref: "#/components/schemas/LabelsResponse"
default:
description: unexpected error
content:
@ -345,12 +340,7 @@ paths:
content:
application/json:
schema:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
$ref: "#/components/schemas/LabelsResponse"
default:
description: unexpected error
content:
@ -727,7 +717,7 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/scrapers/{scraperTargetID}/labels/{label}':
'/scrapers/{scraperTargetID}/labels/{labelID}':
delete:
tags:
- ScraperTargets
@ -774,7 +764,7 @@ paths:
required: true
description: ID of the scraper target
- in: path
name: label
name: labelID
schema:
type: string
required: true
@ -1132,6 +1122,13 @@ paths:
tags:
- Write
summary: write time-series data into influxdb
requestBody:
description: line protocol body
required: true
content:
text/plain:
schema:
type: string
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: header
@ -1188,14 +1185,7 @@ paths:
name: precision
description: specifies the precision for the unix timestamps within the body line-protocol
schema:
type: string
default: ns
description: specifies the unit of time
enum:
- ns
- us
- ms
- s
$ref: "#/components/schemas/WritePrecision"
responses:
'204':
description: write data is correctly formatted and accepted for writing to the bucket.
@ -1350,6 +1340,32 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/sources/{sourceID}:
delete:
tags:
- Sources
summary: Delete a source
parameters:
- in: path
name: sourceID
schema:
type: string
required: true
description: ID of the source
responses:
'204':
description: delete has been accepted
'404':
description: source not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
patch:
tags:
- Sources
@ -1490,448 +1506,6 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/Error"
/views:
post:
tags:
- Views
summary: A view contains information about the visual representation of data
parameters:
- $ref: '#/components/parameters/TraceSpan'
requestBody:
description: view to create
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/View"
responses:
'201':
description: Added view
content:
application/json:
schema:
$ref: "#/components/schemas/View"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
get:
tags:
- Views
summary: Get all views
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: query
name: org
description: specifies the organization of the resource
required: true
schema:
type: string
- in: query
name: type
description: filters results to a specified type. Can be used multiple times in a request, to filter to multiple types.
schema:
type: string
enum:
- "xy"
- "single-stat"
- "gauge"
- "table"
- "markdown"
- "log-viewer"
- "line-plus-single-stat"
responses:
'200':
description: all views
content:
application/json:
schema:
$ref: "#/components/schemas/Views"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/views/{viewID}':
get:
tags:
- Views
summary: Get a single View
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of view to update
responses:
'200':
description: get a single view
content:
application/json:
schema:
$ref: "#/components/schemas/View"
'404':
description: view not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
patch:
tags:
- Views
summary: Update a single view
requestBody:
description: patching of a view
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/View"
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of view to update
responses:
'200':
description: Updated view
content:
application/json:
schema:
$ref: "#/components/schemas/View"
'404':
description: view not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
delete:
tags:
- Views
summary: Delete a view
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of view to update
responses:
'204':
description: delete has been accepted
'404':
description: view not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/views/{viewID}/labels':
get:
tags:
- Views
summary: list all labels for a view
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of the view
responses:
'200':
description: a list of all labels for a view
content:
application/json:
schema:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
post:
tags:
- Views
summary: add a label to a view
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of the view
requestBody:
description: label to add
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/LabelMapping"
responses:
'200':
description: a list of all labels for a view
content:
application/json:
schema:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/views/{viewID}/labels/{labelID}':
delete:
tags:
- Views
summary: delete a label from a view
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of the view
- in: path
name: labelID
schema:
type: string
required: true
description: the label id
responses:
'204':
description: delete has been accepted
'404':
description: view not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/views/{viewID}/members':
get:
tags:
- Users
- Views
summary: List all view members
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of the view
responses:
'200':
description: a list of users who have member privileges for a view
content:
application/json:
schema:
$ref: "#/components/schemas/ResourceMembers"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
post:
tags:
- Users
- Views
summary: Add view member
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of the view
requestBody:
description: user to add as member
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/AddResourceMemberRequestBody"
responses:
'201':
description: added to view members
content:
application/json:
schema:
$ref: "#/components/schemas/ResourceMember"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/views/{viewID}/members/{userID}':
delete:
tags:
- Users
- Views
summary: removes a member from an view
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: userID
schema:
type: string
required: true
description: ID of member to remove
- in: path
name: viewID
schema:
type: string
required: true
description: ID of the view
responses:
'204':
description: member removed
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/views/{viewID}/owners':
get:
tags:
- Users
- Views
summary: List all view owners
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of the view
responses:
'200':
description: a list of users who have owner privileges for a view
content:
application/json:
schema:
$ref: "#/components/schemas/ResourceOwners"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
post:
tags:
- Users
- Views
summary: Add view owner
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: viewID
schema:
type: string
required: true
description: ID of the view
requestBody:
description: user to add as owner
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/AddResourceMemberRequestBody"
responses:
'201':
description: added to view owners
content:
application/json:
schema:
$ref: "#/components/schemas/ResourceOwner"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/views/{viewID}/owners/{userID}':
delete:
tags:
- Users
- Views
summary: removes an owner from a view
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: userID
schema:
type: string
required: true
description: ID of owner to remove
- in: path
name: viewID
schema:
type: string
required: true
description: ID of the view
responses:
'204':
description: owner removed
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/labels:
post:
tags:
@ -1950,7 +1524,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/Label"
$ref: "#/components/schemas/LabelResponse"
default:
description: unexpected error
content:
@ -1967,7 +1541,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/Labels"
$ref: "#/components/schemas/LabelsResponse"
default:
description: unexpected error
content:
@ -1993,7 +1567,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/Label"
$ref: "#/components/schemas/LabelResponse"
default:
description: unexpected error
content:
@ -2025,7 +1599,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/Label"
$ref: "#/components/schemas/LabelResponse"
'404':
description: label not found
content:
@ -2503,12 +2077,7 @@ paths:
content:
application/json:
schema:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
$ref: "#/components/schemas/LabelsResponse"
default:
description: unexpected error
content:
@ -2540,12 +2109,7 @@ paths:
content:
application/json:
schema:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
$ref: "#/components/schemas/LabelResponse"
default:
description: unexpected error
content:
@ -2787,11 +2351,9 @@ paths:
'200':
description: Abstract syntax tree of flux query.
content:
application/json: #TODO(goller): document the AST JSON schema
application/json:
schema:
properties:
todo:
type: string # swagger editor was yelling at me here
$ref: "#/components/schemas/ASTResponse"
default:
description: Any response other than 200 is an internal server error
content:
@ -3768,16 +3330,11 @@ paths:
$ref: "#/components/schemas/LabelMapping"
responses:
'200':
description: a list of all labels for an organization
description: returns the created label
content:
application/json:
schema:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
$ref: "#/components/schemas/LabelResponse"
default:
description: unexpected error
content:
@ -3924,7 +3481,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/ResourceOwners"
$ref: "#/components/schemas/ResourceMembers"
default:
description: unexpected error
content:
@ -4013,7 +3570,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/ResourceMembers"
$ref: "#/components/schemas/ResourceOwners"
default:
description: unexpected error
content:
@ -4118,7 +3675,14 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/Tasks"
type: object
properties:
tasks:
type: array
items:
$ref: "#/components/schemas/Task"
links:
$ref: "#/components/schemas/Links"
default:
description: unexpected error
content:
@ -4137,7 +3701,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/Task"
$ref: "#/components/schemas/TaskCreateRequest"
responses:
'201':
description: Task created
@ -4496,14 +4060,7 @@ paths:
content:
application/json:
schema:
type: object
properties:
labels:
type: array
items:
type: string
links:
$ref: "#/components/schemas/Links"
$ref: "#/components/schemas/LabelResponse"
default:
description: unexpected error
content:
@ -5079,24 +4636,23 @@ components:
annotations:
description: https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns
type: array
default: []
items:
type: string
enum:
- group
- datatype
- default
- "group"
- "datatype"
- "default"
uniqueItems: true
commentPrefix:
description: character prefixed to comment strings
type: string
default: \#
default: "#"
maxLength: 1
minLength: 0
dateTimeFormat:
description: format of timestamps
type: string
default: RFC3339
default: "RFC3339"
enum:
- RFC3339
- RFC3339Nano
@ -5271,7 +4827,6 @@ components:
Link:
type: string
readOnly: true
format: uri
description: URI of resource.
Links:
type: object
@ -5387,13 +4942,12 @@ components:
status:
readOnly: true
type: string
enum: [
"scheduled",
"started",
"failed",
"success",
"canceled"
]
enum:
- scheduled
- started
- failed
- success
- canceled
scheduledFor:
description: Time used for run's "now" option, RFC3339.
type: string
@ -5456,6 +5010,8 @@ components:
name:
description: A description of the task.
type: string
owner:
$ref: "#/components/schemas/User"
status:
description: The current status of the task. When updated to 'inactive', cancels all queued jobs of this task.
default: active
@ -5782,75 +5338,6 @@ components:
type: integer
format: int32
required: [code, message, maxLength]
InfluxQLResults:
properties:
error:
description: error during processing of the message
type: string
results:
type: array
description: result for each query
items:
type: object
properties:
error:
type: string
description: error during processing of the message
partial:
type: boolean
description: If a max row limit has been placed in the configuration file and the number of returned values is larger, this will be set to true and values truncated.
statement_id:
type: integer
description: statement's position in the query.
series:
description: The collection of data in InfluxDBs data structure that share a measurement, tag set, and retention policy.
type: array
items:
type: object
description: values for a unique series
properties:
name:
description: The part of InfluxDBs structure that describes the data stored in the associated fields. Measurements are strings.
type: string
tags:
description: The key-value pairs in InfluxDBs data structure that records metadata.
type: object
columns:
description: list of columns describing the content of a single value array
type: array
items:
type: string
values:
description: array of arrays of the values return from the query
type: array
items:
type: array
description: single row of results in the order of the columns field.
items:
oneOf:
- type: string
- type: number
- type: integer
partial:
type: boolean
messages:
type: array
description: represents a user-facing message to be included with the result.
items:
type: object
properties:
level:
type: string
text:
type: string
InfluxqlQueryError:
properties:
error:
description: message describing why the query was rejected
readOnly: true
type: string
required:
- error
Field:
type: object
properties:
@ -6393,6 +5880,8 @@ components:
Cell:
type: object
properties:
id:
type: string
links:
type: object
properties:
@ -6524,6 +6013,12 @@ components:
properties:
self:
type: string
query:
type: string
health:
type: string
buckets:
type: string
id:
type: string
orgID:
@ -7374,3 +6869,48 @@ components:
properties:
labelID:
type: string
LabelsResponse:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
LabelResponse:
type: object
properties:
label:
$ref: "#/components/schemas/Label"
links:
$ref: "#/components/schemas/Links"
ASTResponse:
description: contains the AST for the supplied Flux query
type: object
properties:
ast:
description: the AST of the supplied Flux query
type: object
WritePrecision:
type: string
enum:
- ms
- s
- us
- u
- ns
TaskCreateRequest:
properties:
orgID:
description: The ID of the organization that owns this Task.
type: string
status:
description: Starting state of the task. 'inactive' tasks are not run until they are updated to 'active'
default: active
type: string
enum:
- active
- inactive
flux:
description: The Flux script to run for this task.
type: string
required: [orgID, flux]
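The write endpoint now declares its line-protocol request body and reuses the WritePrecision schema above for its precision parameter. A self-contained client sketch against that contract; the URL, token, and the org/bucket parameter names are placeholders, since this hunk elides the endpoint's other parameters:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Line protocol body, timestamped with second precision.
	body := strings.NewReader("m,host=a value=1 1556813561")

	// Placeholder URL, org, bucket, and token; precision values come from
	// the WritePrecision schema above (ms, s, us, u, ns).
	url := "http://localhost:9999/api/v2/write?org=my-org&bucket=my-bucket&precision=s"
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Token my-token")
	req.Header.Set("Content-Type", "text/plain; charset=utf-8")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 204 No Content on success per the spec above
}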

View File

@ -21,17 +21,3 @@ func TestValidSwagger(t *testing.T) {
t.Errorf("invalid swagger specification: %v", err)
}
}
func TestValidCurSwagger(t *testing.T) {
data, err := ioutil.ReadFile("./cur_swagger.yml")
if err != nil {
t.Fatalf("unable to read swagger specification: %v", err)
}
swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromYAMLData(data)
if err != nil {
t.Fatalf("unable to load swagger specification: %v", err)
}
if err := swagger.Validate(context.Background()); err != nil {
t.Errorf("invalid swagger specification: %v", err)
}
}

View File

@ -20,6 +20,32 @@ import (
"go.uber.org/zap"
)
// TaskBackend is all services and associated parameters required to construct
// the TaskHandler.
type TaskBackend struct {
Logger *zap.Logger
TaskService platform.TaskService
AuthorizationService platform.AuthorizationService
OrganizationService platform.OrganizationService
UserResourceMappingService platform.UserResourceMappingService
LabelService platform.LabelService
UserService platform.UserService
}
// NewTaskBackend returns a new instance of TaskBackend.
func NewTaskBackend(b *APIBackend) *TaskBackend {
return &TaskBackend{
Logger: b.Logger.With(zap.String("handler", "task")),
TaskService: b.TaskService,
AuthorizationService: b.AuthorizationService,
OrganizationService: b.OrganizationService,
UserResourceMappingService: b.UserResourceMappingService,
LabelService: b.LabelService,
UserService: b.UserService,
}
}
// TaskHandler represents an HTTP API handler for tasks.
type TaskHandler struct {
*httprouter.Router
@ -50,14 +76,17 @@ const (
)
// NewTaskHandler returns a new instance of TaskHandler.
func NewTaskHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, logger *zap.Logger, userService platform.UserService) *TaskHandler {
func NewTaskHandler(b *TaskBackend) *TaskHandler {
h := &TaskHandler{
logger: logger,
Router: NewRouter(),
logger: b.Logger,
UserResourceMappingService: mappingService,
LabelService: labelService,
UserService: userService,
TaskService: b.TaskService,
AuthorizationService: b.AuthorizationService,
OrganizationService: b.OrganizationService,
UserResourceMappingService: b.UserResourceMappingService,
LabelService: b.LabelService,
UserService: b.UserService,
}
h.HandlerFunc("GET", tasksPath, h.handleGetTasks)
@ -70,13 +99,27 @@ func NewTaskHandler(mappingService platform.UserResourceMappingService, labelSer
h.HandlerFunc("GET", tasksIDLogsPath, h.handleGetLogs)
h.HandlerFunc("GET", tasksIDRunsIDLogsPath, h.handleGetLogs)
h.HandlerFunc("POST", tasksIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.TasksResourceType, platform.Member))
h.HandlerFunc("GET", tasksIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.TasksResourceType, platform.Member))
h.HandlerFunc("DELETE", tasksIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member))
memberBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.TasksResourceType,
UserType: platform.Member,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", tasksIDMembersPath, newPostMemberHandler(memberBackend))
h.HandlerFunc("GET", tasksIDMembersPath, newGetMembersHandler(memberBackend))
h.HandlerFunc("DELETE", tasksIDMembersIDPath, newDeleteMemberHandler(memberBackend))
h.HandlerFunc("POST", tasksIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.TasksResourceType, platform.Owner))
h.HandlerFunc("GET", tasksIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.TasksResourceType, platform.Owner))
h.HandlerFunc("DELETE", tasksIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner))
ownerBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.TasksResourceType,
UserType: platform.Owner,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", tasksIDOwnersPath, newPostMemberHandler(ownerBackend))
h.HandlerFunc("GET", tasksIDOwnersPath, newGetMembersHandler(ownerBackend))
h.HandlerFunc("DELETE", tasksIDOwnersIDPath, newDeleteMemberHandler(ownerBackend))
h.HandlerFunc("GET", tasksIDRunsPath, h.handleGetRuns)
h.HandlerFunc("POST", tasksIDRunsPath, h.handleForceRun)
@ -84,9 +127,14 @@ func NewTaskHandler(mappingService platform.UserResourceMappingService, labelSer
h.HandlerFunc("POST", tasksIDRunsIDRetryPath, h.handleRetryRun)
h.HandlerFunc("DELETE", tasksIDRunsIDPath, h.handleCancelRun)
h.HandlerFunc("GET", tasksIDLabelsPath, newGetLabelsHandler(h.LabelService))
h.HandlerFunc("POST", tasksIDLabelsPath, newPostLabelHandler(h.LabelService))
h.HandlerFunc("DELETE", tasksIDLabelsIDPath, newDeleteLabelHandler(h.LabelService))
labelBackend := &LabelBackend{
Logger: b.Logger.With(zap.String("handler", "label")),
LabelService: b.LabelService,
}
h.HandlerFunc("GET", tasksIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", tasksIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", tasksIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", tasksIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}
@ -350,6 +398,29 @@ func (h *TaskHandler) handlePostTask(w http.ResponseWriter, r *http.Request) {
return
}
// add User resource map
urm := &platform.UserResourceMapping{
UserID: auth.GetUserID(),
UserType: platform.Owner,
ResourceType: platform.TasksResourceType,
ResourceID: req.Task.ID,
}
if err := h.UserResourceMappingService.CreateUserResourceMapping(ctx, urm); err != nil {
// clean up the task if we fail to map the user and resource
// TODO(lh): Multi step creates could benefit from a service wide transactional request
if derr := h.TaskService.DeleteTask(ctx, req.Task.ID); derr != nil {
err = fmt.Errorf("%s: failed to clean up task: %s", err.Error(), derr.Error())
}
err = &platform.Error{
Err: err,
Msg: "failed to add user permissions",
}
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusCreated, newTaskResponse(*req.Task, []*platform.Label{})); err != nil {
logEncodingError(h.logger, r, err)
return
@ -462,10 +533,13 @@ func (h *TaskHandler) handleUpdateTask(w http.ResponseWriter, r *http.Request) {
}
task, err := h.TaskService.UpdateTask(ctx, req.TaskID, req.Update)
if err != nil {
err = &platform.Error{
err := &platform.Error{
Err: err,
Msg: "failed to update task",
}
if err.Err == backend.ErrTaskNotFound {
err.Code = platform.ENotFound
}
EncodeError(ctx, err, w)
return
}
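The same wrap-then-classify pattern recurs through the task handler below: wrap the service error with a context message, then raise the code to platform.ENotFound when the wrapped error is a known sentinel. A self-contained sketch with stand-in types for platform.Error and the backend sentinel:

package main

import (
	"errors"
	"fmt"
)

var errTaskNotFound = errors.New("task not found") // stand-in for backend.ErrTaskNotFound

// apiError is a stand-in for platform.Error.
type apiError struct {
	Code string
	Msg  string
	Err  error
}

func (e *apiError) Error() string { return fmt.Sprintf("%s: %v", e.Msg, e.Err) }

func wrapTaskError(err error) *apiError {
	e := &apiError{Err: err, Msg: "failed to update task"}
	if err == errTaskNotFound { // the diff compares sentinels directly
		e.Code = "not found" // platform.ENotFound in the real code
	}
	return e
}

func main() {
	fmt.Println(wrapTaskError(errTaskNotFound).Code)
}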
@ -541,13 +615,31 @@ func (h *TaskHandler) handleDeleteTask(w http.ResponseWriter, r *http.Request) {
}
if err := h.TaskService.DeleteTask(ctx, req.TaskID); err != nil {
err = &platform.Error{
err := &platform.Error{
Err: err,
Msg: "failed to delete task",
}
if err.Err == backend.ErrTaskNotFound {
err.Code = platform.ENotFound
}
EncodeError(ctx, err, w)
return
}
// clean up resource maps for deleted task
urms, _, err := h.UserResourceMappingService.FindUserResourceMappings(ctx, platform.UserResourceMappingFilter{
ResourceID: req.TaskID,
ResourceType: platform.TasksResourceType,
})
if err != nil {
h.logger.Warn("failed to pull user resource mapping", zap.Error(err))
} else {
for _, m := range urms {
if err := h.UserResourceMappingService.DeleteUserResourceMapping(ctx, m.ResourceID, m.UserID); err != nil {
h.logger.Warn(fmt.Sprintf("failed to remove user resource mapping for task %s", m.ResourceID.String()), zap.Error(err))
}
}
}
w.WriteHeader(http.StatusNoContent)
}
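Two consistency measures appear in this hunk and in the handlePostTask hunk above: creating a task rolls the task back if the user-resource mapping cannot be written, and deleting a task removes its mappings best-effort, logging a warning instead of failing the request when cleanup misses. Until the service-wide transactional request mentioned in the TODO exists, orphaned mappings remain possible if the process dies between the two writes.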
@ -592,10 +684,13 @@ func (h *TaskHandler) handleGetLogs(w http.ResponseWriter, r *http.Request) {
logs, _, err := h.TaskService.FindLogs(ctx, req.filter)
if err != nil {
err = &platform.Error{
err := &platform.Error{
Err: err,
Msg: "failed to find task logs",
}
if err.Err == backend.ErrTaskNotFound || err.Err == backend.ErrRunNotFound {
err.Code = platform.ENotFound
}
EncodeError(ctx, err, w)
return
}
@ -671,10 +766,12 @@ func (h *TaskHandler) handleGetRuns(w http.ResponseWriter, r *http.Request) {
runs, _, err := h.TaskService.FindRuns(ctx, req.filter)
if err != nil {
err = &platform.Error{
Err: err,
Code: platform.EInvalid,
Msg: "failed to find runs",
err := &platform.Error{
Err: err,
Msg: "failed to find runs",
}
if err.Err == backend.ErrTaskNotFound {
err.Code = platform.ENotFound
}
EncodeError(ctx, err, w)
return
@ -792,12 +889,12 @@ func (h *TaskHandler) handleForceRun(w http.ResponseWriter, r *http.Request) {
run, err := h.TaskService.ForceRun(ctx, req.TaskID, req.Timestamp)
if err != nil {
if err == backend.ErrRunNotFound {
err = &platform.Error{
Code: platform.ENotFound,
Msg: "failed to force run",
Err: err,
}
err := &platform.Error{
Err: err,
Msg: "failed to force run",
}
if err.Err == backend.ErrTaskNotFound {
err.Code = platform.ENotFound
}
EncodeError(ctx, err, w)
return
@ -868,12 +965,12 @@ func (h *TaskHandler) handleGetRun(w http.ResponseWriter, r *http.Request) {
run, err := h.TaskService.FindRunByID(ctx, req.TaskID, req.RunID)
if err != nil {
if err == backend.ErrRunNotFound {
err = &platform.Error{
Err: err,
Msg: "failed to find run",
Code: platform.ENotFound,
}
err := &platform.Error{
Err: err,
Msg: "failed to find run",
}
if err.Err == backend.ErrTaskNotFound || err.Err == backend.ErrRunNotFound {
err.Code = platform.ENotFound
}
EncodeError(ctx, err, w)
return
@ -974,10 +1071,13 @@ func (h *TaskHandler) handleCancelRun(w http.ResponseWriter, r *http.Request) {
err = h.TaskService.CancelRun(ctx, req.TaskID, req.RunID)
if err != nil {
err = &platform.Error{
err := &platform.Error{
Err: err,
Msg: "failed to cancel run",
}
if err.Err == backend.ErrTaskNotFound || err.Err == backend.ErrRunNotFound {
err.Code = platform.ENotFound
}
EncodeError(ctx, err, w)
return
}
@ -999,12 +1099,12 @@ func (h *TaskHandler) handleRetryRun(w http.ResponseWriter, r *http.Request) {
run, err := h.TaskService.RetryRun(ctx, req.TaskID, req.RunID)
if err != nil {
if err == backend.ErrRunNotFound {
err = &platform.Error{
Code: platform.ENotFound,
Msg: "failed to retry run",
Err: err,
}
err := &platform.Error{
Err: err,
Msg: "failed to retry run",
}
if err.Err == backend.ErrTaskNotFound || err.Err == backend.ErrRunNotFound {
err.Code = platform.ENotFound
}
EncodeError(ctx, err, w)
return

View File

@ -4,37 +4,50 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"go.uber.org/zap"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
platform "github.com/influxdata/influxdb"
pcontext "github.com/influxdata/influxdb/context"
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/inmem"
"github.com/influxdata/influxdb/mock"
_ "github.com/influxdata/influxdb/query/builtin"
"github.com/influxdata/influxdb/task/backend"
platformtesting "github.com/influxdata/influxdb/testing"
"github.com/julienschmidt/httprouter"
)
func mockOrgService() platform.OrganizationService {
return &mock.OrganizationService{
FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) {
return &platform.Organization{ID: id, Name: "test"}, nil
},
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
org := &platform.Organization{}
if filter.Name != nil {
org.Name = *filter.Name
}
if filter.ID != nil {
org.ID = *filter.ID
}
return org, nil
// NewMockTaskBackend returns a TaskBackend with mock services.
func NewMockTaskBackend() *TaskBackend {
return &TaskBackend{
Logger: zap.NewNop().With(zap.String("handler", "task")),
AuthorizationService: mock.NewAuthorizationService(),
TaskService: &mock.TaskService{},
OrganizationService: &mock.OrganizationService{
FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) {
return &platform.Organization{ID: id, Name: "test"}, nil
},
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
org := &platform.Organization{}
if filter.Name != nil {
org.Name = *filter.Name
}
if filter.ID != nil {
org.ID = *filter.ID
}
return org, nil
},
},
UserResourceMappingService: mock.NewUserResourceMappingService(),
LabelService: mock.NewLabelService(),
UserService: mock.NewUserService(),
}
}
@ -161,10 +174,10 @@ func TestTaskHandler_handleGetTasks(t *testing.T) {
r := httptest.NewRequest("GET", "http://any.url", nil)
w := httptest.NewRecorder()
h := NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), logger.New(os.Stdout), mock.NewUserService())
h.OrganizationService = mockOrgService()
h.TaskService = tt.fields.taskService
h.LabelService = tt.fields.labelService
taskBackend := NewMockTaskBackend()
taskBackend.TaskService = tt.fields.taskService
taskBackend.LabelService = tt.fields.labelService
h := NewTaskHandler(taskBackend)
h.handleGetTasks(w, r)
res := w.Result()
@ -262,9 +275,9 @@ func TestTaskHandler_handlePostTasks(t *testing.T) {
w := httptest.NewRecorder()
h := NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), logger.New(os.Stdout), mock.NewUserService())
h.OrganizationService = mockOrgService()
h.TaskService = tt.fields.taskService
taskBackend := NewMockTaskBackend()
taskBackend.TaskService = tt.fields.taskService
h := NewTaskHandler(taskBackend)
h.handlePostTask(w, r)
res := w.Result()
@ -367,9 +380,9 @@ func TestTaskHandler_handleGetRun(t *testing.T) {
},
}))
w := httptest.NewRecorder()
h := NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), logger.New(os.Stdout), mock.NewUserService())
h.OrganizationService = mockOrgService()
h.TaskService = tt.fields.taskService
taskBackend := NewMockTaskBackend()
taskBackend.TaskService = tt.fields.taskService
h := NewTaskHandler(taskBackend)
h.handleGetRun(w, r)
res := w.Result()
@ -476,9 +489,9 @@ func TestTaskHandler_handleGetRuns(t *testing.T) {
},
}))
w := httptest.NewRecorder()
h := NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), logger.New(os.Stdout), mock.NewUserService())
h.OrganizationService = mockOrgService()
h.TaskService = tt.fields.taskService
taskBackend := NewMockTaskBackend()
taskBackend.TaskService = tt.fields.taskService
h := NewTaskHandler(taskBackend)
h.handleGetRuns(w, r)
res := w.Result()
@ -497,3 +510,328 @@ func TestTaskHandler_handleGetRuns(t *testing.T) {
})
}
}
func TestTaskHandler_NotFoundStatus(t *testing.T) {
// Ensure that the HTTP handlers return 404s for missing resources, and OKs for matching ones.
im := inmem.NewService()
taskBackend := NewMockTaskBackend()
h := NewTaskHandler(taskBackend)
h.UserResourceMappingService = im
h.LabelService = im
h.UserService = im
h.OrganizationService = im
o := platform.Organization{Name: "o"}
ctx := context.Background()
if err := h.OrganizationService.CreateOrganization(ctx, &o); err != nil {
t.Fatal(err)
}
const taskID, runID = platform.ID(0xCCCCCC), platform.ID(0xAAAAAA)
var (
okTask = []interface{}{taskID}
okTaskRun = []interface{}{taskID, runID}
notFoundTask = [][]interface{}{
{taskID + 1},
}
notFoundTaskRun = [][]interface{}{
{taskID, runID + 1},
{taskID + 1, runID},
{taskID + 1, runID + 1},
}
)
tcs := []struct {
name string
svc *mock.TaskService
method string
body string
pathFmt string
okPathArgs []interface{}
notFoundPathArgs [][]interface{}
}{
{
name: "get task",
svc: &mock.TaskService{
FindTaskByIDFn: func(_ context.Context, id platform.ID) (*platform.Task, error) {
if id == taskID {
return &platform.Task{ID: taskID, Organization: "o"}, nil
}
return nil, backend.ErrTaskNotFound
},
},
method: http.MethodGet,
pathFmt: "/tasks/%s",
okPathArgs: okTask,
notFoundPathArgs: notFoundTask,
},
{
name: "update task",
svc: &mock.TaskService{
UpdateTaskFn: func(_ context.Context, id platform.ID, _ platform.TaskUpdate) (*platform.Task, error) {
if id == taskID {
return &platform.Task{ID: taskID, Organization: "o"}, nil
}
return nil, backend.ErrTaskNotFound
},
},
method: http.MethodPatch,
body: "{}",
pathFmt: "/tasks/%s",
okPathArgs: okTask,
notFoundPathArgs: notFoundTask,
},
{
name: "delete task",
svc: &mock.TaskService{
DeleteTaskFn: func(_ context.Context, id platform.ID) error {
if id == taskID {
return nil
}
return backend.ErrTaskNotFound
},
},
method: http.MethodDelete,
pathFmt: "/tasks/%s",
okPathArgs: okTask,
notFoundPathArgs: notFoundTask,
},
{
name: "get task logs",
svc: &mock.TaskService{
FindLogsFn: func(_ context.Context, f platform.LogFilter) ([]*platform.Log, int, error) {
if *f.Task == taskID {
return nil, 0, nil
}
return nil, 0, backend.ErrTaskNotFound
},
},
method: http.MethodGet,
pathFmt: "/tasks/%s/logs",
okPathArgs: okTask,
notFoundPathArgs: notFoundTask,
},
{
name: "get run logs",
svc: &mock.TaskService{
FindLogsFn: func(_ context.Context, f platform.LogFilter) ([]*platform.Log, int, error) {
if *f.Task != taskID {
return nil, 0, backend.ErrTaskNotFound
}
if *f.Run != runID {
return nil, 0, backend.ErrRunNotFound
}
return nil, 0, nil
},
},
method: http.MethodGet,
pathFmt: "/tasks/%s/runs/%s/logs",
okPathArgs: okTaskRun,
notFoundPathArgs: notFoundTaskRun,
},
{
name: "get runs",
svc: &mock.TaskService{
FindRunsFn: func(_ context.Context, f platform.RunFilter) ([]*platform.Run, int, error) {
if *f.Task != taskID {
return nil, 0, backend.ErrTaskNotFound
}
return nil, 0, nil
},
},
method: http.MethodGet,
pathFmt: "/tasks/%s/runs",
okPathArgs: okTask,
notFoundPathArgs: notFoundTask,
},
{
name: "force run",
svc: &mock.TaskService{
ForceRunFn: func(_ context.Context, tid platform.ID, _ int64) (*platform.Run, error) {
if tid != taskID {
return nil, backend.ErrTaskNotFound
}
return &platform.Run{ID: runID, TaskID: taskID, Status: backend.RunScheduled.String()}, nil
},
},
method: http.MethodPost,
body: "{}",
pathFmt: "/tasks/%s/runs",
okPathArgs: okTask,
notFoundPathArgs: notFoundTask,
},
{
name: "get run",
svc: &mock.TaskService{
FindRunByIDFn: func(_ context.Context, tid, rid platform.ID) (*platform.Run, error) {
if tid != taskID {
return nil, backend.ErrTaskNotFound
}
if rid != runID {
return nil, backend.ErrRunNotFound
}
return &platform.Run{ID: runID, TaskID: taskID, Status: backend.RunScheduled.String()}, nil
},
},
method: http.MethodGet,
pathFmt: "/tasks/%s/runs/%s",
okPathArgs: okTaskRun,
notFoundPathArgs: notFoundTaskRun,
},
{
name: "retry run",
svc: &mock.TaskService{
RetryRunFn: func(_ context.Context, tid, rid platform.ID) (*platform.Run, error) {
if tid != taskID {
return nil, backend.ErrTaskNotFound
}
if rid != runID {
return nil, backend.ErrRunNotFound
}
return &platform.Run{ID: runID, TaskID: taskID, Status: backend.RunScheduled.String()}, nil
},
},
method: http.MethodPost,
pathFmt: "/tasks/%s/runs/%s/retry",
okPathArgs: okTaskRun,
notFoundPathArgs: notFoundTaskRun,
},
{
name: "cancel run",
svc: &mock.TaskService{
CancelRunFn: func(_ context.Context, tid, rid platform.ID) error {
if tid != taskID {
return backend.ErrTaskNotFound
}
if rid != runID {
return backend.ErrRunNotFound
}
return nil
},
},
method: http.MethodDelete,
pathFmt: "/tasks/%s/runs/%s",
okPathArgs: okTaskRun,
notFoundPathArgs: notFoundTaskRun,
},
}
for _, tc := range tcs {
tc := tc
t.Run(tc.name, func(t *testing.T) {
h.TaskService = tc.svc
okPath := fmt.Sprintf(tc.pathFmt, tc.okPathArgs...)
t.Run("matching ID: "+tc.method+" "+okPath, func(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest(tc.method, "http://task.example/api/v2"+okPath, strings.NewReader(tc.body))
h.ServeHTTP(w, r)
res := w.Result()
defer res.Body.Close()
if res.StatusCode < 200 || res.StatusCode > 299 {
t.Errorf("expected OK, got %d", res.StatusCode)
b, _ := ioutil.ReadAll(res.Body)
t.Fatalf("body: %s", string(b))
}
})
t.Run("mismatched ID", func(t *testing.T) {
for _, nfa := range tc.notFoundPathArgs {
path := fmt.Sprintf(tc.pathFmt, nfa...)
t.Run(tc.method+" "+path, func(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest(tc.method, "http://task.example/api/v2"+path, strings.NewReader(tc.body))
h.ServeHTTP(w, r)
res := w.Result()
defer res.Body.Close()
if res.StatusCode != http.StatusNotFound {
t.Errorf("expected Not Found, got %d", res.StatusCode)
b, _ := ioutil.ReadAll(res.Body)
t.Fatalf("body: %s", string(b))
}
})
}
})
})
}
}
func TestTaskUserResourceMap(t *testing.T) {
task := platform.Task{
Name: "task1",
OrganizationID: 1,
}
b, err := json.Marshal(task)
if err != nil {
t.Fatalf("failed to unmarshal task: %v", err)
}
r := httptest.NewRequest("POST", "http://any.url/v1", bytes.NewReader(b))
ctx := pcontext.SetAuthorizer(context.Background(), &platform.Authorization{UserID: 2})
r = r.WithContext(ctx)
w := httptest.NewRecorder()
var created *platform.UserResourceMapping
var deletedUser platform.ID
var deletedResource platform.ID
urms := &mock.UserResourceMappingService{
CreateMappingFn: func(_ context.Context, urm *platform.UserResourceMapping) error { created = urm; return nil },
DeleteMappingFn: func(_ context.Context, rid platform.ID, uid platform.ID) error {
deletedUser = uid
deletedResource = rid
return nil
},
FindMappingsFn: func(context.Context, platform.UserResourceMappingFilter) ([]*platform.UserResourceMapping, int, error) {
return []*platform.UserResourceMapping{created}, 1, nil
},
}
taskBackend := NewMockTaskBackend()
taskBackend.UserResourceMappingService = urms
h := NewTaskHandler(taskBackend)
taskID := platform.ID(1)
h.TaskService = &mock.TaskService{
CreateTaskFn: func(ctx context.Context, t *platform.Task) error {
t.ID = taskID
return nil
},
DeleteTaskFn: func(ctx context.Context, id platform.ID) error {
return nil
},
}
h.handlePostTask(w, r)
r = httptest.NewRequest("DELETE", "http://any.url/api/v2/tasks/"+taskID.String(), nil)
h.ServeHTTP(w, r)
if created.UserID != deletedUser {
t.Fatalf("deleted user (%s) doesn't match created user (%s)", deletedUser, created.UserID)
}
if created.ResourceID != deletedResource {
t.Fatalf("deleted resource (%s) doesn't match created resource (%s)", deletedResource, created.ResourceID)
}
}

View File

@ -8,13 +8,11 @@ import (
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/inmem"
"github.com/influxdata/influxdb/mock"
_ "github.com/influxdata/influxdb/query/builtin"
"github.com/influxdata/influxdb/task"
"github.com/influxdata/influxdb/task/backend"
tmock "github.com/influxdata/influxdb/task/mock"
"github.com/influxdata/influxdb/task/servicetest"
"go.uber.org/zap/zaptest"
)
func httpTaskServiceFactory(t *testing.T) (*servicetest.System, context.CancelFunc) {
@ -30,23 +28,7 @@ func httpTaskServiceFactory(t *testing.T) (*servicetest.System, context.CancelFu
h := http.NewAuthenticationHandler()
h.AuthorizationService = i
th := http.NewTaskHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), zaptest.NewLogger(t), mock.NewUserService())
th.OrganizationService = &mock.OrganizationService{
FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) {
return &platform.Organization{ID: id, Name: "test"}, nil
},
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
org := &platform.Organization{}
if filter.Name != nil {
org.Name = *filter.Name
}
if filter.ID != nil {
org.ID = *filter.ID
}
return org, nil
},
}
th := http.NewTaskHandler(http.NewMockTaskBackend())
th.TaskService = backingTS
th.AuthorizationService = i
h.Handler = th

View File

@ -14,6 +14,31 @@ import (
"go.uber.org/zap"
)
// TelegrafBackend is all services and associated parameters required to construct
// the TelegrafHandler.
type TelegrafBackend struct {
Logger *zap.Logger
TelegrafService platform.TelegrafConfigStore
UserResourceMappingService platform.UserResourceMappingService
LabelService platform.LabelService
UserService platform.UserService
OrganizationService platform.OrganizationService
}
// NewTelegrafBackend returns a new instance of TelegrafBackend.
func NewTelegrafBackend(b *APIBackend) *TelegrafBackend {
return &TelegrafBackend{
Logger: b.Logger.With(zap.String("handler", "telegraf")),
TelegrafService: b.TelegrafService,
UserResourceMappingService: b.UserResourceMappingService,
LabelService: b.LabelService,
UserService: b.UserService,
OrganizationService: b.OrganizationService,
}
}
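Given the constructor above, server startup can build the handler in one line. A sketch, assuming apiBackend is a fully-populated *APIBackend as NewTelegrafBackend implies:
// Hypothetical wiring at server startup.
telegrafHandler := NewTelegrafHandler(NewTelegrafBackend(apiBackend))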
// TelegrafHandler is the handler for the telegraf service
type TelegrafHandler struct {
*httprouter.Router
@ -38,23 +63,16 @@ const (
)
// NewTelegrafHandler returns a new instance of TelegrafHandler.
func NewTelegrafHandler(
	logger *zap.Logger,
	mappingService platform.UserResourceMappingService,
	labelService platform.LabelService,
	telegrafSvc platform.TelegrafConfigStore,
	userService platform.UserService,
	orgService platform.OrganizationService,
) *TelegrafHandler {
	h := &TelegrafHandler{
		Router:                     NewRouter(),
		UserResourceMappingService: mappingService,
		LabelService:               labelService,
		TelegrafService:            telegrafSvc,
		Logger:                     logger,
		UserService:                userService,
		OrganizationService:        orgService,
	}
func NewTelegrafHandler(b *TelegrafBackend) *TelegrafHandler {
	h := &TelegrafHandler{
		Router:                     NewRouter(),
		Logger:                     b.Logger,
		TelegrafService:            b.TelegrafService,
		UserResourceMappingService: b.UserResourceMappingService,
		LabelService:               b.LabelService,
		UserService:                b.UserService,
		OrganizationService:        b.OrganizationService,
	}
h.HandlerFunc("POST", telegrafsPath, h.handlePostTelegraf)
h.HandlerFunc("GET", telegrafsPath, h.handleGetTelegrafs)
@ -62,17 +80,36 @@ func NewTelegrafHandler(
h.HandlerFunc("DELETE", telegrafsIDPath, h.handleDeleteTelegraf)
h.HandlerFunc("PUT", telegrafsIDPath, h.handlePutTelegraf)
h.HandlerFunc("POST", telegrafsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.TelegrafsResourceType, platform.Member))
h.HandlerFunc("GET", telegrafsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.TelegrafsResourceType, platform.Member))
h.HandlerFunc("DELETE", telegrafsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member))
memberBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.TelegrafsResourceType,
UserType: platform.Member,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", telegrafsIDMembersPath, newPostMemberHandler(memberBackend))
h.HandlerFunc("GET", telegrafsIDMembersPath, newGetMembersHandler(memberBackend))
h.HandlerFunc("DELETE", telegrafsIDMembersIDPath, newDeleteMemberHandler(memberBackend))
h.HandlerFunc("POST", telegrafsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.TelegrafsResourceType, platform.Owner))
h.HandlerFunc("GET", telegrafsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.TelegrafsResourceType, platform.Owner))
h.HandlerFunc("DELETE", telegrafsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner))
ownerBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: platform.TelegrafsResourceType,
UserType: platform.Owner,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", telegrafsIDOwnersPath, newPostMemberHandler(ownerBackend))
h.HandlerFunc("GET", telegrafsIDOwnersPath, newGetMembersHandler(ownerBackend))
h.HandlerFunc("DELETE", telegrafsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend))
h.HandlerFunc("GET", telegrafsIDLabelsPath, newGetLabelsHandler(h.LabelService))
h.HandlerFunc("POST", telegrafsIDLabelsPath, newPostLabelHandler(h.LabelService))
h.HandlerFunc("DELETE", telegrafsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService))
labelBackend := &LabelBackend{
Logger: b.Logger.With(zap.String("handler", "label")),
LabelService: b.LabelService,
}
h.HandlerFunc("GET", telegrafsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", telegrafsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", telegrafsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", telegrafsIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}

View File

@ -10,13 +10,27 @@ import (
"strings"
"testing"
"go.uber.org/zap"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/mock"
"github.com/influxdata/influxdb/telegraf/plugins/inputs"
"github.com/influxdata/influxdb/telegraf/plugins/outputs"
"go.uber.org/zap/zaptest"
)
// NewMockTelegrafBackend returns a TelegrafBackend with mock services.
func NewMockTelegrafBackend() *TelegrafBackend {
return &TelegrafBackend{
Logger: zap.NewNop().With(zap.String("handler", "telegraf")),
TelegrafService: &mock.TelegrafConfigStore{},
UserResourceMappingService: mock.NewUserResourceMappingService(),
LabelService: mock.NewLabelService(),
UserService: mock.NewUserService(),
OrganizationService: mock.NewOrganizationService(),
}
}
func TestTelegrafHandler_handleGetTelegrafs(t *testing.T) {
type wants struct {
statusCode int
@ -36,7 +50,7 @@ func TestTelegrafHandler_handleGetTelegrafs(t *testing.T) {
FindTelegrafConfigsF: func(ctx context.Context, filter platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) {
if filter.OrganizationID != nil && *filter.OrganizationID == platform.ID(2) {
return []*platform.TelegrafConfig{
&platform.TelegrafConfig{
{
ID: platform.ID(1),
OrganizationID: platform.ID(2),
Name: "tc1",
@ -86,7 +100,7 @@ func TestTelegrafHandler_handleGetTelegrafs(t *testing.T) {
svc: &mock.TelegrafConfigStore{
FindTelegrafConfigsF: func(ctx context.Context, filter platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) {
return []*platform.TelegrafConfig{
&platform.TelegrafConfig{
{
ID: platform.ID(1),
OrganizationID: platform.ID(2),
Name: "my config",
@ -155,7 +169,9 @@ func TestTelegrafHandler_handleGetTelegrafs(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
w := httptest.NewRecorder()
h := NewTelegrafHandler(zaptest.NewLogger(t), mock.NewUserResourceMappingService(), mock.NewLabelService(), tt.svc, mock.NewUserService(), &mock.OrganizationService{})
telegrafBackend := NewMockTelegrafBackend()
telegrafBackend.TelegrafService = tt.svc
h := NewTelegrafHandler(telegrafBackend)
h.ServeHTTP(w, tt.r)
res := w.Result()
@ -674,15 +690,11 @@ func TestTelegrafHandler_handleGetTelegraf(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
logger := zaptest.NewLogger(t)
mapping := mock.NewUserResourceMappingService()
labels := mock.NewLabelService()
users := mock.NewUserService()
orgs := &mock.OrganizationService{}
tt.r.Header.Set("Accept", tt.acceptHeader)
w := httptest.NewRecorder()
h := NewTelegrafHandler(logger, mapping, labels, tt.svc, users, orgs)
telegrafBackend := NewMockTelegrafBackend()
telegrafBackend.TelegrafService = tt.svc
h := NewTelegrafHandler(telegrafBackend)
h.ServeHTTP(w, tt.r)
@ -722,7 +734,7 @@ func Test_newTelegrafResponses(t *testing.T) {
{
args: args{
tcs: []*platform.TelegrafConfig{
&platform.TelegrafConfig{
{
ID: platform.ID(1),
OrganizationID: platform.ID(2),
Name: "my config",

View File

@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"go.uber.org/zap"
"net/http"
"path"
@ -51,8 +52,20 @@ func newResourceUsersResponse(opts platform.FindOptions, f platform.UserResource
return &rs
}
// MemberBackend is all services and associated parameters required to construct
// member handler.
type MemberBackend struct {
Logger *zap.Logger
ResourceType platform.ResourceType
UserType platform.UserType
UserResourceMappingService platform.UserResourceMappingService
UserService platform.UserService
}
// newPostMemberHandler returns a handler func for a POST to /members or /owners endpoints
func newPostMemberHandler(s platform.UserResourceMappingService, userService platform.UserService, resourceType platform.ResourceType, userType platform.UserType) http.HandlerFunc {
func newPostMemberHandler(b MemberBackend) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@ -62,7 +75,7 @@ func newPostMemberHandler(s platform.UserResourceMappingService, userService pla
return
}
user, err := userService.FindUserByID(ctx, req.MemberID)
user, err := b.UserService.FindUserByID(ctx, req.MemberID)
if err != nil {
EncodeError(ctx, err, w)
return
@ -70,17 +83,17 @@ func newPostMemberHandler(s platform.UserResourceMappingService, userService pla
mapping := &platform.UserResourceMapping{
ResourceID: req.ResourceID,
ResourceType: resourceType,
ResourceType: b.ResourceType,
UserID: req.MemberID,
UserType: userType,
UserType: b.UserType,
}
if err := s.CreateUserResourceMapping(ctx, mapping); err != nil {
if err := b.UserResourceMappingService.CreateUserResourceMapping(ctx, mapping); err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusCreated, newResourceUserResponse(user, userType)); err != nil {
if err := encodeResponse(ctx, w, http.StatusCreated, newResourceUserResponse(user, b.UserType)); err != nil {
EncodeError(ctx, err, w)
return
}
@ -126,7 +139,7 @@ func decodePostMemberRequest(ctx context.Context, r *http.Request) (*postMemberR
}
// newGetMembersHandler returns a handler func for a GET to /members or /owners endpoints
func newGetMembersHandler(s platform.UserResourceMappingService, userService platform.UserService, resourceType platform.ResourceType, userType platform.UserType) http.HandlerFunc {
func newGetMembersHandler(b MemberBackend) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@ -138,12 +151,12 @@ func newGetMembersHandler(s platform.UserResourceMappingService, userService pla
filter := platform.UserResourceMappingFilter{
ResourceID: req.ResourceID,
ResourceType: resourceType,
UserType: userType,
ResourceType: b.ResourceType,
UserType: b.UserType,
}
opts := platform.FindOptions{}
mappings, _, err := s.FindUserResourceMappings(ctx, filter)
mappings, _, err := b.UserResourceMappingService.FindUserResourceMappings(ctx, filter)
if err != nil {
EncodeError(ctx, err, w)
return
@ -151,7 +164,7 @@ func newGetMembersHandler(s platform.UserResourceMappingService, userService pla
users := make([]*platform.User, 0, len(mappings))
for _, m := range mappings {
user, err := userService.FindUserByID(ctx, m.UserID)
user, err := b.UserService.FindUserByID(ctx, m.UserID)
if err != nil {
EncodeError(ctx, err, w)
return
@ -195,7 +208,7 @@ func decodeGetMembersRequest(ctx context.Context, r *http.Request) (*getMembersR
}
// newDeleteMemberHandler returns a handler func for a DELETE to /members or /owners endpoints
func newDeleteMemberHandler(s platform.UserResourceMappingService, userType platform.UserType) http.HandlerFunc {
func newDeleteMemberHandler(b MemberBackend) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@ -205,7 +218,7 @@ func newDeleteMemberHandler(s platform.UserResourceMappingService, userType plat
return
}
if err := s.DeleteUserResourceMapping(ctx, req.ResourceID, req.MemberID); err != nil {
if err := b.UserResourceMappingService.DeleteUserResourceMapping(ctx, req.ResourceID, req.MemberID); err != nil {
EncodeError(ctx, err, w)
return
}

View File

@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"go.uber.org/zap"
"io/ioutil"
"net/http"
"net/http/httptest"
@ -190,7 +191,14 @@ func TestUserResourceMappingService_GetMembersHandler(t *testing.T) {
}))
w := httptest.NewRecorder()
h := newGetMembersHandler(tt.fields.userResourceMappingService, tt.fields.userService, resourceType, tt.args.userType)
memberBackend := MemberBackend{
Logger: zap.NewNop().With(zap.String("handler", "member")),
ResourceType: resourceType,
UserType: tt.args.userType,
UserResourceMappingService: tt.fields.userResourceMappingService,
UserService: tt.fields.userService,
}
h := newGetMembersHandler(memberBackend)
h.ServeHTTP(w, r)
res := w.Result()
@ -340,7 +348,14 @@ func TestUserResourceMappingService_PostMembersHandler(t *testing.T) {
}))
w := httptest.NewRecorder()
h := newPostMemberHandler(tt.fields.userResourceMappingService, tt.fields.userService, resourceType, tt.args.userType)
memberBackend := MemberBackend{
Logger: zap.NewNop().With(zap.String("handler", "member")),
ResourceType: resourceType,
UserType: tt.args.userType,
UserResourceMappingService: tt.fields.userResourceMappingService,
UserService: tt.fields.userService,
}
h := newPostMemberHandler(memberBackend)
h.ServeHTTP(w, r)
res := w.Result()

View File

@ -5,18 +5,40 @@ import (
"context"
"encoding/json"
"fmt"
"go.uber.org/zap"
"net/http"
"path"
"strconv"
platform "github.com/influxdata/influxdb"
platcontext "github.com/influxdata/influxdb/context"
"github.com/julienschmidt/httprouter"
)
// UserBackend is all services and associated parameters required to construct
// the UserHandler.
type UserBackend struct {
Logger *zap.Logger
UserService platform.UserService
UserOperationLogService platform.UserOperationLogService
BasicAuthService platform.BasicAuthService
}
func NewUserBackend(b *APIBackend) *UserBackend {
return &UserBackend{
Logger: b.Logger.With(zap.String("handler", "user")),
UserService: b.UserService,
UserOperationLogService: b.UserOperationLogService,
BasicAuthService: b.BasicAuthService,
}
}
// UserHandler represents an HTTP API handler for users.
type UserHandler struct {
*httprouter.Router
Logger *zap.Logger
UserService platform.UserService
UserOperationLogService platform.UserOperationLogService
BasicAuthService platform.BasicAuthService
@ -32,9 +54,14 @@ const (
)
// NewUserHandler returns a new instance of UserHandler.
func NewUserHandler() *UserHandler {
func NewUserHandler(b *UserBackend) *UserHandler {
h := &UserHandler{
Router: NewRouter(),
Logger: b.Logger,
UserService: b.UserService,
UserOperationLogService: b.UserOperationLogService,
BasicAuthService: b.BasicAuthService,
}
h.HandlerFunc("POST", usersPath, h.handlePostUser)
@ -703,29 +730,14 @@ func decodeGetUserLogRequest(ctx context.Context, r *http.Request) (*getUserLogR
return nil, err
}
opts := platform.DefaultOperationLogFindOptions
qp := r.URL.Query()
if v := qp.Get("desc"); v == "false" {
opts.Descending = false
}
if v := qp.Get("limit"); v != "" {
i, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
opts.Limit = i
}
if v := qp.Get("offset"); v != "" {
i, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
opts.Offset = i
opts, err := decodeFindOptions(ctx, r)
if err != nil {
return nil, err
}
return &getUserLogRequest{
UserID: i,
opts: opts,
opts: *opts,
}, nil
}
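decodeFindOptions itself is not part of this diff. Based on the inline query parsing it replaces above, a plausible sketch of the shared helper (names and defaults are assumptions):
// Sketch only: reconstructed from the removed inline parsing; the real
// decodeFindOptions may differ in names and defaults.
func decodeFindOptions(ctx context.Context, r *http.Request) (*platform.FindOptions, error) {
	opts := platform.FindOptions{Descending: true} // assumed default ordering
	qp := r.URL.Query()
	if qp.Get("desc") == "false" {
		opts.Descending = false
	}
	if v := qp.Get("limit"); v != "" {
		i, err := strconv.Atoi(v)
		if err != nil {
			return nil, err
		}
		opts.Limit = i
	}
	if v := qp.Get("offset"); v != "" {
		i, err := strconv.Atoi(v)
		if err != nil {
			return nil, err
		}
		opts.Offset = i
	}
	return &opts, nil
}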

View File

@ -7,9 +7,23 @@ import (
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/inmem"
"github.com/influxdata/influxdb/mock"
platformtesting "github.com/influxdata/influxdb/testing"
"go.uber.org/zap"
)
// NewMockUserBackend returns a UserBackend with mock services.
func NewMockUserBackend() *UserBackend {
return &UserBackend{
Logger: zap.NewNop().With(zap.String("handler", "user")),
UserService: mock.NewUserService(),
UserOperationLogService: mock.NewUserOperationLogService(),
BasicAuthService: mock.NewBasicAuthService("", ""),
}
}
func initUserService(f platformtesting.UserFields, t *testing.T) (platform.UserService, string, func()) {
t.Helper()
svc := inmem.NewService()
@ -22,8 +36,9 @@ func initUserService(f platformtesting.UserFields, t *testing.T) (platform.UserS
}
}
handler := NewUserHandler()
handler.UserService = svc
userBackend := NewMockUserBackend()
userBackend.UserService = svc
handler := NewUserHandler(userBackend)
server := httptest.NewServer(handler)
client := UserService{
Addr: server.URL,

View File

@ -9,21 +9,43 @@ import (
"fmt"
"net/http"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb"
"github.com/julienschmidt/httprouter"
"go.uber.org/zap"
)
// ViewBackend is all services and associated parameters required to construct
// the ViewHandler.
type ViewBackend struct {
Logger *zap.Logger
ViewService influxdb.ViewService
UserService influxdb.UserService
UserResourceMappingService influxdb.UserResourceMappingService
LabelService influxdb.LabelService
}
// NewViewBackend returns a new instance of ViewBackend.
func NewViewBackend(b *APIBackend) *ViewBackend {
return &ViewBackend{
Logger: b.Logger.With(zap.String("handler", "scraper")),
ViewService: b.ViewService,
UserService: b.UserService,
LabelService: b.LabelService,
}
}
// ViewHandler is the handler for the view service
type ViewHandler struct {
*httprouter.Router
Logger *zap.Logger
ViewService platform.ViewService
UserResourceMappingService platform.UserResourceMappingService
LabelService platform.LabelService
UserService platform.UserService
ViewService influxdb.ViewService
UserService influxdb.UserService
UserResourceMappingService influxdb.UserResourceMappingService
LabelService influxdb.LabelService
}
const (
@ -38,14 +60,15 @@ const (
)
// NewViewHandler returns a new instance of ViewHandler.
func NewViewHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, userService platform.UserService) *ViewHandler {
	h := &ViewHandler{
		Router:                     NewRouter(),
		Logger:                     zap.NewNop(),
		UserResourceMappingService: mappingService,
		LabelService:               labelService,
		UserService:                userService,
	}
func NewViewHandler(b *ViewBackend) *ViewHandler {
	h := &ViewHandler{
		Router:                     NewRouter(),
		Logger:                     b.Logger,
		ViewService:                b.ViewService,
		UserResourceMappingService: b.UserResourceMappingService,
		LabelService:               b.LabelService,
		UserService:                b.UserService,
	}
h.HandlerFunc("POST", viewsPath, h.handlePostViews)
@ -55,17 +78,35 @@ func NewViewHandler(mappingService platform.UserResourceMappingService, labelSer
h.HandlerFunc("DELETE", viewsIDPath, h.handleDeleteView)
h.HandlerFunc("PATCH", viewsIDPath, h.handlePatchView)
h.HandlerFunc("POST", viewsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Member))
h.HandlerFunc("GET", viewsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Member))
h.HandlerFunc("DELETE", viewsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member))
memberBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: influxdb.ViewsResourceType,
UserType: influxdb.Member,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", viewsIDMembersPath, newPostMemberHandler(memberBackend))
h.HandlerFunc("GET", viewsIDMembersPath, newGetMembersHandler(memberBackend))
h.HandlerFunc("DELETE", viewsIDMembersIDPath, newDeleteMemberHandler(memberBackend))
h.HandlerFunc("POST", viewsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Owner))
h.HandlerFunc("GET", viewsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResourceType, platform.Owner))
h.HandlerFunc("DELETE", viewsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner))
ownerBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: influxdb.ViewsResourceType,
UserType: influxdb.Owner,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", viewsIDOwnersPath, newPostMemberHandler(ownerBackend))
h.HandlerFunc("GET", viewsIDOwnersPath, newGetMembersHandler(ownerBackend))
h.HandlerFunc("DELETE", viewsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend))
h.HandlerFunc("GET", viewsIDLabelsPath, newGetLabelsHandler(h.LabelService))
h.HandlerFunc("POST", viewsIDLabelsPath, newPostLabelHandler(h.LabelService))
h.HandlerFunc("DELETE", viewsIDLabelsIDPath, newDeleteLabelHandler(h.LabelService))
labelBackend := &LabelBackend{
Logger: b.Logger.With(zap.String("handler", "label")),
LabelService: b.LabelService,
}
h.HandlerFunc("GET", viewsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", viewsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", viewsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
return h
}
@ -76,18 +117,18 @@ type viewLinks struct {
}
type viewResponse struct {
platform.View
influxdb.View
Links viewLinks `json:"links"`
}
func (r viewResponse) MarshalJSON() ([]byte, error) {
props, err := platform.MarshalViewPropertiesJSON(r.Properties)
props, err := influxdb.MarshalViewPropertiesJSON(r.Properties)
if err != nil {
return nil, err
}
return json.Marshal(struct {
platform.ViewContents
influxdb.ViewContents
Links viewLinks `json:"links"`
Properties json.RawMessage `json:"properties"`
}{
@ -97,7 +138,7 @@ func (r viewResponse) MarshalJSON() ([]byte, error) {
})
}
func newViewResponse(c *platform.View) viewResponse {
func newViewResponse(c *influxdb.View) viewResponse {
return viewResponse{
Links: viewLinks{
Self: fmt.Sprintf("/api/v2/views/%s", c.ID),
@ -126,14 +167,14 @@ func (h *ViewHandler) handleGetViews(w http.ResponseWriter, r *http.Request) {
}
type getViewsRequest struct {
filter platform.ViewFilter
filter influxdb.ViewFilter
}
func decodeGetViewsRequest(ctx context.Context, r *http.Request) *getViewsRequest {
qp := r.URL.Query()
return &getViewsRequest{
filter: platform.ViewFilter{
filter: influxdb.ViewFilter{
Types: qp["type"],
},
}
@ -148,7 +189,7 @@ type getViewsResponse struct {
Views []viewResponse `json:"views"`
}
func newGetViewsResponse(views []*platform.View) getViewsResponse {
func newGetViewsResponse(views []*influxdb.View) getViewsResponse {
res := getViewsResponse{
Links: getViewsLinks{
Self: "/api/v2/views",
@ -184,14 +225,14 @@ func (h *ViewHandler) handlePostViews(w http.ResponseWriter, r *http.Request) {
}
type postViewRequest struct {
View *platform.View
View *influxdb.View
}
func decodePostViewRequest(ctx context.Context, r *http.Request) (*postViewRequest, error) {
c := &platform.View{}
c := &influxdb.View{}
if err := json.NewDecoder(r.Body).Decode(c); err != nil {
return nil, &platform.Error{
Code: platform.EInvalid,
return nil, &influxdb.Error{
Code: influxdb.EInvalid,
Msg: err.Error(),
}
}
@ -223,20 +264,20 @@ func (h *ViewHandler) handleGetView(w http.ResponseWriter, r *http.Request) {
}
type getViewRequest struct {
ViewID platform.ID
ViewID influxdb.ID
}
func decodeGetViewRequest(ctx context.Context, r *http.Request) (*getViewRequest, error) {
params := httprouter.ParamsFromContext(ctx)
id := params.ByName("id")
if id == "" {
return nil, &platform.Error{
Code: platform.EInvalid,
return nil, &influxdb.Error{
Code: influxdb.EInvalid,
Msg: "url missing id",
}
}
var i platform.ID
var i influxdb.ID
if err := i.DecodeFromString(id); err != nil {
return nil, err
}
@ -265,20 +306,20 @@ func (h *ViewHandler) handleDeleteView(w http.ResponseWriter, r *http.Request) {
}
type deleteViewRequest struct {
ViewID platform.ID
ViewID influxdb.ID
}
func decodeDeleteViewRequest(ctx context.Context, r *http.Request) (*deleteViewRequest, error) {
params := httprouter.ParamsFromContext(ctx)
id := params.ByName("id")
if id == "" {
return nil, &platform.Error{
Code: platform.EInvalid,
return nil, &influxdb.Error{
Code: influxdb.EInvalid,
Msg: "url missing id",
}
}
var i platform.ID
var i influxdb.ID
if err := i.DecodeFromString(id); err != nil {
return nil, err
}
@ -310,16 +351,16 @@ func (h *ViewHandler) handlePatchView(w http.ResponseWriter, r *http.Request) {
}
type patchViewRequest struct {
ViewID platform.ID
Upd platform.ViewUpdate
ViewID influxdb.ID
Upd influxdb.ViewUpdate
}
func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewRequest, *platform.Error) {
func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewRequest, *influxdb.Error) {
req := &patchViewRequest{}
upd := platform.ViewUpdate{}
upd := influxdb.ViewUpdate{}
if err := json.NewDecoder(r.Body).Decode(&upd); err != nil {
return nil, &platform.Error{
Code: platform.EInvalid,
return nil, &influxdb.Error{
Code: influxdb.EInvalid,
Msg: err.Error(),
}
}
@ -329,15 +370,15 @@ func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewReq
params := httprouter.ParamsFromContext(ctx)
id := params.ByName("id")
if id == "" {
return nil, &platform.Error{
Code: platform.EInvalid,
return nil, &influxdb.Error{
Code: influxdb.EInvalid,
Msg: "url missing id",
}
}
var i platform.ID
var i influxdb.ID
if err := i.DecodeFromString(id); err != nil {
return nil, &platform.Error{
Code: platform.EInvalid,
return nil, &influxdb.Error{
Code: influxdb.EInvalid,
Err: err,
}
}
@ -345,7 +386,7 @@ func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewReq
req.ViewID = i
if err := req.Valid(); err != nil {
return nil, &platform.Error{
return nil, &influxdb.Error{
Err: err,
}
}
@ -354,10 +395,10 @@ func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewReq
}
// Valid validates that the view ID is non-zero and that the update has expected values set.
func (r *patchViewRequest) Valid() *platform.Error {
func (r *patchViewRequest) Valid() *influxdb.Error {
if !r.ViewID.Valid() {
return &platform.Error{
Code: platform.EInvalid,
return &influxdb.Error{
Code: influxdb.EInvalid,
Msg: "missing view ID",
}
}

View File

@ -11,17 +11,30 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/mock"
platformtesting "github.com/influxdata/influxdb/testing"
influxdbtesting "github.com/influxdata/influxdb/testing"
"github.com/julienschmidt/httprouter"
"github.com/yudai/gojsondiff"
"github.com/yudai/gojsondiff/formatter"
"go.uber.org/zap"
)
// NewMockViewBackend returns a ViewBackend with mock services.
func NewMockViewBackend() *ViewBackend {
return &ViewBackend{
Logger: zap.NewNop().With(zap.String("handler", "view")),
ViewService: &mock.ViewService{},
UserService: mock.NewUserService(),
UserResourceMappingService: &mock.UserResourceMappingService{},
LabelService: mock.NewLabelService(),
}
}
func TestService_handleGetViews(t *testing.T) {
type fields struct {
ViewService platform.ViewService
ViewService influxdb.ViewService
}
type args struct {
queryParams map[string][]string
@ -42,20 +55,20 @@ func TestService_handleGetViews(t *testing.T) {
name: "get all views",
fields: fields{
&mock.ViewService{
FindViewsF: func(ctx context.Context, filter platform.ViewFilter) ([]*platform.View, int, error) {
return []*platform.View{
FindViewsF: func(ctx context.Context, filter influxdb.ViewFilter) ([]*influxdb.View, int, error) {
return []*influxdb.View{
{
ViewContents: platform.ViewContents{
ID: platformtesting.MustIDBase16("7365637465747572"),
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("7365637465747572"),
Name: "hello",
},
Properties: platform.XYViewProperties{
Properties: influxdb.XYViewProperties{
Type: "xy",
},
},
{
ViewContents: platform.ViewContents{
ID: platformtesting.MustIDBase16("6167697474697320"),
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("6167697474697320"),
Name: "example",
},
},
@ -111,8 +124,8 @@ func TestService_handleGetViews(t *testing.T) {
name: "get all views when there are none",
fields: fields{
&mock.ViewService{
FindViewsF: func(ctx context.Context, filter platform.ViewFilter) ([]*platform.View, int, error) {
return []*platform.View{}, 0, nil
FindViewsF: func(ctx context.Context, filter influxdb.ViewFilter) ([]*influxdb.View, int, error) {
return []*influxdb.View{}, 0, nil
},
},
},
@ -133,8 +146,9 @@ func TestService_handleGetViews(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService())
h.ViewService = tt.fields.ViewService
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@ -170,7 +184,7 @@ func TestService_handleGetViews(t *testing.T) {
func TestService_handleGetView(t *testing.T) {
type fields struct {
ViewService platform.ViewService
ViewService influxdb.ViewService
}
type args struct {
id string
@ -191,10 +205,10 @@ func TestService_handleGetView(t *testing.T) {
name: "get a view by id",
fields: fields{
&mock.ViewService{
FindViewByIDF: func(ctx context.Context, id platform.ID) (*platform.View, error) {
return &platform.View{
ViewContents: platform.ViewContents{
ID: platformtesting.MustIDBase16("020f755c3c082000"),
FindViewByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.View, error) {
return &influxdb.View{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("020f755c3c082000"),
Name: "example",
},
}, nil
@ -226,10 +240,10 @@ func TestService_handleGetView(t *testing.T) {
name: "not found",
fields: fields{
&mock.ViewService{
FindViewByIDF: func(ctx context.Context, id platform.ID) (*platform.View, error) {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: platform.ErrViewNotFound,
FindViewByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.View, error) {
return nil, &influxdb.Error{
Code: influxdb.ENotFound,
Msg: influxdb.ErrViewNotFound,
}
},
},
@ -245,8 +259,9 @@ func TestService_handleGetView(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService())
h.ViewService = tt.fields.ViewService
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@ -283,10 +298,10 @@ func TestService_handleGetView(t *testing.T) {
func TestService_handlePostViews(t *testing.T) {
type fields struct {
ViewService platform.ViewService
ViewService influxdb.ViewService
}
type args struct {
view *platform.View
view *influxdb.View
}
type wants struct {
statusCode int
@ -304,19 +319,19 @@ func TestService_handlePostViews(t *testing.T) {
name: "create a new view",
fields: fields{
&mock.ViewService{
CreateViewF: func(ctx context.Context, c *platform.View) error {
c.ID = platformtesting.MustIDBase16("020f755c3c082000")
CreateViewF: func(ctx context.Context, c *influxdb.View) error {
c.ID = influxdbtesting.MustIDBase16("020f755c3c082000")
return nil
},
},
},
args: args{
view: &platform.View{
ViewContents: platform.ViewContents{
ID: platformtesting.MustIDBase16("020f755c3c082000"),
view: &influxdb.View{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("020f755c3c082000"),
Name: "hello",
},
Properties: platform.XYViewProperties{
Properties: influxdb.XYViewProperties{
Type: "xy",
},
},
@ -351,8 +366,9 @@ func TestService_handlePostViews(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService())
h.ViewService = tt.fields.ViewService
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
b, err := json.Marshal(tt.args.view)
if err != nil {
@ -383,7 +399,7 @@ func TestService_handlePostViews(t *testing.T) {
func TestService_handleDeleteView(t *testing.T) {
type fields struct {
ViewService platform.ViewService
ViewService influxdb.ViewService
}
type args struct {
id string
@ -404,8 +420,8 @@ func TestService_handleDeleteView(t *testing.T) {
name: "remove a view by id",
fields: fields{
&mock.ViewService{
DeleteViewF: func(ctx context.Context, id platform.ID) error {
if id == platformtesting.MustIDBase16("020f755c3c082000") {
DeleteViewF: func(ctx context.Context, id influxdb.ID) error {
if id == influxdbtesting.MustIDBase16("020f755c3c082000") {
return nil
}
@ -424,10 +440,10 @@ func TestService_handleDeleteView(t *testing.T) {
name: "view not found",
fields: fields{
&mock.ViewService{
DeleteViewF: func(ctx context.Context, id platform.ID) error {
return &platform.Error{
Code: platform.ENotFound,
Msg: platform.ErrViewNotFound,
DeleteViewF: func(ctx context.Context, id influxdb.ID) error {
return &influxdb.Error{
Code: influxdb.ENotFound,
Msg: influxdb.ErrViewNotFound,
}
},
},
@ -443,8 +459,9 @@ func TestService_handleDeleteView(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService())
h.ViewService = tt.fields.ViewService
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
@ -481,12 +498,12 @@ func TestService_handleDeleteView(t *testing.T) {
func TestService_handlePatchView(t *testing.T) {
type fields struct {
ViewService platform.ViewService
ViewService influxdb.ViewService
}
type args struct {
id string
name string
properties platform.ViewProperties
properties influxdb.ViewProperties
}
type wants struct {
statusCode int
@ -504,14 +521,14 @@ func TestService_handlePatchView(t *testing.T) {
name: "update a view",
fields: fields{
&mock.ViewService{
UpdateViewF: func(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
if id == platformtesting.MustIDBase16("020f755c3c082000") {
return &platform.View{
ViewContents: platform.ViewContents{
ID: platformtesting.MustIDBase16("020f755c3c082000"),
UpdateViewF: func(ctx context.Context, id influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) {
if id == influxdbtesting.MustIDBase16("020f755c3c082000") {
return &influxdb.View{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("020f755c3c082000"),
Name: "example",
},
Properties: platform.XYViewProperties{
Properties: influxdb.XYViewProperties{
Type: "xy",
},
}, nil
@ -555,14 +572,14 @@ func TestService_handlePatchView(t *testing.T) {
name: "update a view with empty request body",
fields: fields{
&mock.ViewService{
UpdateViewF: func(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
if id == platformtesting.MustIDBase16("020f755c3c082000") {
return &platform.View{
ViewContents: platform.ViewContents{
ID: platformtesting.MustIDBase16("020f755c3c082000"),
UpdateViewF: func(ctx context.Context, id influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) {
if id == influxdbtesting.MustIDBase16("020f755c3c082000") {
return &influxdb.View{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("020f755c3c082000"),
Name: "example",
},
Properties: platform.XYViewProperties{
Properties: influxdb.XYViewProperties{
Type: "xy",
},
}, nil
@ -583,10 +600,10 @@ func TestService_handlePatchView(t *testing.T) {
name: "view not found",
fields: fields{
&mock.ViewService{
UpdateViewF: func(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: platform.ErrViewNotFound,
UpdateViewF: func(ctx context.Context, id influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) {
return nil, &influxdb.Error{
Code: influxdb.ENotFound,
Msg: influxdb.ErrViewNotFound,
}
},
},
@ -603,10 +620,11 @@ func TestService_handlePatchView(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h := NewViewHandler(mock.NewUserResourceMappingService(), mock.NewLabelService(), mock.NewUserService())
h.ViewService = tt.fields.ViewService
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
upd := platform.ViewUpdate{}
upd := influxdb.ViewUpdate{}
if tt.args.name != "" {
upd.Name = &tt.args.name
}
@ -688,9 +706,8 @@ func jsonEqual(s1, s2 string) (eq bool, diff string, err error) {
return cmp.Equal(o1, o2), diff, err
}
/* TODO: Add a go view service client
func initViewService(f platformtesting.ViewFields, t *testing.T) (platform.ViewService, func()) {
/* todo
func initViewService(f influxdbtesting.ViewFields, t *testing.T) (influxdb.ViewService, func()) {
t.Helper()
svc := inmem.NewService()
svc.IDGenerator = f.IDGenerator
@ -713,22 +730,7 @@ func initViewService(f platformtesting.ViewFields, t *testing.T) (platform.ViewS
return &client, done
}
func TestViewService_CreateView(t *testing.T) {
platformtesting.CreateView(initViewService, t)
}
func TestViewService_FindViewByID(t *testing.T) {
platformtesting.FindViewByID(initViewService, t)
}
func TestViewService_FindViews(t *testing.T) {
platformtesting.FindViews(initViewService, t)
}
func TestViewService_DeleteView(t *testing.T) {
platformtesting.DeleteView(initViewService, t)
}
func TestViewService_UpdateView(t *testing.T) {
platformtesting.UpdateView(initViewService, t)
func TestViewService(t *testing.T) {
influxdbtesting.ViewService(initViewService, t)
}
*/

View File

@ -18,6 +18,27 @@ import (
"go.uber.org/zap"
)
// WriteBackend is all services and associated parameters required to construct
// the WriteHandler.
type WriteBackend struct {
Logger *zap.Logger
PointsWriter storage.PointsWriter
BucketService platform.BucketService
OrganizationService platform.OrganizationService
}
// NewWriteBackend returns a new instance of WriteBackend.
func NewWriteBackend(b *APIBackend) *WriteBackend {
return &WriteBackend{
Logger: b.Logger.With(zap.String("handler", "write")),
PointsWriter: b.PointsWriter,
BucketService: b.BucketService,
OrganizationService: b.OrganizationService,
}
}
// WriteHandler receives line protocol and sends to a publish function.
type WriteHandler struct {
*httprouter.Router
@ -37,11 +58,14 @@ const (
)
// NewWriteHandler creates a new handler at /api/v2/write to receive line protocol.
func NewWriteHandler(writer storage.PointsWriter) *WriteHandler {
	h := &WriteHandler{
		Router:       NewRouter(),
		Logger:       zap.NewNop(),
		PointsWriter: writer,
	}
func NewWriteHandler(b *WriteBackend) *WriteHandler {
	h := &WriteHandler{
		Router:              NewRouter(),
		Logger:              b.Logger,
		PointsWriter:        b.PointsWriter,
		BucketService:       b.BucketService,
		OrganizationService: b.OrganizationService,
	}
h.HandlerFunc("POST", writePath, h.handleWrite)

View File

@ -24,6 +24,39 @@ type DashboardService struct {
ReplaceDashboardCellsF func(ctx context.Context, id platform.ID, cs []*platform.Cell) error
}
// NewDashboardService returns a mock of DashboardService where its methods will return zero values.
func NewDashboardService() *DashboardService {
return &DashboardService{
CreateDashboardF: func(context.Context, *platform.Dashboard) error { return nil },
FindDashboardByIDF: func(context.Context, platform.ID) (*platform.Dashboard, error) { return nil, nil },
FindDashboardsF: func(context.Context, platform.DashboardFilter, platform.FindOptions) ([]*platform.Dashboard, int, error) {
return nil, 0, nil
},
UpdateDashboardF: func(context.Context, platform.ID, platform.DashboardUpdate) (*platform.Dashboard, error) {
return nil, nil
},
DeleteDashboardF: func(context.Context, platform.ID) error { return nil },
AddDashboardCellF: func(ctx context.Context, id platform.ID, c *platform.Cell, opts platform.AddDashboardCellOptions) error {
return nil
},
RemoveDashboardCellF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID) error { return nil },
GetDashboardCellViewF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID) (*platform.View, error) {
return nil, nil
},
UpdateDashboardCellViewF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
return nil, nil
},
UpdateDashboardCellF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID, upd platform.CellUpdate) (*platform.Cell, error) {
return nil, nil
},
CopyDashboardCellF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID) (*platform.Cell, error) {
return nil, nil
},
ReplaceDashboardCellsF: func(ctx context.Context, id platform.ID, cs []*platform.Cell) error { return nil },
}
}
func (s *DashboardService) FindDashboardByID(ctx context.Context, id platform.ID) (*platform.Dashboard, error) {
return s.FindDashboardByIDF(ctx, id)
}

View File

@ -11,6 +11,15 @@ type LookupService struct {
NameFn func(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error)
}
// NewLookupService returns a mock of LookupService where its methods will return zero values.
func NewLookupService() *LookupService {
return &LookupService{
NameFn: func(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error) {
return "", nil
},
}
}
// Name returns the name for the resource and ID.
func (s *LookupService) Name(ctx context.Context, resource platform.ResourceType, id platform.ID) (string, error) {
return s.NameFn(ctx, resource, id)

View File

@ -17,6 +17,22 @@ type MacroService struct {
DeleteMacroF func(context.Context, platform.ID) error
}
// NewMacroService returns a mock of MacroService where its methods will return zero values.
func NewMacroService() *MacroService {
return &MacroService{
FindMacrosF: func(context.Context, platform.MacroFilter, ...platform.FindOptions) ([]*platform.Macro, error) {
return nil, nil
},
FindMacroByIDF: func(context.Context, platform.ID) (*platform.Macro, error) { return nil, nil },
CreateMacroF: func(context.Context, *platform.Macro) error { return nil },
UpdateMacroF: func(ctx context.Context, id platform.ID, update *platform.MacroUpdate) (*platform.Macro, error) {
return nil, nil
},
ReplaceMacroF: func(context.Context, *platform.Macro) error { return nil },
DeleteMacroF: func(context.Context, platform.ID) error { return nil },
}
}
func (s *MacroService) CreateMacro(ctx context.Context, macro *platform.Macro) error {
return s.CreateMacroF(ctx, macro)
}

View File

@ -2,7 +2,6 @@ package mock
import (
"context"
platform "github.com/influxdata/influxdb"
)
@ -18,6 +17,25 @@ type OrganizationService struct {
DeleteOrganizationF func(ctx context.Context, id platform.ID) error
}
// NewOrganizationService returns a mock OrganizationService where its methods will return
// zero values.
func NewOrganizationService() *OrganizationService {
return &OrganizationService{
FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*platform.Organization, error) { return nil, nil },
FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) {
return nil, nil
},
FindOrganizationsF: func(ctx context.Context, filter platform.OrganizationFilter, opt ...platform.FindOptions) ([]*platform.Organization, int, error) {
return nil, 0, nil
},
CreateOrganizationF: func(ctx context.Context, b *platform.Organization) error { return nil },
UpdateOrganizationF: func(ctx context.Context, id platform.ID, upd platform.OrganizationUpdate) (*platform.Organization, error) {
return nil, nil
},
DeleteOrganizationF: func(ctx context.Context, id platform.ID) error { return nil },
}
}
//FindOrganizationByID calls FindOrganizationByIDF.
func (s *OrganizationService) FindOrganizationByID(ctx context.Context, id platform.ID) (*platform.Organization, error) {
return s.FindOrganizationByIDF(ctx, id)

View File

@ -20,7 +20,7 @@ type UserService struct {
UpdateUserFn func(context.Context, platform.ID, platform.UserUpdate) (*platform.User, error)
}
// NewUserService returns a mock of NewUserService where its methods will return zero values.
// NewUserService returns a mock of UserService where its methods will return zero values.
func NewUserService() *UserService {
return &UserService{
FindUserByIDFn: func(context.Context, platform.ID) (*platform.User, error) { return nil, nil },

View File

@ -1353,6 +1353,15 @@ func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error)
}, nil
}
// NewPointFromSeries returns a Point given the serialized key, some fields, and a time.
func NewPointFromSeries(key []byte, fields Fields, t time.Time) Point {
return &point{
key: key,
time: t,
fields: fields.MarshalBinary(),
}
}
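A hedged usage sketch for NewPointFromSeries: callers that already hold a serialized series key (the WAL replay path later in this diff, for instance) can rebuild a point without re-parsing or re-validating measurement and tags. The key and field below are illustrative:
// Sketch: rebuild a point from an existing series key and field set.
key := []byte("cpu,host=server01") // assumed pre-serialized series key
pt := models.NewPointFromSeries(key, models.Fields{"value": 64.2}, time.Now())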
// pointKey checks some basic requirements for valid points, and returns the
// key, along with a possible error.
func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) {

View File

@ -12,9 +12,11 @@ import (
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/storage/wal"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/tsi1"
"github.com/influxdata/influxdb/tsdb/tsm1"
"github.com/influxdata/influxdb/tsdb/value"
"github.com/influxdata/influxql"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
@ -38,7 +40,7 @@ type Engine struct {
index *tsi1.Index
sfile *tsdb.SeriesFile
engine *tsm1.Engine
wal *tsm1.WAL
wal *wal.WAL
retentionEnforcer *retentionEnforcer
defaultMetricLabels prometheus.Labels
@ -119,27 +121,28 @@ func NewEngine(path string, c Config, options ...Option) *Engine {
tsi1.WithPath(c.GetIndexPath(path)))
// Initialize WAL
var wal tsm1.Log = new(tsm1.NopWAL)
if c.WAL.Enabled {
e.wal = tsm1.NewWAL(c.GetWALPath(path))
e.wal.WithFsyncDelay(time.Duration(c.WAL.FsyncDelay))
e.wal.EnableTraceLogging(c.TraceLoggingEnabled)
wal = e.wal
}
e.wal = wal.NewWAL(c.GetWALPath(path))
e.wal.WithFsyncDelay(time.Duration(c.WAL.FsyncDelay))
e.wal.EnableTraceLogging(c.TraceLoggingEnabled)
e.wal.SetEnabled(c.WAL.Enabled)
// Initialise Engine
e.engine = tsm1.NewEngine(c.GetEnginePath(path), e.index, c.Engine,
tsm1.WithWAL(wal),
tsm1.WithTraceLogging(c.TraceLoggingEnabled))
tsm1.WithTraceLogging(c.TraceLoggingEnabled),
tsm1.WithSnapshotter(e))
// Apply options.
for _, option := range options {
option(e)
}
// Set default metrics labels.
e.engine.SetDefaultMetricLabels(e.defaultMetricLabels)
e.sfile.SetDefaultMetricLabels(e.defaultMetricLabels)
e.index.SetDefaultMetricLabels(e.defaultMetricLabels)
if e.wal != nil {
e.wal.SetDefaultMetricLabels(e.defaultMetricLabels)
}
return e
}
@ -160,6 +163,7 @@ func (e *Engine) WithLogger(log *zap.Logger) {
e.sfile.WithLogger(e.logger)
e.index.WithLogger(e.logger)
e.engine.WithLogger(e.logger)
e.wal.WithLogger(e.logger)
e.retentionEnforcer.WithLogger(e.logger)
}
@ -170,13 +174,14 @@ func (e *Engine) PrometheusCollectors() []prometheus.Collector {
metrics = append(metrics, tsdb.PrometheusCollectors()...)
metrics = append(metrics, tsi1.PrometheusCollectors()...)
metrics = append(metrics, tsm1.PrometheusCollectors()...)
metrics = append(metrics, wal.PrometheusCollectors()...)
metrics = append(metrics, e.retentionEnforcer.PrometheusCollectors()...)
return metrics
}
// Open opens the store and all underlying resources. It returns an error if
// any of the underlying systems fail to open.
func (e *Engine) Open() error {
func (e *Engine) Open() (err error) {
e.mu.Lock()
defer e.mu.Unlock()
@ -184,19 +189,20 @@ func (e *Engine) Open() error {
return nil // Already open
}
if err := e.sfile.Open(); err != nil {
// Open the services in order and clean up if any fail.
var oh openHelper
oh.Open(e.sfile)
oh.Open(e.index)
oh.Open(e.wal)
oh.Open(e.engine)
if err := oh.Done(); err != nil {
return err
}
if err := e.index.Open(); err != nil {
if err := e.replayWAL(); err != nil {
return err
}
if err := e.engine.Open(); err != nil {
return err
}
e.engine.SetCompactionsEnabled(true) // TODO(edd): is this needed?
e.closing = make(chan struct{})
// TODO(edd) background tasks will be run in priority order via a scheduler.
@ -207,6 +213,54 @@ func (e *Engine) Open() error {
return nil
}
// replayWAL reads the WAL segment files and replays them.
func (e *Engine) replayWAL() error {
if !e.config.WAL.Enabled {
return nil
}
now := time.Now()
walPaths, err := wal.SegmentFileNames(e.wal.Path())
if err != nil {
return err
}
// TODO(jeff): we should just do snapshots and wait for them so that we don't hit
// OOM situations when reloading huge WALs.
// Disable the max size during loading
limit := e.engine.Cache.MaxSize()
defer func() { e.engine.Cache.SetMaxSize(limit) }()
e.engine.Cache.SetMaxSize(0)
// Execute all the entries in the WAL again
reader := wal.NewWALReader(walPaths)
reader.WithLogger(e.logger)
err = reader.Read(func(entry wal.WALEntry) error {
switch en := entry.(type) {
case *wal.WriteWALEntry:
points := tsm1.ValuesToPoints(en.Values)
err := e.writePointsLocked(tsdb.NewSeriesCollection(points), en.Values)
if _, ok := err.(tsdb.PartialWriteError); ok {
err = nil
}
return err
case *wal.DeleteBucketRangeWALEntry:
return e.deleteBucketRangeLocked(en.OrgID, en.BucketID, en.Min, en.Max)
}
return nil
})
e.logger.Info("Reloaded WAL",
zap.String("path", e.wal.Path()),
zap.Duration("duration", time.Since(now)),
zap.Error(err))
return err
}
// runRetentionEnforcer runs the retention enforcer in a separate goroutine.
//
// Currently this just runs on an interval, but in the future we will add the
@ -267,17 +321,15 @@ func (e *Engine) Close() error {
defer e.mu.Unlock()
e.closing = nil
if err := e.sfile.Close(); err != nil {
return err
}
if err := e.index.Close(); err != nil {
return err
}
return e.engine.Close()
var ch closeHelper
ch.Close(e.engine)
ch.Close(e.wal)
ch.Close(e.index)
ch.Close(e.sfile)
return ch.Done()
}
// CreateSeriesCursor creates a SeriesCursor for usage with the read service.
func (e *Engine) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (SeriesCursor, error) {
e.mu.RLock()
defer e.mu.RUnlock()
@ -287,6 +339,7 @@ func (e *Engine) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest
return newSeriesCursor(req, e.index, cond)
}
// CreateCursorIterator creates a CursorIterator for usage with the read service.
func (e *Engine) CreateCursorIterator(ctx context.Context) (tsdb.CursorIterator, error) {
e.mu.RLock()
defer e.mu.RUnlock()
@ -302,9 +355,7 @@ func (e *Engine) CreateCursorIterator(ctx context.Context) (tsdb.CursorIterator,
// WritePoints will however determine if there are any field type conflicts, and
// return an appropriate error in that case.
func (e *Engine) WritePoints(points []models.Point) error {
collection := tsdb.NewSeriesCollection(points)
j := 0
collection, j := tsdb.NewSeriesCollection(points), 0
for iter := collection.Iterator(); iter.Next(); {
tags := iter.Tags()
@ -350,47 +401,110 @@ func (e *Engine) WritePoints(points []models.Point) error {
return ErrEngineClosed
}
// Add new series to the index and series file. Check for partial writes.
// Convert the points to values for adding to the WAL/Cache.
values, err := tsm1.PointsToValues(collection.Points)
if err != nil {
return err
}
// Add the write to the WAL to be replayed if there is a crash or shutdown.
if _, err := e.wal.WriteMulti(values); err != nil {
return err
}
return e.writePointsLocked(collection, values)
}
// writePointsLocked does the work of writing points and must be called under some sort of lock.
func (e *Engine) writePointsLocked(collection *tsdb.SeriesCollection, values map[string][]value.Value) error {
// TODO(jeff): keep track of the values in the collection so that partial write
// errors get tracked all the way. Right now, the engine doesn't drop any values
// but if it ever did, the errors could end up missing some data.
// Add new series to the index and series file.
if err := e.index.CreateSeriesListIfNotExists(collection); err != nil {
// Ignore PartialWriteErrors. The collection captures them.
// TODO(edd/jeff): should we just remove PartialWriteError from the index then?
if _, ok := err.(tsdb.PartialWriteError); !ok {
return err
}
// If there was a PartialWriteError, that means the passed in values may contain
// more than the points so we need to recreate them.
if collection.PartialWriteError() != nil {
var err error
values, err = tsm1.PointsToValues(collection.Points)
if err != nil {
return err
}
}
// Write the points to the cache and WAL.
if err := e.engine.WritePoints(collection.Points); err != nil {
// Write the values to the engine.
if err := e.engine.WriteValues(values); err != nil {
return err
}
return collection.PartialWriteError()
}
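The write and replay paths rely on these two conversions being symmetric: WritePoints converts points to values once and hands the same map to both the WAL and the engine, while replayWAL converts recovered values back into points for re-indexing. A hedged sketch of that round trip, with signatures inferred from the calls above:

// roundTrip illustrates the conversion symmetry relied on by WritePoints and replayWAL.
func roundTrip(pts []models.Point) ([]models.Point, error) {
	values, err := tsm1.PointsToValues(pts) // write path: points -> WAL/cache values
	if err != nil {
		return nil, err
	}
	return tsm1.ValuesToPoints(values), nil // replay path: WAL values -> points
}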
// AcquireSegments closes the current WAL segment, gets the set of all the currently closed
// segments, and calls the callback. It does all of this under the lock on the engine.
func (e *Engine) AcquireSegments(fn func(segs []string) error) error {
e.mu.Lock()
defer e.mu.Unlock()
if err := e.wal.CloseSegment(); err != nil {
return err
}
segments, err := e.wal.ClosedSegments()
if err != nil {
return err
}
return fn(segments)
}
// CommitSegments calls the callback and if that does not return an error, removes the segment
// files from the WAL. It does all of this under the lock on the engine.
func (e *Engine) CommitSegments(segs []string, fn func() error) error {
e.mu.Lock()
defer e.mu.Unlock()
if err := fn(); err != nil {
return err
}
return e.wal.Remove(segs)
}
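These two hooks exist for a snapshotting caller (note the engine hands itself to tsm1.WithSnapshotter above). A sketch of the intended call pattern; persistSnapshot is a hypothetical stand-in for writing the cache out as a TSM file:

// Hypothetical snapshot cycle against the storage Engine (sketch only).
func snapshot(e *Engine, persistSnapshot func() error) error {
	var segs []string

	// Roll the active WAL segment and capture the closed ones under the engine lock.
	if err := e.AcquireSegments(func(s []string) error {
		segs = s
		return nil
	}); err != nil {
		return err
	}

	// Remove the captured segments only if persistence succeeded.
	return e.CommitSegments(segs, persistSnapshot)
}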
// DeleteBucket deletes an entire bucket from the storage engine.
func (e *Engine) DeleteBucket(orgID, bucketID platform.ID) error {
return e.DeleteBucketRange(orgID, bucketID, math.MinInt64, math.MaxInt64)
}
// DeleteBucketRange deletes an entire bucket from the storage engine.
func (e *Engine) DeleteBucketRange(orgID, bucketID platform.ID, min, max int64) error {
e.mu.RLock()
defer e.mu.RUnlock()
if e.closing == nil {
return ErrEngineClosed
}
// Add the delete to the WAL to be replayed if there is a crash or shutdown.
if _, err := e.wal.DeleteBucketRange(orgID, bucketID, min, max); err != nil {
return err
}
return e.deleteBucketRangeLocked(orgID, bucketID, min, max)
}
// deleteBucketRangeLocked does the work of deleting a bucket range and must be called under
// some sort of lock.
func (e *Engine) deleteBucketRangeLocked(orgID, bucketID platform.ID, min, max int64) error {
// TODO(edd): we need to clean up how we're encoding the prefix so that we
// don't have to remember to get it right everywhere we need to touch TSM data.
encoded := tsdb.EncodeName(orgID, bucketID)
name := models.EscapeMeasurement(encoded[:])
return e.engine.DeleteBucket(name, math.MinInt64, math.MaxInt64)
}
// DeleteSeriesRangeWithPredicate deletes all series data iterated over if fn returns
// true for that series.
func (e *Engine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, fn func([]byte, models.Tags) (int64, int64, bool)) error {
e.mu.RLock()
defer e.mu.RUnlock()
if e.closing == nil {
return ErrEngineClosed
}
return e.engine.DeleteSeriesRangeWithPredicate(itr, fn)
return e.engine.DeleteBucketRange(name, min, max)
}
// SeriesCardinality returns the number of series in the engine.

View File

@ -16,8 +16,6 @@ type retentionMetrics struct {
labels prometheus.Labels
Checks *prometheus.CounterVec
CheckDuration *prometheus.HistogramVec
Unprocessable *prometheus.CounterVec
Series *prometheus.CounterVec
}
func newRetentionMetrics(labels prometheus.Labels) *retentionMetrics {
@ -25,17 +23,19 @@ func newRetentionMetrics(labels prometheus.Labels) *retentionMetrics {
for k := range labels {
names = append(names, k)
}
names = append(names, "status") // All metrics include status
sort.Strings(names)
checksNames := append(append([]string(nil), names...), "status", "org_id", "bucket_id")
sort.Strings(checksNames)
return &retentionMetrics{
labels: labels,
Checks: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: retentionSubsystem,
Name: "checks_total",
Help: "Number of retention check operations performed.",
}, names),
Help: "Number of retention check operations performed by org/bucket id.",
}, checksNames),
CheckDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: namespace,
@ -45,20 +45,6 @@ func newRetentionMetrics(labels prometheus.Labels) *retentionMetrics {
// 25 buckets spaced exponentially between 10s and ~2h
Buckets: prometheus.ExponentialBuckets(10, 1.32, 25),
}, names),
Unprocessable: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: retentionSubsystem,
Name: "unprocessable_buckets_total",
Help: "Number of buckets that could not be operated on.",
}, names),
Series: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: retentionSubsystem,
Name: "series_total",
Help: "Number of series that a delete was applied to.",
}, names),
}
}
@ -76,7 +62,5 @@ func (rm *retentionMetrics) PrometheusCollectors() []prometheus.Collector {
return []prometheus.Collector{
rm.Checks,
rm.CheckDuration,
rm.Unprocessable,
rm.Series,
}
}

61
storage/opener.go Normal file
View File

@ -0,0 +1,61 @@
package storage
import (
"io"
)
// opener is something that can be opened and closed.
type opener interface {
Open() error
io.Closer
}
// openHelper is a helper to abstract the pattern of opening multiple things,
// exiting early if any open fails, and closing any of the opened things
// in the case of failure.
type openHelper struct {
opened []io.Closer
err error
}
// Open attempts to open the opener. If an error has happened already
// then no calls are made to the opener.
func (o *openHelper) Open(op opener) {
if o.err != nil {
return
}
o.err = op.Open()
if o.err == nil {
o.opened = append(o.opened, op)
}
}
// Done returns the error of the first open and closes in reverse
// order any opens that have already happened if there was an error.
func (o *openHelper) Done() error {
if o.err == nil {
return nil
}
for i := len(o.opened) - 1; i >= 0; i-- {
o.opened[i].Close()
}
return o.err
}
// closeHelper is a helper to abstract the pattern of closing multiple
// things and keeping track of the first encountered error.
type closeHelper struct {
err error
}
// Close closes the closer and keeps track of the first error.
func (c *closeHelper) Close(cl io.Closer) {
if err := cl.Close(); c.err == nil {
c.err = err
}
}
// Done returns the first error.
func (c *closeHelper) Done() error {
return c.err
}
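A condensed sketch of the pattern these helpers encapsulate, mirroring how Engine.Open and Engine.Close use them above (a, b, and c are hypothetical resources implementing Open and Close):

var oh openHelper
oh.Open(a) // opened
oh.Open(b) // suppose this fails
oh.Open(c) // skipped: an error is already recorded
if err := oh.Done(); err != nil {
	return err // a has been closed again, in reverse order
}

var ch closeHelper
ch.Close(c)
ch.Close(b)
ch.Close(a)
return ch.Done() // first close error, if any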

View File

@ -106,7 +106,7 @@ func (c *{{.name}}MultiShardArrayCursor) reset(cur cursors.{{.Name}}ArrayCursor,
}
func (c *{{.name}}MultiShardArrayCursor) Err() error { return c.err }
func (c *{{.name}}MultiShardArrayCursor) Stats() cursors.CursorStats {
return c.{{.Name}}ArrayCursor.Stats()
@ -200,17 +200,17 @@ func (c {{$type}}) Next() {{$arrayType}} {
var acc {{.Type}}
for {
for _, v := range a.Values {
acc += v
}
a = c.{{.Name}}ArrayCursor.Next()
if len(a.Timestamps) == 0 {
c.ts[0] = ts
c.vs[0] = acc
c.res.Timestamps = c.ts[:]
c.res.Values = c.vs[:]
return c.res
}
}
}
@ -230,11 +230,11 @@ func (c *integer{{.Name}}CountArrayCursor) Next() *cursors.IntegerArray {
return &cursors.IntegerArray{}
}
ts := a.Timestamps[0]
var acc int64
for {
acc += int64(len(a.Timestamps))
a = c.{{.Name}}ArrayCursor.Next()
if len(a.Timestamps) == 0 {
res := cursors.NewIntegerArrayLen(1)
res.Timestamps[0] = ts

View File

@ -4,28 +4,21 @@ import (
"context"
"errors"
"math"
"sync"
"sync/atomic"
"time"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxql"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
const (
bucketAPITimeout = 10 * time.Second
engineAPITimeout = time.Minute
)
// A Deleter implementation is capable of deleting data from a storage engine.
type Deleter interface {
CreateSeriesCursor(context.Context, SeriesCursorRequest, influxql.Expr) (SeriesCursor, error)
DeleteSeriesRangeWithPredicate(tsdb.SeriesIterator, func([]byte, models.Tags) (int64, int64, bool)) error
DeleteBucketRange(orgID, bucketID platform.ID, min, max int64) error
}
// A BucketFinder is responsible for providing access to buckets via a filter.
@ -78,169 +71,59 @@ func (s *retentionEnforcer) run() {
log, logEnd := logger.NewOperation(s.logger, "Data retention check", "data_retention_check")
defer logEnd()
rpByBucketID, err := s.getRetentionPeriodPerBucket()
buckets, err := s.getBucketInformation()
if err != nil {
log.Error("Unable to determine bucket:RP mapping", zap.Error(err))
log.Error("Unable to determine bucket information", zap.Error(err))
return
}
now := time.Now().UTC()
labels := s.metrics.Labels()
labels["status"] = "ok"
if err := s.expireData(rpByBucketID, now); err != nil {
log.Error("Deletion not successful", zap.Error(err))
labels["status"] = "error"
}
s.metrics.CheckDuration.With(labels).Observe(time.Since(now).Seconds())
s.metrics.Checks.With(labels).Inc()
s.expireData(buckets, now)
s.metrics.CheckDuration.With(s.metrics.Labels()).Observe(time.Since(now).Seconds())
}
// expireData runs a delete operation on the storage engine.
//
// Any series data that (1) belongs to a bucket in the provided map and
// Any series data that (1) belongs to a bucket in the provided list and
// (2) falls outside the bucket's indicated retention period will be deleted.
func (s *retentionEnforcer) expireData(rpByBucketID map[platform.ID]time.Duration, now time.Time) error {
_, logEnd := logger.NewOperation(s.logger, "Data deletion", "data_deletion")
func (s *retentionEnforcer) expireData(buckets []*platform.Bucket, now time.Time) {
logger, logEnd := logger.NewOperation(s.logger, "Data deletion", "data_deletion")
defer logEnd()
ctx, cancel := context.WithTimeout(context.Background(), engineAPITimeout)
defer cancel()
cur, err := s.Engine.CreateSeriesCursor(ctx, SeriesCursorRequest{}, nil)
if err != nil {
return err
}
defer cur.Close()
var mu sync.Mutex
badMSketch := make(map[string]struct{}) // Badly formatted measurements.
missingBSketch := make(map[platform.ID]struct{}) // Missing buckets.
var seriesDeleted uint64 // Number of series where a delete is attempted.
var seriesSkipped uint64 // Number of series that were skipped from delete.
fn := func(name []byte, tags models.Tags) (int64, int64, bool) {
if len(name) != platform.IDLength {
mu.Lock()
badMSketch[string(name)] = struct{}{}
mu.Unlock()
atomic.AddUint64(&seriesSkipped, 1)
return 0, 0, false
labels := s.metrics.Labels()
for _, b := range buckets {
if b.RetentionPeriod == 0 {
continue
}
var n [16]byte
copy(n[:], name)
_, bucketID := tsdb.DecodeName(n)
retentionPeriod, ok := rpByBucketID[bucketID]
if !ok {
mu.Lock()
missingBSketch[bucketID] = struct{}{}
mu.Unlock()
atomic.AddUint64(&seriesSkipped, 1)
return 0, 0, false
}
if retentionPeriod == 0 {
return 0, 0, false
}
atomic.AddUint64(&seriesDeleted, 1)
to := now.Add(-retentionPeriod).UnixNano()
return math.MinInt64, to, true
}
defer func() {
if s.metrics == nil {
return
}
labels := s.metrics.Labels()
labels["status"] = "bad_measurement"
s.metrics.Unprocessable.With(labels).Add(float64(len(badMSketch)))
labels["status"] = "missing_bucket"
s.metrics.Unprocessable.With(labels).Add(float64(len(missingBSketch)))
labels["status"] = "ok"
s.metrics.Series.With(labels).Add(float64(atomic.LoadUint64(&seriesDeleted)))
labels["org_id"] = b.OrganizationID.String()
labels["bucket_id"] = b.ID.String()
labels["status"] = "skipped"
s.metrics.Series.With(labels).Add(float64(atomic.LoadUint64(&seriesSkipped)))
}()
max := now.Add(-b.RetentionPeriod).UnixNano()
err := s.Engine.DeleteBucketRange(b.OrganizationID, b.ID, math.MinInt64, max)
if err != nil {
labels["status"] = "error"
logger.Info("unable to delete bucket range",
zap.String("bucket id", b.ID.String()),
zap.String("org id", b.OrganizationID.String()),
zap.Error(err))
}
return s.Engine.DeleteSeriesRangeWithPredicate(newSeriesIteratorAdapter(cur), fn)
s.metrics.Checks.With(labels).Inc()
}
}
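The cutoff arithmetic in the loop above is simple but worth spelling out: for a bucket checked at time now with retention period rp, everything at or before now-rp is deleted. A sketch with illustrative values (engine, orgID, and bucketID are assumed to be in scope):

now := time.Date(2018, 4, 10, 23, 12, 33, 0, time.UTC)
rp := 3 * time.Hour // the bucket's retention period

// Everything in [math.MinInt64, now-rp] is deleted for the bucket.
max := now.Add(-rp).UnixNano() // 2018-04-10T20:12:33Z in nanoseconds
if err := engine.DeleteBucketRange(orgID, bucketID, math.MinInt64, max); err != nil {
	log.Println("retention delete failed:", err) // illustrative handling
}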
// getRetentionPeriodPerBucket returns a map of (bucket ID -> retention period)
// for all buckets.
func (s *retentionEnforcer) getRetentionPeriodPerBucket() (map[platform.ID]time.Duration, error) {
// getBucketInformation returns a slice of buckets to run retention on.
func (s *retentionEnforcer) getBucketInformation() ([]*platform.Bucket, error) {
ctx, cancel := context.WithTimeout(context.Background(), bucketAPITimeout)
defer cancel()
buckets, _, err := s.BucketService.FindBuckets(ctx, platform.BucketFilter{})
if err != nil {
return nil, err
}
rpByBucketID := make(map[platform.ID]time.Duration, len(buckets))
for _, bucket := range buckets {
rpByBucketID[bucket.ID] = bucket.RetentionPeriod
}
return rpByBucketID, nil
return buckets, err
}
// PrometheusCollectors satisfies the prom.PrometheusCollector interface.
func (s *retentionEnforcer) PrometheusCollectors() []prometheus.Collector {
return s.metrics.PrometheusCollectors()
}
type seriesIteratorAdapter struct {
itr SeriesCursor
ea seriesElemAdapter
elem tsdb.SeriesElem
}
func newSeriesIteratorAdapter(itr SeriesCursor) *seriesIteratorAdapter {
si := &seriesIteratorAdapter{itr: itr}
si.elem = &si.ea
return si
}
// Next returns the next tsdb.SeriesElem.
//
// The returned tsdb.SeriesElem is valid for use until Next is called again.
func (s *seriesIteratorAdapter) Next() (tsdb.SeriesElem, error) {
if s.itr == nil {
return nil, nil
}
row, err := s.itr.Next()
if err != nil {
return nil, err
}
if row == nil {
return nil, nil
}
s.ea.name = row.Name
s.ea.tags = row.Tags
return s.elem, nil
}
func (s *seriesIteratorAdapter) Close() error {
if s.itr != nil {
err := s.itr.Close()
s.itr = nil
return err
}
return nil
}
type seriesElemAdapter struct {
name []byte
tags models.Tags
}
func (e *seriesElemAdapter) Name() []byte { return e.name }
func (e *seriesElemAdapter) Tags() models.Tags { return e.tags }
func (e *seriesElemAdapter) Deleted() bool { return false }
func (e *seriesElemAdapter) Expr() influxql.Expr { return nil }

View File

@ -2,7 +2,6 @@ package storage
import (
"context"
"fmt"
"math"
"math/rand"
"reflect"
@ -10,103 +9,74 @@ import (
"time"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxql"
)
func TestService_expireData(t *testing.T) {
func TestRetentionService(t *testing.T) {
engine := NewTestEngine()
service := newRetentionEnforcer(engine, NewTestBucketFinder())
now := time.Date(2018, 4, 10, 23, 12, 33, 0, time.UTC)
t.Run("no rpByBucketID", func(t *testing.T) {
if err := service.expireData(nil, now); err != nil {
t.Error(err)
}
if err := service.expireData(map[platform.ID]time.Duration{}, now); err != nil {
t.Error(err)
}
t.Run("no buckets", func(t *testing.T) {
service.expireData(nil, now)
service.expireData([]*platform.Bucket{}, now)
})
// Generate some measurement names
var names [][]byte
rpByBucketID := map[platform.ID]time.Duration{}
expMatchedFrequencies := map[string]int{} // To be used for verifying test results.
expRejectedFrequencies := map[string]int{} // To be used for verifying test results.
// Generate some buckets to expire
buckets := []*platform.Bucket{}
expMatched := map[string]struct{}{} // To be used for verifying test results.
expRejected := map[string]struct{}{} // To be used for verifying test results.
for i := 0; i < 15; i++ {
repeat := rand.Intn(10) + 1 // [1, 10]
name := genMeasurementName()
for j := 0; j < repeat; j++ {
names = append(names, name)
}
var n [16]byte
copy(n[:], name)
_, bucketID := tsdb.DecodeName(n)
orgID, bucketID := tsdb.DecodeName(n)
// Put 1/3rd in the rpByBucketID into the set to delete and 1/3rd into the set
// to not delete because no rp, and 1/3rd into the set to not delete because 0 rp.
if i%3 == 0 {
rpByBucketID[bucketID] = 3 * time.Hour
expMatchedFrequencies[string(name)] = repeat
buckets = append(buckets, &platform.Bucket{
OrganizationID: orgID,
ID: bucketID,
RetentionPeriod: 3 * time.Hour,
})
expMatched[string(name)] = struct{}{}
} else if i%3 == 1 {
expRejectedFrequencies[string(name)] = repeat
expRejected[string(name)] = struct{}{}
} else if i%3 == 2 {
rpByBucketID[bucketID] = 0
expRejectedFrequencies[string(name)] = repeat
buckets = append(buckets, &platform.Bucket{
OrganizationID: orgID,
ID: bucketID,
RetentionPeriod: 0,
})
expRejected[string(name)] = struct{}{}
}
}
// Add a badly formatted measurement.
for i := 0; i < 5; i++ {
names = append(names, []byte("zyzwrong"))
}
expRejectedFrequencies["zyzwrong"] = 5
gotMatchedFrequencies := map[string]int{}
gotRejectedFrequencies := map[string]int{}
engine.DeleteSeriesRangeWithPredicateFn = func(_ tsdb.SeriesIterator, fn func([]byte, models.Tags) (int64, int64, bool)) error {
// Iterate over the generated names updating the frequencies by which
// the predicate function in expireData matches or rejects them.
for _, name := range names {
from, to, shouldDelete := fn(name, nil)
if shouldDelete {
gotMatchedFrequencies[string(name)]++
if from != math.MinInt64 {
return fmt.Errorf("got from %d, expected %d", from, math.MinInt64)
}
wantTo := now.Add(-3 * time.Hour).UnixNano()
if to != wantTo {
return fmt.Errorf("got to %d, expected %d", to, wantTo)
}
} else {
gotRejectedFrequencies[string(name)]++
}
gotMatched := map[string]struct{}{}
engine.DeleteBucketRangeFn = func(orgID, bucketID platform.ID, from, to int64) error {
if from != math.MinInt64 {
t.Fatalf("got from %d, expected %d", from, math.MinInt64)
}
wantTo := now.Add(-3 * time.Hour).UnixNano()
if to != wantTo {
t.Fatalf("got to %d, expected %d", to, wantTo)
}
name := tsdb.EncodeName(orgID, bucketID)
if _, ok := expRejected[string(name[:])]; ok {
t.Fatalf("got a delete for %x", name)
}
gotMatched[string(name[:])] = struct{}{}
return nil
}
t.Run("multiple bucket", func(t *testing.T) {
if err := service.expireData(rpByBucketID, now); err != nil {
t.Error(err)
t.Run("multiple buckets", func(t *testing.T) {
service.expireData(buckets, now)
if !reflect.DeepEqual(gotMatched, expMatched) {
t.Fatalf("got\n%#v\nexpected\n%#v", gotMatched, expMatched)
}
// Verify that the correct series were marked to be deleted.
t.Run("matched", func(t *testing.T) {
if !reflect.DeepEqual(gotMatchedFrequencies, expMatchedFrequencies) {
t.Fatalf("got\n%#v\nexpected\n%#v", gotMatchedFrequencies, expMatchedFrequencies)
}
})
t.Run("rejected", func(t *testing.T) {
// Verify that badly formatted measurements were rejected.
if !reflect.DeepEqual(gotRejectedFrequencies, expRejectedFrequencies) {
t.Fatalf("got\n%#v\nexpected\n%#v", gotRejectedFrequencies, expRejectedFrequencies)
}
})
})
}
@ -120,40 +90,18 @@ func genMeasurementName() []byte {
return b
}
type TestSeriesCursor struct {
CloseFn func() error
NextFn func() (*SeriesCursorRow, error)
}
func (f *TestSeriesCursor) Close() error { return f.CloseFn() }
func (f *TestSeriesCursor) Next() (*SeriesCursorRow, error) { return f.NextFn() }
type TestEngine struct {
CreateSeriesCursorFn func(context.Context, SeriesCursorRequest, influxql.Expr) (SeriesCursor, error)
DeleteSeriesRangeWithPredicateFn func(tsdb.SeriesIterator, func([]byte, models.Tags) (int64, int64, bool)) error
SeriesCursor *TestSeriesCursor
DeleteBucketRangeFn func(platform.ID, platform.ID, int64, int64) error
}
func NewTestEngine() *TestEngine {
cursor := &TestSeriesCursor{
CloseFn: func() error { return nil },
NextFn: func() (*SeriesCursorRow, error) { return nil, nil },
}
return &TestEngine{
SeriesCursor: cursor,
CreateSeriesCursorFn: func(context.Context, SeriesCursorRequest, influxql.Expr) (SeriesCursor, error) { return cursor, nil },
DeleteSeriesRangeWithPredicateFn: func(tsdb.SeriesIterator, func([]byte, models.Tags) (int64, int64, bool)) error { return nil },
DeleteBucketRangeFn: func(platform.ID, platform.ID, int64, int64) error { return nil },
}
}
func (e *TestEngine) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (SeriesCursor, error) {
return e.CreateSeriesCursorFn(ctx, req, cond)
}
func (e *TestEngine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, fn func([]byte, models.Tags) (int64, int64, bool)) error {
return e.DeleteSeriesRangeWithPredicateFn(itr, fn)
func (e *TestEngine) DeleteBucketRange(orgID, bucketID platform.ID, min, max int64) error {
return e.DeleteBucketRangeFn(orgID, bucketID, min, max)
}
type TestBucketFinder struct {

View File

@ -0,0 +1,28 @@
package wal
import (
"fmt"
"io/ioutil"
"os"
"testing"
)
func MustTempDir() string {
dir, err := ioutil.TempDir("", "tsm1-test")
if err != nil {
panic(fmt.Sprintf("failed to create temp dir: %v", err))
}
return dir
}
func MustTempFile(dir string) *os.File {
f, err := ioutil.TempFile(dir, "tsm1test")
if err != nil {
panic(fmt.Sprintf("failed to create temp file: %v", err))
}
return f
}
func fatal(t *testing.T, msg string, err error) {
t.Fatalf("unexpected error %v: %v", msg, err)
}

91
storage/wal/metrics.go Normal file
View File

@ -0,0 +1,91 @@
package wal
import (
"sort"
"sync"
"github.com/prometheus/client_golang/prometheus"
)
// The following package variables act as singletons, to be shared by all
// storage.Engine instantiations. This allows multiple WALs to be monitored
// within the same process.
var (
wms *walMetrics // main metrics
mmu sync.RWMutex
)
// PrometheusCollectors returns all the metrics associated with the wal package.
func PrometheusCollectors() []prometheus.Collector {
mmu.RLock()
defer mmu.RUnlock()
var collectors []prometheus.Collector
if wms != nil {
collectors = append(collectors, wms.PrometheusCollectors()...)
}
return collectors
}
// namespace is the leading part of all published metrics for the Storage service.
const namespace = "storage"
const walSubsystem = "wal" // sub-system associated with metrics for the WAL.
// walMetrics are a set of metrics concerned with tracking data about compactions.
type walMetrics struct {
OldSegmentBytes *prometheus.GaugeVec
CurrentSegmentBytes *prometheus.GaugeVec
Segments *prometheus.GaugeVec
Writes *prometheus.CounterVec
}
// newWALMetrics initialises the prometheus metrics for tracking the WAL.
func newWALMetrics(labels prometheus.Labels) *walMetrics {
var names []string
for k := range labels {
names = append(names, k)
}
sort.Strings(names)
writeNames := append(append([]string(nil), names...), "status")
sort.Strings(writeNames)
return &walMetrics{
OldSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "old_segment_bytes",
Help: "Number of bytes old WAL segments using on disk.",
}, names),
CurrentSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "current_segment_bytes",
Help: "Number of bytes TSM files using on disk.",
}, names),
Segments: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "segments_total",
Help: "Number of WAL segment files on disk.",
}, names),
Writes: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "writes_total",
Help: "Number of writes to the WAL.",
}, writeNames),
}
}
// PrometheusCollectors satisfies the prom.PrometheusCollector interface.
func (m *walMetrics) PrometheusCollectors() []prometheus.Collector {
return []prometheus.Collector{
m.OldSegmentBytes,
m.CurrentSegmentBytes,
m.Segments,
m.Writes,
}
}
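Because the collectors are package-level singletons, a process embedding several engines registers them once, the same way the test below does with its private registry. A sketch, assuming a fresh Prometheus registry:

reg := prometheus.NewRegistry()
reg.MustRegister(wal.PrometheusCollectors()...) // empty until a WAL has been opened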

View File

@ -0,0 +1,75 @@
package wal
import (
"testing"
"github.com/influxdata/influxdb/kit/prom/promtest"
"github.com/prometheus/client_golang/prometheus"
)
func TestMetrics_WAL(t *testing.T) {
// metrics to be shared by multiple WALs.
metrics := newWALMetrics(prometheus.Labels{"engine_id": "", "node_id": ""})
t1 := newWALTracker(metrics, prometheus.Labels{"engine_id": "0", "node_id": "0"})
t2 := newWALTracker(metrics, prometheus.Labels{"engine_id": "1", "node_id": "0"})
reg := prometheus.NewRegistry()
reg.MustRegister(metrics.PrometheusCollectors()...)
base := namespace + "_" + walSubsystem + "_"
// All the metric names
gauges := []string{
base + "old_segment_bytes",
base + "current_segment_bytes",
base + "segments_total",
}
counters := []string{
base + "writes_total",
}
// Generate some measurements.
for i, tracker := range []*walTracker{t1, t2} {
tracker.SetOldSegmentSize(uint64(i + len(gauges[0])))
tracker.SetCurrentSegmentSize(uint64(i + len(gauges[1])))
tracker.SetSegments(uint64(i + len(gauges[2])))
labels := tracker.Labels()
labels["status"] = "ok"
tracker.metrics.Writes.With(labels).Add(float64(i + len(counters[0])))
}
// Test that all the correct metrics are present.
mfs, err := reg.Gather()
if err != nil {
t.Fatal(err)
}
// The label variants for the two WAL trackers.
labelVariants := []prometheus.Labels{
prometheus.Labels{"engine_id": "0", "node_id": "0"},
prometheus.Labels{"engine_id": "1", "node_id": "0"},
}
for i, labels := range labelVariants {
for _, name := range gauges {
exp := float64(i + len(name))
metric := promtest.MustFindMetric(t, mfs, name, labels)
if got := metric.GetGauge().GetValue(); got != exp {
t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp)
}
}
for _, name := range counters {
exp := float64(i + len(name))
labels["status"] = "ok"
metric := promtest.MustFindMetric(t, mfs, name, labels)
if got := metric.GetCounter().GetValue(); got != exp {
t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp)
}
}
}
}

View File

@ -1,4 +1,4 @@
package tsm1
package wal
import "sync"

86
storage/wal/reader.go Normal file
View File

@ -0,0 +1,86 @@
package wal
import (
"os"
"sort"
"go.uber.org/zap"
)
// WALReader helps one read out the WAL into entries.
type WALReader struct {
files []string
logger *zap.Logger
r *WALSegmentReader
}
// NewWALReader constructs a WALReader over the given set of files.
func NewWALReader(files []string) *WALReader {
sort.Strings(files)
return &WALReader{
files: files,
logger: zap.NewNop(),
r: nil,
}
}
// WithLogger sets the logger for the WALReader.
func (r *WALReader) WithLogger(logger *zap.Logger) { r.logger = logger }
// Read calls the callback with every entry in the WAL files. If, during
// reading of a segment file, corruption is encountered, that segment file
// is truncated up to and including the last valid byte, and processing
// continues with the next segment file.
func (r *WALReader) Read(cb func(WALEntry) error) error {
for _, file := range r.files {
if err := r.readFile(file, cb); err != nil {
return err
}
}
return nil
}
// readFile reads the file and calls the callback with each WAL entry.
// It uses the provided logger for information about progress and corruptions.
func (r *WALReader) readFile(file string, cb func(WALEntry) error) error {
f, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return err
}
defer f.Close()
stat, err := f.Stat()
if err != nil {
return err
}
r.logger.Info("Reading file", zap.String("path", file), zap.Int64("size", stat.Size()))
if stat.Size() == 0 {
return nil
}
if r.r == nil {
r.r = NewWALSegmentReader(f)
} else {
r.r.Reset(f)
}
defer r.r.Close()
for r.r.Next() {
entry, err := r.r.Read()
if err != nil {
n := r.r.Count()
r.logger.Info("File corrupt", zap.Error(err), zap.String("path", file), zap.Int64("pos", n))
if err := f.Truncate(n); err != nil {
return err
}
break
}
if err := cb(entry); err != nil {
return err
}
}
return r.r.Close()
}
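A sketch of driving the reader directly from a caller, using the same type dispatch replayWAL performs (dir and logger are assumed to be in scope):

files, err := wal.SegmentFileNames(dir)
if err != nil {
	return err
}

r := wal.NewWALReader(files)
r.WithLogger(logger)
return r.Read(func(entry wal.WALEntry) error {
	switch e := entry.(type) {
	case *wal.WriteWALEntry:
		fmt.Printf("write: %d series keys\n", len(e.Values))
	case *wal.DeleteBucketRangeWALEntry:
		fmt.Printf("delete: org=%s bucket=%s range=[%d,%d]\n", e.OrgID, e.BucketID, e.Min, e.Max)
	}
	return nil
})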

View File

@ -1,8 +1,7 @@
package tsm1
package wal
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
@ -18,29 +17,14 @@ import (
"time"
"github.com/golang/snappy"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/pkg/pool"
"github.com/influxdata/influxdb/tsdb/value"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
// Log describes an interface for a durable disk-based log.
type Log interface {
Open() error
Close() error
Path() string
LastWriteTime() time.Time
DiskSizeBytes() int64
WriteMulti(values map[string][]Value) (int, error)
DeleteRange(keys [][]byte, min, max int64) (int, error)
CloseSegment() error
ClosedSegments() ([]string, error)
Remove(files []string) error
}
const (
// DefaultSegmentSize of 10MB is the size at which segment files will be rolled over.
DefaultSegmentSize = 10 * 1024 * 1024
@ -68,11 +52,14 @@ const (
// WriteWALEntryType indicates a write entry.
WriteWALEntryType WalEntryType = 0x01
// DeleteWALEntryType indicates a delete entry.
DeleteWALEntryType WalEntryType = 0x02
// DeleteWALEntryType indicates a delete entry. Deprecated.
_ WalEntryType = 0x02
// DeleteRangeWALEntryType indicates a delete range entry.
DeleteRangeWALEntryType WalEntryType = 0x03
// DeleteRangeWALEntryType indicates a delete range entry. Deprecated.
_ WalEntryType = 0x03
// DeleteBucketRangeWALEntryType indicates a delete bucket range entry.
DeleteBucketRangeWALEntryType WalEntryType = 0x04
)
var (
@ -97,7 +84,8 @@ type WAL struct {
mu sync.RWMutex
lastWriteTime time.Time
path string
path string
enabled bool
// write variables
currentSegmentID int
@ -120,7 +108,9 @@ type WAL struct {
// SegmentSize is the file size at which a segment file will be rotated
SegmentSize int
tracker *walTracker
tracker *walTracker
defaultMetricLabels prometheus.Labels // N.B this must not be mutated after Open is called.
limiter limiter.Fixed
}
@ -128,7 +118,8 @@ type WAL struct {
func NewWAL(path string) *WAL {
logger := zap.NewNop()
return &WAL{
path: path,
path: path,
enabled: true,
// these options should be overridden by any options in the config
SegmentSize: DefaultSegmentSize,
@ -137,7 +128,6 @@ func NewWAL(path string) *WAL {
limiter: limiter.NewFixed(defaultWaitingWALWrites),
logger: logger,
traceLogger: logger,
tracker: newWALTracker(newWALMetrics(nil), nil),
}
}
@ -154,6 +144,11 @@ func (l *WAL) WithFsyncDelay(delay time.Duration) {
l.syncDelay = delay
}
// SetEnabled sets if the WAL is enabled and should be called before the WAL is opened.
func (l *WAL) SetEnabled(enabled bool) {
l.enabled = enabled
}
// WithLogger sets the WAL's logger.
func (l *WAL) WithLogger(log *zap.Logger) {
l.logger = log.With(zap.String("service", "wal"))
@ -163,6 +158,15 @@ func (l *WAL) WithLogger(log *zap.Logger) {
}
}
// SetDefaultMetricLabels sets the default labels for metrics on the engine.
// It must be called before the Engine is opened.
func (l *WAL) SetDefaultMetricLabels(labels prometheus.Labels) {
l.defaultMetricLabels = make(prometheus.Labels, len(labels))
for k, v := range labels {
l.defaultMetricLabels[k] = v
}
}
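The setters above are order-sensitive, so wiring a WAL by hand follows the sequence NewEngine now performs (the path, fsync delay, and labels here are illustrative):

w := wal.NewWAL("/var/lib/influxdb/wal")
w.WithFsyncDelay(100 * time.Millisecond)
w.EnableTraceLogging(false)
w.SetEnabled(true) // must precede Open
w.SetDefaultMetricLabels(prometheus.Labels{"engine_id": "0", "node_id": "0"})

if err := w.Open(); err != nil {
	return err
}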
// Path returns the directory the log was initialized with.
func (l *WAL) Path() string {
l.mu.RLock()
@ -175,6 +179,20 @@ func (l *WAL) Open() error {
l.mu.Lock()
defer l.mu.Unlock()
if !l.enabled {
return nil
}
// Initialise metrics for trackers.
mmu.Lock()
if wms == nil {
wms = newWALMetrics(l.defaultMetricLabels)
}
mmu.Unlock()
// Set the shared metrics for the tracker
l.tracker = newWALTracker(wms, l.defaultMetricLabels)
l.traceLogger.Info("tsm1 WAL starting", zap.Int("segment_size", l.SegmentSize))
l.traceLogger.Info("tsm1 WAL writing", zap.String("path", l.path))
@ -182,7 +200,7 @@ func (l *WAL) Open() error {
return err
}
segments, err := segmentFileNames(l.path)
segments, err := SegmentFileNames(l.path)
if err != nil {
return err
}
@ -299,8 +317,12 @@ func (l *WAL) sync() {
// WriteMulti writes the given values to the WAL. It returns the WAL segment ID to
// which the points were written. If an error is returned the segment ID should
// be ignored.
func (l *WAL) WriteMulti(values map[string][]Value) (int, error) {
// be ignored. If the WAL is disabled, -1 and nil is returned.
func (l *WAL) WriteMulti(values map[string][]value.Value) (int, error) {
if !l.enabled {
return -1, nil
}
entry := &WriteWALEntry{
Values: values,
}
@ -317,8 +339,13 @@ func (l *WAL) WriteMulti(values map[string][]Value) (int, error) {
// ClosedSegments returns a slice of the names of the closed segment files.
func (l *WAL) ClosedSegments() ([]string, error) {
if !l.enabled {
return nil, nil
}
l.mu.RLock()
defer l.mu.RUnlock()
// Not loading files from disk so nothing to do
if l.path == "" {
return nil, nil
@ -329,7 +356,7 @@ func (l *WAL) ClosedSegments() ([]string, error) {
currentFile = l.currentSegmentWriter.path()
}
files, err := segmentFileNames(l.path)
files, err := SegmentFileNames(l.path)
if err != nil {
return nil, err
}
@ -349,15 +376,20 @@ func (l *WAL) ClosedSegments() ([]string, error) {
// Remove deletes the given segment file paths from disk and cleans up any associated objects.
func (l *WAL) Remove(files []string) error {
if !l.enabled {
return nil
}
l.mu.Lock()
defer l.mu.Unlock()
for _, fn := range files {
l.traceLogger.Info("Removing WAL file", zap.String("path", fn))
os.RemoveAll(fn)
}
// Refresh the on-disk size stats
segments, err := segmentFileNames(l.path)
segments, err := SegmentFileNames(l.path)
if err != nil {
return err
}
@ -470,8 +502,13 @@ func (l *WAL) rollSegment() error {
// CloseSegment closes the current segment if it is non-empty and opens a new one.
func (l *WAL) CloseSegment() error {
if !l.enabled {
return nil
}
l.mu.Lock()
defer l.mu.Unlock()
if l.currentSegmentWriter == nil || l.currentSegmentWriter.size > 0 {
if err := l.newSegmentFile(); err != nil {
// A drop database or RP call could trigger this error if writes were in-flight
@ -483,32 +520,18 @@ func (l *WAL) CloseSegment() error {
return nil
}
// Delete deletes the given keys, returning the segment ID for the operation.
func (l *WAL) Delete(keys [][]byte) (int, error) {
if len(keys) == 0 {
return 0, nil
}
entry := &DeleteWALEntry{
Keys: keys,
// DeleteBucketRange deletes the data inside of the bucket between the two times, returning
// the segment ID for the operation.
func (l *WAL) DeleteBucketRange(orgID, bucketID influxdb.ID, min, max int64) (int, error) {
if !l.enabled {
return -1, nil
}
id, err := l.writeToLog(entry)
if err != nil {
return -1, err
}
return id, nil
}
// DeleteRange deletes the given keys within the given time range,
// returning the segment ID for the operation.
func (l *WAL) DeleteRange(keys [][]byte, min, max int64) (int, error) {
if len(keys) == 0 {
return 0, nil
}
entry := &DeleteRangeWALEntry{
Keys: keys,
Min: min,
Max: max,
entry := &DeleteBucketRangeWALEntry{
OrgID: orgID,
BucketID: bucketID,
Min: min,
Max: max,
}
id, err := l.writeToLog(entry)
@ -523,6 +546,10 @@ func (l *WAL) Close() error {
l.mu.Lock()
defer l.mu.Unlock()
if !l.enabled {
return nil
}
l.once.Do(func() {
// Close, but don't set to nil so future goroutines can still be signaled
l.traceLogger.Info("Closing WAL file", zap.String("path", l.path))
@ -538,8 +565,8 @@ func (l *WAL) Close() error {
return nil
}
// segmentFileNames will return all files that are WAL segment files in sorted order by ascending ID.
func segmentFileNames(dir string) ([]string, error) {
// SegmentFileNames will return all files that are WAL segment files in sorted order by ascending ID.
func SegmentFileNames(dir string) ([]string, error) {
names, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("%s*.%s", WALFilePrefix, WALFileExtension)))
if err != nil {
return nil, err
@ -666,10 +693,11 @@ type WALEntry interface {
// WriteWALEntry represents a write of points.
type WriteWALEntry struct {
Values map[string][]Value
Values map[string][]value.Value
sz int
}
// MarshalSize returns the number of bytes the entry takes when marshaled.
func (w *WriteWALEntry) MarshalSize() int {
if w.sz > 0 || len(w.Values) == 0 {
return w.sz
@ -687,17 +715,17 @@ func (w *WriteWALEntry) MarshalSize() int {
encLen += 8 * len(v) // timestamps (8)
switch v[0].(type) {
case FloatValue, IntegerValue, UnsignedValue:
case value.FloatValue, value.IntegerValue, value.UnsignedValue:
encLen += 8 * len(v)
case BooleanValue:
case value.BooleanValue:
encLen += 1 * len(v)
case StringValue:
case value.StringValue:
for _, vv := range v {
str, ok := vv.(StringValue)
str, ok := vv.(value.StringValue)
if !ok {
return 0
}
encLen += 4 + len(str.value)
encLen += 4 + len(str.RawValue())
}
default:
return 0
@ -746,15 +774,15 @@ func (w *WriteWALEntry) Encode(dst []byte) ([]byte, error) {
for k, v := range w.Values {
switch v[0].(type) {
case FloatValue:
case value.FloatValue:
curType = float64EntryType
case IntegerValue:
case value.IntegerValue:
curType = integerEntryType
case UnsignedValue:
case value.UnsignedValue:
curType = unsignedEntryType
case BooleanValue:
case value.BooleanValue:
curType = booleanEntryType
case StringValue:
case value.StringValue:
curType = stringEntryType
default:
return nil, fmt.Errorf("unsupported value type: %T", v[0])
@ -774,41 +802,41 @@ func (w *WriteWALEntry) Encode(dst []byte) ([]byte, error) {
n += 8
switch vv := vv.(type) {
case FloatValue:
case value.FloatValue:
if curType != float64EntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
binary.BigEndian.PutUint64(dst[n:n+8], math.Float64bits(vv.value))
binary.BigEndian.PutUint64(dst[n:n+8], math.Float64bits(vv.RawValue()))
n += 8
case IntegerValue:
case value.IntegerValue:
if curType != integerEntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.value))
binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.RawValue()))
n += 8
case UnsignedValue:
case value.UnsignedValue:
if curType != unsignedEntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.value))
binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.RawValue()))
n += 8
case BooleanValue:
case value.BooleanValue:
if curType != booleanEntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
if vv.value {
if vv.RawValue() {
dst[n] = 1
} else {
dst[n] = 0
}
n++
case StringValue:
case value.StringValue:
if curType != stringEntryType {
return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv)
}
binary.BigEndian.PutUint32(dst[n:n+4], uint32(len(vv.value)))
binary.BigEndian.PutUint32(dst[n:n+4], uint32(len(vv.RawValue())))
n += 4
n += copy(dst[n:], vv.value)
n += copy(dst[n:], vv.RawValue())
default:
return nil, fmt.Errorf("unsupported value found in %T slice: %T", v[0].Value(), vv)
}
@ -863,13 +891,13 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
return ErrWALCorrupt
}
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
un := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
v := math.Float64frombits((binary.BigEndian.Uint64(b[i : i+8])))
i += 8
values = append(values, NewFloatValue(un, v))
values = append(values, value.NewFloatValue(un, v))
}
w.Values[k] = values
case integerEntryType:
@ -877,13 +905,13 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
return ErrWALCorrupt
}
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
un := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
v := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
values = append(values, NewIntegerValue(un, v))
values = append(values, value.NewIntegerValue(un, v))
}
w.Values[k] = values
@ -892,13 +920,13 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
return ErrWALCorrupt
}
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
un := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
v := binary.BigEndian.Uint64(b[i : i+8])
i += 8
values = append(values, NewUnsignedValue(un, v))
values = append(values, value.NewUnsignedValue(un, v))
}
w.Values[k] = values
@ -907,7 +935,7 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
return ErrWALCorrupt
}
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
un := int64(binary.BigEndian.Uint64(b[i : i+8]))
i += 8
@ -915,15 +943,15 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
v := b[i]
i += 1
if v == 1 {
values = append(values, NewBooleanValue(un, true))
values = append(values, value.NewBooleanValue(un, true))
} else {
values = append(values, NewBooleanValue(un, false))
values = append(values, value.NewBooleanValue(un, false))
}
}
w.Values[k] = values
case stringEntryType:
values := make([]Value, 0, nvals)
values := make([]value.Value, 0, nvals)
for j := 0; j < nvals; j++ {
if i+12 > len(b) {
return ErrWALCorrupt
@ -945,7 +973,7 @@ func (w *WriteWALEntry) UnmarshalBinary(b []byte) error {
v := string(b[i : i+length])
i += length
values = append(values, NewStringValue(un, v))
values = append(values, value.NewStringValue(un, v))
}
w.Values[k] = values
@ -961,152 +989,70 @@ func (w *WriteWALEntry) Type() WalEntryType {
return WriteWALEntryType
}
// DeleteWALEntry represents the deletion of multiple series.
type DeleteWALEntry struct {
Keys [][]byte
sz int
}
// MarshalBinary returns a binary representation of the entry in a new byte slice.
func (w *DeleteWALEntry) MarshalBinary() ([]byte, error) {
b := make([]byte, w.MarshalSize())
return w.Encode(b)
}
// UnmarshalBinary deserializes the byte slice into w.
func (w *DeleteWALEntry) UnmarshalBinary(b []byte) error {
if len(b) == 0 {
return nil
}
// b originates from a pool. Copy what needs to be retained.
buf := make([]byte, len(b))
copy(buf, b)
w.Keys = bytes.Split(buf, []byte("\n"))
return nil
}
func (w *DeleteWALEntry) MarshalSize() int {
if w.sz > 0 || len(w.Keys) == 0 {
return w.sz
}
encLen := len(w.Keys) // newlines
for _, k := range w.Keys {
encLen += len(k)
}
w.sz = encLen
return encLen
}
// Encode converts the DeleteWALEntry into a byte slice, appending to dst.
func (w *DeleteWALEntry) Encode(dst []byte) ([]byte, error) {
sz := w.MarshalSize()
if len(dst) < sz {
dst = make([]byte, sz)
}
var n int
for _, k := range w.Keys {
n += copy(dst[n:], k)
n += copy(dst[n:], "\n")
}
// We return n-1 to strip off the last newline so that unmarshalling the value
// does not produce an empty string
return []byte(dst[:n-1]), nil
}
// Type returns DeleteWALEntryType.
func (w *DeleteWALEntry) Type() WalEntryType {
return DeleteWALEntryType
}
// DeleteRangeWALEntry represents the deletion of multiple series.
type DeleteRangeWALEntry struct {
Keys [][]byte
// DeleteBucketRangeWALEntry represents the deletion of data in a bucket.
type DeleteBucketRangeWALEntry struct {
OrgID influxdb.ID
BucketID influxdb.ID
Min, Max int64
sz int
}
// MarshalBinary returns a binary representation of the entry in a new byte slice.
func (w *DeleteRangeWALEntry) MarshalBinary() ([]byte, error) {
func (w *DeleteBucketRangeWALEntry) MarshalBinary() ([]byte, error) {
b := make([]byte, w.MarshalSize())
return w.Encode(b)
}
// UnmarshalBinary deserializes the byte slice into w.
func (w *DeleteRangeWALEntry) UnmarshalBinary(b []byte) error {
if len(b) < 16 {
func (w *DeleteBucketRangeWALEntry) UnmarshalBinary(b []byte) error {
if len(b) != 2*influxdb.IDLength+16 {
return ErrWALCorrupt
}
w.Min = int64(binary.BigEndian.Uint64(b[:8]))
w.Max = int64(binary.BigEndian.Uint64(b[8:16]))
i := 16
for i < len(b) {
if i+4 > len(b) {
return ErrWALCorrupt
}
sz := int(binary.BigEndian.Uint32(b[i : i+4]))
i += 4
if i+sz > len(b) {
return ErrWALCorrupt
}
// b originates from a pool. Copy what needs to be retained.
buf := make([]byte, sz)
copy(buf, b[i:i+sz])
w.Keys = append(w.Keys, buf)
i += sz
if err := w.OrgID.Decode(b[0:influxdb.IDLength]); err != nil {
return err
}
if err := w.BucketID.Decode(b[influxdb.IDLength : 2*influxdb.IDLength]); err != nil {
return err
}
w.Min = int64(binary.BigEndian.Uint64(b[2*influxdb.IDLength : 2*influxdb.IDLength+8]))
w.Max = int64(binary.BigEndian.Uint64(b[2*influxdb.IDLength+8 : 2*influxdb.IDLength+16]))
return nil
}
func (w *DeleteRangeWALEntry) MarshalSize() int {
if w.sz > 0 {
return w.sz
}
sz := 16 + len(w.Keys)*4
for _, k := range w.Keys {
sz += len(k)
}
w.sz = sz
return sz
// MarshalSize returns the number of bytes the entry takes when marshaled.
func (w *DeleteBucketRangeWALEntry) MarshalSize() int {
return 2*influxdb.IDLength + 16
}
// Encode converts the DeleteRangeWALEntry into a byte slice, appending to b.
func (w *DeleteRangeWALEntry) Encode(b []byte) ([]byte, error) {
// Encode converts the entry into a byte stream using b if it is large enough.
// If b is too small, a newly allocated slice is returned.
func (w *DeleteBucketRangeWALEntry) Encode(b []byte) ([]byte, error) {
sz := w.MarshalSize()
if len(b) < sz {
b = make([]byte, sz)
}
binary.BigEndian.PutUint64(b[:8], uint64(w.Min))
binary.BigEndian.PutUint64(b[8:16], uint64(w.Max))
i := 16
for _, k := range w.Keys {
binary.BigEndian.PutUint32(b[i:i+4], uint32(len(k)))
i += 4
i += copy(b[i:], k)
orgID, err := w.OrgID.Encode()
if err != nil {
return nil, err
}
bucketID, err := w.BucketID.Encode()
if err != nil {
return nil, err
}
return b[:i], nil
copy(b, orgID)
copy(b[influxdb.IDLength:], bucketID)
binary.BigEndian.PutUint64(b[2*influxdb.IDLength:], uint64(w.Min))
binary.BigEndian.PutUint64(b[2*influxdb.IDLength+8:], uint64(w.Max))
return b[:sz], nil
}
// Type returns DeleteRangeWALEntryType.
func (w *DeleteRangeWALEntry) Type() WalEntryType {
return DeleteRangeWALEntryType
// Type returns DeleteBucketRangeWALEntryType.
func (w *DeleteBucketRangeWALEntry) Type() WalEntryType {
return DeleteBucketRangeWALEntryType
}
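The fixed-size layout (org ID, bucket ID, min, max) makes the new entry cheap to round-trip; a sketch from a caller's perspective, using the same values as the test below:

in := &wal.DeleteBucketRangeWALEntry{
	OrgID:    influxdb.ID(1),
	BucketID: influxdb.ID(2),
	Min:      3,
	Max:      4,
}

b, err := in.MarshalBinary() // always 2*influxdb.IDLength + 16 bytes
if err != nil {
	return err
}

var out wal.DeleteBucketRangeWALEntry
if err := out.UnmarshalBinary(b); err != nil {
	return err
}
// reflect.DeepEqual(in, &out) now holds.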
// WALSegmentWriter writes WAL segments.
@ -1250,12 +1196,10 @@ func (r *WALSegmentReader) Next() bool {
switch WalEntryType(entryType) {
case WriteWALEntryType:
r.entry = &WriteWALEntry{
Values: make(map[string][]Value),
Values: make(map[string][]value.Value),
}
case DeleteWALEntryType:
r.entry = &DeleteWALEntry{}
case DeleteRangeWALEntryType:
r.entry = &DeleteRangeWALEntry{}
case DeleteBucketRangeWALEntryType:
r.entry = &DeleteBucketRangeWALEntry{}
default:
r.err = fmt.Errorf("unknown wal entry type: %v", entryType)
return true
@ -1310,20 +1254,3 @@ func idFromFileName(name string) (int, error) {
return int(id), err
}
// NopWAL implements the Log interface and provides a no-op WAL implementation.
type NopWAL struct{}
func (w NopWAL) Open() error { return nil }
func (w NopWAL) Close() error { return nil }
func (w NopWAL) Path() string { return "" }
func (w NopWAL) LastWriteTime() time.Time { return time.Time{} }
func (w NopWAL) DiskSizeBytes() int64 { return 0 }
func (w NopWAL) WriteMulti(values map[string][]Value) (int, error) { return 0, nil }
func (w NopWAL) DeleteRange(keys [][]byte, min, max int64) (int, error) { return 0, nil }
func (w NopWAL) CloseSegment() error { return nil }
func (w NopWAL) ClosedSegments() ([]string, error) { return nil, nil }
func (w NopWAL) Remove(files []string) error { return nil }

580
storage/wal/wal_test.go Normal file
View File

@ -0,0 +1,580 @@
package wal
import (
"fmt"
"io"
"math/rand"
"os"
"reflect"
"testing"
"github.com/golang/snappy"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/tsdb/value"
)
func TestWALWriter_WriteMulti_Single(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := NewWALSegmentWriter(f)
p1 := value.NewValue(1, 1.1)
p2 := value.NewValue(1, int64(1))
p3 := value.NewValue(1, true)
p4 := value.NewValue(1, "string")
p5 := value.NewValue(1, ^uint64(0))
values := map[string][]value.Value{
"cpu,host=A#!~#float": []value.Value{p1},
"cpu,host=A#!~#int": []value.Value{p2},
"cpu,host=A#!~#bool": []value.Value{p3},
"cpu,host=A#!~#string": []value.Value{p4},
"cpu,host=A#!~#unsigned": []value.Value{p5},
}
entry := &WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := NewWALSegmentReader(f)
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
for k, v := range e.Values {
for i, vv := range v {
if got, exp := vv.String(), values[k][i].String(); got != exp {
t.Fatalf("points mismatch: got %v, exp %v", got, exp)
}
}
}
if n := r.Count(); n != MustReadFileSize(f) {
t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f))
}
}
func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := NewWALSegmentWriter(f)
var points []value.Value
for i := 0; i < 100000; i++ {
points = append(points, value.NewValue(int64(i), int64(1)))
}
values := map[string][]value.Value{
"cpu,host=A,server=01,foo=bar,tag=really-long#!~#float": points,
"mem,host=A,server=01,foo=bar,tag=really-long#!~#float": points,
}
entry := &WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := NewWALSegmentReader(f)
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
for k, v := range e.Values {
for i, vv := range v {
if got, exp := vv.String(), values[k][i].String(); got != exp {
t.Fatalf("points mismatch: got %v, exp %v", got, exp)
}
}
}
if n := r.Count(); n != MustReadFileSize(f) {
t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f))
}
}
func TestWALWriter_WriteMulti_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := NewWALSegmentWriter(f)
p1 := value.NewValue(1, int64(1))
p2 := value.NewValue(1, int64(2))
exp := []struct {
key string
values []value.Value
}{
{"cpu,host=A#!~#value", []value.Value{p1}},
{"cpu,host=B#!~#value", []value.Value{p2}},
}
for _, v := range exp {
entry := &WriteWALEntry{
Values: map[string][]value.Value{v.key: v.values},
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
}
// Seek back to the beginning of the file for reading
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := NewWALSegmentReader(f)
for _, ep := range exp {
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
for k, v := range e.Values {
if got, exp := k, ep.key; got != exp {
t.Fatalf("key mismatch. got %v, exp %v", got, exp)
}
if got, exp := len(v), len(ep.values); got != exp {
t.Fatalf("values length mismatch: got %v, exp %v", got, exp)
}
for i, vv := range v {
if got, exp := vv.String(), ep.values[i].String(); got != exp {
t.Fatalf("points mismatch: got %v, exp %v", got, exp)
}
}
}
}
if n := r.Count(); n != MustReadFileSize(f) {
t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f))
}
}
func TestWALWriter_DeleteBucketRange(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := NewWALSegmentWriter(f)
entry := &DeleteBucketRangeWALEntry{
OrgID: influxdb.ID(1),
BucketID: influxdb.ID(2),
Min: 3,
Max: 4,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := NewWALSegmentReader(f)
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*DeleteBucketRangeWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
if !reflect.DeepEqual(entry, e) {
t.Fatalf("expected %+v but got %+v", entry, e)
}
}
func TestWAL_ClosedSegments(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
w := NewWAL(dir)
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
}
files, err := w.ClosedSegments()
if err != nil {
t.Fatalf("error getting closed segments: %v", err)
}
if got, exp := len(files), 0; got != exp {
t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp)
}
if _, err := w.WriteMulti(map[string][]value.Value{
"cpu,host=A#!~#value": []value.Value{
value.NewValue(1, 1.1),
},
}); err != nil {
t.Fatalf("error writing points: %v", err)
}
if err := w.Close(); err != nil {
t.Fatalf("error closing wal: %v", err)
}
// Re-open the WAL
w = NewWAL(dir)
defer w.Close()
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
}
files, err = w.ClosedSegments()
if err != nil {
t.Fatalf("error getting closed segments: %v", err)
}
if got, exp := len(files), 0; got != exp {
t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp)
}
}
func TestWALWriter_Corrupt(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := NewWALSegmentWriter(f)
corruption := []byte{1, 4, 0, 0, 0}
p1 := value.NewValue(1, 1.1)
values := map[string][]value.Value{
"cpu,host=A#!~#float": []value.Value{p1},
}
entry := &WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
// Write some random bytes to the file to simulate corruption.
if _, err := f.Write(corruption); err != nil {
fatal(t, "corrupt WAL segment", err)
}
// Create the WAL segment reader.
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := NewWALSegmentReader(f)
// Try to decode two entries.
if !r.Next() {
t.Fatalf("expected next, got false")
}
if _, err := r.Read(); err != nil {
fatal(t, "read entry", err)
}
if !r.Next() {
t.Fatalf("expected next, got false")
}
if _, err := r.Read(); err == nil {
fatal(t, "read entry did not return err", nil)
}
// Count should only return size of valid data.
expCount := MustReadFileSize(f) - int64(len(corruption))
if n := r.Count(); n != expCount {
t.Fatalf("wrong count of bytes read, got %d, exp %d", n, expCount)
}
}
// Reproduces a `panic: runtime error: makeslice: cap out of range` when run with
// GOARCH=386 go test -run TestWALSegmentReader_Corrupt -v ./tsdb/engine/tsm1/
func TestWALSegmentReader_Corrupt(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := NewWALSegmentWriter(f)
p4 := value.NewValue(1, "string")
values := map[string][]value.Value{
"cpu,host=A#!~#string": []value.Value{p4, p4},
}
entry := &WriteWALEntry{
Values: values,
}
typ, b := mustMarshalEntry(entry)
// This causes the nvals field to overflow on 32 bit systems which produces a
// negative count and a panic when reading the segment.
b[25] = 255
if err := w.Write(typ, b); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
// Create the WAL segment reader.
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := NewWALSegmentReader(f)
defer r.Close()
// Read all entries; decoding the corrupted segment must not panic.
for r.Next() {
r.Read()
}
}
func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) {
p1 := value.NewValue(1, 1.1)
p2 := value.NewValue(1, int64(1))
p3 := value.NewValue(1, true)
p4 := value.NewValue(1, "string")
p5 := value.NewValue(1, uint64(1))
values := map[string][]value.Value{
"cpu,host=A#!~#float": []value.Value{p1, p1},
"cpu,host=A#!~#int": []value.Value{p2, p2},
"cpu,host=A#!~#bool": []value.Value{p3, p3},
"cpu,host=A#!~#string": []value.Value{p4, p4},
"cpu,host=A#!~#unsigned": []value.Value{p5, p5},
}
w := &WriteWALEntry{
Values: values,
}
b, err := w.MarshalBinary()
if err != nil {
t.Fatalf("unexpected error, got %v", err)
}
// Test every possible truncation of a write WAL entry
for i := 0; i < len(b); i++ {
// re-allocate to ensure capacity would be exceeded if slicing
truncated := make([]byte, i)
copy(truncated, b[:i])
err := w.UnmarshalBinary(truncated)
if err != nil && err != ErrWALCorrupt {
t.Fatalf("unexpected error: %v", err)
}
}
}
func TestDeleteBucketRangeWALEntry_UnmarshalBinary(t *testing.T) {
for i := 0; i < 1000; i++ {
in := &DeleteBucketRangeWALEntry{
OrgID: influxdb.ID(rand.Int63()) + 1,
BucketID: influxdb.ID(rand.Int63()) + 1,
Min: rand.Int63(),
Max: rand.Int63(),
}
b, err := in.MarshalBinary()
if err != nil {
t.Fatalf("unexpected error, got %v", err)
}
out := &DeleteBucketRangeWALEntry{}
if err := out.UnmarshalBinary(b); err != nil {
t.Fatalf("%v", err)
}
if !reflect.DeepEqual(in, out) {
t.Errorf("got %+v, expected %+v", out, in)
}
}
}
func TestWriteWALSegment_UnmarshalBinary_DeleteBucketRangeWALCorrupt(t *testing.T) {
w := &DeleteBucketRangeWALEntry{
OrgID: influxdb.ID(1),
BucketID: influxdb.ID(2),
Min: 3,
Max: 4,
}
b, err := w.MarshalBinary()
if err != nil {
t.Fatalf("unexpected error, got %v", err)
}
// Test every possible truncation of a delete bucket range WAL entry
for i := 0; i < len(b); i++ {
// re-allocate to ensure capacity would be exceeded if slicing
truncated := make([]byte, i)
copy(truncated, b[:i])
err := w.UnmarshalBinary(truncated)
if err != nil && err != ErrWALCorrupt {
t.Fatalf("unexpected error: %v", err)
}
}
}
func BenchmarkWALSegmentWriter(b *testing.B) {
points := map[string][]value.Value{}
for i := 0; i < 5000; i++ {
k := "cpu,host=A#!~#value"
points[k] = append(points[k], value.NewValue(int64(i), 1.1))
}
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := NewWALSegmentWriter(f)
write := &WriteWALEntry{
Values: points,
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := w.Write(mustMarshalEntry(write)); err != nil {
b.Fatalf("unexpected error writing entry: %v", err)
}
}
}
func BenchmarkWALSegmentReader(b *testing.B) {
points := map[string][]value.Value{}
for i := 0; i < 5000; i++ {
k := "cpu,host=A#!~#value"
points[k] = append(points[k], value.NewValue(int64(i), 1.1))
}
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := NewWALSegmentWriter(f)
write := &WriteWALEntry{
Values: points,
}
for i := 0; i < 100; i++ {
if err := w.Write(mustMarshalEntry(write)); err != nil {
b.Fatalf("unexpected error writing entry: %v", err)
}
}
r := NewWALSegmentReader(f)
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
f.Seek(0, io.SeekStart)
b.StartTimer()
for r.Next() {
_, err := r.Read()
if err != nil {
b.Fatalf("unexpected error reading entry: %v", err)
}
}
}
}
// MustReadFileSize returns the size of the file, or panics.
func MustReadFileSize(f *os.File) int64 {
stat, err := os.Stat(f.Name())
if err != nil {
panic(fmt.Sprintf("failed to get size of file at %s: %s", f.Name(), err.Error()))
}
return stat.Size()
}
func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
bytes := make([]byte, 1024<<2)
b, err := entry.Encode(bytes)
if err != nil {
panic(fmt.Sprintf("error encoding: %v", err))
}
return entry.Type(), snappy.Encode(b, b)
}

View File

@ -179,7 +179,10 @@ from(bucketID: "000000000000000a")
if err != nil {
return nil, err
}
if len(runs) != 1 {
if len(runs) == 0 {
return nil, ErrRunNotFound
}
if len(runs) > 1 {
return nil, fmt.Errorf("expected one run, got %d", len(runs))
}

View File

@ -10,7 +10,7 @@ import (
"time"
platform "github.com/influxdata/influxdb"
"github.com/opentracing/opentracing-go"
opentracing "github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
@ -659,8 +659,8 @@ func (r *runner) executeAndWait(ctx context.Context, qr QueuedRun, runLogger *za
}
}()
// TODO(mr): handle res.IsRetryable().
_, err = rp.Wait()
// TODO(mr): handle rr.IsRetryable().
rr, err := rp.Wait()
close(ready)
if err != nil {
if err == ErrRunCanceled {
@ -678,6 +678,13 @@ func (r *runner) executeAndWait(ctx context.Context, qr QueuedRun, runLogger *za
atomic.StoreUint32(r.state, runnerIdle)
return
}
if err := rr.Err(); err != nil {
runLogger.Info("Run failed to execute", zap.Error(err))
// TODO(mr): retry?
r.updateRunState(qr, RunFail, runLogger)
atomic.StoreUint32(r.state, runnerIdle)
return
}
if err := r.desiredState.FinishRun(r.ctx, qr.TaskID, qr.RunID); err != nil {
runLogger.Info("Failed to finish run", zap.Error(err))

View File

@ -483,7 +483,7 @@ func TestScheduler_RunLog(t *testing.T) {
pollForRunStatus(t, rl, task.ID, 2, 1, backend.RunStarted.String())
// Finish with failure.
// Finish with failure to create the run.
promises[0].Finish(nil, errors.New("forced failure"))
if _, err := e.PollForNumberRunning(task.ID, 0); err != nil {
t.Fatal(err)
@ -491,7 +491,7 @@ func TestScheduler_RunLog(t *testing.T) {
pollForRunStatus(t, rl, task.ID, 2, 1, backend.RunFail.String())
// One more run, but cancel this time.
// Create a new run that starts but fails.
s.Tick(8)
promises, err = e.PollForNumberRunning(task.ID, 1)
if err != nil {
@ -499,6 +499,20 @@ func TestScheduler_RunLog(t *testing.T) {
}
pollForRunStatus(t, rl, task.ID, 3, 2, backend.RunStarted.String())
promises[0].Finish(mock.NewRunResult(errors.New("started but failed to finish properly"), false), nil)
if _, err := e.PollForNumberRunning(task.ID, 0); err != nil {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, 3, 2, backend.RunFail.String())
// One more run, but cancel this time.
s.Tick(9)
promises, err = e.PollForNumberRunning(task.ID, 1)
if err != nil {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, 4, 3, backend.RunStarted.String())
// Finish with failure.
promises[0].Cancel()
@ -506,7 +520,7 @@ func TestScheduler_RunLog(t *testing.T) {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, 3, 2, backend.RunCanceled.String())
pollForRunStatus(t, rl, task.ID, 4, 3, backend.RunCanceled.String())
}
func TestScheduler_Metrics(t *testing.T) {

View File

@ -330,6 +330,11 @@ func findRunByIDTest(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFu
if reflect.DeepEqual(returnedRun, rr2) {
t.Fatalf("updateing returned run modified RunStore data")
}
_, err = reader.FindRunByID(ctx, task.Org, 0xccc)
if err != backend.ErrRunNotFound {
t.Fatalf("expected finding run with invalid ID to return %v, got %v", backend.ErrRunNotFound, err)
}
}
func listLogsTest(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFunc) {

View File

@ -2,7 +2,7 @@ package tsdb
import "github.com/influxdata/influxdb/tsdb/cursors"
// These aliases exist to maintain api compatability when they were moved
// These aliases exist to maintain api compatibility when they were moved
// into their own package to avoid having a heavy dependency in order to
// talk about consuming data.

83
tsdb/tsi1/DESIGN.md Normal file
View File

@ -0,0 +1,83 @@
# Time-Series Index
## Introduction
## Architecture
### index structures and access patterns
### series ID sets
### partitioning and file types
### compactions
## File Format
## Access Times
### Insertion
TODO
### Retrieval
This section provides a general idea of the typical timings one can expect when accessing the index.
#### Measurement Retrieval
Approximate times for retrieving _all_ measurements, equivalent to executing `SHOW MEASUREMENTS`, follow. These types of query only involve materialising data held in the index.
- Retrieve 1 measurement from TSI index: `~100µs`
- Retrieve 100 measurements from TSI index: `~200µs`
- Retrieve 10,000 measurements from TSI index: `~8ms`
Note: as the number of measurements gets larger, much of the time will be spent allocating and materialising the measurements into a `[][]byte` to be returned to the caller.
#### Tag Keys Retrieval
Approximate times for retrieving _all_ tag keys, equivalent to executing `SHOW TAG KEYS`, follow. These types of query only involve materialising data held in the index.
- Retrieve 1 tag key from TSI index: `~65µs`
- Retrieve 100 tag keys from TSI index: `~90µs`
- Retrieve 1,000 tag keys from TSI index: `~1.3ms`
Note: the times here show only the TSI index access for retrieving the tag keys. In practice, the measurement retrieval times need to be added on top, since you need a measurement name to access the tag keys.
#### Tag Value Retrieval
Approximate times for retrieving _all_ tag values for a _specific_ tag key, equivalent to `SHOW TAG VALUES WITH KEY = "region"`, follow. These types of query only involve materialising data held in the index.
- Retrieve 1 tag value from TSI index: `~20µs`
- Retrieve 100 tag values from TSI index: `~240µs`
- Retrieve 10,000 tag values from TSI index: `~13ms`
#### Series ID Retrieval
Approximate times for retrieving a set of matching series ids for different total cardinalities, follow.
- Retrieve 1 series id for db with cardinality 1: `~50µs` (`10µs`)
- Retrieve 10 series ids for db with cardinality 100: `~50µs` (`10µs`)
- Retrieve 100 series ids for db with cardinality 10,000: `~80µs` (`10µs`)
- Retrieve 10,000 series ids for db with cardinality 1,000,000: `~600µs` (`10µs`)
- Retrieve 100,000 series ids for db with cardinality 10,000,000: `~22ms` (`10µs`)
Note: the initial time is for the first observation; the second (parenthesised) time is for subsequent observations, which make use of the TSI bitset cache introduced in [#10234](https://github.com/influxdata/influxdb/pull/10234).
#### Complex Series ID Retrieval
Approximate times for retrieving a set of matching series ids for different total cardinalities follow. In these cases, each retrieval is based on two tag key/value predicates, e.g., `SHOW SERIES WHERE "region" = 'west' AND "zone" = 'a'`.
- Retrieve 1,000 series ids for db with cardinality 1,000,000: `~8ms` (`15µs`)
- Retrieve 10,000 series ids for db with cardinality 10,000,000: `~7ms` (`25µs`)
Note: the initial time is for the first observation; the second (parenthesised) time is for subsequent observations, which make use of the TSI bitset cache introduced in [#10234](https://github.com/influxdata/influxdb/pull/10234).
In these more complex cases, a series ID set is retrieved for each of the predicates. The sets are then intersected to identify the final set. Cache times, then, are typically doubled since each series id set for each predicate is stored separately.
There will be some additional overhead for the intersection operation.
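To make the intersection step concrete, below is a minimal Go sketch of combining the series id sets for two predicates. The real index operates on compressed roaring bitmaps rather than plain maps, so the `intersect` helper and the map-based sets here are purely illustrative.

```go
package main

import "fmt"

// intersect returns the series ids present in both predicate result sets.
// Sets are modelled as map[uint64]struct{} for illustration only.
func intersect(a, b map[uint64]struct{}) map[uint64]struct{} {
	// Iterate over the smaller set to bound the work.
	if len(b) < len(a) {
		a, b = b, a
	}
	out := make(map[uint64]struct{})
	for id := range a {
		if _, ok := b[id]; ok {
			out[id] = struct{}{}
		}
	}
	return out
}

func main() {
	region := map[uint64]struct{}{1: {}, 2: {}, 3: {}} // series matching region = 'west'
	zone := map[uint64]struct{}{2: {}, 3: {}, 4: {}}   // series matching zone = 'a'
	fmt.Println(len(intersect(region, zone)))          // prints 2
}
```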

View File

@ -83,12 +83,12 @@ func (c *floatArrayAscendingCursor) Next() *tsdb.FloatArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -126,7 +126,7 @@ func (c *floatArrayAscendingCursor) Next() *tsdb.FloatArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
pos++
c.cache.pos++
}
@ -247,12 +247,12 @@ func (c *floatArrayDescendingCursor) Next() *tsdb.FloatArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -285,7 +285,7 @@ func (c *floatArrayDescendingCursor) Next() *tsdb.FloatArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value
c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).RawValue()
pos++
c.cache.pos--
}
@ -391,12 +391,12 @@ func (c *integerArrayAscendingCursor) Next() *tsdb.IntegerArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -434,7 +434,7 @@ func (c *integerArrayAscendingCursor) Next() *tsdb.IntegerArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
pos++
c.cache.pos++
}
@ -555,12 +555,12 @@ func (c *integerArrayDescendingCursor) Next() *tsdb.IntegerArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -593,7 +593,7 @@ func (c *integerArrayDescendingCursor) Next() *tsdb.IntegerArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value
c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).RawValue()
pos++
c.cache.pos--
}
@ -699,12 +699,12 @@ func (c *unsignedArrayAscendingCursor) Next() *tsdb.UnsignedArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -742,7 +742,7 @@ func (c *unsignedArrayAscendingCursor) Next() *tsdb.UnsignedArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
pos++
c.cache.pos++
}
@ -863,12 +863,12 @@ func (c *unsignedArrayDescendingCursor) Next() *tsdb.UnsignedArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -901,7 +901,7 @@ func (c *unsignedArrayDescendingCursor) Next() *tsdb.UnsignedArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value
c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).RawValue()
pos++
c.cache.pos--
}
@ -1007,12 +1007,12 @@ func (c *stringArrayAscendingCursor) Next() *tsdb.StringArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -1050,7 +1050,7 @@ func (c *stringArrayAscendingCursor) Next() *tsdb.StringArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
pos++
c.cache.pos++
}
@ -1173,12 +1173,12 @@ func (c *stringArrayDescendingCursor) Next() *tsdb.StringArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -1211,7 +1211,7 @@ func (c *stringArrayDescendingCursor) Next() *tsdb.StringArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).RawValue()
pos++
c.cache.pos--
}
@ -1319,12 +1319,12 @@ func (c *booleanArrayAscendingCursor) Next() *tsdb.BooleanArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -1362,7 +1362,7 @@ func (c *booleanArrayAscendingCursor) Next() *tsdb.BooleanArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
pos++
c.cache.pos++
}
@ -1483,12 +1483,12 @@ func (c *booleanArrayDescendingCursor) Next() *tsdb.BooleanArray {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -1521,7 +1521,7 @@ func (c *booleanArrayDescendingCursor) Next() *tsdb.BooleanArray {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value
c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).RawValue()
pos++
c.cache.pos--
}

View File

@ -54,7 +54,7 @@ c.end = end
})
}
func (c *{{$type}}) Err() error { return nil }
func (c *{{$type}}) Err() error { return nil }
// close closes the cursor and any dependent cursors.
func (c *{{$type}}) Close() {
@ -82,12 +82,12 @@ func (c *{{$type}}) Next() {{$arrayType}} {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
c.cache.pos++
c.tsm.pos++
} else if ckey < tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
c.cache.pos++
} else {
c.res.Timestamps[pos] = tkey
@ -125,7 +125,7 @@ func (c *{{$type}}) Next() {{$arrayType}} {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
pos++
c.cache.pos++
}
@ -228,7 +228,7 @@ func (c *{{$type}}) reset(seek, end int64, cacheValues Values, tsmKeyCursor *Key
}
}
func (c *{{$type}}) Err() error { return nil }
func (c *{{$type}}) Err() error { return nil }
func (c *{{$type}}) Close() {
if c.tsm.keyCursor != nil {
@ -254,12 +254,12 @@ func (c *{{$type}}) Next() {{$arrayType}} {
tkey := tvals.Timestamps[c.tsm.pos]
if ckey == tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
c.cache.pos--
c.tsm.pos--
} else if ckey > tkey {
c.res.Timestamps[pos] = ckey
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
c.cache.pos--
} else {
c.res.Timestamps[pos] = tkey
@ -292,7 +292,7 @@ func (c *{{$type}}) Next() {{$arrayType}} {
// TSM was exhausted
for pos < len(c.res.Timestamps) && c.cache.pos >= 0 {
c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano()
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value
c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).RawValue()
pos++
c.cache.pos--
}

View File

@ -0,0 +1,42 @@
[
{
"Name":"Float",
"name":"float",
"Type":"float64",
"ValueType":"FloatValue",
"Nil":"0",
"Size":"8"
},
{
"Name":"Integer",
"name":"integer",
"Type":"int64",
"ValueType":"IntegerValue",
"Nil":"0",
"Size":"8"
},
{
"Name":"Unsigned",
"name":"unsigned",
"Type":"uint64",
"ValueType":"UnsignedValue",
"Nil":"0",
"Size":"8"
},
{
"Name":"String",
"name":"string",
"Type":"string",
"ValueType":"StringValue",
"Nil":"\"\"",
"Size":"0"
},
{
"Name":"Boolean",
"name":"boolean",
"Type":"bool",
"ValueType":"BooleanValue",
"Nil":"false",
"Size":"1"
}
]

View File

@ -1,14 +1,15 @@
package tsm1
import (
"bytes"
"fmt"
"math"
"os"
"sync"
"sync/atomic"
"time"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/storage/wal"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxql"
"github.com/prometheus/client_golang/prometheus"
@ -545,50 +546,50 @@ func (c *Cache) Values(key []byte) Values {
return values
}
// Delete removes all values for the given keys from the cache.
func (c *Cache) Delete(keys [][]byte) {
c.DeleteRange(keys, math.MinInt64, math.MaxInt64)
}
// DeleteRange removes the values for all keys containing points
// with timestamps between min and max from the cache.
//
// TODO(edd): Lock usage could possibly be optimised if necessary.
func (c *Cache) DeleteRange(keys [][]byte, min, max int64) {
// DeleteBucketRange removes values for all keys containing points
// with timestamps between min and max contained in the bucket identified
// by name from the cache.
func (c *Cache) DeleteBucketRange(name []byte, min, max int64) {
c.init()
// TODO(edd/jeff): find a way to optimize lock usage
c.mu.Lock()
defer c.mu.Unlock()
var toDelete [][]byte
var total uint64
for _, k := range keys {
// Make sure key exist in the cache, skip if it does not
e := c.store.entry(k)
if e == nil {
continue
}
// applySerial only errors if the closure returns an error.
_ = c.store.applySerial(func(k []byte, e *entry) error {
if !bytes.HasPrefix(k, name) {
return nil
}
total += uint64(e.size())
// Everything is being deleted.
// if everything is being deleted, just stage it to be deleted and move on.
if min == math.MinInt64 && max == math.MaxInt64 {
total += uint64(len(k)) // all entries and the key.
c.store.remove(k)
continue
toDelete = append(toDelete, k)
return nil
}
// Filter what to delete by time range.
// filter the values and subtract out the remaining bytes from the reduction.
e.filter(min, max)
total -= uint64(e.size())
// if it has no entries left, flag it to be deleted.
if e.count() == 0 {
// Nothing left in cache for that key
total += uint64(len(k)) // all entries and the key.
c.store.remove(k)
continue
toDelete = append(toDelete, k)
}
// Just update what is being deleted by the size of the filtered entries.
total -= uint64(e.size())
return nil
})
for _, k := range toDelete {
total += uint64(len(k))
c.store.remove(k)
}
c.tracker.DecCacheSize(total) // Decrease the live cache size.
c.tracker.DecCacheSize(total)
c.tracker.SetMemBytes(uint64(c.Size()))
}
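// For reference, the key prefix matched by bytes.HasPrefix above is derived
// from the org and bucket IDs; this mirrors the CacheLoader further down and
// is shown only as a sketch:
//
//	encoded := tsdb.EncodeName(orgID, bucketID)
//	name := models.EscapeMeasurement(encoded[:])
//	cache.DeleteBucketRange(name, math.MinInt64, math.MaxInt64)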
@ -624,92 +625,45 @@ func (c *Cache) ApplyEntryFn(f func(key []byte, entry *entry) error) error {
}
// CacheLoader processes a set of WAL segment files, and loads a cache with the data
// contained within those files. Processing of the supplied files take place in the
// order they exist in the files slice.
// contained within those files.
type CacheLoader struct {
files []string
Logger *zap.Logger
reader *wal.WALReader
}
// NewCacheLoader returns a new instance of a CacheLoader.
func NewCacheLoader(files []string) *CacheLoader {
return &CacheLoader{
files: files,
Logger: zap.NewNop(),
reader: wal.NewWALReader(files),
}
}
// Load returns a cache loaded with the data contained within the segment files.
// If, during reading of a segment file, corruption is encountered, that segment
// file is truncated up to and including the last valid byte, and processing
// continues with the next segment file.
func (cl *CacheLoader) Load(cache *Cache) error {
return cl.reader.Read(func(entry wal.WALEntry) error {
switch en := entry.(type) {
case *wal.WriteWALEntry:
return cache.WriteMulti(en.Values)
var r *WALSegmentReader
for _, fn := range cl.files {
if err := func() error {
f, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return err
}
defer f.Close()
case *wal.DeleteBucketRangeWALEntry:
// TODO(edd): we need to clean up how we're encoding the prefix so that we
// don't have to remember to get it right everywhere we need to touch TSM data.
encoded := tsdb.EncodeName(en.OrgID, en.BucketID)
name := models.EscapeMeasurement(encoded[:])
// Log some information about the segments.
stat, err := os.Stat(f.Name())
if err != nil {
return err
}
cl.Logger.Info("Reading file", zap.String("path", f.Name()), zap.Int64("size", stat.Size()))
// Nothing to read, skip it
if stat.Size() == 0 {
return nil
}
if r == nil {
r = NewWALSegmentReader(f)
defer r.Close()
} else {
r.Reset(f)
}
for r.Next() {
entry, err := r.Read()
if err != nil {
n := r.Count()
cl.Logger.Info("File corrupt", zap.Error(err), zap.String("path", f.Name()), zap.Int64("pos", n))
if err := f.Truncate(n); err != nil {
return err
}
break
}
switch t := entry.(type) {
case *WriteWALEntry:
if err := cache.WriteMulti(t.Values); err != nil {
return err
}
case *DeleteRangeWALEntry:
cache.DeleteRange(t.Keys, t.Min, t.Max)
case *DeleteWALEntry:
cache.Delete(t.Keys)
}
}
return r.Close()
}(); err != nil {
return err
cache.DeleteBucketRange(name, en.Min, en.Max)
return nil
}
}
return nil
return nil
})
}
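// A minimal usage sketch for the loader above (the segment paths and cache
// size are illustrative assumptions, and error handling is reduced to its
// simplest form):
//
//	cache := NewCache(1 << 20)
//	loader := NewCacheLoader(segmentPaths) // e.g. the closed WAL segment files
//	loader.WithLogger(logger)
//	if err := loader.Load(cache); err != nil {
//		return err
//	}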
// WithLogger sets the logger on the CacheLoader.
func (cl *CacheLoader) WithLogger(log *zap.Logger) {
cl.Logger = log.With(zap.String("service", "cacheloader"))
func (cl *CacheLoader) WithLogger(logger *zap.Logger) {
cl.reader.WithLogger(logger.With(zap.String("service", "cacheloader")))
}
// LastWriteTime returns the time that the cache was last written to.
func (c *Cache) LastWriteTime() time.Time {
c.mu.RLock()
defer c.mu.RUnlock()

View File

@ -15,6 +15,8 @@ import (
"sync/atomic"
"testing"
"github.com/influxdata/influxdb/storage/wal"
"github.com/golang/snappy"
)
@ -167,7 +169,7 @@ func TestCache_CacheWriteMulti_TypeConflict(t *testing.T) {
}
}
func TestCache_Cache_DeleteRange(t *testing.T) {
func TestCache_Cache_DeleteBucketRange(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
@ -187,7 +189,7 @@ func TestCache_Cache_DeleteRange(t *testing.T) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
c.DeleteRange([][]byte{[]byte("bar")}, 2, math.MaxInt64)
c.DeleteBucketRange([]byte("bar"), 2, math.MaxInt64)
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after delete, exp %v, got %v", exp, keys)
@ -206,7 +208,7 @@ func TestCache_Cache_DeleteRange(t *testing.T) {
}
}
func TestCache_DeleteRange_NoValues(t *testing.T) {
func TestCache_DeleteBucketRange_NoValues(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
@ -226,7 +228,7 @@ func TestCache_DeleteRange_NoValues(t *testing.T) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
c.DeleteRange([][]byte{[]byte("foo")}, math.MinInt64, math.MaxInt64)
c.DeleteBucketRange([]byte("foo"), math.MinInt64, math.MaxInt64)
if exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
@ -241,7 +243,7 @@ func TestCache_DeleteRange_NoValues(t *testing.T) {
}
}
func TestCache_DeleteRange_NotSorted(t *testing.T) {
func TestCache_DeleteBucketRange_NotSorted(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(3, 3.0)
v2 := NewValue(2, 2.0)
@ -261,7 +263,7 @@ func TestCache_DeleteRange_NotSorted(t *testing.T) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
c.DeleteRange([][]byte{[]byte("foo")}, 1, 3)
c.DeleteBucketRange([]byte("foo"), 1, 3)
if exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after delete, exp %v, got %v", exp, keys)
@ -276,49 +278,10 @@ func TestCache_DeleteRange_NotSorted(t *testing.T) {
}
}
func TestCache_Cache_Delete(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
v2 := NewValue(3, 3.0)
values := Values{v0, v1, v2}
valuesSize := uint64(v0.Size() + v1.Size() + v2.Size())
c := NewCache(30 * valuesSize)
if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil {
t.Fatalf("failed to write key foo to cache: %s", err.Error())
}
if n := c.Size(); n != 2*valuesSize+6 {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n)
}
if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
c.Delete([][]byte{[]byte("bar")})
if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) {
t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys)
}
if got, exp := c.Size(), valuesSize+3; exp != got {
t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got)
}
if got, exp := len(c.Values([]byte("bar"))), 0; got != exp {
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
}
if got, exp := len(c.Values([]byte("foo"))), 3; got != exp {
t.Fatalf("cache values mismatch: got %v, exp %v", got, exp)
}
}
func TestCache_Cache_Delete_NonExistent(t *testing.T) {
func TestCache_DeleteBucketRange_NonExistent(t *testing.T) {
c := NewCache(1024)
c.Delete([][]byte{[]byte("bar")})
c.DeleteBucketRange([]byte("bar"), math.MinInt64, math.MaxInt64)
if got, exp := c.Size(), uint64(0); exp != got {
t.Fatalf("cache size incorrect exp %d, got %d", exp, got)
@ -604,7 +567,7 @@ func TestCacheLoader_LoadSingle(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
f := mustTempFile(dir)
w := NewWALSegmentWriter(f)
w := wal.NewWALSegmentWriter(f)
p1 := NewValue(1, 1.1)
p2 := NewValue(1, int64(1))
@ -616,7 +579,7 @@ func TestCacheLoader_LoadSingle(t *testing.T) {
"baz": {p3},
}
entry := &WriteWALEntry{
entry := &wal.WriteWALEntry{
Values: values,
}
@ -676,7 +639,7 @@ func TestCacheLoader_LoadDouble(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
f1, f2 := mustTempFile(dir), mustTempFile(dir)
w1, w2 := NewWALSegmentWriter(f1), NewWALSegmentWriter(f2)
w1, w2 := wal.NewWALSegmentWriter(f1), wal.NewWALSegmentWriter(f2)
p1 := NewValue(1, 1.1)
p2 := NewValue(1, int64(1))
@ -685,8 +648,8 @@ func TestCacheLoader_LoadDouble(t *testing.T) {
// Write first and second segment.
segmentWrite := func(w *WALSegmentWriter, values map[string][]Value) {
entry := &WriteWALEntry{
segmentWrite := func(w *wal.WALSegmentWriter, values map[string][]Value) {
entry := &wal.WriteWALEntry{
Values: values,
}
if err := w1.Write(mustMarshalEntry(entry)); err != nil {
@ -735,73 +698,6 @@ func TestCacheLoader_LoadDouble(t *testing.T) {
}
}
// Ensure the CacheLoader can load deleted series
func TestCacheLoader_LoadDeleted(t *testing.T) {
// Create a WAL segment.
dir := mustTempDir()
defer os.RemoveAll(dir)
f := mustTempFile(dir)
w := NewWALSegmentWriter(f)
p1 := NewValue(1, 1.0)
p2 := NewValue(2, 2.0)
p3 := NewValue(3, 3.0)
values := map[string][]Value{
"foo": {p1, p2, p3},
}
entry := &WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
t.Fatal("write points", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
dentry := &DeleteRangeWALEntry{
Keys: [][]byte{[]byte("foo")},
Min: 2,
Max: 3,
}
if err := w.Write(mustMarshalEntry(dentry)); err != nil {
t.Fatal("write points", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
// Load the cache using the segment.
cache := NewCache(1024)
loader := NewCacheLoader([]string{f.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
// Reload the cache using the segment.
cache = NewCache(1024)
loader = NewCacheLoader([]string{f.Name()})
if err := loader.Load(cache); err != nil {
t.Fatalf("failed to load cache: %s", err.Error())
}
// Check the cache.
if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) {
t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1})
}
}
func TestCache_Split(t *testing.T) {
v0 := NewValue(1, 1.0)
v1 := NewValue(2, 2.0)
@ -857,7 +753,7 @@ func mustTempFile(dir string) *os.File {
return f
}
func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
func mustMarshalEntry(entry wal.WALEntry) (wal.WalEntryType, []byte) {
bytes := make([]byte, 1024<<2)
b, err := entry.Encode(bytes)

View File

@ -212,8 +212,8 @@ type FloatValues []FloatValue
func NewFloatArrayFromValues(v FloatValues) *tsdb.FloatArray {
a := tsdb.NewFloatArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -442,8 +442,8 @@ func encodeFloatValuesBlock(buf []byte, values []FloatValue) ([]byte, error) {
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(v.value)
tsenc.Write(v.UnixNano())
venc.Write(v.RawValue())
}
venc.Flush()
@ -482,8 +482,8 @@ type IntegerValues []IntegerValue
func NewIntegerArrayFromValues(v IntegerValues) *tsdb.IntegerArray {
a := tsdb.NewIntegerArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -712,8 +712,8 @@ func encodeIntegerValuesBlock(buf []byte, values []IntegerValue) ([]byte, error)
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(v.value)
tsenc.Write(v.UnixNano())
venc.Write(v.RawValue())
}
venc.Flush()
@ -752,8 +752,8 @@ type UnsignedValues []UnsignedValue
func NewUnsignedArrayFromValues(v UnsignedValues) *tsdb.UnsignedArray {
a := tsdb.NewUnsignedArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -982,8 +982,8 @@ func encodeUnsignedValuesBlock(buf []byte, values []UnsignedValue) ([]byte, erro
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(int64(v.value))
tsenc.Write(v.UnixNano())
venc.Write(int64(v.RawValue()))
}
venc.Flush()
@ -1022,8 +1022,8 @@ type StringValues []StringValue
func NewStringArrayFromValues(v StringValues) *tsdb.StringArray {
a := tsdb.NewStringArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -1252,8 +1252,8 @@ func encodeStringValuesBlock(buf []byte, values []StringValue) ([]byte, error) {
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(v.value)
tsenc.Write(v.UnixNano())
venc.Write(v.RawValue())
}
venc.Flush()
@ -1292,8 +1292,8 @@ type BooleanValues []BooleanValue
func NewBooleanArrayFromValues(v BooleanValues) *tsdb.BooleanArray {
a := tsdb.NewBooleanArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -1522,8 +1522,8 @@ func encodeBooleanValuesBlock(buf []byte, values []BooleanValue) ([]byte, error)
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write(v.value)
tsenc.Write(v.UnixNano())
venc.Write(v.RawValue())
}
venc.Flush()

View File

@ -15,8 +15,8 @@ type {{.Name}}Values []{{.Name}}Value
func New{{.Name}}ArrayFromValues(v {{.Name}}Values) *tsdb.{{.Name}}Array {
a := tsdb.New{{.Name}}ArrayLen(len(v))
for i, val := range v {
a.Timestamps[i] = val.unixnano
a.Values[i] = val.value
a.Timestamps[i] = val.UnixNano()
a.Values[i] = val.RawValue()
}
return a
}
@ -247,8 +247,8 @@ func encode{{ .Name }}ValuesBlock(buf []byte, values []{{.Name}}Value) ([]byte,
var b []byte
err := func() error {
for _, v := range values {
tsenc.Write(v.unixnano)
venc.Write({{if .CastType}}{{.CastType}}(v.value){{else}}v.value{{end}})
tsenc.Write(v.UnixNano())
venc.Write({{if .CastType}}{{.CastType}}(v.RawValue()){{else}}v.RawValue(){{end}})
}
venc.Flush()

View File

@ -14,7 +14,7 @@ func makeIntegerValues(count int, min, max int64) IntegerValues {
inc := (max - min) / int64(count)
for i := 0; i < count; i++ {
vals[i].unixnano = ts
vals[i] = NewRawIntegerValue(ts, 0)
ts += inc
}
@ -24,7 +24,7 @@ func makeIntegerValues(count int, min, max int64) IntegerValues {
func makeIntegerValuesFromSlice(t []int64) IntegerValues {
iv := make(IntegerValues, len(t))
for i, v := range t {
iv[i].unixnano = v
iv[i] = NewRawIntegerValue(v, 0)
}
return iv
}
@ -91,7 +91,7 @@ func TestIntegerValues_Exclude(t *testing.T) {
vals = vals.Exclude(tc.min, tc.max)
var got []int64
for _, v := range vals {
got = append(got, v.unixnano)
got = append(got, v.UnixNano())
}
opt := cmp.AllowUnexported(IntegerValue{})
if !cmp.Equal(tc.exp, got, opt) {
@ -122,7 +122,7 @@ func TestIntegerValues_Include(t *testing.T) {
vals = vals.Include(tc.min, tc.max)
var got []int64
for _, v := range vals {
got = append(got, v.unixnano)
got = append(got, v.UnixNano())
}
opt := cmp.AllowUnexported(IntegerValue{})
if !cmp.Equal(tc.exp, got, opt) {

View File

@ -4,10 +4,8 @@ import (
"encoding/binary"
"fmt"
"runtime"
"time"
"github.com/influxdata/influxdb/pkg/pool"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxql"
)
@ -93,89 +91,6 @@ var (
})
)
// Value represents a TSM-encoded value.
type Value interface {
// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.
UnixNano() int64
// Value returns the underlying value.
Value() interface{}
// Size returns the number of bytes necessary to represent the value and its timestamp.
Size() int
// String returns the string representation of the value and its timestamp.
String() string
// internalOnly is unexported to ensure implementations of Value
// can only originate in this package.
internalOnly()
}
// NewValue returns a new Value with the underlying type dependent on value.
func NewValue(t int64, value interface{}) Value {
switch v := value.(type) {
case int64:
return IntegerValue{unixnano: t, value: v}
case uint64:
return UnsignedValue{unixnano: t, value: v}
case float64:
return FloatValue{unixnano: t, value: v}
case bool:
return BooleanValue{unixnano: t, value: v}
case string:
return StringValue{unixnano: t, value: v}
}
return EmptyValue{}
}
// NewIntegerValue returns a new integer value.
func NewIntegerValue(t int64, v int64) Value {
return IntegerValue{unixnano: t, value: v}
}
// NewUnsignedValue returns a new unsigned integer value.
func NewUnsignedValue(t int64, v uint64) Value {
return UnsignedValue{unixnano: t, value: v}
}
// NewFloatValue returns a new float value.
func NewFloatValue(t int64, v float64) Value {
return FloatValue{unixnano: t, value: v}
}
// NewBooleanValue returns a new boolean value.
func NewBooleanValue(t int64, v bool) Value {
return BooleanValue{unixnano: t, value: v}
}
// NewStringValue returns a new string value.
func NewStringValue(t int64, v string) Value {
return StringValue{unixnano: t, value: v}
}
// EmptyValue is used when there is no appropriate other value.
type EmptyValue struct{}
// UnixNano returns tsdb.EOF.
func (e EmptyValue) UnixNano() int64 { return tsdb.EOF }
// Value returns nil.
func (e EmptyValue) Value() interface{} { return nil }
// Size returns 0.
func (e EmptyValue) Size() int { return 0 }
// String returns the empty string.
func (e EmptyValue) String() string { return "" }
func (EmptyValue) internalOnly() {}
func (StringValue) internalOnly() {}
func (IntegerValue) internalOnly() {}
func (UnsignedValue) internalOnly() {}
func (BooleanValue) internalOnly() {}
func (FloatValue) internalOnly() {}
// Encode converts the values to a byte slice. If there are no values,
// this function panics.
func (a Values) Encode(buf []byte) ([]byte, error) {
@ -318,32 +233,6 @@ func DecodeBlock(block []byte, vals []Value) ([]Value, error) {
}
}
// FloatValue represents a float64 value.
type FloatValue struct {
unixnano int64
value float64
}
// UnixNano returns the timestamp of the value.
func (v FloatValue) UnixNano() int64 {
return v.unixnano
}
// Value returns the underlying float64 value.
func (v FloatValue) Value() interface{} {
return v.value
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v FloatValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v FloatValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.value)
}
func encodeFloatBlock(buf []byte, values []Value) ([]byte, error) {
if len(values) == 0 {
return nil, nil
@ -373,8 +262,8 @@ func encodeFloatBlockUsing(buf []byte, values []Value, tsenc TimeEncoder, venc *
for _, v := range values {
vv := v.(FloatValue)
tsenc.Write(vv.unixnano)
venc.Write(vv.value)
tsenc.Write(vv.UnixNano())
venc.Write(vv.RawValue())
}
venc.Flush()
@ -432,7 +321,7 @@ func DecodeFloatBlock(block []byte, a *[]FloatValue) ([]FloatValue, error) {
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = FloatValue{unixnano: tdec.Read(), value: vdec.Values()}
a[j] = NewRawFloatValue(tdec.Read(), vdec.Values())
j++
}
i = j
@ -453,32 +342,6 @@ func DecodeFloatBlock(block []byte, a *[]FloatValue) ([]FloatValue, error) {
return (*a)[:i], err
}
// BooleanValue represents a boolean value.
type BooleanValue struct {
unixnano int64
value bool
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v BooleanValue) Size() int {
return 9
}
// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.
func (v BooleanValue) UnixNano() int64 {
return v.unixnano
}
// Value returns the underlying boolean value.
func (v BooleanValue) Value() interface{} {
return v.value
}
// String returns the string representation of the value and its timestamp.
func (v BooleanValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func encodeBooleanBlock(buf []byte, values []Value) ([]byte, error) {
if len(values) == 0 {
return nil, nil
@ -505,8 +368,8 @@ func encodeBooleanBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc
for _, v := range values {
vv := v.(BooleanValue)
tenc.Write(vv.unixnano)
venc.Write(vv.value)
tenc.Write(vv.UnixNano())
venc.Write(vv.RawValue())
}
// Encoded timestamp values
@ -560,7 +423,7 @@ func DecodeBooleanBlock(block []byte, a *[]BooleanValue) ([]BooleanValue, error)
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = BooleanValue{unixnano: tdec.Read(), value: vdec.Read()}
a[j] = NewRawBooleanValue(tdec.Read(), vdec.Read())
j++
}
i = j
@ -580,32 +443,6 @@ func DecodeBooleanBlock(block []byte, a *[]BooleanValue) ([]BooleanValue, error)
return (*a)[:i], err
}
// IntegerValue represents an int64 value.
type IntegerValue struct {
unixnano int64
value int64
}
// Value returns the underlying int64 value.
func (v IntegerValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v IntegerValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v IntegerValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v IntegerValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func encodeIntegerBlock(buf []byte, values []Value) ([]byte, error) {
tenc := getTimeEncoder(len(values))
venc := getIntegerEncoder(len(values))
@ -624,8 +461,8 @@ func encodeIntegerBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc
for _, v := range values {
vv := v.(IntegerValue)
tenc.Write(vv.unixnano)
venc.Write(vv.value)
tenc.Write(vv.UnixNano())
venc.Write(vv.RawValue())
}
// Encoded timestamp values
@ -679,7 +516,7 @@ func DecodeIntegerBlock(block []byte, a *[]IntegerValue) ([]IntegerValue, error)
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = IntegerValue{unixnano: tdec.Read(), value: vdec.Read()}
a[j] = NewRawIntegerValue(tdec.Read(), vdec.Read())
j++
}
i = j
@ -699,32 +536,6 @@ func DecodeIntegerBlock(block []byte, a *[]IntegerValue) ([]IntegerValue, error)
return (*a)[:i], err
}
// UnsignedValue represents an int64 value.
type UnsignedValue struct {
unixnano int64
value uint64
}
// Value returns the underlying int64 value.
func (v UnsignedValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v UnsignedValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v UnsignedValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v UnsignedValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func encodeUnsignedBlock(buf []byte, values []Value) ([]byte, error) {
tenc := getTimeEncoder(len(values))
venc := getUnsignedEncoder(len(values))
@ -743,8 +554,8 @@ func encodeUnsignedBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc
for _, v := range values {
vv := v.(UnsignedValue)
tenc.Write(vv.unixnano)
venc.Write(int64(vv.value))
tenc.Write(vv.UnixNano())
venc.Write(int64(vv.RawValue()))
}
// Encoded timestamp values
@ -798,7 +609,7 @@ func DecodeUnsignedBlock(block []byte, a *[]UnsignedValue) ([]UnsignedValue, err
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = UnsignedValue{unixnano: tdec.Read(), value: uint64(vdec.Read())}
a[j] = NewRawUnsignedValue(tdec.Read(), uint64(vdec.Read()))
j++
}
i = j
@ -818,35 +629,9 @@ func DecodeUnsignedBlock(block []byte, a *[]UnsignedValue) ([]UnsignedValue, err
return (*a)[:i], err
}
// StringValue represents a string value.
type StringValue struct {
unixnano int64
value string
}
// Value returns the underlying string value.
func (v StringValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v StringValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v StringValue) Size() int {
return 8 + len(v.value)
}
// String returns the string representation of the value and its timestamp.
func (v StringValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func encodeStringBlock(buf []byte, values []Value) ([]byte, error) {
tenc := getTimeEncoder(len(values))
venc := getStringEncoder(len(values) * len(values[0].(StringValue).value))
venc := getStringEncoder(len(values) * len(values[0].(StringValue).RawValue()))
b, err := encodeStringBlockUsing(buf, values, tenc, venc)
@ -862,8 +647,8 @@ func encodeStringBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc S
for _, v := range values {
vv := v.(StringValue)
tenc.Write(vv.unixnano)
venc.Write(vv.value)
tenc.Write(vv.UnixNano())
venc.Write(vv.RawValue())
}
// Encoded timestamp values
@ -920,7 +705,7 @@ func DecodeStringBlock(block []byte, a *[]StringValue) ([]StringValue, error) {
// Decode both a timestamp and value
j := 0
for j < len(a) && tdec.Next() && vdec.Next() {
a[j] = StringValue{unixnano: tdec.Read(), value: vdec.Read()}
a[j] = NewRawStringValue(tdec.Read(), vdec.Read())
j++
}
i = j

View File

@ -4,11 +4,8 @@ package tsm1 // import "github.com/influxdata/influxdb/tsdb/tsm1"
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
"regexp"
@ -20,7 +17,6 @@ import (
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/bytesutil"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/pkg/metrics"
"github.com/influxdata/influxdb/query"
@ -31,6 +27,7 @@ import (
"go.uber.org/zap"
)
//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@array_cursor.gen.go.tmpldata array_cursor.gen.go.tmpl array_cursor_iterator.gen.go.tmpl
//go:generate env GO111MODULE=on go run github.com/influxdata/influxdb/tools/tmpl -i -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store.gen.go
//go:generate env GO111MODULE=on go run github.com/influxdata/influxdb/tools/tmpl -i -d isArray=y -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store_array.gen.go
//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@encoding.gen.go.tmpldata encoding.gen.go.tmpl
@ -40,7 +37,6 @@ import (
var (
// Static objects to prevent small allocs.
keyFieldSeparatorBytes = []byte(keyFieldSeparator)
emptyBytes = []byte{}
)
var (
@ -66,9 +62,6 @@ const (
// that identifies a specific field in series
keyFieldSeparator = "#!~#"
// deleteFlushThreshold is the size in bytes of a batch of series keys to delete.
deleteFlushThreshold = 50 * 1024 * 1024
// MaxPointsPerBlock is the maximum number of points in an encoded block in a TSM file
MaxPointsPerBlock = 1000
)
@ -77,34 +70,41 @@ const (
// an Engine.
type EngineOption func(i *Engine)
// WithWAL sets the WAL for the Engine
var WithWAL = func(wal Log) EngineOption {
// be defensive: it's very easy to pass in a nil WAL here
// which will panic. Set any nil WALs to the NopWAL.
if pwal, _ := wal.(*WAL); pwal == nil {
wal = NopWAL{}
}
return func(e *Engine) {
e.WAL = wal
}
}
// WithTraceLogging sets if trace logging is enabled for the engine.
var WithTraceLogging = func(logging bool) EngineOption {
func WithTraceLogging(logging bool) EngineOption {
return func(e *Engine) {
e.FileStore.enableTraceLogging(logging)
}
}
// WithCompactionPlanner sets the compaction planner for the engine.
var WithCompactionPlanner = func(planner CompactionPlanner) EngineOption {
func WithCompactionPlanner(planner CompactionPlanner) EngineOption {
return func(e *Engine) {
planner.SetFileStore(e.FileStore)
e.CompactionPlan = planner
}
}
// Snapshotter allows upward signaling of the tsm1 engine to the storage engine. Hopefully
// it can be removed one day. The weird interface is due to the weird inversion of locking
// that has to happen.
type Snapshotter interface {
AcquireSegments(func(segments []string) error) error
CommitSegments(segments []string, fn func() error) error
}
type noSnapshotter struct{}
func (noSnapshotter) AcquireSegments(fn func([]string) error) error { return fn(nil) }
func (noSnapshotter) CommitSegments(_ []string, fn func() error) error { return fn() }
// WithSnapshotter sets the callbacks for the engine to use when creating snapshots.
func WithSnapshotter(snapshotter Snapshotter) EngineOption {
return func(e *Engine) {
e.snapshotter = snapshotter
}
}
// Engine represents a storage engine with compressed blocks.
type Engine struct {
mu sync.RWMutex
@ -132,7 +132,6 @@ type Engine struct {
traceLogger *zap.Logger // Logger to be used when trace-logging is on.
traceLogging bool
WAL Log
Cache *Cache
Compactor *Compactor
CompactionPlan CompactionPlanner
@ -161,7 +160,8 @@ type Engine struct {
// Limiter for concurrent compactions.
compactionLimiter limiter.Fixed
scheduler *scheduler
scheduler *scheduler
snapshotter Snapshotter
}
// NewEngine returns a new instance of Engine.
@ -207,7 +207,6 @@ func NewEngine(path string, idx *tsi1.Index, config Config, options ...EngineOpt
logger: logger,
traceLogger: logger,
WAL: NopWAL{},
Cache: cache,
FileStore: fs,
@ -221,6 +220,7 @@ func NewEngine(path string, idx *tsi1.Index, config Config, options ...EngineOpt
formatFileName: DefaultFormatFileName,
compactionLimiter: limiter.NewFixed(maxCompactions),
scheduler: newScheduler(maxCompactions),
snapshotter: new(noSnapshotter),
}
for _, option := range options {
@ -477,27 +477,11 @@ func (e *Engine) SeriesN() int64 {
return e.index.SeriesN()
}
// LastModified returns the time when this shard was last modified.
func (e *Engine) LastModified() time.Time {
fsTime := e.FileStore.LastModified()
if e.WAL.LastWriteTime().After(fsTime) {
return e.WAL.LastWriteTime()
}
return fsTime
}
// MeasurementStats returns the current measurement stats for the engine.
func (e *Engine) MeasurementStats() (MeasurementStats, error) {
return e.FileStore.MeasurementStats()
}
// DiskSize returns the total size in bytes of all TSM and WAL segments on disk.
func (e *Engine) DiskSize() int64 {
walDiskSizeBytes := e.WAL.DiskSizeBytes()
return e.FileStore.DiskSizeBytes() + walDiskSizeBytes
}
func (e *Engine) initTrackers() {
mmu.Lock()
defer mmu.Unlock()
@ -512,10 +496,6 @@ func (e *Engine) initTrackers() {
e.FileStore.tracker = newFileTracker(bms.fileMetrics, e.defaultMetricLabels)
e.Cache.tracker = newCacheTracker(bms.cacheMetrics, e.defaultMetricLabels)
// Set default metrics on WAL if enabled.
if wal, ok := e.WAL.(*WAL); ok {
wal.tracker = newWALTracker(bms.walMetrics, e.defaultMetricLabels)
}
e.scheduler.setCompactionTracker(e.compactionTracker)
}
@ -531,18 +511,10 @@ func (e *Engine) Open() error {
return err
}
if err := e.WAL.Open(); err != nil {
return err
}
if err := e.FileStore.Open(); err != nil {
return err
}
if err := e.reloadCache(); err != nil {
return err
}
e.Compactor.Open()
if e.enableCompactionsOnOpen {
@ -564,7 +536,8 @@ func (e *Engine) Close() error {
if err := e.FileStore.Close(); err != nil {
return err
}
return e.WAL.Close()
return nil
}
// WithLogger sets the logger for the engine.
@ -575,10 +548,6 @@ func (e *Engine) WithLogger(log *zap.Logger) {
e.traceLogger = e.logger
}
if wal, ok := e.WAL.(*WAL); ok {
wal.WithLogger(e.logger)
}
e.FileStore.WithLogger(e.logger)
}
@ -595,435 +564,33 @@ func (e *Engine) Free() error {
return e.FileStore.Free()
}
// WritePoints writes metadata and point data into the engine.
// It returns an error if new points are added to an existing key.
// WritePoints saves the set of points in the engine.
func (e *Engine) WritePoints(points []models.Point) error {
values := make(map[string][]Value, len(points))
var (
keyBuf []byte
baseLen int
)
for _, p := range points {
keyBuf = append(keyBuf[:0], p.Key()...)
keyBuf = append(keyBuf, keyFieldSeparator...)
baseLen = len(keyBuf)
iter := p.FieldIterator()
t := p.Time().UnixNano()
for iter.Next() {
keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...)
var v Value
switch iter.Type() {
case models.Float:
fv, err := iter.FloatValue()
if err != nil {
return err
}
v = NewFloatValue(t, fv)
case models.Integer:
iv, err := iter.IntegerValue()
if err != nil {
return err
}
v = NewIntegerValue(t, iv)
case models.Unsigned:
iv, err := iter.UnsignedValue()
if err != nil {
return err
}
v = NewUnsignedValue(t, iv)
case models.String:
v = NewStringValue(t, iter.StringValue())
case models.Boolean:
bv, err := iter.BooleanValue()
if err != nil {
return err
}
v = NewBooleanValue(t, bv)
default:
return fmt.Errorf("unknown field type for %s: %s", string(iter.FieldKey()), p.String())
}
values[string(keyBuf)] = append(values[string(keyBuf)], v)
}
values, err := PointsToValues(points)
if err != nil {
return err
}
return e.WriteValues(values)
}
// WriteValues saves the set of values in the engine.
func (e *Engine) WriteValues(values map[string][]Value) error {
e.mu.RLock()
defer e.mu.RUnlock()
// first try to write to the cache
if err := e.Cache.WriteMulti(values); err != nil {
return err
}
// Then make the write durable in the WAL.
if _, err := e.WAL.WriteMulti(values); err != nil {
return err
}
return nil
}
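In practice the two entry points are equivalent: WritePoints converts once and delegates to WriteValues. A sketch, assuming models.ParsePointsString from the influxdb models package:
pts, err := models.ParsePointsString("cpu,host=A value=1.1 1000000000")
if err != nil {
    return err
}
// Either write the points directly...
if err := e.WritePoints(pts); err != nil {
    return err
}
// ...or convert them first and write the values.
values, err := PointsToValues(pts)
if err != nil {
    return err
}
if err := e.WriteValues(values); err != nil {
    return err
}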
// DeleteSeriesRange removes the values between min and max (inclusive) from all series
func (e *Engine) DeleteSeriesRange(itr tsdb.SeriesIterator, min, max int64) error {
return e.DeleteSeriesRangeWithPredicate(itr, func(name []byte, tags models.Tags) (int64, int64, bool) {
return min, max, true
})
}
// DeleteSeriesRangeWithPredicate removes the values between min and max (inclusive) from all series
// for which predicate() returns true. If predicate() is nil, then all values in range are removed.
func (e *Engine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, predicate func(name []byte, tags models.Tags) (int64, int64, bool)) error {
var disableOnce bool
// Ensure that the index does not compact away the measurement or series we're
// going to delete before we're done with them.
e.index.DisableCompactions()
defer e.index.EnableCompactions()
e.index.Wait()
fs, err := e.index.RetainFileSet()
if err != nil {
return err
}
defer fs.Release()
var (
sz int
min, max int64 = math.MinInt64, math.MaxInt64
// Indicator that the min/max time for the current batch has changed and
// we need to flush the current batch before appending to it.
flushBatch bool
)
// These are reversed from min/max to ensure they are different the first time through.
newMin, newMax := int64(math.MaxInt64), int64(math.MinInt64)
// There is no predicate, so set up newMin/newMax to delete the full time range.
if predicate == nil {
newMin = min
newMax = max
}
batch := make([][]byte, 0, 10000)
for {
elem, err := itr.Next()
if err != nil {
return err
} else if elem == nil {
break
}
// See if the series should be deleted and if so, what range of time.
if predicate != nil {
var shouldDelete bool
newMin, newMax, shouldDelete = predicate(elem.Name(), elem.Tags())
if !shouldDelete {
continue
}
// If the min/max happens to change for the batch, we need to flush
// the current batch and start a new one.
flushBatch = (min != newMin || max != newMax) && len(batch) > 0
}
if elem.Expr() != nil {
if v, ok := elem.Expr().(*influxql.BooleanLiteral); !ok || !v.Val {
return errors.New("fields not supported in WHERE clause during deletion")
}
}
if !disableOnce {
// Disable and abort running compactions so that tombstones added to existing TSM
// files don't get removed. This would cause deleted measurements/series to
// re-appear once the compaction completed. We only disable the level compactions
// so that snapshotting does not stop while writing out tombstones. If it is stopped,
// and writing tombstones takes a long time, writes can get rejected due to the cache
// filling up.
e.disableLevelCompactions(true)
defer e.enableLevelCompactions(true)
e.sfile.DisableCompactions()
defer e.sfile.EnableCompactions()
e.sfile.Wait()
disableOnce = true
}
if sz >= deleteFlushThreshold || flushBatch {
// Delete all matching batch.
if err := e.deleteSeriesRange(batch, min, max); err != nil {
return err
}
batch = batch[:0]
sz = 0
flushBatch = false
}
// Use the new min/max time for the next iteration
min = newMin
max = newMax
key := models.MakeKey(elem.Name(), elem.Tags())
sz += len(key)
batch = append(batch, key)
}
if len(batch) > 0 {
// Delete all matching batch.
if err := e.deleteSeriesRange(batch, min, max); err != nil {
return err
}
}
e.index.Rebuild()
return nil
}
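For illustration, a predicate that restricts deletion to one tag value over a bounded window might look like the following sketch (itr is a caller-supplied tsdb.SeriesIterator):
predicate := func(name []byte, tags models.Tags) (int64, int64, bool) {
    // Only delete host=B series, and only the first ten nanoseconds.
    if bytes.Equal(tags.Get([]byte("host")), []byte("B")) {
        return 0, 10, true
    }
    return 0, 0, false // skip everything else
}
if err := e.DeleteSeriesRangeWithPredicate(itr, predicate); err != nil {
    return err
}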
// deleteSeriesRange removes the values between min and max (inclusive) from all series. This
// does not update the index or disable compactions. This should mainly be called by DeleteSeriesRange
// and not directly.
func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
if len(seriesKeys) == 0 {
return nil
}
// Ensure keys are sorted since lower layers require them to be.
if !bytesutil.IsSorted(seriesKeys) {
bytesutil.Sort(seriesKeys)
}
// Min and max time in the engine are slightly different from the query language values.
if min == influxql.MinTime {
min = math.MinInt64
}
if max == influxql.MaxTime {
max = math.MaxInt64
}
// Run the delete on each TSM file in parallel
if err := e.FileStore.Apply(func(r TSMFile) error {
// See if this TSM file contains the keys and time range
minKey, maxKey := seriesKeys[0], seriesKeys[len(seriesKeys)-1]
tsmMin, tsmMax := r.KeyRange()
tsmMin, _ = SeriesAndFieldFromCompositeKey(tsmMin)
tsmMax, _ = SeriesAndFieldFromCompositeKey(tsmMax)
overlaps := bytes.Compare(tsmMin, maxKey) <= 0 && bytes.Compare(tsmMax, minKey) >= 0
if !overlaps || !r.OverlapsTimeRange(min, max) {
return nil
}
// Delete each key we find in the file. We seek to the min key and walk from there.
batch := r.BatchDelete()
iter := r.Iterator(minKey)
var j int
for iter.Next() {
indexKey := iter.Key()
seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey)
for j < len(seriesKeys) && bytes.Compare(seriesKeys[j], seriesKey) < 0 {
j++
}
if j >= len(seriesKeys) {
break
}
if bytes.Equal(seriesKeys[j], seriesKey) {
if err := batch.DeleteRange([][]byte{indexKey}, min, max); err != nil {
batch.Rollback()
return err
}
}
}
if err := iter.Err(); err != nil {
batch.Rollback()
return err
}
return batch.Commit()
}); err != nil {
return err
}
// find the keys in the cache and remove them
deleteKeys := make([][]byte, 0, len(seriesKeys))
// ApplyEntryFn cannot return an error in this invocation.
_ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))
// Cache does not walk keys in sorted order, so search the sorted
// series we need to delete to see if any of the cache keys match.
i := bytesutil.SearchBytes(seriesKeys, seriesKey)
if i < len(seriesKeys) && bytes.Equal(seriesKey, seriesKeys[i]) {
// k is the measurement + tags + sep + field
deleteKeys = append(deleteKeys, k)
}
return nil
})
// Sort the series keys because ApplyEntryFn iterates over the keys randomly.
bytesutil.Sort(deleteKeys)
e.Cache.DeleteRange(deleteKeys, min, max)
// delete from the WAL
if _, err := e.WAL.DeleteRange(deleteKeys, min, max); err != nil {
return err
}
// The series are deleted on disk, but the index may still say they exist.
// Depending on the min/max time passed in, the series may or may not actually
// exist now. To reconcile the index, we walk the series keys that still exist
// on disk and cross out any keys that match the passed-in series. Any series
// left in the slice at the end do not exist and can be deleted from the index.
// Note: this is inherently racy if writes occur to the same measurement/series
// while they are being removed. A write could occur and exist in the cache at
// this point, but we would delete it from the index.
minKey := seriesKeys[0]
// Apply runs this func concurrently. The seriesKeys slice is mutated concurrently
// by different goroutines setting positions to nil.
if err := e.FileStore.Apply(func(r TSMFile) error {
var j int
// Start from the min deleted key that exists in this file.
iter := r.Iterator(minKey)
for iter.Next() {
if j >= len(seriesKeys) {
return nil
}
indexKey := iter.Key()
seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey)
// Skip over any deleted keys that are less than our tsm key
cmp := bytes.Compare(seriesKeys[j], seriesKey)
for j < len(seriesKeys) && cmp < 0 {
j++
if j >= len(seriesKeys) {
return nil
}
cmp = bytes.Compare(seriesKeys[j], seriesKey)
}
// We've found a matching key, cross it out so we do not remove it from the index.
if j < len(seriesKeys) && cmp == 0 {
seriesKeys[j] = emptyBytes
j++
}
}
return iter.Err()
}); err != nil {
return err
}
// Have we deleted all values for the series? If so, we need to remove
// the series from the index.
if len(seriesKeys) > 0 {
buf := make([]byte, 1024) // For use when accessing series file.
ids := tsdb.NewSeriesIDSet()
measurements := make(map[string]struct{}, 1)
for _, k := range seriesKeys {
if len(k) == 0 {
continue // This key was wiped because it shouldn't be removed from index.
}
name, tags := models.ParseKeyBytes(k)
sid := e.sfile.SeriesID(name, tags, buf)
if sid.IsZero() {
continue
}
// See if this series was found in the cache earlier
i := bytesutil.SearchBytes(deleteKeys, k)
var hasCacheValues bool
// If there are multiple fields, they will have the same prefix. If any field
// has values, then we can't delete it from the index.
for i < len(deleteKeys) && bytes.HasPrefix(deleteKeys[i], k) {
if e.Cache.Values(deleteKeys[i]).Len() > 0 {
hasCacheValues = true
break
}
i++
}
if hasCacheValues {
continue
}
measurements[string(name)] = struct{}{}
// Remove the series from the local index.
if err := e.index.DropSeries(sid, k, false); err != nil {
return err
}
// Add the id to the set of delete ids.
ids.Add(sid)
}
for k := range measurements {
if err := e.index.DropMeasurementIfSeriesNotExist([]byte(k)); err != nil {
return err
}
}
// Remove the remaining ids from the series file as they no longer exist
// in any shard.
var err error
ids.ForEach(func(id tsdb.SeriesID) {
if err1 := e.sfile.DeleteSeriesID(id); err1 != nil {
err = err1
}
})
if err != nil {
return err
}
}
return nil
}
// DeleteMeasurement deletes a measurement and all related series.
func (e *Engine) DeleteMeasurement(name []byte) error {
// Delete the bulk of data outside of the fields lock.
if err := e.deleteMeasurement(name); err != nil {
return err
}
return nil
}
// deleteMeasurement deletes a measurement and all related series.
func (e *Engine) deleteMeasurement(name []byte) error {
// Attempt to find the series keys.
itr, err := e.index.MeasurementSeriesIDIterator(name)
if err != nil {
return err
} else if itr == nil {
return nil
}
defer itr.Close()
return e.DeleteSeriesRange(tsdb.NewSeriesIteratorAdapter(e.sfile, itr), math.MinInt64, math.MaxInt64)
}
// ForEachMeasurementName iterates over each measurement name in the engine.
func (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error {
return e.index.ForEachMeasurementName(fn)
}
func (e *Engine) CreateSeriesListIfNotExists(collection *tsdb.SeriesCollection) error {
return e.index.CreateSeriesListIfNotExists(collection)
}
// WriteTo is not implemented.
func (e *Engine) WriteTo(w io.Writer) (n int64, err error) { panic("not implemented") }
// compactionLevel describes a snapshot or levelled compaction.
type compactionLevel int
@ -1194,24 +761,18 @@ func (e *Engine) WriteSnapshot() error {
logEnd()
}()
closedFiles, snapshot, err := func() (segments []string, snapshot *Cache, err error) {
var (
snapshot *Cache
segments []string
)
if err := e.snapshotter.AcquireSegments(func(segs []string) (err error) {
segments = segs
e.mu.Lock()
defer e.mu.Unlock()
if err = e.WAL.CloseSegment(); err != nil {
return nil, nil, err
}
segments, err = e.WAL.ClosedSegments()
if err != nil {
return nil, nil, err
}
snapshot, err = e.Cache.Snapshot()
return segments, snapshot, err
}()
if err != nil {
e.mu.Unlock()
return err
}); err != nil {
return err
}
@ -1229,11 +790,11 @@ func (e *Engine) WriteSnapshot() error {
zap.String("path", e.path),
zap.Duration("duration", time.Since(dedup)))
return e.writeSnapshotAndCommit(log, closedFiles, snapshot)
return e.writeSnapshotAndCommit(log, snapshot, segments)
}
// writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments.
func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, closedFiles []string, snapshot *Cache) (err error) {
func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, snapshot *Cache, segments []string) (err error) {
defer func() {
if err != nil {
e.Cache.ClearSnapshot(false)
@ -1247,23 +808,20 @@ func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, closedFiles []string, s
return err
}
e.mu.RLock()
defer e.mu.RUnlock()
return e.snapshotter.CommitSegments(segments, func() error {
e.mu.RLock()
defer e.mu.RUnlock()
// update the file store with these new files
if err := e.FileStore.Replace(nil, newFiles); err != nil {
log.Info("Error adding new TSM files from snapshot", zap.Error(err))
return err
}
// update the file store with these new files
if err := e.FileStore.Replace(nil, newFiles); err != nil {
log.Info("Error adding new TSM files from snapshot", zap.Error(err))
return err
}
// clear the snapshot from the in-memory cache, then the old WAL files
e.Cache.ClearSnapshot(true)
if err := e.WAL.Remove(closedFiles); err != nil {
log.Info("Error removing closed WAL segments", zap.Error(err))
}
return nil
// clear the snapshot from the in-memory cache
e.Cache.ClearSnapshot(true)
return nil
})
}
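For context, the default noSnapshotter above makes both callbacks no-ops. A WAL-backed implementation would roughly follow this sketch (an assumption: the storage engine now owns the WAL and its CloseSegment/ClosedSegments/Remove methods, all referenced elsewhere in this diff):
type walSnapshotter struct {
    wal *WAL
}

func (s *walSnapshotter) AcquireSegments(fn func([]string) error) error {
    // Roll the active segment so its contents are covered by the snapshot.
    if err := s.wal.CloseSegment(); err != nil {
        return err
    }
    segments, err := s.wal.ClosedSegments()
    if err != nil {
        return err
    }
    return fn(segments)
}

func (s *walSnapshotter) CommitSegments(segments []string, fn func() error) error {
    // Only remove the segments once the snapshot has been committed.
    if err := fn(); err != nil {
        return err
    }
    return s.wal.Remove(segments)
}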
// compactCache continually checks if the WAL cache should be written to disk.
@ -1568,32 +1126,6 @@ func (e *Engine) fullCompactionStrategy(group CompactionGroup, optimize bool) *c
return s
}
// reloadCache reads the WAL segment files and loads them into the cache.
func (e *Engine) reloadCache() error {
now := time.Now()
files, err := segmentFileNames(e.WAL.Path())
if err != nil {
return err
}
limit := e.Cache.MaxSize()
defer func() {
e.Cache.SetMaxSize(limit)
}()
// Disable the max size during loading
e.Cache.SetMaxSize(0)
loader := NewCacheLoader(files)
loader.WithLogger(e.logger)
if err := loader.Load(e.Cache); err != nil {
return err
}
e.traceLogger.Info("Reloaded WAL cache", zap.String("path", e.WAL.Path()), zap.Duration("duration", time.Since(now)))
return nil
}
// cleanup removes all temp files and dirs that exist on disk. This is should only be run at startup to avoid
// removing tmp files that are still in use.
func (e *Engine) cleanup() error {

View File

@ -11,10 +11,10 @@ import (
"github.com/influxdata/influxql"
)
// DeleteBucket removes all TSM data belonging to a bucket, and removes all index
// DeleteBucketRange removes all TSM data belonging to a bucket, and removes all index
// and series file data associated with the bucket. The provided time range ensures
// that only bucket data for that range is removed.
func (e *Engine) DeleteBucket(name []byte, min, max int64) error {
func (e *Engine) DeleteBucketRange(name []byte, min, max int64) error {
// TODO(jeff): we need to block writes to this prefix while deletes are in progress
// otherwise we can end up in a situation where we have staged data in the cache or
// WAL that was deleted from the index, or worse. This needs to happen at a higher
@ -76,9 +76,6 @@ func (e *Engine) DeleteBucket(name []byte, min, max int64) error {
return err
}
// TODO(jeff): add a DeletePrefix to the Cache and WAL.
// TODO(jeff): add a Tombstone entry into the WAL for deletes.
var deleteKeys [][]byte
// ApplyEntryFn cannot return an error in this invocation.
@ -99,11 +96,8 @@ func (e *Engine) DeleteBucket(name []byte, min, max int64) error {
// Sort the series keys because ApplyEntryFn iterates over the keys randomly.
bytesutil.Sort(deleteKeys)
// Delete from the cache and WAL.
e.Cache.DeleteRange(deleteKeys, min, max)
if _, err := e.WAL.DeleteRange(deleteKeys, min, max); err != nil {
return err
}
// Delete from the cache.
e.Cache.DeleteBucketRange(name, min, max)
// Now that all of the data is purged, we need to find if some keys are fully deleted
// and if so, remove them from the index.
@ -138,6 +132,14 @@ func (e *Engine) DeleteBucket(name []byte, min, max int64) error {
return err
}
// ApplyEntryFn cannot return an error in this invocation.
_ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
if bytes.HasPrefix(k, name) {
delete(possiblyDead.keys, string(k))
}
return nil
})
if len(possiblyDead.keys) > 0 {
buf := make([]byte, 1024)

View File

@ -8,7 +8,7 @@ import (
"github.com/influxdata/influxdb/models"
)
func TestEngine_DeletePrefix(t *testing.T) {
func TestEngine_DeleteBucket(t *testing.T) {
// Create a few points.
p1 := MustParsePointString("cpu,host=0 value=1.1 6")
p2 := MustParsePointString("cpu,host=A value=1.2 2")
@ -44,7 +44,7 @@ func TestEngine_DeletePrefix(t *testing.T) {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
if err := e.DeleteBucket([]byte("cpu"), 0, 3); err != nil {
if err := e.DeleteBucketRange([]byte("cpu"), 0, 3); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
@ -90,7 +90,7 @@ func TestEngine_DeletePrefix(t *testing.T) {
iter.Close()
// Deleting remaining series should remove them from the series.
if err := e.DeleteBucket([]byte("cpu"), 0, 9); err != nil {
if err := e.DeleteBucketRange([]byte("cpu"), 0, 9); err != nil {
t.Fatalf("failed to delete series: %v", err)
}

View File

@ -1,7 +1,6 @@
package tsm1_test
import (
"bytes"
"fmt"
"io/ioutil"
"math"
@ -59,8 +58,9 @@ func TestIndex_SeriesIDSet(t *testing.T) {
}
// Drop all the series for the gpu measurement and they should no longer
// be in the series ID set.
if err := engine.DeleteMeasurement([]byte("gpu")); err != nil {
// be in the series ID set. This relies on the fact that DeleteBucketRange is really
// operating on prefixes.
if err := engine.DeleteBucketRange([]byte("gpu"), math.MinInt64, math.MaxInt64); err != nil {
t.Fatal(err)
}
@ -72,17 +72,6 @@ func TestIndex_SeriesIDSet(t *testing.T) {
delete(seriesIDMap, "gpu")
delete(seriesIDMap, "gpu,host=b")
// Drop the specific mem series
ditr := &seriesIterator{keys: [][]byte{[]byte("mem,host=z")}}
if err := engine.DeleteSeriesRange(ditr, math.MinInt64, math.MaxInt64); err != nil {
t.Fatal(err)
}
if engine.SeriesIDSet().Contains(seriesIDMap["mem,host=z"]) {
t.Fatalf("bitmap does not contain ID: %d for key %s, but should", seriesIDMap["mem,host=z"], "mem,host=z")
}
delete(seriesIDMap, "mem,host=z")
// The rest of the keys should still be in the set.
for key, id := range seriesIDMap {
if !engine.SeriesIDSet().Contains(id) {
@ -106,589 +95,6 @@ func TestIndex_SeriesIDSet(t *testing.T) {
}
}
// Ensures that deleting series from TSM files with multiple fields removes all the
// series.
func TestEngine_DeleteSeries(t *testing.T) {
// Create a few points.
p1 := MustParsePointString("cpu,host=A value=1.1 1000000000")
p2 := MustParsePointString("cpu,host=B value=1.2 2000000000")
p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000")
e, err := NewEngine()
if err != nil {
t.Fatal(err)
}
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
if err := e.Open(); err != nil {
t.Fatal(err)
}
defer e.Close()
if err := e.writePoints(p1, p2, p3); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.WriteSnapshot(); err != nil {
t.Fatalf("failed to snapshot: %s", err.Error())
}
keys := e.FileStore.Keys()
if exp, got := 3, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}}
if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
keys = e.FileStore.Keys()
if exp, got := 1, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
exp := "cpu,host=B#!~#value"
if _, ok := keys[exp]; !ok {
t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys)
}
}
func TestEngine_DeleteSeriesRange(t *testing.T) {
// Create a few points.
p1 := MustParsePointString("cpu,host=0 value=1.1 6000000000") // Should not be deleted
p2 := MustParsePointString("cpu,host=A value=1.2 2000000000")
p3 := MustParsePointString("cpu,host=A value=1.3 3000000000")
p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") // Should not be deleted
p5 := MustParsePointString("cpu,host=B value=1.3 5000000000") // Should not be deleted
p6 := MustParsePointString("cpu,host=C value=1.3 1000000000")
p7 := MustParsePointString("mem,host=C value=1.3 1000000000") // Should not be deleted
p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted
e, err := NewEngine()
if err != nil {
t.Fatal(err)
}
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
if err := e.Open(); err != nil {
t.Fatal(err)
}
defer e.Close()
if err := e.writePoints(p1, p2, p3, p4, p5, p6, p7, p8); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.WriteSnapshot(); err != nil {
t.Fatalf("failed to snapshot: %s", err.Error())
}
keys := e.FileStore.Keys()
if exp, got := 6, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=0"), []byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C")}}
if err := e.DeleteSeriesRange(itr, 0, 3000000000); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
keys = e.FileStore.Keys()
if exp, got := 4, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
exp := "cpu,host=B#!~#value"
if _, ok := keys[exp]; !ok {
t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys)
}
// Check that the series still exists in the index
iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu"))
if err != nil {
t.Fatalf("iterator error: %v", err)
}
defer iter.Close()
elem, err := iter.Next()
if err != nil {
t.Fatal(err)
}
if elem.SeriesID.IsZero() {
t.Fatalf("series index mismatch: EOF, exp 2 series")
}
// Lookup series.
name, tags := e.sfile.Series(elem.SeriesID)
if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) {
t.Fatalf("series mismatch: got %s, exp %s", got, exp)
}
if !tags.Equal(models.NewTags(map[string]string{"host": "0"})) && !tags.Equal(models.NewTags(map[string]string{"host": "B"})) {
t.Fatalf(`series mismatch: got %s, exp either "host=0" or "host=B"`, tags)
}
iter.Close()
// Deleting remaining series should remove them from the series.
itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=0"), []byte("cpu,host=B")}}
if err := e.DeleteSeriesRange(itr, 0, 9000000000); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
if iter, err = e.index.MeasurementSeriesIDIterator([]byte("cpu")); err != nil {
t.Fatalf("iterator error: %v", err)
}
if iter == nil {
return
}
defer iter.Close()
if elem, err = iter.Next(); err != nil {
t.Fatal(err)
}
if !elem.SeriesID.IsZero() {
t.Fatalf("got an undeleted series id, but series should be dropped from index")
}
}
func TestEngine_DeleteSeriesRangeWithPredicate(t *testing.T) {
// Create a few points.
p1 := MustParsePointString("cpu,host=A value=1.1 6000000000") // Should not be deleted
p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") // Should not be deleted
p3 := MustParsePointString("cpu,host=B value=1.3 3000000000")
p4 := MustParsePointString("cpu,host=B value=1.3 4000000000")
p5 := MustParsePointString("cpu,host=C value=1.3 5000000000") // Should not be deleted
p6 := MustParsePointString("mem,host=B value=1.3 1000000000")
p7 := MustParsePointString("mem,host=C value=1.3 1000000000")
p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted
e, err := NewEngine()
if err != nil {
t.Fatal(err)
}
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
if err := e.Open(); err != nil {
t.Fatal(err)
}
defer e.Close()
if err := e.writePoints(p1, p2, p3, p4, p5, p6, p7, p8); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.WriteSnapshot(); err != nil {
t.Fatalf("failed to snapshot: %s", err.Error())
}
keys := e.FileStore.Keys()
if exp, got := 6, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C"), []byte("mem,host=B"), []byte("mem,host=C")}}
predicate := func(name []byte, tags models.Tags) (int64, int64, bool) {
if bytes.Equal(name, []byte("mem")) {
return math.MinInt64, math.MaxInt64, true
}
if bytes.Equal(name, []byte("cpu")) {
for _, tag := range tags {
if bytes.Equal(tag.Key, []byte("host")) && bytes.Equal(tag.Value, []byte("B")) {
return math.MinInt64, math.MaxInt64, true
}
}
}
return math.MinInt64, math.MaxInt64, false
}
if err := e.DeleteSeriesRangeWithPredicate(itr, predicate); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
keys = e.FileStore.Keys()
if exp, got := 3, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
exps := []string{"cpu,host=A#!~#value", "cpu,host=C#!~#value", "disk,host=C#!~#value"}
for _, exp := range exps {
if _, ok := keys[exp]; !ok {
t.Fatalf("wrong series deleted: exp %v, got %v", exps, keys)
}
}
// Check that the series still exists in the index
iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu"))
if err != nil {
t.Fatalf("iterator error: %v", err)
}
defer iter.Close()
elem, err := iter.Next()
if err != nil {
t.Fatal(err)
}
if elem.SeriesID.IsZero() {
t.Fatalf("series index mismatch: EOF, exp 2 series")
}
// Lookup series.
name, tags := e.sfile.Series(elem.SeriesID)
if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) {
t.Fatalf("series mismatch: got %s, exp %s", got, exp)
}
if !tags.Equal(models.NewTags(map[string]string{"host": "A"})) && !tags.Equal(models.NewTags(map[string]string{"host": "C"})) {
t.Fatalf(`series mismatch: got %s, exp either "host=A" or "host=C"`, tags)
}
iter.Close()
// Deleting remaining series should remove them from the series.
itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=C")}}
if err := e.DeleteSeriesRange(itr, 0, 9000000000); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
if iter, err = e.index.MeasurementSeriesIDIterator([]byte("cpu")); err != nil {
t.Fatalf("iterator error: %v", err)
}
if iter == nil {
return
}
defer iter.Close()
if elem, err = iter.Next(); err != nil {
t.Fatal(err)
}
if !elem.SeriesID.IsZero() {
t.Fatalf("got an undeleted series id, but series should be dropped from index")
}
}
// Tests that a nil predicate deletes all values returned from the series iterator.
func TestEngine_DeleteSeriesRangeWithPredicate_Nil(t *testing.T) {
// Create a few points.
p1 := MustParsePointString("cpu,host=A value=1.1 6000000000") // Should not be deleted
p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") // Should not be deleted
p3 := MustParsePointString("cpu,host=B value=1.3 3000000000")
p4 := MustParsePointString("cpu,host=B value=1.3 4000000000")
p5 := MustParsePointString("cpu,host=C value=1.3 5000000000") // Should not be deleted
p6 := MustParsePointString("mem,host=B value=1.3 1000000000")
p7 := MustParsePointString("mem,host=C value=1.3 1000000000")
p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted
e, err := NewEngine()
if err != nil {
t.Fatal(err)
}
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
if err := e.Open(); err != nil {
t.Fatal(err)
}
defer e.Close()
if err := e.writePoints(p1, p2, p3, p4, p5, p6, p7, p8); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.WriteSnapshot(); err != nil {
t.Fatalf("failed to snapshot: %s", err.Error())
}
keys := e.FileStore.Keys()
if exp, got := 6, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C"), []byte("mem,host=B"), []byte("mem,host=C")}}
if err := e.DeleteSeriesRangeWithPredicate(itr, nil); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
keys = e.FileStore.Keys()
if exp, got := 1, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
// Check that the series still exists in the index
iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu"))
if err != nil {
t.Fatalf("iterator error: %v", err)
} else if iter == nil {
return
}
defer iter.Close()
if elem, err := iter.Next(); err != nil {
t.Fatal(err)
} else if !elem.SeriesID.IsZero() {
t.Fatalf("got an undeleted series id, but series should be dropped from index")
}
// Check that disk series still exists
iter, err = e.index.MeasurementSeriesIDIterator([]byte("disk"))
if err != nil {
t.Fatalf("iterator error: %v", err)
} else if iter == nil {
return
}
defer iter.Close()
if elem, err := iter.Next(); err != nil {
t.Fatal(err)
} else if elem.SeriesID.IsZero() {
t.Fatalf("got an undeleted series id, but series should be dropped from index")
}
}
func TestEngine_DeleteSeriesRangeWithPredicate_FlushBatch(t *testing.T) {
// Create a few points.
p1 := MustParsePointString("cpu,host=A value=1.1 6000000000") // Should not be deleted
p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") // Should not be deleted
p3 := MustParsePointString("cpu,host=B value=1.3 3000000000")
p4 := MustParsePointString("cpu,host=B value=1.3 4000000000")
p5 := MustParsePointString("cpu,host=C value=1.3 5000000000") // Should not be deleted
p6 := MustParsePointString("mem,host=B value=1.3 1000000000")
p7 := MustParsePointString("mem,host=C value=1.3 1000000000")
p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted
e, err := NewEngine()
if err != nil {
t.Fatal(err)
}
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
if err := e.Open(); err != nil {
t.Fatal(err)
}
defer e.Close()
if err := e.writePoints(p1, p2, p3, p4, p5, p6, p7, p8); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.WriteSnapshot(); err != nil {
t.Fatalf("failed to snapshot: %s", err.Error())
}
keys := e.FileStore.Keys()
if exp, got := 6, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C"), []byte("mem,host=B"), []byte("mem,host=C")}}
predicate := func(name []byte, tags models.Tags) (int64, int64, bool) {
if bytes.Equal(name, []byte("mem")) {
return 1000000000, 1000000000, true
}
if bytes.Equal(name, []byte("cpu")) {
for _, tag := range tags {
if bytes.Equal(tag.Key, []byte("host")) && bytes.Equal(tag.Value, []byte("B")) {
return 3000000000, 4000000000, true
}
}
}
return math.MinInt64, math.MaxInt64, false
}
if err := e.DeleteSeriesRangeWithPredicate(itr, predicate); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
keys = e.FileStore.Keys()
if exp, got := 3, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
exps := []string{"cpu,host=A#!~#value", "cpu,host=C#!~#value", "disk,host=C#!~#value"}
for _, exp := range exps {
if _, ok := keys[exp]; !ok {
t.Fatalf("wrong series deleted: exp %v, got %v", exps, keys)
}
}
// Check that the series still exists in the index
iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu"))
if err != nil {
t.Fatalf("iterator error: %v", err)
}
defer iter.Close()
elem, err := iter.Next()
if err != nil {
t.Fatal(err)
}
if elem.SeriesID.IsZero() {
t.Fatalf("series index mismatch: EOF, exp 2 series")
}
// Lookup series.
name, tags := e.sfile.Series(elem.SeriesID)
if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) {
t.Fatalf("series mismatch: got %s, exp %s", got, exp)
}
if !tags.Equal(models.NewTags(map[string]string{"host": "A"})) && !tags.Equal(models.NewTags(map[string]string{"host": "C"})) {
t.Fatalf(`series mismatch: got %s, exp either "host=A" or "host=C"`, tags)
}
iter.Close()
// Deleting remaining series should remove them from the series.
itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=C")}}
if err := e.DeleteSeriesRange(itr, 0, 9000000000); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
if iter, err = e.index.MeasurementSeriesIDIterator([]byte("cpu")); err != nil {
t.Fatalf("iterator error: %v", err)
}
if iter == nil {
return
}
defer iter.Close()
if elem, err = iter.Next(); err != nil {
t.Fatal(err)
}
if !elem.SeriesID.IsZero() {
t.Fatalf("got an undeleted series id, but series should be dropped from index")
}
}
func TestEngine_DeleteSeriesRange_OutsideTime(t *testing.T) {
// Create a few points.
p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") // Should not be deleted
e, err := NewEngine()
if err != nil {
t.Fatal(err)
}
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
if err := e.Open(); err != nil {
t.Fatal(err)
}
defer e.Close()
if err := e.writePoints(p1); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.WriteSnapshot(); err != nil {
t.Fatalf("failed to snapshot: %s", err.Error())
}
keys := e.FileStore.Keys()
if exp, got := 1, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}}
if err := e.DeleteSeriesRange(itr, 0, 0); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
keys = e.FileStore.Keys()
if exp, got := 1, len(keys); exp != got {
t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
}
exp := "cpu,host=A#!~#value"
if _, ok := keys[exp]; !ok {
t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys)
}
// Check that the series still exists in the index
iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu"))
if err != nil {
t.Fatalf("iterator error: %v", err)
}
defer iter.Close()
elem, err := iter.Next()
if err != nil {
t.Fatal(err)
}
if elem.SeriesID.IsZero() {
t.Fatalf("series index mismatch: EOF, exp 1 series")
}
// Lookup series.
name, tags := e.sfile.Series(elem.SeriesID)
if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) {
t.Fatalf("series mismatch: got %s, exp %s", got, exp)
}
if got, exp := tags, models.NewTags(map[string]string{"host": "A"}); !got.Equal(exp) {
t.Fatalf("series mismatch: got %s, exp %s", got, exp)
}
}
func TestEngine_LastModified(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
// Create a few points.
p1 := MustParsePointString("cpu,host=A value=1.1 1000000000")
p2 := MustParsePointString("cpu,host=B value=1.2 2000000000")
p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000")
e, err := NewEngine()
if err != nil {
t.Fatal(err)
}
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
e.SetEnabled(false)
if err := e.Open(); err != nil {
t.Fatal(err)
}
defer e.Close()
if err := e.writePoints(p1, p2, p3); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
lm := e.LastModified()
if lm.IsZero() {
t.Fatalf("expected non-zero time, got %v", lm.UTC())
}
e.SetEnabled(true)
// Artificial sleep added due to filesystems caching the mod time
// of files. This prevents the WAL last-modified time from being
// returned as newer than the filestore's mod time.
time.Sleep(2 * time.Second) // Covers most filesystems.
if err := e.WriteSnapshot(); err != nil {
t.Fatalf("failed to snapshot: %s", err.Error())
}
lm2 := e.LastModified()
if got, exp := lm.Equal(lm2), false; exp != got {
t.Fatalf("expected time change, got %v, exp %v: %s == %s", got, exp, lm.String(), lm2.String())
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}}
if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
lm3 := e.LastModified()
if got, exp := lm2.Equal(lm3), false; exp != got {
t.Fatalf("expected time change, got %v, exp %v", got, exp)
}
}
func TestEngine_SnapshotsDisabled(t *testing.T) {
sfile := MustOpenSeriesFile()
defer sfile.Close()
@ -1027,7 +433,7 @@ func (e *Engine) WritePointsString(ptstr ...string) error {
func (e *Engine) writePoints(points ...models.Point) error {
// Write into the index.
collection := tsdb.NewSeriesCollection(points)
if err := e.CreateSeriesListIfNotExists(collection); err != nil {
if err := e.index.CreateSeriesListIfNotExists(collection); err != nil {
return err
}
// Write the points into the cache/wal.
@ -1112,30 +518,3 @@ func (m *mockPlanner) Release(groups []tsm1.CompactionGroup) {}
func (m *mockPlanner) FullyCompacted() bool { return false }
func (m *mockPlanner) ForceFull() {}
func (m *mockPlanner) SetFileStore(fs *tsm1.FileStore) {}
type seriesIterator struct {
keys [][]byte
}
type series struct {
name []byte
tags models.Tags
deleted bool
}
func (s series) Name() []byte { return s.name }
func (s series) Tags() models.Tags { return s.tags }
func (s series) Deleted() bool { return s.deleted }
func (s series) Expr() influxql.Expr { return nil }
func (itr *seriesIterator) Close() error { return nil }
func (itr *seriesIterator) Next() (tsdb.SeriesElem, error) {
if len(itr.keys) == 0 {
return nil, nil
}
name, tags := models.ParseKeyBytes(itr.keys[0])
s := series{name: name, tags: tags}
itr.keys = itr.keys[1:]
return s, nil
}

View File

@ -25,7 +25,6 @@ func PrometheusCollectors() []prometheus.Collector {
collectors = append(collectors, bms.compactionMetrics.PrometheusCollectors()...)
collectors = append(collectors, bms.fileMetrics.PrometheusCollectors()...)
collectors = append(collectors, bms.cacheMetrics.PrometheusCollectors()...)
collectors = append(collectors, bms.walMetrics.PrometheusCollectors()...)
}
return collectors
}
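A short sketch of exposing these metrics, assuming the package-level PrometheusCollectors above (tsm1.PrometheusCollectors from outside the package):
reg := prometheus.NewRegistry()
reg.MustRegister(PrometheusCollectors()...)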
@ -36,7 +35,6 @@ const namespace = "storage"
const compactionSubsystem = "compactions" // sub-system associated with metrics for compactions.
const fileStoreSubsystem = "tsm_files" // sub-system associated with metrics for TSM files.
const cacheSubsystem = "cache" // sub-system associated with metrics for the cache.
const walSubsystem = "wal" // sub-system associated with metrics for the WAL.
// blockMetrics are a set of metrics concerned with tracking data about block storage.
type blockMetrics struct {
@ -44,7 +42,6 @@ type blockMetrics struct {
*compactionMetrics
*fileMetrics
*cacheMetrics
*walMetrics
}
// newBlockMetrics initialises the prometheus metrics for the block subsystem.
@ -54,7 +51,6 @@ func newBlockMetrics(labels prometheus.Labels) *blockMetrics {
compactionMetrics: newCompactionMetrics(labels),
fileMetrics: newFileMetrics(labels),
cacheMetrics: newCacheMetrics(labels),
walMetrics: newWALMetrics(labels),
}
}
@ -64,7 +60,6 @@ func (m *blockMetrics) PrometheusCollectors() []prometheus.Collector {
metrics = append(metrics, m.compactionMetrics.PrometheusCollectors()...)
metrics = append(metrics, m.fileMetrics.PrometheusCollectors()...)
metrics = append(metrics, m.cacheMetrics.PrometheusCollectors()...)
metrics = append(metrics, m.walMetrics.PrometheusCollectors()...)
return metrics
}
@ -249,60 +244,3 @@ func (m *cacheMetrics) PrometheusCollectors() []prometheus.Collector {
m.Writes,
}
}
// walMetrics are a set of metrics concerned with tracking data about the WAL.
type walMetrics struct {
OldSegmentBytes *prometheus.GaugeVec
CurrentSegmentBytes *prometheus.GaugeVec
Segments *prometheus.GaugeVec
Writes *prometheus.CounterVec
}
// newWALMetrics initialises the prometheus metrics for tracking the WAL.
func newWALMetrics(labels prometheus.Labels) *walMetrics {
var names []string
for k := range labels {
names = append(names, k)
}
sort.Strings(names)
writeNames := append(append([]string(nil), names...), "status")
sort.Strings(writeNames)
return &walMetrics{
OldSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "old_segment_bytes",
Help: "Number of bytes old WAL segments using on disk.",
}, names),
CurrentSegmentBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "current_segment_bytes",
Help: "Number of bytes TSM files using on disk.",
}, names),
Segments: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "segments_total",
Help: "Number of WAL segment files on disk.",
}, names),
Writes: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: walSubsystem,
Name: "writes_total",
Help: "Number of writes to the WAL.",
}, writeNames),
}
}
// PrometheusCollectors satisfies the prom.PrometheusCollector interface.
func (m *walMetrics) PrometheusCollectors() []prometheus.Collector {
return []prometheus.Collector{
m.OldSegmentBytes,
m.CurrentSegmentBytes,
m.Segments,
m.Writes,
}
}

View File

@ -130,73 +130,6 @@ func TestMetrics_Cache(t *testing.T) {
}
}
func TestMetrics_WAL(t *testing.T) {
// metrics to be shared by multiple file stores.
metrics := newWALMetrics(prometheus.Labels{"engine_id": "", "node_id": ""})
t1 := newWALTracker(metrics, prometheus.Labels{"engine_id": "0", "node_id": "0"})
t2 := newWALTracker(metrics, prometheus.Labels{"engine_id": "1", "node_id": "0"})
reg := prometheus.NewRegistry()
reg.MustRegister(metrics.PrometheusCollectors()...)
base := namespace + "_" + walSubsystem + "_"
// All the metric names
gauges := []string{
base + "old_segment_bytes",
base + "current_segment_bytes",
base + "segments_total",
}
counters := []string{
base + "writes_total",
}
// Generate some measurements.
for i, tracker := range []*walTracker{t1, t2} {
tracker.SetOldSegmentSize(uint64(i + len(gauges[0])))
tracker.SetCurrentSegmentSize(uint64(i + len(gauges[1])))
tracker.SetSegments(uint64(i + len(gauges[2])))
labels := tracker.Labels()
labels["status"] = "ok"
tracker.metrics.Writes.With(labels).Add(float64(i + len(counters[0])))
}
// Test that all the correct metrics are present.
mfs, err := reg.Gather()
if err != nil {
t.Fatal(err)
}
// The label variants for the two caches.
labelVariants := []prometheus.Labels{
prometheus.Labels{"engine_id": "0", "node_id": "0"},
prometheus.Labels{"engine_id": "1", "node_id": "0"},
}
for i, labels := range labelVariants {
for _, name := range gauges {
exp := float64(i + len(name))
metric := promtest.MustFindMetric(t, mfs, name, labels)
if got := metric.GetGauge().GetValue(); got != exp {
t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp)
}
}
for _, name := range counters {
exp := float64(i + len(name))
labels["status"] = "ok"
metric := promtest.MustFindMetric(t, mfs, name, labels)
if got := metric.GetCounter().GetValue(); got != exp {
t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp)
}
}
}
}
func TestMetrics_Compactions(t *testing.T) {
// metrics to be shared by multiple file stores.
metrics := newCompactionMetrics(prometheus.Labels{"engine_id": "", "node_id": ""})

tsdb/tsm1/value.go (new file, 123 lines)
View File

@ -0,0 +1,123 @@
package tsm1
import (
"fmt"
"time"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb/value"
)
type (
Value = value.Value
IntegerValue = value.IntegerValue
UnsignedValue = value.UnsignedValue
FloatValue = value.FloatValue
BooleanValue = value.BooleanValue
StringValue = value.StringValue
)
// NewValue returns a new Value with the underlying type dependent on value.
func NewValue(t int64, v interface{}) Value { return value.NewValue(t, v) }
// NewRawIntegerValue returns a new integer value.
func NewRawIntegerValue(t int64, v int64) IntegerValue { return value.NewRawIntegerValue(t, v) }
// NewRawUnsignedValue returns a new unsigned integer value.
func NewRawUnsignedValue(t int64, v uint64) UnsignedValue { return value.NewRawUnsignedValue(t, v) }
// NewRawFloatValue returns a new float value.
func NewRawFloatValue(t int64, v float64) FloatValue { return value.NewRawFloatValue(t, v) }
// NewRawBooleanValue returns a new boolean value.
func NewRawBooleanValue(t int64, v bool) BooleanValue { return value.NewRawBooleanValue(t, v) }
// NewRawStringValue returns a new string value.
func NewRawStringValue(t int64, v string) StringValue { return value.NewRawStringValue(t, v) }
// NewIntegerValue returns a new integer value.
func NewIntegerValue(t int64, v int64) Value { return value.NewIntegerValue(t, v) }
// NewUnsignedValue returns a new unsigned integer value.
func NewUnsignedValue(t int64, v uint64) Value { return value.NewUnsignedValue(t, v) }
// NewFloatValue returns a new float value.
func NewFloatValue(t int64, v float64) Value { return value.NewFloatValue(t, v) }
// NewBooleanValue returns a new boolean value.
func NewBooleanValue(t int64, v bool) Value { return value.NewBooleanValue(t, v) }
// NewStringValue returns a new string value.
func NewStringValue(t int64, v string) Value { return value.NewStringValue(t, v) }
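For reference, NewValue infers the concrete value type from its argument, while the typed constructors make it explicit. A sketch:
v1 := NewValue(1000000000, 1.1)         // becomes a FloatValue
v2 := NewValue(1000000000, int64(1))    // becomes an IntegerValue
v3 := NewBooleanValue(1000000000, true) // explicit BooleanValue
_ = []Value{v1, v2, v3}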
// PointsToValues takes in a slice of points and returns it as a map of series key to
// values. It returns an error if any of the points could not be converted.
func PointsToValues(points []models.Point) (map[string][]Value, error) {
values := make(map[string][]Value, len(points))
var (
keyBuf []byte
baseLen int
)
for _, p := range points {
keyBuf = append(keyBuf[:0], p.Key()...)
keyBuf = append(keyBuf, keyFieldSeparator...)
baseLen = len(keyBuf)
iter := p.FieldIterator()
t := p.Time().UnixNano()
for iter.Next() {
keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...)
var v Value
switch iter.Type() {
case models.Float:
fv, err := iter.FloatValue()
if err != nil {
return nil, err
}
v = NewFloatValue(t, fv)
case models.Integer:
iv, err := iter.IntegerValue()
if err != nil {
return nil, err
}
v = NewIntegerValue(t, iv)
case models.Unsigned:
iv, err := iter.UnsignedValue()
if err != nil {
return nil, err
}
v = NewUnsignedValue(t, iv)
case models.String:
v = NewStringValue(t, iter.StringValue())
case models.Boolean:
bv, err := iter.BooleanValue()
if err != nil {
return nil, err
}
v = NewBooleanValue(t, bv)
default:
return nil, fmt.Errorf("unknown field type for %s: %s",
string(iter.FieldKey()), p.String())
}
values[string(keyBuf)] = append(values[string(keyBuf)], v)
}
}
return values, nil
}
// ValuesToPoints takes in a map of values and returns a slice of models.Point.
func ValuesToPoints(values map[string][]Value) []models.Point {
points := make([]models.Point, 0, len(values))
for composite, vals := range values {
series, field := SeriesAndFieldFromCompositeKey([]byte(composite))
strField := string(field)
for _, val := range vals {
t := time.Unix(0, val.UnixNano())
fields := models.Fields{strField: val.Value()}
points = append(points, models.NewPointFromSeries(series, fields, t))
}
}
return points
}
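The two helpers are near-inverses, so a round trip is possible; note that map iteration order means the returned points are unordered. A sketch using models.ParsePointsString:
pts, err := models.ParsePointsString("cpu,host=A value=1.1 1000000000")
if err != nil {
    return err
}
values, err := PointsToValues(pts)
if err != nil {
    return err
}
back := ValuesToPoints(values) // one models.Point per value
_ = back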

View File

@ -1,864 +0,0 @@
package tsm1_test
import (
"fmt"
"io"
"os"
"reflect"
"testing"
"github.com/golang/snappy"
"github.com/influxdata/influxdb/pkg/slices"
"github.com/influxdata/influxdb/tsdb/tsm1"
)
func TestWALWriter_WriteMulti_Single(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
p1 := tsm1.NewValue(1, 1.1)
p2 := tsm1.NewValue(1, int64(1))
p3 := tsm1.NewValue(1, true)
p4 := tsm1.NewValue(1, "string")
p5 := tsm1.NewValue(1, ^uint64(0))
values := map[string][]tsm1.Value{
"cpu,host=A#!~#float": []tsm1.Value{p1},
"cpu,host=A#!~#int": []tsm1.Value{p2},
"cpu,host=A#!~#bool": []tsm1.Value{p3},
"cpu,host=A#!~#string": []tsm1.Value{p4},
"cpu,host=A#!~#unsigned": []tsm1.Value{p5},
}
entry := &tsm1.WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
for k, v := range e.Values {
for i, vv := range v {
if got, exp := vv.String(), values[k][i].String(); got != exp {
t.Fatalf("points mismatch: got %v, exp %v", got, exp)
}
}
}
if n := r.Count(); n != MustReadFileSize(f) {
t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f))
}
}
func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
var points []tsm1.Value
for i := 0; i < 100000; i++ {
points = append(points, tsm1.NewValue(int64(i), int64(1)))
}
values := map[string][]tsm1.Value{
"cpu,host=A,server=01,foo=bar,tag=really-long#!~#float": points,
"mem,host=A,server=01,foo=bar,tag=really-long#!~#float": points,
}
entry := &tsm1.WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
for k, v := range e.Values {
for i, vv := range v {
if got, exp := vv.String(), values[k][i].String(); got != exp {
t.Fatalf("points mismatch: got %v, exp %v", got, exp)
}
}
}
if n := r.Count(); n != MustReadFileSize(f) {
t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f))
}
}
func TestWALWriter_WriteMulti_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
p1 := tsm1.NewValue(1, int64(1))
p2 := tsm1.NewValue(1, int64(2))
exp := []struct {
key string
values []tsm1.Value
}{
{"cpu,host=A#!~#value", []tsm1.Value{p1}},
{"cpu,host=B#!~#value", []tsm1.Value{p2}},
}
for _, v := range exp {
entry := &tsm1.WriteWALEntry{
Values: map[string][]tsm1.Value{v.key: v.values},
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
}
// Seek back to the beginning of the file for reading
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
for _, ep := range exp {
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
for k, v := range e.Values {
if got, exp := k, ep.key; got != exp {
t.Fatalf("key mismatch. got %v, exp %v", got, exp)
}
if got, exp := len(v), len(ep.values); got != exp {
t.Fatalf("values length mismatch: got %v, exp %v", got, exp)
}
for i, vv := range v {
if got, exp := vv.String(), ep.values[i].String(); got != exp {
t.Fatalf("points mismatch: got %v, exp %v", got, exp)
}
}
}
}
if n := r.Count(); n != MustReadFileSize(f) {
t.Fatalf("wrong count of bytes read, got %d, exp %d", n, MustReadFileSize(f))
}
}
func TestWALWriter_WriteDelete_Single(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
entry := &tsm1.DeleteWALEntry{
Keys: [][]byte{[]byte("cpu")},
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.DeleteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
if got, exp := len(e.Keys), len(entry.Keys); got != exp {
t.Fatalf("key length mismatch: got %v, exp %v", got, exp)
}
if got, exp := string(e.Keys[0]), string(entry.Keys[0]); got != exp {
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
}
}
func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
p1 := tsm1.NewValue(1, true)
values := map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{p1},
}
writeEntry := &tsm1.WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(writeEntry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
// Write the delete entry
deleteEntry := &tsm1.DeleteWALEntry{
Keys: [][]byte{[]byte("cpu,host=A#!~value")},
}
if err := w.Write(mustMarshalEntry(deleteEntry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
// Seek back to the beginning of the file for reading
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
// Read the write points first
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
for k, v := range e.Values {
if got, exp := len(v), len(values[k]); got != exp {
t.Fatalf("values length mismatch: got %v, exp %v", got, exp)
}
for i, vv := range v {
if got, exp := vv.String(), values[k][i].String(); got != exp {
t.Fatalf("points mismatch: got %v, exp %v", got, exp)
}
}
}
// Read the delete second
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err = r.Read()
if err != nil {
fatal(t, "read entry", err)
}
de, ok := we.(*tsm1.DeleteWALEntry)
if !ok {
t.Fatalf("expected DeleteWALEntry: got %#v", e)
}
if got, exp := len(de.Keys), len(deleteEntry.Keys); got != exp {
t.Fatalf("key length mismatch: got %v, exp %v", got, exp)
}
if got, exp := string(de.Keys[0]), string(deleteEntry.Keys[0]); got != exp {
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
}
}
func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
p1 := tsm1.NewValue(1, 1.0)
p2 := tsm1.NewValue(2, 2.0)
p3 := tsm1.NewValue(3, 3.0)
values := map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{p1, p2, p3},
}
writeEntry := &tsm1.WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(writeEntry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
// Write the delete entry
deleteEntry := &tsm1.DeleteRangeWALEntry{
Keys: [][]byte{[]byte("cpu,host=A#!~value")},
Min: 2,
Max: 3,
}
if err := w.Write(mustMarshalEntry(deleteEntry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
// Seek back to the beginning of the file for reading
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
// Read the write points first
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err := r.Read()
if err != nil {
fatal(t, "read entry", err)
}
e, ok := we.(*tsm1.WriteWALEntry)
if !ok {
t.Fatalf("expected WriteWALEntry: got %#v", e)
}
for k, v := range e.Values {
if got, exp := len(v), len(values[k]); got != exp {
t.Fatalf("values length mismatch: got %v, exp %v", got, exp)
}
for i, vv := range v {
if got, exp := vv.String(), values[k][i].String(); got != exp {
t.Fatalf("points mismatch: got %v, exp %v", got, exp)
}
}
}
// Read the delete second
if !r.Next() {
t.Fatalf("expected next, got false")
}
we, err = r.Read()
if err != nil {
fatal(t, "read entry", err)
}
de, ok := we.(*tsm1.DeleteRangeWALEntry)
if !ok {
t.Fatalf("expected DeleteWALEntry: got %#v", e)
}
if got, exp := len(de.Keys), len(deleteEntry.Keys); got != exp {
t.Fatalf("key length mismatch: got %v, exp %v", got, exp)
}
if got, exp := string(de.Keys[0]), string(deleteEntry.Keys[0]); got != exp {
t.Fatalf("key mismatch: got %v, exp %v", got, exp)
}
if got, exp := de.Min, int64(2); got != exp {
t.Fatalf("min time mismatch: got %v, exp %v", got, exp)
}
if got, exp := de.Max, int64(3); got != exp {
t.Fatalf("min time mismatch: got %v, exp %v", got, exp)
}
}
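The Min/Max pair bounds the tombstone in time. A hedged sketch of the pruning a DeleteRangeWALEntry implies for an in-memory consumer, assuming (as the 2..3 bounds here suggest) that the range is inclusive on both ends:
// filterRange drops values whose timestamps fall inside [min, max].
func filterRange(vals []tsm1.Value, min, max int64) []tsm1.Value {
	kept := vals[:0]
	for _, v := range vals {
		if t := v.UnixNano(); t >= min && t <= max {
			continue // tombstoned by the range delete
		}
		kept = append(kept, v)
	}
	return kept
}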
func TestWAL_ClosedSegments(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
w := tsm1.NewWAL(dir)
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
}
files, err := w.ClosedSegments()
if err != nil {
t.Fatalf("error getting closed segments: %v", err)
}
if got, exp := len(files), 0; got != exp {
t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp)
}
if _, err := w.WriteMulti(map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{
tsm1.NewValue(1, 1.1),
},
}); err != nil {
t.Fatalf("error writing points: %v", err)
}
if err := w.Close(); err != nil {
t.Fatalf("error closing wal: %v", err)
}
// Re-open the WAL
w = tsm1.NewWAL(dir)
defer w.Close()
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
}
files, err = w.ClosedSegments()
if err != nil {
t.Fatalf("error getting closed segments: %v", err)
}
if got, exp := len(files), 0; got != exp {
t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp)
}
}
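ClosedSegments reports only sealed segments, which is why both checks above expect zero: the write landed in the still-open segment. A sketch of replaying sealed segments on startup, assuming ClosedSegments returns segment file paths:
// replayClosed re-reads every entry from the sealed WAL segments.
func replayClosed(w *tsm1.WAL) error {
	paths, err := w.ClosedSegments()
	if err != nil {
		return err
	}
	for _, p := range paths {
		if err := replaySegment(p); err != nil {
			return err
		}
	}
	return nil
}

func replaySegment(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	r := tsm1.NewWALSegmentReader(f)
	for r.Next() {
		if _, err := r.Read(); err != nil {
			break // stop at the first undecodable entry in this segment
		}
	}
	return nil
}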
func TestWAL_Delete(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
w := tsm1.NewWAL(dir)
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
}
files, err := w.ClosedSegments()
if err != nil {
t.Fatalf("error getting closed segments: %v", err)
}
if got, exp := len(files), 0; got != exp {
t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp)
}
if _, err := w.Delete([][]byte{[]byte("cpu")}); err != nil {
t.Fatalf("error writing points: %v", err)
}
if err := w.Close(); err != nil {
t.Fatalf("error closing wal: %v", err)
}
// Re-open the WAL
w = tsm1.NewWAL(dir)
defer w.Close()
if err := w.Open(); err != nil {
t.Fatalf("error opening WAL: %v", err)
}
files, err = w.ClosedSegments()
if err != nil {
t.Fatalf("error getting closed segments: %v", err)
}
if got, exp := len(files), 0; got != exp {
t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp)
}
}
func TestWALWriter_Corrupt(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
corruption := []byte{1, 4, 0, 0, 0}
p1 := tsm1.NewValue(1, 1.1)
values := map[string][]tsm1.Value{
"cpu,host=A#!~#float": []tsm1.Value{p1},
}
entry := &tsm1.WriteWALEntry{
Values: values,
}
if err := w.Write(mustMarshalEntry(entry)); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
// Write some random bytes to the file to simulate corruption.
if _, err := f.Write(corruption); err != nil {
fatal(t, "corrupt WAL segment", err)
}
// Create the WAL segment reader.
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
// Try to decode two entries.
if !r.Next() {
t.Fatalf("expected next, got false")
}
if _, err := r.Read(); err != nil {
fatal(t, "read entry", err)
}
if !r.Next() {
t.Fatalf("expected next, got false")
}
if _, err := r.Read(); err == nil {
fatal(t, "read entry did not return err", nil)
}
// Count should only return size of valid data.
expCount := MustReadFileSize(f) - int64(len(corruption))
if n := r.Count(); n != expCount {
t.Fatalf("wrong count of bytes read, got %d, exp %d", n, expCount)
}
}
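The corruption test leans on a useful property: after a failed Read, r.Count() is the byte offset of the last fully decoded entry. That is exactly what a recovery pass needs to drop a damaged tail. A sketch of that idea, not the engine's actual recovery code:
// truncateCorruptTail rewinds, scans to the last good entry, and truncates.
func truncateCorruptTail(f *os.File) error {
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return err
	}
	r := tsm1.NewWALSegmentReader(f)
	for r.Next() {
		if _, err := r.Read(); err != nil {
			break
		}
	}
	return f.Truncate(r.Count()) // keep only the bytes that decoded cleanly
}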
// Reproduces a `panic: runtime error: makeslice: cap out of range` when run with
// GOARCH=386 go test -run TestWALSegmentReader_Corrupt -v ./tsdb/engine/tsm1/
func TestWALSegmentReader_Corrupt(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
p4 := tsm1.NewValue(1, "string")
values := map[string][]tsm1.Value{
"cpu,host=A#!~#string": []tsm1.Value{p4, p4},
}
entry := &tsm1.WriteWALEntry{
Values: values,
}
typ, b := mustMarshalEntry(entry)
// This causes the nvals field to overflow on 32-bit systems, which produces a
// negative count and a panic when reading the segment.
b[25] = 255
if err := w.Write(typ, b); err != nil {
fatal(t, "write points", err)
}
if err := w.Flush(); err != nil {
fatal(t, "flush", err)
}
// Create the WAL segment reader.
if _, err := f.Seek(0, io.SeekStart); err != nil {
fatal(t, "seek", err)
}
r := tsm1.NewWALSegmentReader(f)
defer r.Close()
// Try to decode two entries.
for r.Next() {
r.Read() // the error is deliberately ignored; the test only asserts no panic
}
}
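The fix for this class of bug is to bounds-check any length field decoded from untrusted segment bytes before it reaches make(). A hedged sketch of the guard, not the actual patch:
// safeSlice allocates n bytes only if n fits in the remaining payload,
// rejecting counts that would overflow or over-allocate on 32-bit builds.
func safeSlice(n uint32, remaining int) ([]byte, error) {
	if int64(n) > int64(remaining) {
		return nil, tsm1.ErrWALCorrupt
	}
	return make([]byte, n), nil
}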
func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) {
p1 := tsm1.NewValue(1, 1.1)
p2 := tsm1.NewValue(1, int64(1))
p3 := tsm1.NewValue(1, true)
p4 := tsm1.NewValue(1, "string")
p5 := tsm1.NewValue(1, uint64(1))
values := map[string][]tsm1.Value{
"cpu,host=A#!~#float": []tsm1.Value{p1, p1},
"cpu,host=A#!~#int": []tsm1.Value{p2, p2},
"cpu,host=A#!~#bool": []tsm1.Value{p3, p3},
"cpu,host=A#!~#string": []tsm1.Value{p4, p4},
"cpu,host=A#!~#unsigned": []tsm1.Value{p5, p5},
}
w := &tsm1.WriteWALEntry{
Values: values,
}
b, err := w.MarshalBinary()
if err != nil {
t.Fatalf("unexpected error, got %v", err)
}
// Test every possible truncation of a write WAL entry
for i := 0; i < len(b); i++ {
// re-allocated to ensure capacity would be exceeded if slicing
truncated := make([]byte, i)
copy(truncated, b[:i])
err := w.UnmarshalBinary(truncated)
if err != nil && err != tsm1.ErrWALCorrupt {
t.Fatalf("unexpected error: %v", err)
}
}
}
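The same prefix-fuzz loop is repeated for the delete and delete-range entries below; it generalizes to one helper, assuming the tsm1.WALEntry interface exposes UnmarshalBinary:
// fuzzTruncations asserts every prefix of b either unmarshals or fails
// with ErrWALCorrupt; a panic or any other error is a decoder bug.
func fuzzTruncations(t *testing.T, entry tsm1.WALEntry, b []byte) {
	t.Helper()
	for i := 0; i < len(b); i++ {
		truncated := make([]byte, i) // re-allocate so capacity is exceeded if sliced
		copy(truncated, b[:i])
		if err := entry.UnmarshalBinary(truncated); err != nil && err != tsm1.ErrWALCorrupt {
			t.Fatalf("unexpected error at prefix length %d: %v", i, err)
		}
	}
}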
func TestDeleteWALEntry_UnmarshalBinary(t *testing.T) {
examples := []struct {
In []string
Out [][]byte
}{
{
In: []string{""},
Out: nil,
},
{
In: []string{"foo"},
Out: [][]byte{[]byte("foo")},
},
{
In: []string{"foo", "bar"},
Out: [][]byte{[]byte("foo"), []byte("bar")},
},
{
In: []string{"foo", "bar", "z", "abc"},
Out: [][]byte{[]byte("foo"), []byte("bar"), []byte("z"), []byte("abc")},
},
{
In: []string{"foo", "bar", "z", "a"},
Out: [][]byte{[]byte("foo"), []byte("bar"), []byte("z"), []byte("a")},
},
}
for i, example := range examples {
w := &tsm1.DeleteWALEntry{Keys: slices.StringsToBytes(example.In...)}
b, err := w.MarshalBinary()
if err != nil {
t.Fatalf("[example %d] unexpected error, got %v", i, err)
}
out := &tsm1.DeleteWALEntry{}
if err := out.UnmarshalBinary(b); err != nil {
t.Fatalf("[example %d] %v", i, err)
}
if !reflect.DeepEqual(example.Out, out.Keys) {
t.Errorf("[example %d] got %v, expected %v", i, out.Keys, example.Out)
}
}
}
func TestWriteWALSegment_UnmarshalBinary_DeleteWALCorrupt(t *testing.T) {
w := &tsm1.DeleteWALEntry{
Keys: [][]byte{[]byte("foo"), []byte("bar")},
}
b, err := w.MarshalBinary()
if err != nil {
t.Fatalf("unexpected error, got %v", err)
}
// Test every possible truncation of a delete WAL entry
for i := 0; i < len(b); i++ {
// re-allocated to ensure capacity would be exceeded if slicing
truncated := make([]byte, i)
copy(truncated, b[:i])
err := w.UnmarshalBinary(truncated)
if err != nil && err != tsm1.ErrWALCorrupt {
t.Fatalf("unexpected error: %v", err)
}
}
}
func TestWriteWALSegment_UnmarshalBinary_DeleteRangeWALCorrupt(t *testing.T) {
w := &tsm1.DeleteRangeWALEntry{
Keys: [][]byte{[]byte("foo"), []byte("bar")},
Min: 1,
Max: 2,
}
b, err := w.MarshalBinary()
if err != nil {
t.Fatalf("unexpected error, got %v", err)
}
// Test every possible truncation of a delete range WAL entry
for i := 0; i < len(b); i++ {
// re-allocated to ensure capacity would be exceeded if slicing
truncated := make([]byte, i)
copy(truncated, b[:i])
err := w.UnmarshalBinary(truncated)
if err != nil && err != tsm1.ErrWALCorrupt {
t.Fatalf("unexpected error: %v", err)
}
}
}
func BenchmarkWALSegmentWriter(b *testing.B) {
points := map[string][]tsm1.Value{}
for i := 0; i < 5000; i++ {
k := "cpu,host=A#!~#value"
points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1))
}
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
write := &tsm1.WriteWALEntry{
Values: points,
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := w.Write(mustMarshalEntry(write)); err != nil {
b.Fatalf("unexpected error writing entry: %v", err)
}
}
}
func BenchmarkWALSegmentReader(b *testing.B) {
points := map[string][]tsm1.Value{}
for i := 0; i < 5000; i++ {
k := "cpu,host=A#!~#value"
points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1))
}
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
w := tsm1.NewWALSegmentWriter(f)
write := &tsm1.WriteWALEntry{
Values: points,
}
for i := 0; i < 100; i++ {
if err := w.Write(mustMarshalEntry(write)); err != nil {
b.Fatalf("unexpected error writing entry: %v", err)
}
}
r := tsm1.NewWALSegmentReader(f)
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
f.Seek(0, io.SeekStart)
b.StartTimer()
for r.Next() {
_, err := r.Read()
if err != nil {
b.Fatalf("unexpected error reading entry: %v", err)
}
}
}
}
// MustReadFileSize returns the size of the file, or panics.
func MustReadFileSize(f *os.File) int64 {
stat, err := os.Stat(f.Name())
if err != nil {
panic(fmt.Sprintf("failed to get size of file at %s: %s", f.Name(), err.Error()))
}
return stat.Size()
}
func mustMarshalEntry(entry tsm1.WALEntry) (tsm1.WalEntryType, []byte) {
bytes := make([]byte, 1024<<2)
b, err := entry.Encode(bytes)
if err != nil {
panic(fmt.Sprintf("error encoding: %v", err))
}
return entry.Type(), snappy.Encode(b, b)
}
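For symmetry, the inverse helper would snappy-decompress and then unmarshal. A sketch (mustUnmarshalEntry is not part of this file, and it assumes WALEntry exposes UnmarshalBinary):
func mustUnmarshalEntry(b []byte, entry tsm1.WALEntry) {
	decoded, err := snappy.Decode(nil, b)
	if err != nil {
		panic(fmt.Sprintf("error decompressing: %v", err))
	}
	if err := entry.UnmarshalBinary(decoded); err != nil {
		panic(fmt.Sprintf("error decoding: %v", err))
	}
}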

236 tsdb/value/value.go Normal file
View File

@ -0,0 +1,236 @@
package value
import (
"fmt"
"time"
"github.com/influxdata/influxdb/tsdb"
)
// Value represents a TSM-encoded value.
type Value interface {
// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.
UnixNano() int64
// Value returns the underlying value.
Value() interface{}
// Size returns the number of bytes necessary to represent the value and its timestamp.
Size() int
// String returns the string representation of the value and its timestamp.
String() string
// internalOnly is unexported to ensure implementations of Value
// can only originate in this package.
internalOnly()
}
// NewValue returns a new Value with the underlying type dependent on value.
func NewValue(t int64, value interface{}) Value {
switch v := value.(type) {
case int64:
return IntegerValue{unixnano: t, value: v}
case uint64:
return UnsignedValue{unixnano: t, value: v}
case float64:
return FloatValue{unixnano: t, value: v}
case bool:
return BooleanValue{unixnano: t, value: v}
case string:
return StringValue{unixnano: t, value: v}
}
return EmptyValue{}
}
// NewRawIntegerValue returns a new integer value.
func NewRawIntegerValue(t int64, v int64) IntegerValue { return IntegerValue{unixnano: t, value: v} }
// NewRawUnsignedValue returns a new unsigned integer value.
func NewRawUnsignedValue(t int64, v uint64) UnsignedValue { return UnsignedValue{unixnano: t, value: v} }
// NewRawFloatValue returns a new float value.
func NewRawFloatValue(t int64, v float64) FloatValue { return FloatValue{unixnano: t, value: v} }
// NewRawBooleanValue returns a new boolean value.
func NewRawBooleanValue(t int64, v bool) BooleanValue { return BooleanValue{unixnano: t, value: v} }
// NewRawStringValue returns a new string value.
func NewRawStringValue(t int64, v string) StringValue { return StringValue{unixnano: t, value: v} }
// NewIntegerValue returns a new integer value.
func NewIntegerValue(t int64, v int64) Value { return NewRawIntegerValue(t, v) }
// NewUnsignedValue returns a new unsigned integer value.
func NewUnsignedValue(t int64, v uint64) Value { return NewRawUnsignedValue(t, v) }
// NewFloatValue returns a new float value.
func NewFloatValue(t int64, v float64) Value { return NewRawFloatValue(t, v) }
// NewBooleanValue returns a new boolean value.
func NewBooleanValue(t int64, v bool) Value { return NewRawBooleanValue(t, v) }
// NewStringValue returns a new string value.
func NewStringValue(t int64, v string) Value { return NewRawStringValue(t, v) }
// EmptyValue is used when there is no appropriate other value.
type EmptyValue struct{}
// UnixNano returns tsdb.EOF.
func (e EmptyValue) UnixNano() int64 { return tsdb.EOF }
// Value returns nil.
func (e EmptyValue) Value() interface{} { return nil }
// Size returns 0.
func (e EmptyValue) Size() int { return 0 }
// String returns the empty string.
func (e EmptyValue) String() string { return "" }
func (EmptyValue) internalOnly() {}
func (StringValue) internalOnly() {}
func (IntegerValue) internalOnly() {}
func (UnsignedValue) internalOnly() {}
func (BooleanValue) internalOnly() {}
func (FloatValue) internalOnly() {}
// IntegerValue represents an int64 value.
type IntegerValue struct {
unixnano int64
value int64
}
// Value returns the underlying int64 value.
func (v IntegerValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v IntegerValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v IntegerValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v IntegerValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func (v IntegerValue) RawValue() int64 { return v.value }
// UnsignedValue represents a uint64 value.
type UnsignedValue struct {
unixnano int64
value uint64
}
// Value returns the underlying uint64 value.
func (v UnsignedValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v UnsignedValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v UnsignedValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v UnsignedValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func (v UnsignedValue) RawValue() uint64 { return v.value }
// FloatValue represents a float64 value.
type FloatValue struct {
unixnano int64
value float64
}
// UnixNano returns the timestamp of the value.
func (v FloatValue) UnixNano() int64 {
return v.unixnano
}
// Value returns the underlying float64 value.
func (v FloatValue) Value() interface{} {
return v.value
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v FloatValue) Size() int {
return 16
}
// String returns the string representation of the value and its timestamp.
func (v FloatValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.value)
}
func (v FloatValue) RawValue() float64 { return v.value }
// BooleanValue represents a boolean value.
type BooleanValue struct {
unixnano int64
value bool
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v BooleanValue) Size() int {
return 9
}
// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.
func (v BooleanValue) UnixNano() int64 {
return v.unixnano
}
// Value returns the underlying boolean value.
func (v BooleanValue) Value() interface{} {
return v.value
}
// String returns the string representation of the value and its timestamp.
func (v BooleanValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func (v BooleanValue) RawValue() bool { return v.value }
// StringValue represents a string value.
type StringValue struct {
unixnano int64
value string
}
// Value returns the underlying string value.
func (v StringValue) Value() interface{} {
return v.value
}
// UnixNano returns the timestamp of the value.
func (v StringValue) UnixNano() int64 {
return v.unixnano
}
// Size returns the number of bytes necessary to represent the value and its timestamp.
func (v StringValue) Size() int {
return 8 + len(v.value)
}
// String returns the string representation of the value and its timestamp.
func (v StringValue) String() string {
return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value())
}
func (v StringValue) RawValue() string { return v.value }
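A short usage sketch for the new package: NewValue selects the concrete type from the Go type of its argument, and the RawValue methods return the concrete value without the interface{} round trip. The timestamp below is arbitrary, and unsupported input types fall back to EmptyValue.
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/tsdb/value"
)

func main() {
	v := value.NewValue(1549555200000000000, 42.5)
	fmt.Println(v.UnixNano(), v.Value(), v.Size()) // float values cost 16 bytes

	if fv, ok := v.(value.FloatValue); ok {
		fmt.Println(fv.RawValue() + 0.5) // concrete float64, no assertion on Value()
	}

	e := value.NewValue(0, []byte("unsupported"))
	fmt.Println(e.Size()) // 0: EmptyValue, whose UnixNano is tsdb.EOF
}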

6 ui/jestSetup.ts Normal file
View File

@ -0,0 +1,6 @@
import {cleanup} from 'react-testing-library'
// cleans up state between react-testing-library tests
afterEach(() => {
cleanup()
})

View File

@ -1,11 +1,5 @@
import {
Source,
SourceAuthenticationMethod,
Template,
SourceLinks,
TemplateType,
TemplateValueType,
} from 'src/types'
import {Template, SourceLinks, TemplateType, TemplateValueType} from 'src/types'
import {Source} from '@influxdata/influx'
import {Cell, Dashboard, Label} from 'src/types/v2'
import {Links} from 'src/types/v2/links'
import {Task} from 'src/types/v2/tasks'
@ -23,9 +17,10 @@ import {
TelegrafPluginInputNet,
TelegrafPluginInputProcstat,
TelegrafPluginInputDocker,
TelegrafPluginInputSwap,
Task as TaskApi,
Organization,
} from 'src/api'
} from '@influxdata/influx'
export const links: Links = {
authorizations: '/api/v2/authorizations',
@ -123,14 +118,12 @@ export const sourceLinks: SourceLinks = {
export const source: Source = {
id: '16',
name: 'ssl',
type: 'influx',
type: Source.TypeEnum.Self,
username: 'admin',
url: 'https://localhost:9086',
insecureSkipVerify: true,
default: false,
telegraf: 'telegraf',
links: sourceLinks,
authentication: SourceAuthenticationMethod.Basic,
}
export const timeRange = {
@ -408,6 +401,12 @@ export const redisTelegrafPlugin = {
name: TelegrafPluginInputRedis.NameEnum.Redis,
}
export const swapTelegrafPlugin = {
...telegrafPlugin,
name: TelegrafPluginInputSwap.NameEnum.Swap,
configured: ConfigurationState.Configured,
}
export const redisPlugin = {
name: TelegrafPluginInputRedis.NameEnum.Redis,
type: TelegrafPluginInputRedis.TypeEnum.Input,
@ -583,7 +582,11 @@ export const setSetupParamsResponse = {
userID: '033bc62520fe3000',
user: 'iris',
permissions: [
{action: 'read', resource: 'authorizations', orgID: '033bc62534be3000'},
{
action: 'read',
resource: 'authorizations',
orgID: '033bc62534be3000',
},
{
action: 'write',
resource: 'authorizations',

97 ui/package-lock.json generated
View File

@ -829,13 +829,23 @@
"dev": true
},
"@influxdata/influx": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/@influxdata/influx/-/influx-0.1.3.tgz",
"integrity": "sha512-5RkmG8lABKNmorMq4s20WYXkoleG55bPjzor/NzPDiqvEF1Rrcp6TJMDuvsolO1/624CBLcieZ55nlImhCqNsw==",
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/@influxdata/influx/-/influx-0.2.2.tgz",
"integrity": "sha512-ozz53ZrhWodRb72Dp0LBxd6mU/c++qVyKbd0l59VTTQ1BGHMKTJEZEDtgrnX+IkWaq4Ry4JUxJVkDAJ/QBem8A==",
"requires": {
"axios": "^0.18.0"
}
},
"@influxdata/react-custom-scrollbars": {
"version": "4.3.8",
"resolved": "https://registry.npmjs.org/@influxdata/react-custom-scrollbars/-/react-custom-scrollbars-4.3.8.tgz",
"integrity": "sha512-tfBCP+L69nCqq/HgFDBmkvo0l6++6NnVZ2vj7vP5gHLenbTgODGNa9FLoaA3x5QQ8+OzvTt98UkVCAjKuY6aRg==",
"requires": {
"dom-css": "^2.0.0",
"prop-types": "^15.5.10",
"raf": "^3.1.0"
}
},
"@mrmlnc/readdir-enhanced": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz",
@ -902,6 +912,12 @@
"physical-cpu-count": "^2.0.0"
}
},
"@sheerun/mutationobserver-shim": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/@sheerun/mutationobserver-shim/-/mutationobserver-shim-0.3.2.tgz",
"integrity": "sha512-vTCdPp/T/Q3oSqwHmZ5Kpa9oI7iLtGl3RQaA/NyLHikvcrPxACkkKVr/XzkSPJWXHRhKGzVvb0urJsbMlRxi1Q==",
"dev": true
},
"@types/abstract-leveldown": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/@types/abstract-leveldown/-/abstract-leveldown-5.0.1.tgz",
@ -1057,9 +1073,9 @@
"dev": true
},
"@types/react": {
"version": "16.4.16",
"resolved": "https://registry.npmjs.org/@types/react/-/react-16.4.16.tgz",
"integrity": "sha512-lxyoipLWweAnLnSsV4Ho2NAZTKKmxeYgkTQ6PaDiPDU9JJBUY2zJVVGiK1smzYv8+ZgbqEmcm5xM74GCpunSEA==",
"version": "16.8.2",
"resolved": "https://registry.npmjs.org/@types/react/-/react-16.8.2.tgz",
"integrity": "sha512-6mcKsqlqkN9xADrwiUz2gm9Wg4iGnlVGciwBRYFQSMWG6MQjhOZ/AVnxn+6v8nslFgfYTV8fNdE6XwKu6va5PA==",
"dev": true,
"requires": {
"@types/prop-types": "*",
@ -3354,9 +3370,9 @@
}
},
"csstype": {
"version": "2.5.7",
"resolved": "https://registry.npmjs.org/csstype/-/csstype-2.5.7.tgz",
"integrity": "sha512-Nt5VDyOTIIV4/nRFswoCKps1R5CD1hkiyjBE9/thNaNZILLEviVw9yWQw15+O+CpNjQKB/uvdcxFFOrSflY3Yw==",
"version": "2.6.2",
"resolved": "https://registry.npmjs.org/csstype/-/csstype-2.6.2.tgz",
"integrity": "sha512-Rl7PvTae0pflc1YtxtKbiSqq20Ts6vpIYOD5WBafl4y123DyHUeLrRdQP66sQW8/6gmX8jrYJLXwNeMqYVJcow==",
"dev": true
},
"custom-event": {
@ -3738,6 +3754,18 @@
}
}
},
"dom-testing-library": {
"version": "3.16.4",
"resolved": "https://registry.npmjs.org/dom-testing-library/-/dom-testing-library-3.16.4.tgz",
"integrity": "sha512-D8tFLGe0xInL8F/KxZM7gc4r/vOCTgFGM93zXLB/AjFPz2O86y0UaruXl45K6xhqyclJFHHxUtgwaRddRyqxFw==",
"dev": true,
"requires": {
"@babel/runtime": "^7.1.5",
"@sheerun/mutationobserver-shim": "^0.3.2",
"pretty-format": "^23.6.0",
"wait-for-expect": "^1.1.0"
}
},
"domain-browser": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz",
@ -10445,14 +10473,14 @@
}
},
"react": {
"version": "16.5.2",
"resolved": "https://registry.npmjs.org/react/-/react-16.5.2.tgz",
"integrity": "sha512-FDCSVd3DjVTmbEAjUNX6FgfAmQ+ypJfHUsqUJOYNCBUp1h8lqmtC+0mXJ+JjsWx4KAVTkk1vKd1hLQPvEviSuw==",
"version": "16.8.0",
"resolved": "https://registry.npmjs.org/react/-/react-16.8.0.tgz",
"integrity": "sha512-g+nikW2D48kqgWSPwNo0NH9tIGG3DsQFlrtrQ1kj6W77z5ahyIHG0w8kPpz4Sdj6gyLnz0lEd/xsjOoGge2MYQ==",
"requires": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1",
"prop-types": "^15.6.2",
"schedule": "^0.5.0"
"scheduler": "^0.13.0"
}
},
"react-codemirror2": {
@ -10469,16 +10497,6 @@
"prop-types": "^15.5.8"
}
},
"react-custom-scrollbars": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/react-custom-scrollbars/-/react-custom-scrollbars-4.2.1.tgz",
"integrity": "sha1-gw/ZUCkn6X6KeMIIaBOJmyqLZts=",
"requires": {
"dom-css": "^2.0.0",
"prop-types": "^15.5.10",
"raf": "^3.1.0"
}
},
"react-dimensions": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/react-dimensions/-/react-dimensions-1.3.1.tgz",
@ -10509,14 +10527,14 @@
}
},
"react-dom": {
"version": "16.5.2",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.5.2.tgz",
"integrity": "sha512-RC8LDw8feuZOHVgzEf7f+cxBr/DnKdqp56VU0lAs1f4UfKc4cU8wU4fTq/mgnvynLQo8OtlPC19NUFh/zjZPuA==",
"version": "16.8.0",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.8.0.tgz",
"integrity": "sha512-dBzoAGYZpW9Yggp+CzBPC7q1HmWSeRc93DWrwbskmG1eHJWznZB/p0l/Sm+69leIGUS91AXPB/qB3WcPnKx8Sw==",
"requires": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1",
"prop-types": "^15.6.2",
"schedule": "^0.5.0"
"scheduler": "^0.13.0"
}
},
"react-draggable": {
@ -10629,6 +10647,15 @@
"schedule": "^0.5.0"
}
},
"react-testing-library": {
"version": "5.4.4",
"resolved": "https://registry.npmjs.org/react-testing-library/-/react-testing-library-5.4.4.tgz",
"integrity": "sha512-/TiERZ+URSNhZQfjrUXh0VLsiLSmhqP1WP+2e2wWqWqrRIWpcAxrfuBxzlT75LYMDNmicEikaXJqRDi/pqCEDg==",
"dev": true,
"requires": {
"dom-testing-library": "^3.13.1"
}
},
"react-tooltip": {
"version": "3.8.4",
"resolved": "https://registry.npmjs.org/react-tooltip/-/react-tooltip-3.8.4.tgz",
@ -11602,10 +11629,20 @@
"version": "0.5.0",
"resolved": "https://registry.npmjs.org/schedule/-/schedule-0.5.0.tgz",
"integrity": "sha512-HUcJicG5Ou8xfR//c2rPT0lPIRR09vVvN81T9fqfVgBmhERUbDEQoYKjpBxbueJnCPpSu2ujXzOnRQt6x9o/jw==",
"dev": true,
"requires": {
"object-assign": "^4.1.1"
}
},
"scheduler": {
"version": "0.13.0",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.13.0.tgz",
"integrity": "sha512-w7aJnV30jc7OsiZQNPVmBc+HooZuvQZIZIShKutC3tnMFMkcwVN9CZRRSSNw03OnSCKmEkK8usmwcw6dqBaLzw==",
"requires": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1"
}
},
"seleccion": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/seleccion/-/seleccion-2.0.0.tgz",
@ -13000,6 +13037,12 @@
"browser-process-hrtime": "^0.1.2"
}
},
"wait-for-expect": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/wait-for-expect/-/wait-for-expect-1.1.0.tgz",
"integrity": "sha512-vQDokqxyMyknfX3luCDn16bSaRcOyH6gGuUXMIbxBLeTo6nWuEWYqMTT9a+44FmW8c2m6TRWBdNvBBjA1hwEKg==",
"dev": true
},
"walker": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/walker/-/walker-1.0.7.tgz",

View File

@ -30,6 +30,7 @@
"tsc:watch": "tsc -p ./tsconfig.json --noEmit --pretty -w"
},
"jest": {
"setupTestFrameworkScriptFile": "./jestSetup.ts",
"displayName": "test",
"testURL": "http://localhost",
"testPathIgnorePatterns": [
@ -82,7 +83,7 @@
"@types/papaparse": "^4.1.34",
"@types/prop-types": "^15.5.2",
"@types/qs": "^6.5.1",
"@types/react": "^16.4.14",
"@types/react": "^16.8.0",
"@types/react-dnd": "^2.0.36",
"@types/react-dnd-html5-backend": "^2.1.9",
"@types/react-grid-layout": "^0.16.5",
@ -104,6 +105,7 @@
"jsdom": "^9.0.0",
"parcel": "^1.11.0",
"prettier": "^1.14.3",
"react-testing-library": "^5.4.4",
"sass": "^1.15.3",
"ts-jest": "^23.10.3",
"tslib": "^1.9.0",
@ -114,7 +116,8 @@
"typescript": "^3.1.3"
},
"dependencies": {
"@influxdata/influx": "^0.1.3",
"@influxdata/react-custom-scrollbars": "4.3.8",
"@influxdata/influx": "^0.2.2",
"axios": "^0.18.0",
"babel-polyfill": "^6.26.0",
"bignumber.js": "^4.0.2",
@ -138,14 +141,13 @@
"papaparse": "^4.4.0",
"prop-types": "^15.6.1",
"qs": "^6.5.2",
"react": "^15.0.0 || ^16.0.0",
"react": "^16.8.0",
"react-codemirror2": "^4.2.1",
"react-copy-to-clipboard": "^5.0.1",
"react-custom-scrollbars": "^4.1.1",
"react-dimensions": "^1.2.0",
"react-dnd": "^2.6.0",
"react-dnd-html5-backend": "^2.6.0",
"react-dom": "^16.3.1",
"react-dom": "^16.8.0",
"react-grid-layout": "^0.16.6",
"react-markdown": "^4.0.3",
"react-redux": "^5.0.7",

View File

@ -4,13 +4,14 @@ import {connect} from 'react-redux'
import {InjectedRouter} from 'react-router'
// APIs
import {getSetupStatus} from 'src/onboarding/apis'
import {client} from 'src/utils/api'
// Actions
import {notify as notifyAction} from 'src/shared/actions/notifications'
// Components
import {ErrorHandling} from 'src/shared/decorators/errors'
import {SpinnerContainer, TechnoSpinner} from 'src/clockface'
// Utils
import {isOnboardingURL} from 'src/onboarding/utils'
@ -50,12 +51,12 @@ export class Setup extends PureComponent<Props, State> {
return
}
const isSetupAllowed = await getSetupStatus()
const {allowed} = await client.setup.status()
this.setState({
loading: RemoteDataState.Done,
})
if (!isSetupAllowed) {
if (!allowed) {
return
}
@ -63,18 +64,12 @@ export class Setup extends PureComponent<Props, State> {
}
public render() {
if (this.isLoading) {
return <div className="page-spinner" />
} else {
return this.props.children && React.cloneElement(this.props.children)
}
}
private get isLoading(): boolean {
const {loading} = this.state
return (
loading === RemoteDataState.Loading ||
loading === RemoteDataState.NotStarted
<SpinnerContainer loading={loading} spinnerComponent={<TechnoSpinner />}>
{this.props.children && React.cloneElement(this.props.children)}
</SpinnerContainer>
)
}
}

View File

@ -7,6 +7,7 @@ import {client} from 'src/utils/api'
// Components
import {ErrorHandling} from 'src/shared/decorators/errors'
import {SpinnerContainer, TechnoSpinner} from 'src/clockface'
// Actions
import {notify as notifyAction} from 'src/shared/actions/notifications'
@ -55,18 +56,12 @@ export class Signin extends PureComponent<Props, State> {
}
public render() {
if (this.isLoading) {
return <div className="page-spinner" />
}
return this.props.children && React.cloneElement(this.props.children)
}
private get isLoading(): boolean {
const {loading} = this.state
return (
loading === RemoteDataState.Loading ||
loading === RemoteDataState.NotStarted
<SpinnerContainer loading={loading} spinnerComponent={<TechnoSpinner />}>
{this.props.children && React.cloneElement(this.props.children)}
</SpinnerContainer>
)
}

File diff suppressed because it is too large

View File

@ -1,74 +0,0 @@
// tslint:disable
/**
* Influx API Service
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* OpenAPI spec version: 0.1.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
export interface ConfigurationParameters {
apiKey?: string | ((name: string) => string);
username?: string;
password?: string;
accessToken?: string | ((name: string, scopes?: string[]) => string);
basePath?: string;
baseOptions?: any;
}
export class Configuration {
/**
* parameter for apiKey security
* @param name security name
* @memberof Configuration
*/
apiKey?: string | ((name: string) => string);
/**
* parameter for basic security
*
* @type {string}
* @memberof Configuration
*/
username?: string;
/**
* parameter for basic security
*
* @type {string}
* @memberof Configuration
*/
password?: string;
/**
* parameter for oauth2 security
* @param name security name
* @param scopes oauth2 scope
* @memberof Configuration
*/
accessToken?: string | ((name: string, scopes?: string[]) => string);
/**
* override base path
*
* @type {string}
* @memberof Configuration
*/
basePath?: string;
/**
* base options for axios calls
*
* @type {any}
* @memberof Configuration
*/
baseOptions?: any;
constructor(param: ConfigurationParameters = {}) {
this.apiKey = param.apiKey;
this.username = param.username;
this.password = param.password;
this.accessToken = param.accessToken;
this.basePath = param.basePath;
}
}

View File

@ -1 +0,0 @@
declare module 'url';

View File

@ -1,16 +0,0 @@
// tslint:disable
/**
* Influx API Service
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* OpenAPI spec version: 0.1.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
export * from "./api";
export * from "./configuration";

View File

@ -1,4 +1,4 @@
import {Permission, PermissionResource, Authorization} from 'src/api'
import {Permission, PermissionResource, Authorization} from '@influxdata/influx'
const {TypeEnum} = PermissionResource
const {ActionEnum} = Permission

View File

@ -1,7 +1,7 @@
import {authorization} from './data'
// Types
import {Authorization} from 'src/api'
import {Authorization} from '@influxdata/influx'
export const getAuthorizations = async (): Promise<Authorization[]> => {
return Promise.resolve([authorization, {...authorization, id: '1'}])

View File

@ -1,15 +0,0 @@
import {Authorization} from 'src/api'
import {authorizationsAPI} from 'src/utils/api'
import {AxiosResponse} from 'axios'
export const getAuthorizations = async (): Promise<Authorization[]> => {
const {data} = await authorizationsAPI.authorizationsGet()
return data.authorizations
}
export const deleteAuthorization = async (
authID: string
): Promise<AxiosResponse> => {
const response = await authorizationsAPI.authorizationsAuthIDDelete(authID)
return response
}

View File

@ -130,6 +130,10 @@
opacity: 1;
transform: translate(-50%,-50%) scale(1,1);
}
.checked &:after {
opacity: 1;
}
}
/* Themes */

Some files were not shown because too many files have changed in this diff