Merge branch 'master' into flux-staging

pull/12898/head
Nathaniel Cook 2019-03-27 09:42:48 -06:00
commit ca6f2ad0c6
559 changed files with 13659 additions and 5370 deletions


@ -2,6 +2,7 @@ Closes #
_Briefly describe your proposed changes:_
- [ ] CHANGELOG.md updated with a link to the PR (not the Issue)
- [ ] Rebased/mergeable
- [ ] Tests pass
- [ ] http/swagger.yml updated (if modified Go structs or API)

.gitignore

@ -23,6 +23,7 @@ influxd.bolt
# Project tools that you might install with go build.
/editorconfig-checker
/staticcheck
ui/node_modules
ui/npm-error.log

CHANGELOG.md

@ -4,16 +4,38 @@
1. [12663](https://github.com/influxdata/influxdb/pull/12663): Insert flux function near cursor in flux editor
1. [12678](https://github.com/influxdata/influxdb/pull/12678): Enable the use of variables in the Data Explorer and Cell Editor Overlay
1. [12655](https://github.com/influxdata/influxdb/pull/12655): Add a variable control bar to dashboards to select values for variables.
1. [12706](https://github.com/influxdata/influxdb/pull/12706): Add ability to add variable to script from the side menu.
1. [12791](https://github.com/influxdata/influxdb/pull/12791): Use time range for metaqueries in Data Explorer and Cell Editor Overlay
1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View
1. [12843](https://github.com/influxdata/influxdb/pull/12843): Add copy to clipboard button to export overlays
1. [12826](https://github.com/influxdata/influxdb/pull/12826): Enable copying error messages to the clipboard from dashboard cells
1. [12876](https://github.com/influxdata/influxdb/pull/12876): Add the ability to update token's status in Token list
1. [12821](https://github.com/influxdata/influxdb/pull/12821): Allow variables to be re-ordered within control bar on a dashboard.
1. [12888](https://github.com/influxdata/influxdb/pull/12888): Add the ability to delete a template
1. [12901](https://github.com/influxdata/influxdb/pull/12901): Save user preference for variable control bar visibility and default to visible
1. [12910](https://github.com/influxdata/influxdb/pull/12910): Add the ability to clone a template
### Bug Fixes
1. [12684](https://github.com/influxdata/influxdb/pull/12684): Fix mismatch in bucket row and header
1. [12703](https://github.com/influxdata/influxdb/pull/12703): Allows user to edit note on cell
1. [12764](https://github.com/influxdata/influxdb/pull/12764): Fix empty state styles in scrapers in org view
1. [12790](https://github.com/influxdata/influxdb/pull/12790): Fix bucket creation error when changing retention rule types.
1. [12793](https://github.com/influxdata/influxdb/pull/12793): Fix task creation error when switching schedule types.
1. [12805](https://github.com/influxdata/influxdb/pull/12805): Fix hidden horizontal scrollbars in flux raw data view
1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View
### UI Improvements
1. [12782](https://github.com/influxdata/influxdb/pull/12782): Move bucket selection in the query builder to the first card in the list
1. [12850](https://github.com/influxdata/influxdb/pull/12850): Ensure editor is automatically focused in note editor
## v2.0.0-alpha.6 [2019-03-15]
### Release Notes
We have updated the way we do predefined dashboards to [include Templates](https://github.com/influxdata/influxdb/pull/12532) in this release, which will cause existing Organizations to not have a System dashboard created when they build a new Telegraf configuration. In order to get this functionality, remove your existing data and start from scratch.
**NOTE: This will remove all data from your InfluxDB v2.0 instance including timeseries data.**
@ -35,6 +57,7 @@ Once completed, `v2.0.0-alpha.6` can be started.
1. [12532](https://github.com/influxdata/influxdb/pull/12532): Add System template on onboarding
### Bug Fixes
1. [12641](https://github.com/influxdata/influxdb/pull/12641): Stop scrollbars from covering text in flux editor
### UI Improvements


@ -21,16 +21,8 @@ func NewLabelService(s influxdb.LabelService) *LabelService {
}
}
- func newLabelPermission(a influxdb.Action, id influxdb.ID) (*influxdb.Permission, error) {
- p := &influxdb.Permission{
- Action: a,
- Resource: influxdb.Resource{
- Type: influxdb.LabelsResourceType,
- ID: &id,
- },
- }
- return p, p.Valid()
+ func newLabelPermission(a influxdb.Action, orgID, id influxdb.ID) (*influxdb.Permission, error) {
+ return influxdb.NewPermissionAtID(id, a, influxdb.LabelsResourceType, orgID)
}
func newResourcePermission(a influxdb.Action, id influxdb.ID, resourceType influxdb.ResourceType) (*influxdb.Permission, error) {
@ -62,8 +54,8 @@ func authorizeLabelMappingAction(ctx context.Context, action influxdb.Action, id
return nil
}
- func authorizeReadLabel(ctx context.Context, id influxdb.ID) error {
- p, err := newLabelPermission(influxdb.ReadAction, id)
+ func authorizeReadLabel(ctx context.Context, orgID, id influxdb.ID) error {
+ p, err := newLabelPermission(influxdb.ReadAction, orgID, id)
if err != nil {
return err
}
@ -75,8 +67,8 @@ func authorizeReadLabel(ctx context.Context, id influxdb.ID) error {
return nil
}
- func authorizeWriteLabel(ctx context.Context, id influxdb.ID) error {
- p, err := newLabelPermission(influxdb.WriteAction, id)
+ func authorizeWriteLabel(ctx context.Context, orgID, id influxdb.ID) error {
+ p, err := newLabelPermission(influxdb.WriteAction, orgID, id)
if err != nil {
return err
}
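Both authorize helpers now take the label's org ID and delegate to influxdb.NewPermissionAtID, so the resulting permission is scoped to the organization; this is why the expected error strings in the tests below gain an orgs/<orgID> path segment. A minimal sketch using the IDs from the test fixtures (the printed form is inferred from the test expectations, not from this diff):

orgID, _ := influxdb.IDFromString("020f755c3c083000")
labelID, _ := influxdb.IDFromString("0000000000000001")
p, err := influxdb.NewPermissionAtID(labelID, influxdb.ReadAction, influxdb.LabelsResourceType, orgID)
if err != nil {
	panic(err)
}
fmt.Println(p) // read:orgs/020f755c3c083000/labels/0000000000000001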
@ -90,12 +82,12 @@ func authorizeWriteLabel(ctx context.Context, id influxdb.ID) error {
// FindLabelByID checks to see if the authorizer on context has read access to the label id provided.
func (s *LabelService) FindLabelByID(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) {
- if err := authorizeReadLabel(ctx, id); err != nil {
+ l, err := s.s.FindLabelByID(ctx, id)
+ if err != nil {
return nil, err
}
- l, err := s.s.FindLabelByID(ctx, id)
- if err != nil {
+ if err := authorizeReadLabel(ctx, l.OrganizationID, id); err != nil {
return nil, err
}
@ -115,12 +107,15 @@ func (s *LabelService) FindLabels(ctx context.Context, filter influxdb.LabelFilt
// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating
labels := ls[:0]
for _, l := range ls {
- err := authorizeReadLabel(ctx, l.ID)
- if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized {
+ err := authorizeReadLabel(ctx, l.OrganizationID, l.ID)
+ if err != nil &&
+ influxdb.ErrorCode(err) != influxdb.EUnauthorized &&
+ influxdb.ErrorCode(err) != influxdb.EInvalid {
return nil, err
}
- if influxdb.ErrorCode(err) == influxdb.EUnauthorized {
+ if influxdb.ErrorCode(err) == influxdb.EUnauthorized ||
+ influxdb.ErrorCode(err) == influxdb.EInvalid {
continue
}
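The comment above links the SliceTricks "filtering without allocating" pattern: reusing ls[:0] keeps the original backing array and appends only the labels that pass the authorization check. A self-contained sketch of the same idiom with a hypothetical keep predicate:

package main

import "fmt"

func main() {
	ls := []int{1, 2, 3, 4, 5}
	filtered := ls[:0] // shares the backing array with ls; no new allocation
	for _, v := range ls {
		if v%2 == 1 { // hypothetical keep predicate
			filtered = append(filtered, v)
		}
	}
	fmt.Println(filtered) // [1 3 5]
}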
@ -144,7 +139,7 @@ func (s *LabelService) FindResourceLabels(ctx context.Context, filter influxdb.L
labels := ls[:0]
for _, l := range ls {
- err := authorizeReadLabel(ctx, l.ID)
+ err := authorizeReadLabel(ctx, l.OrganizationID, l.ID)
if err != nil && influxdb.ErrorCode(err) != influxdb.EUnauthorized {
return nil, err
}
@ -159,14 +154,9 @@ func (s *LabelService) FindResourceLabels(ctx context.Context, filter influxdb.L
return labels, nil
}
- // CreateLabel checks to see if the authorizer on context has write access to the global labels resource.
+ // CreateLabel checks to see if the authorizer on context has read access to the new label's org.
func (s *LabelService) CreateLabel(ctx context.Context, l *influxdb.Label) error {
- p, err := influxdb.NewGlobalPermission(influxdb.WriteAction, influxdb.LabelsResourceType)
- if err != nil {
- return err
- }
- if err := IsAllowed(ctx, *p); err != nil {
+ if err := authorizeReadOrg(ctx, l.OrganizationID); err != nil {
return err
}
@ -175,7 +165,12 @@ func (s *LabelService) CreateLabel(ctx context.Context, l *influxdb.Label) error
// CreateLabelMapping checks to see if the authorizer on context has write access to the label and the resource contained by the label mapping in creation.
func (s *LabelService) CreateLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error {
- if err := authorizeWriteLabel(ctx, m.LabelID); err != nil {
+ l, err := s.s.FindLabelByID(ctx, m.LabelID)
+ if err != nil {
return err
}
+ if err := authorizeWriteLabel(ctx, l.OrganizationID, m.LabelID); err != nil {
return err
}
@ -188,12 +183,12 @@ func (s *LabelService) CreateLabelMapping(ctx context.Context, m *influxdb.Label
// UpdateLabel checks to see if the authorizer on context has write access to the label provided.
func (s *LabelService) UpdateLabel(ctx context.Context, id influxdb.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) {
- _, err := s.s.FindLabelByID(ctx, id)
+ l, err := s.s.FindLabelByID(ctx, id)
if err != nil {
return nil, err
}
- if err := authorizeWriteLabel(ctx, id); err != nil {
+ if err := authorizeWriteLabel(ctx, l.OrganizationID, id); err != nil {
return nil, err
}
@ -202,12 +197,12 @@ func (s *LabelService) UpdateLabel(ctx context.Context, id influxdb.ID, upd infl
// DeleteLabel checks to see if the authorizer on context has write access to the label provided.
func (s *LabelService) DeleteLabel(ctx context.Context, id influxdb.ID) error {
- _, err := s.s.FindLabelByID(ctx, id)
+ l, err := s.s.FindLabelByID(ctx, id)
if err != nil {
return err
}
- if err := authorizeWriteLabel(ctx, id); err != nil {
+ if err := authorizeWriteLabel(ctx, l.OrganizationID, id); err != nil {
return err
}
@ -216,12 +211,12 @@ func (s *LabelService) DeleteLabel(ctx context.Context, id influxdb.ID) error {
// DeleteLabelMapping checks to see if the authorizer on context has write access to the label and the resource of the label mapping to delete.
func (s *LabelService) DeleteLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error {
- _, err := s.s.FindLabelByID(ctx, m.LabelID)
+ l, err := s.s.FindLabelByID(ctx, m.LabelID)
if err != nil {
return err
}
- if err := authorizeWriteLabel(ctx, m.LabelID); err != nil {
+ if err := authorizeWriteLabel(ctx, l.OrganizationID, m.LabelID); err != nil {
return err
}


@ -14,6 +14,10 @@ import (
influxdbtesting "github.com/influxdata/influxdb/testing"
)
const (
orgOneID = "020f755c3c083000"
)
var labelCmpOptions = cmp.Options{
cmp.Comparer(func(x, y []byte) bool {
return bytes.Equal(x, y)
@ -51,7 +55,8 @@ func TestLabelService_FindLabelByID(t *testing.T) {
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: id,
+ ID: id,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
},
@ -76,7 +81,8 @@ func TestLabelService_FindLabelByID(t *testing.T) {
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: id,
+ ID: id,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
},
@ -93,7 +99,7 @@ func TestLabelService_FindLabelByID(t *testing.T) {
},
wants: wants{
err: &influxdb.Error{
Msg: "read:labels/0000000000000001 is unauthorized",
Msg: "read:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized",
Code: influxdb.EUnauthorized,
},
},
@ -138,13 +144,16 @@ func TestLabelService_FindLabels(t *testing.T) {
FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) {
return []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 2,
+ ID: 2,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
}, nil
},
@ -161,13 +170,16 @@ func TestLabelService_FindLabels(t *testing.T) {
wants: wants{
labels: []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 2,
+ ID: 2,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
},
},
@ -179,13 +191,16 @@ func TestLabelService_FindLabels(t *testing.T) {
FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) {
return []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 2,
+ ID: 2,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
}, nil
},
@ -203,7 +218,8 @@ func TestLabelService_FindLabels(t *testing.T) {
wants: wants{
labels: []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
},
},
@ -215,13 +231,16 @@ func TestLabelService_FindLabels(t *testing.T) {
FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) {
return []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 2,
+ ID: 2,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
}, nil
},
@ -283,12 +302,14 @@ func TestLabelService_UpdateLabel(t *testing.T) {
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
UpdateLabelFn: func(ctx context.Context, id influxdb.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
},
@ -315,12 +336,14 @@ func TestLabelService_UpdateLabel(t *testing.T) {
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
UpdateLabelFn: func(ctx context.Context, id influxdb.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
},
@ -339,7 +362,7 @@ func TestLabelService_UpdateLabel(t *testing.T) {
},
wants: wants{
err: &influxdb.Error{
Msg: "write:labels/0000000000000001 is unauthorized",
Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized",
Code: influxdb.EUnauthorized,
},
},
@ -383,7 +406,8 @@ func TestLabelService_DeleteLabel(t *testing.T) {
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
DeleteLabelFn: func(ctx context.Context, id influxdb.ID) error {
@ -397,8 +421,9 @@ func TestLabelService_DeleteLabel(t *testing.T) {
{
Action: "write",
Resource: influxdb.Resource{
- Type: influxdb.LabelsResourceType,
- ID: influxdbtesting.IDPtr(1),
+ Type: influxdb.LabelsResourceType,
+ ID: influxdbtesting.IDPtr(1),
+ OrgID: influxdbtesting.IDPtr(influxdbtesting.MustIDBase16(orgOneID)),
},
},
},
@ -413,7 +438,8 @@ func TestLabelService_DeleteLabel(t *testing.T) {
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
DeleteLabelFn: func(ctx context.Context, id influxdb.ID) error {
@ -427,15 +453,16 @@ func TestLabelService_DeleteLabel(t *testing.T) {
{
Action: "read",
Resource: influxdb.Resource{
- Type: influxdb.LabelsResourceType,
- ID: influxdbtesting.IDPtr(1),
+ Type: influxdb.LabelsResourceType,
+ ID: influxdbtesting.IDPtr(1),
+ OrgID: influxdbtesting.IDPtr(influxdbtesting.MustIDBase16(orgOneID)),
},
},
},
},
wants: wants{
err: &influxdb.Error{
Msg: "write:labels/0000000000000001 is unauthorized",
Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized",
Code: influxdb.EUnauthorized,
},
},
@ -483,9 +510,10 @@ func TestLabelService_CreateLabel(t *testing.T) {
},
args: args{
permission: influxdb.Permission{
Action: "write",
Action: "read",
Resource: influxdb.Resource{
Type: influxdb.LabelsResourceType,
ID: influxdbtesting.IDPtr(influxdbtesting.MustIDBase16(orgOneID)),
Type: influxdb.OrgsResourceType,
},
},
},
@ -512,7 +540,7 @@ func TestLabelService_CreateLabel(t *testing.T) {
},
wants: wants{
err: &influxdb.Error{
Msg: "write:labels is unauthorized",
Msg: "read:orgs/020f755c3c083000 is unauthorized",
Code: influxdb.EUnauthorized,
},
},
@ -526,7 +554,7 @@ func TestLabelService_CreateLabel(t *testing.T) {
ctx := context.Background()
ctx = influxdbcontext.SetAuthorizer(ctx, &Authorizer{[]influxdb.Permission{tt.args.permission}})
err := s.CreateLabel(ctx, &influxdb.Label{Name: "name"})
err := s.CreateLabel(ctx, &influxdb.Label{Name: "name", OrganizationID: influxdbtesting.MustIDBase16(orgOneID)})
influxdbtesting.ErrorsEqual(t, err, tt.wants.err)
})
}
@ -557,13 +585,16 @@ func TestLabelService_FindResourceLabels(t *testing.T) {
FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) {
return []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 2,
+ ID: 2,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
}, nil
},
@ -594,13 +625,16 @@ func TestLabelService_FindResourceLabels(t *testing.T) {
err: nil,
labels: []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 2,
+ ID: 2,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
},
},
@ -612,13 +646,16 @@ func TestLabelService_FindResourceLabels(t *testing.T) {
FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) {
return []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 2,
+ ID: 2,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
}, nil
},
@ -650,7 +687,8 @@ func TestLabelService_FindResourceLabels(t *testing.T) {
err: nil,
labels: []*influxdb.Label{
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
},
},
@ -662,13 +700,16 @@ func TestLabelService_FindResourceLabels(t *testing.T) {
FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) {
return []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 2,
+ ID: 2,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
}, nil
},
@ -700,13 +741,16 @@ func TestLabelService_FindResourceLabels(t *testing.T) {
FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) {
return []*influxdb.Label{
{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 2,
+ ID: 2,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
{
- ID: 3,
+ ID: 3,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
},
}, nil
},
@ -774,6 +818,12 @@ func TestLabelService_CreateLabelMapping(t *testing.T) {
name: "authorized to create label mapping",
fields: fields{
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
ID: 1,
OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
CreateLabelMappingFn: func(ctx context.Context, lm *influxdb.LabelMapping) error {
return nil
},
@ -809,6 +859,12 @@ func TestLabelService_CreateLabelMapping(t *testing.T) {
name: "unauthorized to create label mapping for resources on which the user does not have write access",
fields: fields{
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
ID: 1,
OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
CreateLabelMappingFn: func(ctx context.Context, lm *influxdb.LabelMapping) error {
return nil
},
@ -840,6 +896,12 @@ func TestLabelService_CreateLabelMapping(t *testing.T) {
name: "unauthorized to create label mapping",
fields: fields{
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctx context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
ID: 1,
OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
CreateLabelMappingFn: func(ctx context.Context, lm *influxdb.LabelMapping) error {
return nil
},
@ -862,7 +924,7 @@ func TestLabelService_CreateLabelMapping(t *testing.T) {
},
wants: wants{
err: &influxdb.Error{
Msg: "write:labels/0000000000000001 is unauthorized",
Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized",
Code: influxdb.EUnauthorized,
},
},
@ -906,7 +968,8 @@ func TestLabelService_DeleteLabelMapping(t *testing.T) {
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
DeleteLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error {
@ -946,7 +1009,8 @@ func TestLabelService_DeleteLabelMapping(t *testing.T) {
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
DeleteLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error {
@ -982,7 +1046,8 @@ func TestLabelService_DeleteLabelMapping(t *testing.T) {
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctc context.Context, id influxdb.ID) (*influxdb.Label, error) {
return &influxdb.Label{
- ID: 1,
+ ID: 1,
+ OrganizationID: influxdbtesting.MustIDBase16(orgOneID),
}, nil
},
DeleteLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error {
@ -1007,7 +1072,7 @@ func TestLabelService_DeleteLabelMapping(t *testing.T) {
},
wants: wants{
err: &influxdb.Error{
Msg: "write:labels/0000000000000001 is unauthorized",
Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized",
Code: influxdb.EUnauthorized,
},
},


@ -140,6 +140,7 @@ var AllResourceTypes = []ResourceType{
LabelsResourceType, // 11
ViewsResourceType, // 12
DocumentsResourceType, // 13
// NOTE: when modifying this list, please update the swagger for components.schemas.Permission resource enum.
}
// OrgResourceTypes is the list of all known resource types that belong to an organization.


@ -221,35 +221,35 @@ func (w *Wildcard) MarshalJSON() ([]byte, error) {
}
func MarshalJSON(v interface{}) ([]byte, error) {
- switch v.(type) {
+ switch v := v.(type) {
case *influxql.BinaryExpr:
- return json.Marshal(&BinaryExpr{v.(*influxql.BinaryExpr)})
+ return json.Marshal(&BinaryExpr{v})
case *influxql.BooleanLiteral:
- return json.Marshal(&BooleanLiteral{v.(*influxql.BooleanLiteral)})
+ return json.Marshal(&BooleanLiteral{v})
case *influxql.Call:
- return json.Marshal(&Call{v.(*influxql.Call)})
+ return json.Marshal(&Call{v})
case *influxql.Distinct:
- return json.Marshal(&Distinct{v.(*influxql.Distinct)})
+ return json.Marshal(&Distinct{v})
case *influxql.DurationLiteral:
- return json.Marshal(&DurationLiteral{v.(*influxql.DurationLiteral)})
+ return json.Marshal(&DurationLiteral{v})
case *influxql.IntegerLiteral:
- return json.Marshal(&IntegerLiteral{v.(*influxql.IntegerLiteral)})
+ return json.Marshal(&IntegerLiteral{v})
case *influxql.NumberLiteral:
- return json.Marshal(&NumberLiteral{v.(*influxql.NumberLiteral)})
+ return json.Marshal(&NumberLiteral{v})
case *influxql.ParenExpr:
- return json.Marshal(&ParenExpr{v.(*influxql.ParenExpr)})
+ return json.Marshal(&ParenExpr{v})
case *influxql.RegexLiteral:
- return json.Marshal(&RegexLiteral{v.(*influxql.RegexLiteral)})
+ return json.Marshal(&RegexLiteral{v})
case *influxql.ListLiteral:
- return json.Marshal(&ListLiteral{v.(*influxql.ListLiteral)})
+ return json.Marshal(&ListLiteral{v})
case *influxql.StringLiteral:
- return json.Marshal(&StringLiteral{v.(*influxql.StringLiteral)})
+ return json.Marshal(&StringLiteral{v})
case *influxql.TimeLiteral:
- return json.Marshal(&TimeLiteral{v.(*influxql.TimeLiteral)})
+ return json.Marshal(&TimeLiteral{v})
case *influxql.VarRef:
- return json.Marshal(&VarRef{v.(*influxql.VarRef)})
+ return json.Marshal(&VarRef{v})
case *influxql.Wildcard:
- return json.Marshal(&Wildcard{v.(*influxql.Wildcard)})
+ return json.Marshal(&Wildcard{v})
default:
t := reflect.TypeOf(v)
return nil, fmt.Errorf("error marshaling query: unknown type %s", t)
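This hunk replaces the bare switch v.(type) plus a per-case type assertion with the bound form switch v := v.(type), where each case receives v already narrowed to its concrete type. A standalone sketch of the idiom:

package main

import "fmt"

func describe(v interface{}) string {
	switch v := v.(type) { // v is rebound to the concrete type in each case
	case int:
		return fmt.Sprintf("int %d", v)
	case string:
		return fmt.Sprintf("string %q", v)
	default:
		return fmt.Sprintf("unhandled type %T", v)
	}
}

func main() {
	fmt.Println(describe(42), describe("hi"))
}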


@ -24,7 +24,7 @@ func Test_MountableRouter_MountsRoutesUnderPrefix(t *testing.T) {
expected := "Hello?! McFly?! Anybody in there?!"
mr.GET("/biff", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(rw, expected)
+ fmt.Fprint(rw, expected)
}))
ts := httptest.NewServer(mr)

cmd/influx/debug.go

@ -0,0 +1,15 @@
package main
import (
"github.com/spf13/cobra"
)
// Debug Command
var debugCmd = &cobra.Command{
Use: "debug",
Short: "commands for debugging InfluxDB",
}
func init() {
debugCmd.AddCommand(initInspectReportTSMCommand()) // Add report-tsm command
}

cmd/influx/inspect.go

@ -0,0 +1,112 @@
package main
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/internal/fs"
"github.com/influxdata/influxdb/tsdb/tsm1"
"github.com/spf13/cobra"
)
// InspectReportTSMFlags holds the flags for the `report-tsm` command.
type InspectReportTSMFlags struct {
pattern string
exact bool
detailed bool
orgID, bucketID string
dataDir string
}
var inspectReportTSMFlags InspectReportTSMFlags
func initInspectReportTSMCommand() *cobra.Command {
inspectReportTSMCommand := &cobra.Command{
Use: "report-tsm",
Short: "Run a TSM report",
Long: `This command will analyze TSM files within a storage engine
directory, reporting the cardinality within the files as well as the time range that
the point data covers.
This command only interrogates the index within each file, and does not read any
block data. To reduce heap requirements, by default report-tsm estimates the overall
cardinality in the file set by using the HLL++ algorithm. Exact cardinalities can
be determined by using the --exact flag.
For each file, the following is output:
* The full filename;
* The series cardinality within the file;
* The number of series first encountered within the file;
* The minimum and maximum timestamp associated with any TSM data in the file; and
* The time taken to load the TSM index and apply any tombstones.
The summary section then outputs the total time range and series cardinality for
the fileset. Depending on the --detailed flag, series cardinality is segmented
in the following ways:
* Series cardinality for each organization;
* Series cardinality for each bucket;
* Series cardinality for each measurement;
* Number of field keys for each measurement; and
* Number of tag values for each tag key.
`,
RunE: inspectReportTSMF,
}
inspectReportTSMCommand.Flags().StringVarP(&inspectReportTSMFlags.pattern, "pattern", "", "", "only process TSM files containing pattern")
inspectReportTSMCommand.Flags().BoolVarP(&inspectReportTSMFlags.exact, "exact", "", false, "calculate an exact cardinality count. Warning: may use significant memory...")
inspectReportTSMCommand.Flags().BoolVarP(&inspectReportTSMFlags.detailed, "detailed", "", false, "emit series cardinality segmented by measurements, tag keys and fields. Warning: may take a while.")
inspectReportTSMCommand.Flags().StringVarP(&inspectReportTSMFlags.orgID, "org-id", "", "", "process only data belonging to organization ID.")
inspectReportTSMCommand.Flags().StringVarP(&inspectReportTSMFlags.bucketID, "bucket-id", "", "", "process only data belonging to bucket ID. Requires org flag to be set.")
dir, err := fs.InfluxDir()
if err != nil {
panic(err)
}
inspectReportTSMCommand.Flags().StringVarP(&inspectReportTSMFlags.dataDir, "data-dir", "", "", fmt.Sprintf("use provided data directory (defaults to %s).", filepath.Join(dir, "engine/data")))
return inspectReportTSMCommand
}
// inspectReportTSMF runs the report-tsm tool.
func inspectReportTSMF(cmd *cobra.Command, args []string) error {
report := &tsm1.Report{
Stderr: os.Stderr,
Stdout: os.Stdout,
Dir: inspectReportTSMFlags.dataDir,
Pattern: inspectReportTSMFlags.pattern,
Detailed: inspectReportTSMFlags.detailed,
Exact: inspectReportTSMFlags.exact,
}
if inspectReportTSMFlags.orgID == "" && inspectReportTSMFlags.bucketID != "" {
return errors.New("org-id must be set for non-empty bucket-id")
}
if inspectReportTSMFlags.orgID != "" {
orgID, err := influxdb.IDFromString(inspectReportTSMFlags.orgID)
if err != nil {
return err
}
report.OrgID = orgID
}
if inspectReportTSMFlags.bucketID != "" {
bucketID, err := influxdb.IDFromString(inspectReportTSMFlags.bucketID)
if err != nil {
return err
}
report.BucketID = bucketID
}
_, err := report.Run(true)
return err
}


@ -65,7 +65,7 @@ func (cmd *Command) Run(args ...string) error {
fs.StringVar(&cmd.retentionFilter, "retention", "", "optional: retention policy")
fs.StringVar(&cmd.shardFilter, "shard", "", "optional: shard id")
fs.Int64Var(&cmd.maxLogFileSize, "max-log-file-size", tsi1.DefaultMaxIndexLogFileSize, "optional: maximum log file size")
fs.Uint64Var(&cmd.maxCacheSize, "max-cache-size", tsm1.DefaultCacheMaxMemorySize, "optional: maximum cache size")
fs.Uint64Var(&cmd.maxCacheSize, "max-cache-size", uint64(tsm1.DefaultCacheMaxMemorySize), "optional: maximum cache size")
fs.IntVar(&cmd.batchSize, "batch-size", defaultBatchSize, "optional: set the size of the batches we write to the index. Setting this can have adverse effects on performance and heap requirements")
fs.BoolVar(&cmd.Verbose, "v", false, "verbose")
fs.SetOutput(cmd.Stdout)
@ -267,7 +267,7 @@ func IndexShard(sfile *tsdb.SeriesFile, indexPath, dataDir, walDir string, maxLo
} else {
log.Info("Building cache from wal files")
- cache := tsm1.NewCache(tsm1.DefaultCacheMaxMemorySize)
+ cache := tsm1.NewCache(uint64(tsm1.DefaultCacheMaxMemorySize))
loader := tsm1.NewCacheLoader(walPaths)
loader.WithLogger(log)
if err := loader.Load(cache); err != nil {


@ -0,0 +1,139 @@
package generate
import (
"context"
"fmt"
"os"
"time"
"github.com/influxdata/influxdb/bolt"
"github.com/influxdata/influxdb/cmd/influxd/internal/profile"
"github.com/influxdata/influxdb/internal/fs"
"github.com/influxdata/influxdb/pkg/data/gen"
"github.com/spf13/cobra"
)
var Command = &cobra.Command{
Use: "generate <schema.toml>",
Short: "Generate time series data sets using TOML schema",
Long: `
This command will generate time series data directly to disk using a schema
defined in a TOML file. Use the help-schema subcommand to produce a TOML
file to STDOUT, which includes documentation describing the available options.
NOTES:
* The influxd server should not be running when using the generate tool
as it modifies the index and TSM data.
* This tool is intended for development and testing purposes only and
SHOULD NOT be run on a production server.
`,
Args: cobra.ExactArgs(1),
RunE: generateFE,
}
var flags struct {
printOnly bool
storageSpec StorageSpec
profile profile.Config
}
func init() {
Command.Flags().SortFlags = false
pfs := Command.PersistentFlags()
pfs.SortFlags = false
pfs.BoolVar(&flags.printOnly, "print", false, "Print data spec and exit")
flags.storageSpec.AddFlags(Command, pfs)
pfs.StringVar(&flags.profile.CPU, "cpuprofile", "", "Collect a CPU profile")
pfs.StringVar(&flags.profile.Memory, "memprofile", "", "Collect a memory profile")
}
func generateFE(_ *cobra.Command, args []string) error {
storagePlan, err := flags.storageSpec.Plan()
if err != nil {
return err
}
storagePlan.PrintPlan(os.Stdout)
spec, err := gen.NewSpecFromPath(args[0])
if err != nil {
return err
}
if err = assignOrgBucket(spec); err != nil {
return err
}
if flags.printOnly {
return nil
}
return exec(storagePlan, spec)
}
func assignOrgBucket(spec *gen.Spec) error {
boltFile, err := fs.BoltFile()
if err != nil {
return err
}
c := bolt.NewClient()
c.Path = boltFile
if err := c.Open(context.Background()); err != nil {
return err
}
org, err := c.FindOrganizationByName(context.Background(), flags.storageSpec.Organization)
if err != nil {
return err
}
bucket, err := c.FindBucketByName(context.Background(), org.ID, flags.storageSpec.Bucket)
if err != nil {
return err
}
c.Close()
spec.OrgID = org.ID
spec.BucketID = bucket.ID
return nil
}
func exec(storagePlan *StoragePlan, spec *gen.Spec) error {
tr := gen.TimeRange{
Start: storagePlan.StartTime,
End: storagePlan.EndTime,
}
sg := gen.NewSeriesGeneratorFromSpec(spec, tr)
stop := flags.profile.Start()
defer stop()
var files []string
start := time.Now().UTC()
defer func() {
elapsed := time.Since(start)
fmt.Println()
fmt.Println("Generated:")
for _, f := range files {
fmt.Println(f)
}
fmt.Println()
fmt.Printf("Total time: %0.1f seconds\n", elapsed.Seconds())
}()
path, err := fs.InfluxDir()
if err != nil {
return err
}
g := &Generator{Clean: storagePlan.Clean}
files, err = g.Run(context.Background(), path, sg)
return err
}


@ -0,0 +1,187 @@
package generate
import (
"fmt"
"github.com/spf13/cobra"
)
var helpSchemaCommand = &cobra.Command{
Use: "help-schema",
Short: "Print a documented TOML schema to STDOUT",
Run: func(cmd *cobra.Command, args []string) {
fmt.Print(documentedSchema)
},
}
func init() {
Command.AddCommand(helpSchemaCommand)
}
const documentedSchema = `title = "Documented schema"
# limit the maximum number of series generated across all measurements
#
# series-limit: integer, optional (default: unlimited)
[[measurements]]
# name of measurement
#
# NOTE:
# Multiple definitions of the same measurement name are allowed and
# will be merged together.
name = "cpu"
# sample: float; where 0 < sample <= 1.0 (default: 0.5)
# sample a subset of the tag set
#
# sample 25% of the tags
#
sample = 0.25
# Keys for defining a tag
#
# name: string, required
# Name of tag
#
# source: array<string> or object
#
# A literal array of string values defines the tag values.
#
# An object defines more complex generators. The type key determines the
# type of generator.
#
# source types:
#
# type: "sequence"
# generate a sequence of tag values
#
# format: string
# a format string for the values (default: "value%s")
# start: int (default: 0)
# beginning value
# count: int, required
# ending value
#
# type: "file"
# generate a sequence of tag values from a file source.
# The data in the file is sorted, deduplicated and verified to be valid UTF-8
#
# path: string
# absolute path or relative path to current toml file
tags = [
# example sequence tag source. The range of values is automatically
# prefixed with 0s
# to ensure correct sort behavior.
{ name = "host", source = { type = "sequence", format = "host-%s", start = 0, count = 5 } },
# tags can also be sourced from a file. The path is relative to the
# schema.toml.
# Each value must be on a new line. The file is also sorted, deduplicated
# and UTF-8 validated.
{ name = "rack", source = { type = "file", path = "files/racks.txt" } },
# Example string array source, which is also deduplicated and sorted
{ name = "region", source = ["us-west-01","us-west-02","us-east"] },
]
# Keys for defining a field
#
# name: string, required
# Name of field
#
# count: int, required
# The maximum number of values to generate. When multiple fields
# have the same count and time-spec, they will share timestamps.
#
# A time-spec can be either time-precision or time-interval, which
# determines how timestamps are generated and may also influence
# the time range and number of values generated.
#
# time-precision: string [ns, us, ms, s, m, h] (default: ms)
# Specifies the precision (rounding) for generated timestamps.
#
# If the precision results in fewer than "count" intervals for the
# given time range the number of values will be reduced.
#
# Example:
# count = 1000, start = 0s, end = 100s, time-precision = s
# 100 values will be generated at [0s, 1s, 2s, ..., 99s]
#
# If the precision results in greater than "count" intervals for the
# given time range, the interval will be rounded to the nearest multiple of
# time-precision.
#
# Example:
# count = 10, start = 0s, end = 100s, time-precision = s
# 10 values will be generated at [0s, 10s, 20s, ..., 90s]
#
# time-interval: Go duration string (eg 90s, 1h30m)
# Specifies the delta between generated timestamps.
#
# If the delta results in fewer than "count" intervals for the
# given time range the number of values will be reduced.
#
# Example:
# count = 100, start = 0s, end = 100s, time-interval = 10s
# 10 values will be generated at [0s, 10s, 20s, ..., 90s]
#
# If the delta results in greater than "count" intervals for the
# given time range, the start-time will be adjusted to ensure "count" values.
#
# Example:
# count = 20, start = 0s, end = 1000s, time-interval = 10s
# 20 values will be generated at [800s, 810s, ..., 900s, ..., 990s]
#
# source: int, float, boolean, string, array or object
#
# A literal int, float, boolean or string will produce
# a constant value of the same data type.
#
# A literal array of homogeneous values will generate a repeating
# sequence.
#
# An object defines more complex generators. The type key determines the
# type of generator.
#
# source types:
#
# type: "rand<float>"
# generate random float values
# seed: seed to random number generator (default: 0)
# min: minimum value (default: 0.0)
# max: maximum value (default: 1.0)
#
# type: "zipf<integer>"
# generate random integer values using a Zipf distribution
# The generator generates values k in [0, imax] such that P(k)
# is proportional to (v + k) ** (-s). Requirements: s > 1 and v >= 1.
# See https://golang.org/pkg/math/rand/#NewZipf for more information.
#
# seed: seed to random number generator (default: 0)
# s: float > 1 (required)
# v: float >= 1 (required)
# imax: integer (required)
#
fields = [
# Example constant float
{ name = "system", count = 5000, source = 2.5 },
# Example random floats
{ name = "user", count = 5000, source = { type = "rand<float>", seed = 10, min = 0.0, max = 1.0 } },
]
# Multiple measurements may be defined.
[[measurements]]
name = "mem"
tags = [
{ name = "host", source = { type = "sequence", format = "host-%s", start = 0, count = 5 } },
{ name = "region", source = ["us-west-01","us-west-02","us-east"] },
]
fields = [
# An example of a sequence of integer values
{ name = "free", count = 100, source = [10,15,20,25,30,35,30], time-precision = "ms" },
{ name = "low_mem", count = 100, source = [false,true,true], time-precision = "ms" },
]
`


@ -0,0 +1,85 @@
package generate
import (
"os"
"strings"
"text/template"
"github.com/influxdata/influxdb/pkg/data/gen"
"github.com/spf13/cobra"
)
var simpleCommand = &cobra.Command{
Use: "simple",
Short: "Generate simple data sets using only CLI flags",
RunE: simpleGenerateFE,
}
var simpleFlags struct {
schemaSpec SchemaSpec
}
func init() {
simpleCommand.PersistentFlags().SortFlags = false
simpleCommand.Flags().SortFlags = false
simpleFlags.schemaSpec.AddFlags(simpleCommand, simpleCommand.Flags())
Command.AddCommand(simpleCommand)
}
func simpleGenerateFE(_ *cobra.Command, _ []string) error {
storagePlan, err := flags.storageSpec.Plan()
if err != nil {
return err
}
storagePlan.PrintPlan(os.Stdout)
schemaPlan, err := simpleFlags.schemaSpec.Plan(storagePlan)
if err != nil {
return err
}
schemaPlan.PrintPlan(os.Stdout)
spec := planToSpec(schemaPlan)
if err = assignOrgBucket(spec); err != nil {
return err
}
if flags.printOnly {
return nil
}
return exec(storagePlan, spec)
}
var (
tomlSchema = template.Must(template.New("schema").Parse(`
title = "CLI schema"
[[measurements]]
name = "m0"
sample = 1.0
tags = [
{{- range $i, $e := .Tags }}
{ name = "tag{{$i}}", source = { type = "sequence", format = "value%s", start = 0, count = {{$e}} } },{{ end }}
]
fields = [
{ name = "v0", count = {{ .PointsPerSeries }}, source = 1.0 },
]`))
)
func planToSpec(p *SchemaPlan) *gen.Spec {
var sb strings.Builder
if err := tomlSchema.Execute(&sb, p); err != nil {
panic(err)
}
spec, err := gen.NewSpecFromToml(sb.String())
if err != nil {
panic(err)
}
return spec
}


@ -0,0 +1,161 @@
package generate
import (
"context"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"github.com/influxdata/influxdb/cmd/influxd/generate/internal/shard"
"github.com/influxdata/influxdb/kit/errors"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/data/gen"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/storage"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/tsi1"
"github.com/influxdata/influxdb/tsdb/tsm1"
)
type Generator struct {
sfile *tsdb.SeriesFile
// Clean specifies whether to clean any of the data related files
Clean CleanLevel
}
func (g *Generator) Run(ctx context.Context, path string, gen gen.SeriesGenerator) ([]string, error) {
path = filepath.Join(path, "engine")
config := storage.NewConfig()
switch g.Clean {
case CleanLevelTSM:
if err := os.RemoveAll(path); err != nil {
return nil, err
}
case CleanLevelAll:
if err := os.RemoveAll(path); err != nil {
return nil, err
}
}
g.sfile = tsdb.NewSeriesFile(config.GetSeriesFilePath(path))
if err := g.sfile.Open(ctx); err != nil {
return nil, err
}
defer g.sfile.Close()
g.sfile.DisableCompactions()
ti := tsi1.NewIndex(g.sfile, config.Index, tsi1.WithPath(config.GetIndexPath(path)))
if err := ti.Open(ctx); err != nil {
return nil, fmt.Errorf("error opening TSI1 index: %s", err.Error())
}
files, err := g.writeShard(ti, gen, config.GetEnginePath(path))
if err != nil {
return nil, fmt.Errorf("error writing data: %s", err.Error())
}
ti.Compact()
ti.Wait()
if err := ti.Close(); err != nil {
return nil, fmt.Errorf("error compacting TSI1 index: %s", err.Error())
}
var (
wg sync.WaitGroup
errs errors.List
)
parts := g.sfile.Partitions()
wg.Add(len(parts))
ch := make(chan error, len(parts))
limit := limiter.NewFixed(runtime.NumCPU())
for i := range parts {
go func(n int) {
limit.Take()
defer func() {
wg.Done()
limit.Release()
}()
p := parts[n]
c := tsdb.NewSeriesPartitionCompactor()
if _, err := c.Compact(p); err != nil {
ch <- fmt.Errorf("error compacting series partition %d: %s", n, err.Error())
}
}(i)
}
wg.Wait()
close(ch)
for e := range ch {
errs.Append(e)
}
if err := errs.Err(); err != nil {
return nil, err
}
return files, nil
}
// seriesBatchSize specifies the number of series keys passed to the index.
const seriesBatchSize = 1000
func (g *Generator) writeShard(idx *tsi1.Index, sg gen.SeriesGenerator, path string) ([]string, error) {
if err := os.MkdirAll(path, 0777); err != nil {
return nil, err
}
sw, err := shard.NewWriter(path, shard.AutoNumber())
if err != nil {
return nil, err
}
defer sw.Close()
coll := &tsdb.SeriesCollection{
Keys: make([][]byte, 0, seriesBatchSize),
Names: make([][]byte, 0, seriesBatchSize),
Tags: make([]models.Tags, 0, seriesBatchSize),
Types: make([]models.FieldType, 0, seriesBatchSize),
}
for sg.Next() {
seriesKey := sg.Key()
coll.Keys = append(coll.Keys, seriesKey)
coll.Names = append(coll.Names, sg.ID())
coll.Tags = append(coll.Tags, sg.Tags())
coll.Types = append(coll.Types, sg.FieldType())
if coll.Length() == seriesBatchSize {
if err := idx.CreateSeriesListIfNotExists(coll); err != nil {
return nil, err
}
coll.Truncate(0)
}
vg := sg.TimeValuesGenerator()
key := tsm1.SeriesFieldKeyBytes(string(seriesKey), string(sg.Field()))
for vg.Next() {
sw.WriteV(key, vg.Values())
}
if err := sw.Err(); err != nil {
return nil, err
}
}
if coll.Length() > 0 {
if err := idx.CreateSeriesListIfNotExists(coll); err != nil {
return nil, err
}
}
return sw.Files(), nil
}


@ -0,0 +1,191 @@
package shard
import (
"fmt"
"os"
"path/filepath"
"github.com/influxdata/influxdb/pkg/data/gen"
"github.com/influxdata/influxdb/tsdb/tsm1"
)
const (
maxTSMFileSize = uint32(2048 * 1024 * 1024) // 2GB
)
type Writer struct {
tw tsm1.TSMWriter
path string
ext string
files []string
gen, seq int
err error
buf []byte
auto bool
}
type option func(w *Writer)
// Generation specifies the generation number of the tsm files.
func Generation(gen int) option {
return func(w *Writer) {
w.gen = gen
}
}
// Sequence specifies the starting sequence number of the tsm files.
func Sequence(seq int) option {
return func(w *Writer) {
w.seq = seq
}
}
// Temporary configures the writer to create tsm.tmp files.
func Temporary() option {
return func(w *Writer) {
w.ext = tsm1.TSMFileExtension + "." + tsm1.TmpTSMFileExtension
}
}
// AutoNumber will read the existing TSM file names and use generation + 1
func AutoNumber() option {
return func(w *Writer) {
w.auto = true
}
}
func NewWriter(path string, opts ...option) (*Writer, error) {
w := &Writer{path: path, gen: 1, seq: 1, ext: tsm1.TSMFileExtension}
for _, opt := range opts {
opt(w)
}
if w.auto {
err := w.readExisting()
if err != nil {
return nil, err
}
}
w.nextTSM()
if w.err != nil {
return nil, w.err
}
return w, nil
}
func (w *Writer) Write(key []byte, values tsm1.Values) {
if w.err != nil {
return
}
if w.tw.Size() > maxTSMFileSize {
w.closeTSM()
w.nextTSM()
}
if err := w.tw.Write(key, values); err != nil {
if err == tsm1.ErrMaxBlocksExceeded {
w.closeTSM()
w.nextTSM()
} else {
w.err = err
}
}
}
func (w *Writer) WriteV(key []byte, values gen.Values) {
if w.err != nil {
return
}
if w.tw.Size() > maxTSMFileSize {
w.closeTSM()
w.nextTSM()
}
minT, maxT := values.MinTime(), values.MaxTime()
var err error
if w.buf, err = values.Encode(w.buf); err != nil {
w.err = err
return
}
if err := w.tw.WriteBlock(key, minT, maxT, w.buf); err != nil {
if err == tsm1.ErrMaxBlocksExceeded {
w.closeTSM()
w.nextTSM()
} else {
w.err = err
}
}
}
// Close closes the writer.
func (w *Writer) Close() {
if w.tw != nil {
w.closeTSM()
}
}
func (w *Writer) Err() error { return w.err }
// Files returns the full paths of all the files written by the Writer.
func (w *Writer) Files() []string { return w.files }
func (w *Writer) nextTSM() {
fileName := filepath.Join(w.path, fmt.Sprintf("%s.%s", tsm1.DefaultFormatFileName(w.gen, w.seq), w.ext))
w.files = append(w.files, fileName)
w.seq++
if fi, _ := os.Stat(fileName); fi != nil {
w.err = fmt.Errorf("attempted to overwrite an existing TSM file: %q", fileName)
return
}
fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
w.err = err
return
}
// Create the writer for the new TSM file.
w.tw, err = tsm1.NewTSMWriter(fd)
if err != nil {
w.err = err
return
}
}
func (w *Writer) closeTSM() {
if err := w.tw.WriteIndex(); err != nil && err != tsm1.ErrNoValues {
w.err = err
}
if err := w.tw.Close(); err != nil && w.err == nil {
w.err = err
}
w.tw = nil
}
func (w *Writer) readExisting() error {
files, err := filepath.Glob(filepath.Join(w.path, fmt.Sprintf("*.%s", tsm1.TSMFileExtension)))
if err != nil {
return err
}
for _, f := range files {
generation, _, err := tsm1.DefaultParseFileName(f)
if err != nil {
return err
}
if generation >= w.gen {
w.gen = generation + 1
}
}
return nil
}
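Together, Generation, Sequence, Temporary and AutoNumber form a small functional-options API for the writer. A hypothetical usage sketch from within cmd/influxd/generate (the series key and values are assumptions for illustration, not taken from this commit):

key := tsm1.SeriesFieldKeyBytes("cpu,host=host-0", "v0")
values := tsm1.Values{tsm1.NewValue(0, 1.0)} // assumes tsm1.NewValue
w, err := shard.NewWriter(path, shard.Generation(2), shard.Temporary())
if err != nil {
	return err
}
defer w.Close()
w.Write(key, values) // rolls to a new TSM file at the 2GB cap or block limit
if err := w.Err(); err != nil {
	return err
}
fmt.Println(w.Files())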


@ -0,0 +1,60 @@
package generate
import (
"fmt"
"io"
"strings"
"text/tabwriter"
"time"
)
type StoragePlan struct {
Organization string
Bucket string
StartTime time.Time
EndTime time.Time
Clean CleanLevel
Path string
}
func (p *StoragePlan) String() string {
sb := new(strings.Builder)
p.PrintPlan(sb)
return sb.String()
}
func (p *StoragePlan) PrintPlan(w io.Writer) {
tw := tabwriter.NewWriter(w, 25, 4, 2, ' ', 0)
fmt.Fprintf(tw, "Organization\t%s\n", p.Organization)
fmt.Fprintf(tw, "Bucket\t%s\n", p.Bucket)
fmt.Fprintf(tw, "Start time\t%s\n", p.StartTime)
fmt.Fprintf(tw, "End time\t%s\n", p.EndTime)
fmt.Fprintf(tw, "Clean data\t%s\n", p.Clean)
tw.Flush()
}
// TimeSpan returns the total duration spanned by the data set.
func (p *StoragePlan) TimeSpan() time.Duration {
return p.EndTime.Sub(p.StartTime)
}
type SchemaPlan struct {
StoragePlan *StoragePlan
Tags TagCardinalities
PointsPerSeries int
}
func (p *SchemaPlan) String() string {
sb := new(strings.Builder)
p.PrintPlan(sb)
return sb.String()
}
func (p *SchemaPlan) PrintPlan(w io.Writer) {
tw := tabwriter.NewWriter(w, 25, 4, 2, ' ', 0)
fmt.Fprintf(tw, "Tag cardinalities\t%s\n", p.Tags)
fmt.Fprintf(tw, "Points per series\t%d\n", p.PointsPerSeries)
fmt.Fprintf(tw, "Total points\t%d\n", p.Tags.Cardinality()*p.PointsPerSeries)
fmt.Fprintf(tw, "Total series\t%d\n", p.Tags.Cardinality())
_ = tw.Flush()
}


@ -0,0 +1,153 @@
package generate
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
type CleanLevel int
const (
// CleanLevelNone will not remove any data files.
CleanLevelNone CleanLevel = iota
// CleanLevelTSM will only remove TSM data files.
CleanLevelTSM
// CleanLevelAll will remove all TSM and index data files.
CleanLevelAll
)
func (i CleanLevel) String() string {
switch i {
case CleanLevelNone:
return "none"
case CleanLevelTSM:
return "tsm"
case CleanLevelAll:
return "all"
default:
return strconv.FormatInt(int64(i), 10)
}
}
func (i *CleanLevel) Set(v string) error {
switch v {
case "none":
*i = CleanLevelNone
case "tsm":
*i = CleanLevelTSM
case "all":
*i = CleanLevelAll
default:
return fmt.Errorf("invalid clean level %q, must be none, tsm or all", v)
}
return nil
}
func (i CleanLevel) Type() string {
return "clean-level"
}
type StorageSpec struct {
Organization string
Bucket string
StartTime string
EndTime string
Clean CleanLevel
}
func (a *StorageSpec) AddFlags(cmd *cobra.Command, fs *pflag.FlagSet) {
fs.StringVar(&a.Organization, "org", "", "Name of organization")
cmd.MarkFlagRequired("org")
fs.StringVar(&a.Bucket, "bucket", "", "Name of bucket")
cmd.MarkFlagRequired("bucket")
start := time.Now().UTC().AddDate(0, 0, -7).Truncate(24 * time.Hour)
fs.StringVar(&a.StartTime, "start-time", start.Format(time.RFC3339), "Start time")
fs.StringVar(&a.EndTime, "end-time", start.AddDate(0, 0, 7).Format(time.RFC3339), "End time")
fs.Var(&a.Clean, "clean", "Clean time series data files (none, tsm or all)")
}
func (a *StorageSpec) Plan() (*StoragePlan, error) {
plan := &StoragePlan{
Organization: a.Organization,
Bucket: a.Bucket,
Clean: a.Clean,
}
if a.StartTime != "" {
if t, err := time.Parse(time.RFC3339, a.StartTime); err != nil {
return nil, err
} else {
plan.StartTime = t.UTC()
}
}
if a.EndTime != "" {
if t, err := time.Parse(time.RFC3339, a.EndTime); err != nil {
return nil, err
} else {
plan.EndTime = t.UTC()
}
}
return plan, nil
}
type TagCardinalities []int
func (t TagCardinalities) String() string {
s := make([]string, 0, len(t))
for i := 0; i < len(t); i++ {
s = append(s, strconv.Itoa(t[i]))
}
return fmt.Sprintf("[%s]", strings.Join(s, ","))
}
func (t TagCardinalities) Cardinality() int {
n := 1
for i := range t {
n *= t[i]
}
return n
}
func (t *TagCardinalities) Set(tags string) error {
*t = (*t)[:0]
for _, s := range strings.Split(tags, ",") {
v, err := strconv.Atoi(s)
if err != nil {
return fmt.Errorf("cannot parse tag cardinality: %s", s)
}
*t = append(*t, v)
}
return nil
}
func (t *TagCardinalities) Type() string {
return "tags"
}
type SchemaSpec struct {
Tags TagCardinalities
PointsPerSeries int
}
func (s *SchemaSpec) AddFlags(cmd *cobra.Command, fs *pflag.FlagSet) {
s.Tags = []int{10, 10, 10}
fs.Var(&s.Tags, "t", "Tag cardinality")
fs.IntVar(&s.PointsPerSeries, "p", 100, "Points per series")
}
func (s *SchemaSpec) Plan(sp *StoragePlan) (*SchemaPlan, error) {
return &SchemaPlan{
StoragePlan: sp,
Tags: s.Tags,
PointsPerSeries: s.PointsPerSeries,
}, nil
}
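CleanLevel and TagCardinalities each provide String, Set and Type, which is precisely spf13/pflag's Value interface; that is what allows fs.Var(&a.Clean, ...) and fs.Var(&s.Tags, ...) above. A compile-time assertion sketch (not part of the original file):

import "github.com/spf13/pflag"

var (
	_ pflag.Value = (*CleanLevel)(nil)
	_ pflag.Value = (*TagCardinalities)(nil)
)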


@ -0,0 +1,116 @@
package inspect
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/internal/fs"
"github.com/influxdata/influxdb/tsdb/tsm1"
"github.com/spf13/cobra"
)
// NewCommand creates the inspect command and its subcommands.
func NewCommand() *cobra.Command {
base := &cobra.Command{
Use: "inspect",
Short: "Commands for inspecting on-disk database data",
}
reportTSMCommand := &cobra.Command{
Use: "report-tsm",
Short: "Run TSM report",
Long: `
This command will analyze TSM files within a storage engine directory, reporting
the cardinality within the files as well as the time range that the point data
covers.
This command only interrogates the index within each file, and does not read any
block data. To reduce heap requirements, by default report-tsm estimates the
overall cardinality in the file set by using the HLL++ algorithm. Exact
cardinalities can be determined by using the --exact flag.
For each file, the following is output:
* The full filename;
* The series cardinality within the file;
* The number of series first encountered within the file;
* The min and max timestamp associated with TSM data in the file; and
* The time taken to load the TSM index and apply any tombstones.
The summary section then outputs the total time range and series cardinality for
the fileset. Depending on the --detailed flag, series cardinality is segmented
in the following ways:
* Series cardinality for each organization;
* Series cardinality for each bucket;
* Series cardinality for each measurement;
* Number of field keys for each measurement; and
* Number of tag values for each tag key.`,
RunE: inspectReportTSMF,
}
reportTSMCommand.Flags().StringVarP(&reportTSMFlags.pattern, "pattern", "", "", "only process TSM files containing pattern")
reportTSMCommand.Flags().BoolVarP(&reportTSMFlags.exact, "exact", "", false, "calculate an exact cardinality count. Warning: may use significant memory...")
reportTSMCommand.Flags().BoolVarP(&reportTSMFlags.detailed, "detailed", "", false, "emit series cardinality segmented by measurements, tag keys and fields. Warning: may take a while.")
reportTSMCommand.Flags().StringVarP(&reportTSMFlags.orgID, "org-id", "", "", "process only data belonging to organization ID.")
reportTSMCommand.Flags().StringVarP(&reportTSMFlags.bucketID, "bucket-id", "", "", "process only data belonging to bucket ID. Requires org flag to be set.")
dir, err := fs.InfluxDir()
if err != nil {
panic(err)
}
dir = filepath.Join(dir, "engine/data")
reportTSMCommand.Flags().StringVarP(&reportTSMFlags.dataDir, "data-dir", "", dir, fmt.Sprintf("use provided data directory (defaults to %s).", dir))
base.AddCommand(reportTSMCommand)
return base
}
// reportTSMFlags holds the flags for the `report-tsm` command.
var reportTSMFlags = struct {
pattern string
exact bool
detailed bool
orgID, bucketID string
dataDir string
}{}
// inspectReportTSMF runs the report-tsm tool.
func inspectReportTSMF(cmd *cobra.Command, args []string) error {
report := &tsm1.Report{
Stderr: os.Stderr,
Stdout: os.Stdout,
Dir: reportTSMFlags.dataDir,
Pattern: reportTSMFlags.pattern,
Detailed: reportTSMFlags.detailed,
Exact: reportTSMFlags.exact,
}
if reportTSMFlags.orgID == "" && reportTSMFlags.bucketID != "" {
return errors.New("org-id must be set for non-empty bucket-id")
}
if reportTSMFlags.orgID != "" {
orgID, err := influxdb.IDFromString(reportTSMFlags.orgID)
if err != nil {
return err
}
report.OrgID = orgID
}
if reportTSMFlags.bucketID != "" {
bucketID, err := influxdb.IDFromString(reportTSMFlags.bucketID)
if err != nil {
return err
}
report.BucketID = bucketID
}
_, err := report.Run(true)
return err
}


@ -0,0 +1,64 @@
package profile
import (
"log"
"os"
"runtime"
"runtime/pprof"
)
type Config struct {
// CPU, if set, specifies the file name of the CPU profile to capture
CPU string
// Memory, if set, specifies the file name of the memory profile to capture
Memory string
}
func (c *Config) noProfiles() bool {
return c.CPU == "" && c.Memory == ""
}
// Start starts a CPU and / or Memory profile if configured and returns a
// function that should be called to terminate the profiles.
func (c *Config) Start() func() {
if c.noProfiles() {
return func() {}
}
var prof struct {
cpu *os.File
mem *os.File
}
if c.CPU != "" {
f, err := os.Create(c.CPU)
if err != nil {
log.Fatalf("cpuprofile: %v", err)
}
prof.cpu = f
_ = pprof.StartCPUProfile(prof.cpu)
}
if c.Memory != "" {
f, err := os.Create(c.Memory)
if err != nil {
log.Fatalf("memprofile: %v", err)
}
prof.mem = f
runtime.MemProfileRate = 4096
}
return func() {
if prof.cpu != nil {
pprof.StopCPUProfile()
_ = prof.cpu.Close()
prof.cpu = nil
}
if prof.mem != nil {
_ = pprof.Lookup("heap").WriteTo(prof.mem, 0)
_ = prof.mem.Close()
prof.mem = nil
}
}
}
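Because Start always returns a callable stop function (a no-op when no profiles are configured), call sites stay uniform; generate.go above uses it as stop := flags.profile.Start(); defer stop(). A minimal standalone sketch (file names are assumptions, and the import ignores Go's internal-package visibility for illustration):

package main

import "github.com/influxdata/influxdb/cmd/influxd/internal/profile"

func main() {
	cfg := profile.Config{CPU: "cpu.pprof", Memory: "mem.pprof"}
	stop := cfg.Start()
	defer stop() // stops the CPU profile and writes the heap profile

	// ... workload to profile ...
}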


@ -16,7 +16,7 @@ import (
"github.com/influxdata/flux/execute"
"github.com/influxdata/influxdb/kit/signals"
"github.com/influxdata/influxdb/telemetry"
"github.com/opentracing/opentracing-go"
opentracing "github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/spf13/cobra"
jaegerconfig "github.com/uber/jaeger-client-go/config"
@ -209,9 +209,10 @@ type Launcher struct {
protosPath string
secretStore string
boltClient *bolt.Client
kvService *kv.Service
engine *storage.Engine
boltClient *bolt.Client
kvService *kv.Service
engine *storage.Engine
StorageConfig storage.Config
queryController *pcontrol.Controller
@ -236,9 +237,10 @@ type Launcher struct {
// NewLauncher returns a new instance of Launcher connected to standard in/out/err.
func NewLauncher() *Launcher {
return &Launcher{
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
StorageConfig: storage.NewConfig(),
}
}
@ -490,7 +492,7 @@ func (m *Launcher) run(ctx context.Context) (err error) {
var pointsWriter storage.PointsWriter
{
m.engine = storage.NewEngine(m.enginePath, storage.NewConfig(), storage.WithRetentionEnforcer(bucketSvc))
m.engine = storage.NewEngine(m.enginePath, m.StorageConfig, storage.WithRetentionEnforcer(bucketSvc))
m.engine.WithLogger(m.logger)
if err := m.engine.Open(ctx); err != nil {

View File

@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
nethttp "net/http"
@ -13,7 +12,6 @@ import (
"strings"
"testing"
"github.com/google/go-cmp/cmp"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/cmd/influxd/launcher"
"github.com/influxdata/influxdb/http"
@ -147,110 +145,6 @@ func TestLauncher_SetupWithUsers(t *testing.T) {
}
}
func TestLauncher_WriteAndQuery(t *testing.T) {
l := RunLauncherOrFail(t, ctx)
l.SetupOrFail(t)
defer l.ShutdownOrFail(t, ctx)
// Execute single write against the server.
resp, err := nethttp.DefaultClient.Do(l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, l.Bucket.ID), `m,k=v f=100i 946684800000000000`))
if err != nil {
t.Fatal(err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
if err := resp.Body.Close(); err != nil {
t.Fatal(err)
}
if resp.StatusCode != nethttp.StatusNoContent {
t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header)
}
// Query server to ensure write persists.
qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)`
exp := `,result,table,_start,_stop,_time,_value,_measurement,k,_field` + "\r\n" +
`,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,m,v,f` + "\r\n\r\n"
buf, err := http.SimpleQuery(l.URL(), qs, l.Org.Name, l.Auth.Token)
if err != nil {
t.Fatalf("unexpected error querying server: %v", err)
}
if diff := cmp.Diff(string(buf), exp); diff != "" {
t.Fatal(diff)
}
}
func TestLauncher_BucketDelete(t *testing.T) {
l := RunLauncherOrFail(t, ctx)
l.SetupOrFail(t)
defer l.ShutdownOrFail(t, ctx)
// Execute single write against the server.
resp, err := nethttp.DefaultClient.Do(l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, l.Bucket.ID), `m,k=v f=100i 946684800000000000`))
if err != nil {
t.Fatal(err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
if err := resp.Body.Close(); err != nil {
t.Fatal(err)
}
if resp.StatusCode != nethttp.StatusNoContent {
t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header)
}
// Query server to ensure write persists.
qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)`
exp := `,result,table,_start,_stop,_time,_value,_measurement,k,_field` + "\r\n" +
`,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,m,v,f` + "\r\n\r\n"
buf, err := http.SimpleQuery(l.URL(), qs, l.Org.Name, l.Auth.Token)
if err != nil {
t.Fatalf("unexpected error querying server: %v", err)
}
if diff := cmp.Diff(string(buf), exp); diff != "" {
t.Fatal(diff)
}
// Verify the cardinality in the engine.
engine := l.Launcher.Engine()
if got, exp := engine.SeriesCardinality(), int64(1); got != exp {
t.Fatalf("got %d, exp %d", got, exp)
}
// Delete the bucket.
if resp, err = nethttp.DefaultClient.Do(l.MustNewHTTPRequest("DELETE", fmt.Sprintf("/api/v2/buckets/%s", l.Bucket.ID), "")); err != nil {
t.Fatal(err)
}
if body, err = ioutil.ReadAll(resp.Body); err != nil {
t.Fatal(err)
}
if err := resp.Body.Close(); err != nil {
t.Fatal(err)
}
if resp.StatusCode != nethttp.StatusNoContent {
t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header)
}
// Verify that the data has been removed from the storage engine.
if got, exp := engine.SeriesCardinality(), int64(0); got != exp {
t.Fatalf("after bucket delete got %d, exp %d", got, exp)
}
}
// Launcher is a test wrapper for launcher.Launcher.
type Launcher struct {
*launcher.Launcher

View File

@ -4,11 +4,15 @@ import (
"fmt"
"io/ioutil"
nethttp "net/http"
"path/filepath"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/toml"
"github.com/influxdata/influxdb/tsdb/tsm1"
)
func TestStorage_WriteAndQuery(t *testing.T) {
@ -48,6 +52,201 @@ func TestStorage_WriteAndQuery(t *testing.T) {
}
}
func TestLauncher_WriteAndQuery(t *testing.T) {
l := RunLauncherOrFail(t, ctx)
l.SetupOrFail(t)
defer l.ShutdownOrFail(t, ctx)
// Execute single write against the server.
resp, err := nethttp.DefaultClient.Do(l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, l.Bucket.ID), `m,k=v f=100i 946684800000000000`))
if err != nil {
t.Fatal(err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
if err := resp.Body.Close(); err != nil {
t.Fatal(err)
}
if resp.StatusCode != nethttp.StatusNoContent {
t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header)
}
// Query server to ensure write persists.
qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)`
exp := `,result,table,_start,_stop,_time,_value,_measurement,k,_field` + "\r\n" +
`,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,m,v,f` + "\r\n\r\n"
buf, err := http.SimpleQuery(l.URL(), qs, l.Org.Name, l.Auth.Token)
if err != nil {
t.Fatalf("unexpected error querying server: %v", err)
}
if diff := cmp.Diff(string(buf), exp); diff != "" {
t.Fatal(diff)
}
}
func TestLauncher_BucketDelete(t *testing.T) {
l := RunLauncherOrFail(t, ctx)
l.SetupOrFail(t)
defer l.ShutdownOrFail(t, ctx)
// Execute single write against the server.
resp, err := nethttp.DefaultClient.Do(l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, l.Bucket.ID), `m,k=v f=100i 946684800000000000`))
if err != nil {
t.Fatal(err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
if err := resp.Body.Close(); err != nil {
t.Fatal(err)
}
if resp.StatusCode != nethttp.StatusNoContent {
t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header)
}
// Query server to ensure write persists.
qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)`
exp := `,result,table,_start,_stop,_time,_value,_measurement,k,_field` + "\r\n" +
`,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,m,v,f` + "\r\n\r\n"
buf, err := http.SimpleQuery(l.URL(), qs, l.Org.Name, l.Auth.Token)
if err != nil {
t.Fatalf("unexpected error querying server: %v", err)
}
if diff := cmp.Diff(string(buf), exp); diff != "" {
t.Fatal(diff)
}
// Verify the cardinality in the engine.
engine := l.Launcher.Engine()
if got, exp := engine.SeriesCardinality(), int64(1); got != exp {
t.Fatalf("got %d, exp %d", got, exp)
}
// Delete the bucket.
if resp, err = nethttp.DefaultClient.Do(l.MustNewHTTPRequest("DELETE", fmt.Sprintf("/api/v2/buckets/%s", l.Bucket.ID), "")); err != nil {
t.Fatal(err)
}
if body, err = ioutil.ReadAll(resp.Body); err != nil {
t.Fatal(err)
}
if err := resp.Body.Close(); err != nil {
t.Fatal(err)
}
if resp.StatusCode != nethttp.StatusNoContent {
t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header)
}
// Verify that the data has been removed from the storage engine.
if got, exp := engine.SeriesCardinality(), int64(0); got != exp {
t.Fatalf("after bucket delete got %d, exp %d", got, exp)
}
}
func TestStorage_CacheSnapshot_Size(t *testing.T) {
l := NewLauncher()
l.StorageConfig.Engine.Cache.SnapshotMemorySize = 10
l.StorageConfig.Engine.Cache.SnapshotAgeDuration = toml.Duration(time.Hour)
defer l.ShutdownOrFail(t, ctx)
if err := l.Run(ctx); err != nil {
t.Fatal(err)
}
l.SetupOrFail(t)
org1 := l.OnBoardOrFail(t, &influxdb.OnboardingRequest{
User: "USER-1",
Password: "PASSWORD-1",
Org: "ORG-01",
Bucket: "BUCKET",
})
// Execute single write against the server.
l.WriteOrFail(t, org1, `m,k=v1 f=100i 946684800000000000`)
l.WriteOrFail(t, org1, `m,k=v2 f=101i 946684800000000000`)
l.WriteOrFail(t, org1, `m,k=v3 f=102i 946684800000000000`)
l.WriteOrFail(t, org1, `m,k=v4 f=103i 946684800000000000`)
l.WriteOrFail(t, org1, `m,k=v5 f=104i 946684800000000000`)
// Wait for the cache to snapshot. The size threshold should trigger a snapshot within a second; sleep longer to avoid flakiness.
time.Sleep(time.Second * 5)
// Check there is TSM data.
report := tsm1.Report{
Dir: filepath.Join(l.Path, "/engine/data"),
Exact: true,
}
summary, err := report.Run(false)
if err != nil {
t.Fatal(err)
}
// Five series should be in the snapshot
if got, exp := summary.Total, uint64(5); got != exp {
t.Fatalf("got %d series in TSM files, expected %d", got, exp)
}
}
func TestStorage_CacheSnapshot_Age(t *testing.T) {
l := NewLauncher()
l.StorageConfig.Engine.Cache.SnapshotAgeDuration = toml.Duration(time.Second)
defer l.ShutdownOrFail(t, ctx)
if err := l.Run(ctx); err != nil {
t.Fatal(err)
}
l.SetupOrFail(t)
org1 := l.OnBoardOrFail(t, &influxdb.OnboardingRequest{
User: "USER-1",
Password: "PASSWORD-1",
Org: "ORG-01",
Bucket: "BUCKET",
})
// Execute single write against the server.
l.WriteOrFail(t, org1, `m,k=v1 f=100i 946684800000000000`)
l.WriteOrFail(t, org1, `m,k=v2 f=101i 946684800000000000`)
l.WriteOrFail(t, org1, `m,k=v3 f=102i 946684800000000000`)
l.WriteOrFail(t, org1, `m,k=v4 f=102i 946684800000000000`)
l.WriteOrFail(t, org1, `m,k=v5 f=102i 946684800000000000`)
// Wait for the cache to snapshot. The age threshold is one second; sleep longer to avoid flakiness.
time.Sleep(time.Second * 5)
// Check there is TSM data.
report := tsm1.Report{
Dir: filepath.Join(l.Path, "/engine/data"),
Exact: true,
}
summary, err := report.Run(false)
if err != nil {
t.Fatal(err)
}
// Five series should be in the snapshot
if got, exp := summary.Total, uint64(5); got != exp {
t.Fatalf("got %d series in TSM files, expected %d", got, exp)
}
}
// WriteOrFail attempts a write to the organization and bucket identified by to, or fails the test if there is an error.
func (l *Launcher) WriteOrFail(tb testing.TB, to *influxdb.OnboardingResults, data string) {
tb.Helper()

View File

@ -2,8 +2,10 @@ package launcher_test
import (
"context"
"encoding/json"
"fmt"
nethttp "net/http"
"reflect"
"testing"
"time"
@ -226,4 +228,27 @@ from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, b
if len(logs) < 1 {
t.Fatalf("expected logs for run, got %d", len(logs))
}
// One of the log lines must be query statistics.
// For now, assume it's the first line that begins with "{" (beginning of a JSON object).
// That might change in the future.
var statJSON string
for _, log := range logs {
if len(log.Message) > 0 && log.Message[0] == '{' {
statJSON = log.Message
break
}
}
if statJSON == "" {
t.Fatalf("no stats JSON found in run logs")
}
var stats flux.Statistics
if err := json.Unmarshal([]byte(statJSON), &stats); err != nil {
t.Fatal(err)
}
if reflect.DeepEqual(stats, flux.Statistics{}) {
t.Fatalf("unmarshalled query statistics are zero; they should be non-zero. JSON: %s", statJSON)
}
}

View File

@ -6,6 +6,8 @@ import (
"strings"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/cmd/influxd/generate"
"github.com/influxdata/influxdb/cmd/influxd/inspect"
"github.com/influxdata/influxdb/cmd/influxd/launcher"
_ "github.com/influxdata/influxdb/query/builtin"
_ "github.com/influxdata/influxdb/tsdb/tsi1"
@ -31,7 +33,10 @@ func init() {
viper.AutomaticEnv()
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
rootCmd.InitDefaultHelpCmd()
rootCmd.AddCommand(launcher.NewCommand())
rootCmd.AddCommand(generate.Command)
rootCmd.AddCommand(inspect.NewCommand())
}
// find determines the default behavior when running influxd.

View File

@ -4,6 +4,9 @@ import (
"context"
)
// ErrDocumentNotFound is the error message for a missing document.
const ErrDocumentNotFound = "document not found"
// DocumentService is used to create/find instances of document stores.
type DocumentService interface {
CreateDocumentStore(ctx context.Context, name string) (DocumentStore, error)

12
go.mod
View File

@ -105,7 +105,9 @@ require (
github.com/sirupsen/logrus v1.3.0 // indirect
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect
github.com/spf13/cast v1.2.0
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3
github.com/spf13/viper v1.2.1
github.com/tcnksm/go-input v0.0.0-20180404061846-548a7d7a8ee8
github.com/testcontainers/testcontainers-go v0.0.0-20190108154635-47c0da630f72
@ -119,13 +121,13 @@ require (
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect
github.com/yudai/pp v2.0.1+incompatible // indirect
go.uber.org/zap v1.9.1
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9
golang.org/x/net v0.0.0-20181106065722-10aee1819953
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
golang.org/x/tools v0.0.0-20181221154417-3ad2d988d5e2
golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89
google.golang.org/api v0.0.0-20181021000519-a2651947f503
google.golang.org/genproto v0.0.0-20190108161440-ae2f86662275 // indirect
google.golang.org/grpc v1.17.0
@ -136,7 +138,7 @@ require (
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect
gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5
gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect
honnef.co/go/tools v0.0.0-20181108184350-ae8f1f9103cc
honnef.co/go/tools v0.0.0-20190319011948-d116c56a00f3
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 // indirect
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect
)

12
go.sum
View File

@ -234,6 +234,7 @@ github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9 h1:MHTrDWmQpHq/
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaFa4YD1Q+7bH9o5NCHQGPMqZCYJiNW6lIIS9z4=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jefferai/jsonx v0.0.0-20160721235117-9cc31c3135ee h1:AQ/QmCk6x8ECPpf2pkPtA4lyncEEBbs8VFnVXPYKhIs=
github.com/jefferai/jsonx v0.0.0-20160721235117-9cc31c3135ee/go.mod h1:N0t2vlmpe8nyZB5ouIbJQPDSR+mH6oe7xHB9VZHSUzM=
@ -429,6 +430,8 @@ golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20181112044915-a3060d491354 h1:6UAgZ8309zQ9+1iWkHzfszFguqzOdHGyGkd1HmhJ+UE=
golang.org/x/exp v0.0.0-20181112044915-a3060d491354/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -440,6 +443,8 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953 h1:LuZIitY8waaxUfNIdtajyE/YzA/zyf0YxXG27VpLrkg=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4 h1:99CA0JJbUX4ozCnLon680Jc9e0T1i8HCaLVJMwtI8Hc=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -455,6 +460,8 @@ golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb h1:pf3XwC90UUdNPYWZdFjhGBE7DUFuK3Ct1zWmZ65QN30=
golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
@ -463,6 +470,8 @@ golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181221154417-3ad2d988d5e2 h1:M7NLB69gFpUH4s6SJLwXiVs45aZfVjqGKynfNFKSGcI=
golang.org/x/tools v0.0.0-20181221154417-3ad2d988d5e2/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89 h1:iWXXYN3edZ3Nd/7I6Rt1sXrWVmhF9bgVtlEJ7BbH124=
golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca h1:PupagGYwj8+I4ubCxcmcBRk3VlUWtTg5huQpZR9flmE=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6 h1:4WsZyVtkthqrHTbDCJfiTs8IWNYE4uvsSDgaV6xpp+o=
@ -503,6 +512,7 @@ gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5 h1:E846t8CnR+lv5nE+Vu
gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5/go.mod h1:hiOFpYm0ZJbusNj2ywpbrXowU3G8U6GIQzqn2mw1UIE=
gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.8.1 h1:aAyBmkdE1QUUEHcP4YFCGKmsMQRAuRmUcPEQR7lOAa0=
gopkg.in/src-d/go-git.v4 v4.8.1/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@ -519,6 +529,8 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858 h1:wN+eVZ7U+gqdqkec6C6VXR1
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20181108184350-ae8f1f9103cc h1:VdiEcF0DrrUbDdrLBceS0h7LE60ebD5yRYLLXi0ezIs=
honnef.co/go/tools v0.0.0-20181108184350-ae8f1f9103cc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190319011948-d116c56a00f3 h1:XNmJXNdEHJ6ib8002TXvjYr8cjxBc0mmMoPsNQO4nsM=
honnef.co/go/tools v0.0.0-20190319011948-d116c56a00f3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 h1:L0cnkNl4TfAXzvdrqsYEmxOHOCv2p5I3taaReO8BWFs=
labix.org/v2/mgo v0.0.0-20140701140051-000000000287/go.mod h1:Lg7AYkt1uXJoR9oeSZ3W/8IXLdvOfIITgZnommstyz4=
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54=

View File

@ -119,8 +119,7 @@ func NewAPIHandler(b *APIBackend) *APIHandler {
sourceBackend := NewSourceBackend(b)
sourceBackend.SourceService = authorizer.NewSourceService(b.SourceService)
sourceBackend.NewBucketService = b.NewBucketService
sourceBackend.NewQueryService = b.NewQueryService
sourceBackend.BucketService = authorizer.NewBucketService(b.BucketService)
h.SourceHandler = NewSourceHandler(sourceBackend)
setupBackend := NewSetupBackend(b)

View File

@ -151,6 +151,13 @@ func decodePostDocumentRequest(ctx context.Context, r *http.Request) (*postDocum
return nil, err
}
if req.Document == nil {
return nil, &influxdb.Error{
Code: influxdb.EInvalid,
Msg: "missing document body",
}
}
params := httprouter.ParamsFromContext(ctx)
req.Namespace = params.ByName("ns")
if req.Namespace == "" {

View File

@ -11,9 +11,9 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/csv"
"github.com/influxdata/flux/lang"
platform "github.com/influxdata/influxdb"
platformhttp "github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/kit/check"
"github.com/influxdata/influxdb/kit/tracing"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/query/influxql"
@ -166,3 +166,7 @@ func (s *SourceProxyQueryService) influxQuery(ctx context.Context, w io.Writer,
}
return flux.Statistics{}, nil
}
func (s *SourceProxyQueryService) Check(context.Context) check.Response {
return platformhttp.QueryHealthCheck(s.URL, s.InsecureSkipVerify)
}

View File

@ -78,9 +78,16 @@ func (b postLabelRequest) Validate() error {
Msg: "label requires a name",
}
}
if !b.Label.OrganizationID.Valid() {
return &platform.Error{
Code: platform.EInvalid,
Msg: "label requires a valid orgID",
}
}
return nil
}
// TODO(jm): ensure that the specified org actually exists
func decodePostLabelRequest(ctx context.Context, r *http.Request) (*postLabelRequest, error) {
l := &platform.Label{}
if err := json.NewDecoder(r.Body).Decode(l); err != nil {

View File

@ -282,7 +282,8 @@ func TestService_handlePostLabel(t *testing.T) {
},
args: args{
label: &platform.Label{
Name: "mylabel",
Name: "mylabel",
OrganizationID: platformtesting.MustIDBase16("020f755c3c082008"),
},
},
wants: wants{
@ -295,7 +296,8 @@ func TestService_handlePostLabel(t *testing.T) {
},
"label": {
"id": "020f755c3c082000",
"name": "mylabel"
"name": "mylabel",
"orgID": "020f755c3c082008"
}
}
`,

View File

@ -524,7 +524,7 @@ func (h *OrgHandler) handleDeleteSecrets(w http.ResponseWriter, r *http.Request)
return
}
if err := h.SecretService.DeleteSecret(ctx, req.orgID, req.secrets...); err != nil {
if err := h.SecretService.DeleteSecret(ctx, req.orgID, req.Secrets...); err != nil {
EncodeError(ctx, err, w)
return
}
@ -534,7 +534,7 @@ func (h *OrgHandler) handleDeleteSecrets(w http.ResponseWriter, r *http.Request)
type deleteSecretsRequest struct {
orgID influxdb.ID
secrets []string
Secrets []string `json:"secrets"`
}
func decodeDeleteSecretsRequest(ctx context.Context, r *http.Request) (*deleteSecretsRequest, error) {
@ -552,13 +552,12 @@ func decodeDeleteSecretsRequest(ctx context.Context, r *http.Request) (*deleteSe
if err := i.DecodeFromString(id); err != nil {
return nil, err
}
req.orgID = i
req.secrets = []string{}
req.Secrets = []string{}
if err := json.NewDecoder(r.Body).Decode(&req.secrets); err != nil {
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return nil, err
}
req.orgID = i
return req, nil
}

View File

@ -291,7 +291,9 @@ func TestSecretService_handleDeleteSecrets(t *testing.T) {
orgBackend.SecretService = tt.fields.SecretService
h := NewOrgHandler(orgBackend)
b, err := json.Marshal(tt.args.secrets)
b, err := json.Marshal(deleteSecretsRequest{
Secrets: tt.args.secrets,
})
if err != nil {
t.Fatalf("failed to marshal secrets: %v", err)
}

View File

@ -12,6 +12,7 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/iocounter"
influxdbcontext "github.com/influxdata/influxdb/context"
"github.com/influxdata/influxdb/kit/check"
"github.com/influxdata/influxdb/kit/tracing"
"github.com/influxdata/influxdb/query"
"github.com/julienschmidt/httprouter"
@ -123,6 +124,18 @@ type ProxyQueryService struct {
InsecureSkipVerify bool
}
func (s *ProxyQueryService) Check(ctx context.Context) check.Response {
resp := check.Response{Name: "Query Service"}
if err := s.Ping(ctx); err != nil {
resp.Status = check.StatusFail
resp.Message = err.Error()
} else {
resp.Status = check.StatusPass
}
return resp
}
// Ping checks to see if the server is responding to a ping request.
func (s *ProxyQueryService) Ping(ctx context.Context) error {
u, err := newURL(s.Addr, "/ping")
@ -182,8 +195,13 @@ func (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.P
data := []byte(resp.Trailer.Get(QueryStatsTrailer))
var stats flux.Statistics
if err := json.Unmarshal(data, &stats); err != nil {
return stats, tracing.LogError(span, err)
if len(data) > 0 {
// FIXME(jsternberg): The queryd service always sends these,
// but envoy does not currently return them properly.
// https://github.com/influxdata/idpe/issues/2841
if err := json.Unmarshal(data, &stats); err != nil {
return stats, tracing.LogError(span, err)
}
}
return stats, nil

View File

@ -0,0 +1,81 @@
package http
import (
"bytes"
"context"
"io"
"net/http/httptest"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux"
"github.com/influxdata/flux/csv"
"github.com/influxdata/flux/lang"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/mock"
"github.com/influxdata/influxdb/query"
"go.uber.org/zap"
)
func TestProxyQueryService_Query(t *testing.T) {
id, err := influxdb.IDFromString("deadbeefbeefdead")
if err != nil {
t.Fatalf("error creating org ID: %v", err)
}
h := NewProxyQueryHandler("test")
h.CompilerMappings = make(flux.CompilerMappings)
h.DialectMappings = make(flux.DialectMappings)
h.Logger = zap.NewNop()
if err := lang.AddCompilerMappings(h.CompilerMappings); err != nil {
t.Fatalf("error adding compiler mappings: %v", err)
}
if err := csv.AddDialectMappings(h.DialectMappings); err != nil {
t.Fatalf("error adding dialect mappings: %v", err)
}
h.ProxyQueryService = &mock.ProxyQueryService{
QueryFn: func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {
if _, err := io.WriteString(w, "boo"); err != nil {
return flux.Statistics{}, err
}
return flux.Statistics{
TotalDuration: 777,
}, nil
},
}
ts := httptest.NewServer(h)
defer ts.Close()
svc := ProxyQueryService{
Addr: ts.URL,
}
var w bytes.Buffer
req := query.ProxyRequest{
Request: query.Request{
Authorization: &influxdb.Authorization{
ID: *id,
OrgID: *id,
UserID: *id,
},
OrganizationID: *id,
Compiler: lang.FluxCompiler{
Query: "buckets()",
},
},
Dialect: csv.Dialect{},
}
stats, err := svc.Query(context.Background(), &w, &req)
if err != nil {
t.Fatalf("call to ProxyQueryService.Query failed: %v", err.Error())
}
if w.String() != "boo" {
t.Errorf(`unexpected return: -want/+got: -"boo"/+"%v"`, w.String())
}
if diff := cmp.Diff(flux.Statistics{TotalDuration: 777}, stats); diff != "" {
t.Errorf("Query returned unexpected stats -want/+got: %v", diff)
}
}

View File

@ -17,14 +17,15 @@ import (
"github.com/influxdata/flux/csv"
"github.com/influxdata/flux/iocounter"
"github.com/influxdata/flux/parser"
"github.com/julienschmidt/httprouter"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
platform "github.com/influxdata/influxdb"
pcontext "github.com/influxdata/influxdb/context"
"github.com/influxdata/influxdb/kit/check"
"github.com/influxdata/influxdb/kit/tracing"
"github.com/influxdata/influxdb/query"
"github.com/julienschmidt/httprouter"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
const (
@ -377,6 +378,10 @@ func (s *FluxService) Query(ctx context.Context, w io.Writer, r *query.ProxyRequ
return flux.Statistics{}, nil
}
func (s FluxService) Check(ctx context.Context) check.Response {
return QueryHealthCheck(s.Addr, s.InsecureSkipVerify)
}
var _ query.QueryService = (*FluxQueryService)(nil)
// FluxQueryService implements query.QueryService by making HTTP requests to the /api/v2/query API endpoint.
@ -444,6 +449,10 @@ func (s *FluxQueryService) Query(ctx context.Context, r *query.Request) (flux.Re
return itr, nil
}
func (s FluxQueryService) Check(ctx context.Context) check.Response {
return QueryHealthCheck(s.Addr, s.InsecureSkipVerify)
}
// SimpleQuery runs a flux query with common parameters and returns CSV results.
func SimpleQuery(addr, flux, org, token string) ([]byte, error) {
u, err := newURL(addr, fluxPath)
@ -495,3 +504,44 @@ func SimpleQuery(addr, flux, org, token string) ([]byte, error) {
defer res.Body.Close()
return ioutil.ReadAll(res.Body)
}
func QueryHealthCheck(url string, insecureSkipVerify bool) check.Response {
u, err := newURL(url, "/health")
if err != nil {
return check.Response{
Name: "query health",
Status: check.StatusFail,
Message: errors.Wrap(err, "could not form URL").Error(),
}
}
hc := newClient(u.Scheme, insecureSkipVerify)
resp, err := hc.Get(u.String())
if err != nil {
return check.Response{
Name: "query health",
Status: check.StatusFail,
Message: errors.Wrap(err, "error getting response").Error(),
}
}
defer resp.Body.Close()
if resp.StatusCode/100 != 2 {
return check.Response{
Name: "query health",
Status: check.StatusFail,
Message: fmt.Sprintf("http error %v", resp.StatusCode),
}
}
var healthResponse check.Response
if err = json.NewDecoder(resp.Body).Decode(&healthResponse); err != nil {
return check.Response{
Name: "query health",
Status: check.StatusFail,
Message: errors.Wrap(err, "error decoding JSON response").Error(),
}
}
return healthResponse
}

View File

@ -16,6 +16,7 @@ import (
"github.com/influxdata/flux/csv"
"github.com/influxdata/flux/lang"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/kit/check"
"github.com/influxdata/influxdb/query"
)
@ -291,6 +292,42 @@ func TestFluxHandler_postFluxSpec(t *testing.T) {
}
}
func TestFluxService_Check(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(HealthHandler))
defer ts.Close()
s := &FluxService{
Addr: ts.URL,
}
got := s.Check(context.Background())
want := check.Response{
Name: "influxdb",
Status: "pass",
Message: "ready for queries and writes",
Checks: check.Responses{},
}
if !cmp.Equal(want, got) {
t.Errorf("unexpected response -want/+got: " + cmp.Diff(want, got))
}
}
func TestFluxQueryService_Check(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(HealthHandler))
defer ts.Close()
s := &FluxQueryService{
Addr: ts.URL,
}
got := s.Check(context.Background())
want := check.Response{
Name: "influxdb",
Status: "pass",
Message: "ready for queries and writes",
Checks: check.Responses{},
}
if !cmp.Equal(want, got) {
t.Errorf("unexpected response -want/+got: " + cmp.Diff(want, got))
}
}
var crlfPattern = regexp.MustCompile(`\r?\n`)
func toCRLF(data string) string {

View File

@ -12,8 +12,8 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/lang"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/kit/check"
"github.com/influxdata/influxdb/kit/tracing"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/query/influxql"
@ -117,3 +117,7 @@ func (s *SourceProxyQueryService) queryInfluxQL(ctx context.Context, w io.Writer
return flux.Statistics{}, nil
}
func (s *SourceProxyQueryService) Check(context.Context) check.Response {
return QueryHealthCheck(s.Addr, s.InsecureSkipVerify)
}

View File

@ -79,9 +79,10 @@ func newSourcesResponse(srcs []*platform.Source) *sourcesResponse {
type SourceBackend struct {
Logger *zap.Logger
SourceService platform.SourceService
NewBucketService func(s *platform.Source) (platform.BucketService, error)
NewQueryService func(s *platform.Source) (query.ProxyQueryService, error)
SourceService platform.SourceService
LabelService platform.LabelService
BucketService platform.BucketService
NewQueryService func(s *platform.Source) (query.ProxyQueryService, error)
}
// NewSourceBackend returns a new instance of SourceBackend.
@ -89,9 +90,10 @@ func NewSourceBackend(b *APIBackend) *SourceBackend {
return &SourceBackend{
Logger: b.Logger.With(zap.String("handler", "source")),
SourceService: b.SourceService,
NewBucketService: b.NewBucketService,
NewQueryService: b.NewQueryService,
SourceService: b.SourceService,
LabelService: b.LabelService,
BucketService: b.BucketService,
NewQueryService: b.NewQueryService,
}
}
@ -100,11 +102,12 @@ type SourceHandler struct {
*httprouter.Router
Logger *zap.Logger
SourceService platform.SourceService
LabelService platform.LabelService
BucketService platform.BucketService
// TODO(desa): this was done in order to remove an import cycle and to allow
// for http mocking.
NewBucketService func(s *platform.Source) (platform.BucketService, error)
NewQueryService func(s *platform.Source) (query.ProxyQueryService, error)
NewQueryService func(s *platform.Source) (query.ProxyQueryService, error)
}
// NewSourceHandler returns a new instance of SourceHandler.
@ -113,9 +116,10 @@ func NewSourceHandler(b *SourceBackend) *SourceHandler {
Router: NewRouter(),
Logger: b.Logger,
SourceService: b.SourceService,
NewBucketService: b.NewBucketService,
NewQueryService: b.NewQueryService,
SourceService: b.SourceService,
LabelService: b.LabelService,
BucketService: b.BucketService,
NewQueryService: b.NewQueryService,
}
h.HandlerFunc("POST", "/api/v2/sources", h.handlePostSource)
@ -222,24 +226,19 @@ func (h *SourceHandler) handleGetSourcesBuckets(w http.ResponseWriter, r *http.R
return
}
s, err := h.SourceService.FindSourceByID(ctx, req.SourceID)
_, err = h.SourceService.FindSourceByID(ctx, req.getSourceRequest.SourceID)
if err != nil {
EncodeError(ctx, err, w)
return
}
bucketSvc, err := h.NewBucketService(s)
if err != nil {
EncodeError(ctx, err, w)
return
}
bs, _, err := bucketSvc.FindBuckets(ctx, req.filter)
bs, _, err := h.BucketService.FindBuckets(ctx, req.getBucketsRequest.filter)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, bs); err != nil {
if err := encodeResponse(ctx, w, http.StatusOK, newBucketsResponse(ctx, req.opts, req.filter, bs, h.LabelService)); err != nil {
logEncodingError(h.Logger, r, err)
return
}

View File

@ -254,6 +254,27 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/Error"
delete:
tags:
- Templates
summary: delete a template document
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: templateID
schema:
type: string
required: true
description: ID of template
responses:
'204':
description: delete has been accepted
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/telegrafs:
get:
tags:
@ -796,19 +817,14 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/Label"
$ref: "#/components/schemas/LabelMapping"
responses:
'200':
description: a list of all labels for a scraper target
content:
application/json:
schema:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
$ref: "#/components/schemas/LabelsResponse"
default:
description: unexpected error
content:
@ -1638,7 +1654,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/Label"
$ref: "#/components/schemas/LabelCreateRequest"
responses:
'201':
description: Added label
@ -3070,12 +3086,7 @@ paths:
content:
application/json:
schema:
type: object
properties:
labels:
$ref: "#/components/schemas/Labels"
links:
$ref: "#/components/schemas/Links"
$ref: "#/components/schemas/LabelsResponse"
default:
description: unexpected error
content:
@ -3576,7 +3587,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/SecretKeys"
$ref: "#/components/schemas/SecretKeysResponse"
default:
description: unexpected error
content:
@ -4335,7 +4346,7 @@ paths:
$ref: "#/components/schemas/PasswordResetBody"
responses:
'204':
description: password succesfully updated
description: password successfully updated
default:
description: unsuccessful authentication
content:
@ -5313,6 +5324,12 @@ components:
- tasks
- telegrafs
- users
- variables
- scrapers
- secrets
- labels
- views
- documents
id:
type: string
nullable: true
@ -5710,7 +5727,7 @@ components:
description: A task repetition schedule in the form '* * * * * *'; parsed from Flux.
type: string
offset:
description: Duration to delay after the schedule, before executing the task; parsed from flux.
description: Duration to delay after the schedule, before executing the task; parsed from Flux. Setting this to zero removes the option and uses the default of 0.
type: string
latestCompleted:
description: Timestamp of latest scheduled, completed run, RFC3339.
@ -6611,18 +6628,21 @@ components:
example:
apikey: abc123xyz
SecretKeys:
type: object
properties:
links:
type: object
secrets:
type: array
items:
type: string
SecretKeysResponse:
allOf:
- $ref: "#/components/schemas/SecretKeys"
- type: object
properties:
self:
type: string
org:
type: string
secrets:
type: array
items:
type: string
CreateProtoResourcesRequest:
properties:
orgID:
@ -6876,7 +6896,6 @@ components:
- id
- meta
- content
- labels
DocumentCreate:
type: object
properties:
@ -6918,7 +6937,6 @@ components:
required:
- id
- meta
- labels
Documents:
type: object
properties:
@ -6945,27 +6963,25 @@ components:
organizationID:
type: string
TelegrafRequestPlugin:
type: object
discriminator:
propertyName: "name"
required:
- name
- type
properties:
name:
type: string
example: cpu
type:
type: string
enum:
- input
- output
oneOf:
- $ref: '#/components/schemas/TelegrafPluginInputCpu'
- $ref: '#/components/schemas/TelegrafPluginInputDisk'
- $ref: '#/components/schemas/TelegrafPluginInputDiskio'
- $ref: '#/components/schemas/TelegrafPluginInputDocker'
- $ref: '#/components/schemas/TelegrafPluginInputFile'
- $ref: '#/components/schemas/TelegrafPluginInputKubernetes'
- $ref: '#/components/schemas/TelegrafPluginInputLogParser'
- $ref: '#/components/schemas/TelegrafPluginInputProcstat'
- $ref: '#/components/schemas/TelegrafPluginInputPrometheus'
- $ref: '#/components/schemas/TelegrafPluginInputRedis'
- $ref: '#/components/schemas/TelegrafPluginInputSyslog'
- $ref: '#/components/schemas/TelegrafPluginOutputFile'
- $ref: '#/components/schemas/TelegrafPluginOutputInfluxDBV2'
TelegrafPluginInputCpu:
type: object
required:
- name
- type
- config
properties:
name:
type: string
@ -6975,19 +6991,11 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: "#/components/schemas/TelegrafPluginConfig"
TelegrafPluginInputCpuRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputCpu"
TelegrafPluginInputDisk:
type: object
required:
- name
- type
- config
properties:
name:
type: string
@ -6997,20 +7005,12 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: "#/components/schemas/TelegrafPluginConfig"
TelegrafPluginInputDiskRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputDisk"
TelegrafPluginInputDiskio:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7020,13 +7020,6 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: "#/components/schemas/TelegrafPluginConfig"
TelegrafPluginInputDiskioRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputDiskio"
TelegrafPluginInputDocker:
type:
object
@ -7045,11 +7038,6 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginInputDockerConfig'
TelegrafPluginInputDockerRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputDocker"
TelegrafPluginInputFile:
type:
object
@ -7068,18 +7056,12 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginInputFileConfig'
TelegrafPluginInputFileRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputFile"
TelegrafPluginInputKernel:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7089,13 +7071,6 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: "#/components/schemas/TelegrafPluginConfig"
TelegrafPluginInputKernelRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputKernel"
TelegrafPluginInputKubernetes:
type:
object
@ -7114,11 +7089,6 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginInputKubernetesConfig'
TelegrafPluginInputKubernetesRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputKubernetes"
TelegrafPluginInputLogParser:
type:
object
@ -7137,18 +7107,12 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginInputLogParserConfig'
TelegrafPluginInputLogParserRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputLogParser"
TelegrafPluginInputMem:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7158,20 +7122,12 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: "#/components/schemas/TelegrafPluginConfig"
TelegrafPluginInputMemRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputMem"
TelegrafPluginInputNetResponse:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7181,20 +7137,12 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: "#/components/schemas/TelegrafPluginConfig"
TelegrafPluginInputNetResponseRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputNetResponse"
TelegrafPluginInputNet:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7204,20 +7152,12 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: "#/components/schemas/TelegrafPluginConfig"
TelegrafPluginInputNetRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputNet"
TelegrafPluginInputNginx:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7227,20 +7167,12 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: "#/components/schemas/TelegrafPluginConfig"
TelegrafPluginInputNginxRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputNginx"
TelegrafPluginInputProcesses:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7250,13 +7182,6 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: "#/components/schemas/TelegrafPluginConfig"
TelegrafPluginInputProcessesRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputProcesses"
TelegrafPluginInputProcstat:
type:
object
@ -7275,11 +7200,6 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginInputProcstatConfig'
TelegrafPluginInputProcstatRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputProcstat"
TelegrafPluginInputPrometheus:
type:
object
@ -7298,11 +7218,6 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginInputPrometheusConfig'
TelegrafPluginInputPrometheusRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputPrometheus"
TelegrafPluginInputRedis:
type:
object
@ -7321,11 +7236,6 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginInputRedisConfig'
TelegrafPluginInputRedisRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputRedis"
TelegrafPluginInputSyslog:
type:
object
@ -7344,18 +7254,12 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginInputSyslogConfig'
TelegrafPluginInputSyslogRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputSyslog"
TelegrafPluginInputSwap:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7365,20 +7269,12 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginConfig'
TelegrafPluginInputSwapRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputSwap"
TelegrafPluginInputSystem:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7388,20 +7284,12 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginConfig'
TelegrafPluginInputSystemRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputSystem"
TelegrafPluginInputTail:
type:
object
required:
- name
- type
- config
properties:
name:
type: string
@ -7411,13 +7299,6 @@ components:
enum: ["input"]
comment:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginConfig'
TelegrafPluginInputTailRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginInputTail"
TelegrafPluginOutputFile:
type:
object
@ -7436,11 +7317,6 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginOutputFileConfig'
TelegrafPluginOutputFileRequest:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginOutputFile"
TelegrafPluginOutputInfluxDBV2:
type:
object
@ -7459,24 +7335,6 @@ components:
type: string
config:
$ref: '#/components/schemas/TelegrafPluginOutputInfluxDBV2Config'
TelegrafPluginOutputInfluxDBV2Request:
type: object
allOf:
- $ref: "#/components/schemas/TelegrafRequestPlugin"
- $ref: "#/components/schemas/TelegrafPluginOutputInfluxDBV2"
TelegrafRequestConfig:
oneOf:
- $ref: '#/components/schemas/TelegrafPluginConfig'
- $ref: '#/components/schemas/TelegrafPluginInputDockerConfig'
- $ref: '#/components/schemas/TelegrafPluginInputFileConfig'
- $ref: '#/components/schemas/TelegrafPluginInputKubernetesConfig'
- $ref: '#/components/schemas/TelegrafPluginInputLogParserConfig'
- $ref: '#/components/schemas/TelegrafPluginInputProcstatConfig'
- $ref: '#/components/schemas/TelegrafPluginInputPrometheusConfig'
- $ref: '#/components/schemas/TelegrafPluginInputRedisConfig'
- $ref: '#/components/schemas/TelegrafPluginInputSyslogConfig'
- $ref: '#/components/schemas/TelegrafPluginOutputFileConfig'
- $ref: '#/components/schemas/TelegrafPluginOutputInfluxDBV2Config'
Telegraf:
type: object
allOf:
@ -7485,8 +7343,10 @@ components:
properties:
id:
type: string
readOnly: true
links:
type: object
readOnly: true
example:
self: "/api/v2/telegrafs/1"
labels: "/api/v2/telegrafs/1/labels"
@ -7502,6 +7362,7 @@ components:
owners:
$ref: "#/components/schemas/Link"
labels:
readOnly: true
$ref: "#/components/schemas/Labels"
Telegrafs:
type: object
@ -7510,8 +7371,6 @@ components:
type: array
items:
$ref: "#/components/schemas/Telegraf"
TelegrafPluginConfig:
type: object
TelegrafPluginInputDockerConfig:
type: object
required:
@ -7693,6 +7552,23 @@ components:
id:
readOnly: true
type: string
orgID:
readOnly: true
type: string
name:
type: string
properties:
type: object
additionalProperties:
type: string
description: Key/Value pairs associated with this label. Keys can be removed by sending an update with an empty value.
example: {"color": "ffb3b3", "description": "this is a description"}
LabelCreateRequest:
type: object
required: [orgID]
properties:
orgID:
type: string
name:
type: string
properties:
@ -7741,7 +7617,6 @@ components:
- ms
- s
- us
- u
- ns
TaskCreateRequest:
type: object

View File

@ -55,11 +55,9 @@ func httpTaskServiceFactory(t *testing.T) (*servicetest.System, context.CancelFu
server.Close()
}()
tsFunc := func() platform.TaskService {
return http.TaskService{
Addr: server.URL,
Token: auth.Token,
}
taskService := http.TaskService{
Addr: server.URL,
Token: auth.Token,
}
cFunc := func() (servicetest.TestCreds, error) {
@ -73,13 +71,11 @@ func httpTaskServiceFactory(t *testing.T) (*servicetest.System, context.CancelFu
}
return &servicetest.System{
S: store,
LR: rrw,
LW: rrw,
I: i,
Ctx: ctx,
TaskServiceFunc: tsFunc,
CredsFunc: cFunc,
TaskControlService: servicetest.TaskControlAdaptor(store, rrw, rrw),
TaskService: taskService,
Ctx: ctx,
I: i,
CredsFunc: cFunc,
}, cancel
}

View File

@ -93,24 +93,13 @@ func (h *UserHandler) putPassword(ctx context.Context, w http.ResponseWriter, r
// handlePutPassword is the HTTP handler for the PUT /api/v2/users/:id/password
func (h *UserHandler) handlePutUserPassword(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
username, err := h.putPassword(ctx, w, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
filter := influxdb.UserFilter{
Name: &username,
}
b, err := h.UserService.FindUser(ctx, filter)
_, err := h.putPassword(ctx, w, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newUserResponse(b)); err != nil {
EncodeError(ctx, err, w)
return
}
w.WriteHeader(http.StatusNoContent)
}
type passwordResetRequest struct {

59
kit/errors/list.go Normal file
View File

@ -0,0 +1,59 @@
package errors
import (
"errors"
"strings"
)
// List represents a list of errors.
type List struct {
errs []error
err error // cached error
}
// Append adds err to the errors list.
func (l *List) Append(err error) {
l.errs = append(l.errs, err)
l.err = nil
}
// AppendString adds a new error that formats as the given text.
func (l *List) AppendString(text string) {
l.errs = append(l.errs, errors.New(text))
l.err = nil
}
// Clear removes all the previously appended errors from the list.
func (l *List) Clear() {
for i := range l.errs {
l.errs[i] = nil
}
l.errs = l.errs[:0]
l.err = nil
}
// Err returns an error composed of the list of errors, separated by a new line, or nil if no errors
// were appended.
func (l *List) Err() error {
if len(l.errs) == 0 {
return nil
}
if l.err == nil { // compose and cache the error on first use
switch len(l.errs) {
case 1:
l.err = l.errs[0]
default:
var sb strings.Builder
sb.WriteString(l.errs[0].Error())
for _, err := range l.errs[1:] {
sb.WriteRune('\n')
sb.WriteString(err.Error())
}
l.err = errors.New(sb.String())
}
}
return l.err
}
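
A short usage sketch of List from kit/errors above, accumulating validation errors and composing them with Err; the validate function and its messages are illustrative only.

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/kit/errors"
)

// validate is a hypothetical caller that collects every problem it
// finds instead of returning on the first one.
func validate(name, org string) error {
    var errs errors.List
    if name == "" {
        errs.AppendString("name is required")
    }
    if org == "" {
        errs.AppendString("organization is required")
    }
    // Err is nil when nothing was appended; otherwise the messages
    // are joined by newlines.
    return errs.Err()
}

func main() {
    if err := validate("", ""); err != nil {
        fmt.Println(err)
    }
}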

View File

@ -113,6 +113,10 @@ func (s *DocumentStore) CreateDocument(ctx context.Context, d *influxdb.Document
}
}
if err := s.decorateDocumentWithLabels(ctx, tx, d); err != nil {
return err
}
return nil
})
}
@ -563,16 +567,9 @@ func (s *DocumentStore) FindDocuments(ctx context.Context, opts ...influxdb.Docu
if dd.labels {
for _, doc := range docs {
ls := []*influxdb.Label{}
f := influxdb.LabelMappingFilter{
ResourceID: doc.ID,
ResourceType: influxdb.DocumentsResourceType,
}
if err := s.service.findResourceLabels(ctx, tx, f, &ls); err != nil {
if err := s.decorateDocumentWithLabels(ctx, tx, doc); err != nil {
return err
}
doc.Labels = append(doc.Labels, ls...)
}
}
@ -581,6 +578,13 @@ func (s *DocumentStore) FindDocuments(ctx context.Context, opts ...influxdb.Docu
return nil
})
if IsNotFound(err) {
return nil, &influxdb.Error{
Code: influxdb.ENotFound,
Msg: influxdb.ErrDocumentNotFound,
}
}
if err != nil {
return nil, err
}
@ -722,8 +726,11 @@ func (s *DocumentStore) UpdateDocument(ctx context.Context, d *influxdb.Document
}
}
err := s.service.updateDocument(ctx, tx, s.namespace, d)
if err != nil {
if err := s.service.updateDocument(ctx, tx, s.namespace, d); err != nil {
return err
}
if err := s.decorateDocumentWithLabels(ctx, tx, d); err != nil {
return err
}
@ -740,3 +747,17 @@ func (s *Service) updateDocument(ctx context.Context, tx Tx, ns string, d *influ
return nil
}
func (s *DocumentStore) decorateDocumentWithLabels(ctx context.Context, tx Tx, d *influxdb.Document) error {
ls := []*influxdb.Label{}
f := influxdb.LabelMappingFilter{
ResourceID: d.ID,
ResourceType: influxdb.DocumentsResourceType,
}
if err := s.service.findResourceLabels(ctx, tx, f, &ls); err != nil {
return err
}
d.Labels = append(d.Labels, ls...)
return nil
}

View File

@ -6,6 +6,7 @@ import (
"encoding/json"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/kit/tracing"
)
var (
@ -248,7 +249,15 @@ func (s *Service) CreateLabel(ctx context.Context, l *influxdb.Label) error {
err := s.kv.Update(ctx, func(tx Tx) error {
l.ID = s.IDGenerator.ID()
return s.putLabel(ctx, tx, l)
if err := s.putLabel(ctx, tx, l); err != nil {
return err
}
if err := s.createLabelUserResourceMappings(ctx, tx, l); err != nil {
return err
}
return nil
})
if err != nil {
@ -271,6 +280,36 @@ func (s *Service) PutLabel(ctx context.Context, l *influxdb.Label) error {
})
}
func (s *Service) createLabelUserResourceMappings(ctx context.Context, tx Tx, l *influxdb.Label) error {
span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()
ms, err := s.findUserResourceMappings(ctx, tx, influxdb.UserResourceMappingFilter{
ResourceType: influxdb.OrgsResourceType,
ResourceID: l.OrganizationID,
})
if err != nil {
return &influxdb.Error{
Err: err,
}
}
for _, m := range ms {
if err := s.createUserResourceMapping(ctx, tx, &influxdb.UserResourceMapping{
ResourceType: influxdb.LabelsResourceType,
ResourceID: l.ID,
UserID: m.UserID,
UserType: m.UserType,
}); err != nil {
return &influxdb.Error{
Err: err,
}
}
}
return nil
}
func labelMappingKey(m *influxdb.LabelMapping) ([]byte, error) {
lid, err := m.LabelID.Encode()
if err != nil {
@ -475,5 +514,18 @@ func (s *Service) deleteLabel(ctx context.Context, tx Tx, id influxdb.ID) error
return err
}
return b.Delete(encodedID)
if err := b.Delete(encodedID); err != nil {
return &influxdb.Error{
Err: err,
}
}
if err := s.deleteUserResourceMappings(ctx, tx, influxdb.UserResourceMappingFilter{
ResourceID: id,
ResourceType: influxdb.LabelsResourceType,
}); err != nil {
return err
}
return nil
}

View File

@ -47,9 +47,10 @@ type LabelService interface {
// Label is a tag set on a resource, typically used for filtering on a UI.
type Label struct {
ID ID `json:"id,omitempty"`
Name string `json:"name"`
Properties map[string]string `json:"properties,omitempty"`
ID ID `json:"id,omitempty"`
OrganizationID ID `json:"orgID,omitempty"`
Name string `json:"name"`
Properties map[string]string `json:"properties,omitempty"`
}
// Validate returns an error if the label is invalid.
@ -61,6 +62,13 @@ func (l *Label) Validate() error {
}
}
if !l.OrganizationID.Valid() {
return &Error{
Code: EInvalid,
Msg: "organization ID is required",
}
}
return nil
}
@ -105,7 +113,8 @@ type LabelUpdate struct {
// LabelFilter represents a set of filters that restrict the returned results.
type LabelFilter struct {
Name string
Name string
OrgID *ID
}
// LabelMappingFilter represents a set of filters that restrict the returned results.

View File

@ -3,13 +3,19 @@ package influxdb_test
import (
"testing"
"github.com/influxdata/influxdb"
platform "github.com/influxdata/influxdb"
influxtest "github.com/influxdata/influxdb/testing"
)
const (
orgOneID = "020f755c3c083000"
)
func TestLabelValidate(t *testing.T) {
type fields struct {
ResourceID platform.ID
Name string
Name string
OrgID influxdb.ID
}
tests := []struct {
name string
@ -19,19 +25,30 @@ func TestLabelValidate(t *testing.T) {
{
name: "valid label",
fields: fields{
Name: "iot",
Name: "iot",
OrgID: influxtest.MustIDBase16(orgOneID),
},
},
{
name: "label requires a name",
fields: fields{},
name: "label requires a name",
fields: fields{
OrgID: influxtest.MustIDBase16(orgOneID),
},
wantErr: true,
},
{
name: "label requires an organization ID",
fields: fields{
Name: "iot",
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := platform.Label{
Name: tt.fields.Name,
Name: tt.fields.Name,
OrganizationID: tt.fields.OrgID,
}
if err := m.Validate(); (err != nil) != tt.wantErr {
t.Errorf("Label.Validate() error = %v, wantErr %v", err, tt.wantErr)

View File

@ -9,9 +9,6 @@ import (
)
const (
// TraceIDKey is the logging context key used for identifying unique traces.
TraceIDKey = "trace_id"
// OperationNameKey is the logging context key used for identifying name of an operation.
OperationNameKey = "op_name"
@ -47,11 +44,6 @@ func nextID() string {
return gen.NextString()
}
// TraceID returns a field for tracking the trace identifier.
func TraceID(id string) zapcore.Field {
return zap.String(TraceIDKey, id)
}
// OperationName returns a field for tracking the name of an operation.
func OperationName(name string) zapcore.Field {
return zap.String(OperationNameKey, name)
@ -98,7 +90,7 @@ func Shard(id uint64) zapcore.Field {
// called when the operation concludes in order to log a corresponding message which
// includes an elapsed time and that the operation has ended.
func NewOperation(log *zap.Logger, msg, name string, fields ...zapcore.Field) (*zap.Logger, func()) {
f := []zapcore.Field{TraceID(nextID()), OperationName(name)}
f := []zapcore.Field{OperationName(name)}
if len(fields) > 0 {
f = append(f, fields...)
}

View File

@ -5,6 +5,7 @@ import (
"io"
"github.com/influxdata/flux"
"github.com/influxdata/influxdb/kit/check"
"github.com/influxdata/influxdb/query"
)
@ -28,3 +29,7 @@ func NewProxyQueryService() *ProxyQueryService {
func (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) {
return s.QueryFn(ctx, w, req)
}
func (*ProxyQueryService) Check(ctx context.Context) check.Response {
return check.Response{Name: "Mock Query Service", Status: check.StatusPass}
}

View File

@ -7,91 +7,91 @@
package gen
import (
"github.com/influxdata/influxdb/tsdb/cursors"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/tsm1"
)
type FloatArray struct {
cursors.FloatArray
type floatArray struct {
tsdb.FloatArray
}
func NewFloatArrayLen(sz int) *FloatArray {
return &FloatArray{
FloatArray: cursors.FloatArray{
func newFloatArrayLen(sz int) *floatArray {
return &floatArray{
FloatArray: tsdb.FloatArray{
Timestamps: make([]int64, sz),
Values: make([]float64, sz),
},
}
}
func (a *FloatArray) Encode(b []byte) ([]byte, error) {
func (a *floatArray) Encode(b []byte) ([]byte, error) {
return tsm1.EncodeFloatArrayBlock(&a.FloatArray, b)
}
type IntegerArray struct {
cursors.IntegerArray
type integerArray struct {
tsdb.IntegerArray
}
func NewIntegerArrayLen(sz int) *IntegerArray {
return &IntegerArray{
IntegerArray: cursors.IntegerArray{
func newIntegerArrayLen(sz int) *integerArray {
return &integerArray{
IntegerArray: tsdb.IntegerArray{
Timestamps: make([]int64, sz),
Values: make([]int64, sz),
},
}
}
func (a *IntegerArray) Encode(b []byte) ([]byte, error) {
func (a *integerArray) Encode(b []byte) ([]byte, error) {
return tsm1.EncodeIntegerArrayBlock(&a.IntegerArray, b)
}
type UnsignedArray struct {
cursors.UnsignedArray
type unsignedArray struct {
tsdb.UnsignedArray
}
func NewUnsignedArrayLen(sz int) *UnsignedArray {
return &UnsignedArray{
UnsignedArray: cursors.UnsignedArray{
func newUnsignedArrayLen(sz int) *unsignedArray {
return &unsignedArray{
UnsignedArray: tsdb.UnsignedArray{
Timestamps: make([]int64, sz),
Values: make([]uint64, sz),
},
}
}
func (a *UnsignedArray) Encode(b []byte) ([]byte, error) {
func (a *unsignedArray) Encode(b []byte) ([]byte, error) {
return tsm1.EncodeUnsignedArrayBlock(&a.UnsignedArray, b)
}
type StringArray struct {
cursors.StringArray
type stringArray struct {
tsdb.StringArray
}
func NewStringArrayLen(sz int) *StringArray {
return &StringArray{
StringArray: cursors.StringArray{
func newStringArrayLen(sz int) *stringArray {
return &stringArray{
StringArray: tsdb.StringArray{
Timestamps: make([]int64, sz),
Values: make([]string, sz),
},
}
}
func (a *StringArray) Encode(b []byte) ([]byte, error) {
func (a *stringArray) Encode(b []byte) ([]byte, error) {
return tsm1.EncodeStringArrayBlock(&a.StringArray, b)
}
type BooleanArray struct {
cursors.BooleanArray
type booleanArray struct {
tsdb.BooleanArray
}
func NewBooleanArrayLen(sz int) *BooleanArray {
return &BooleanArray{
BooleanArray: cursors.BooleanArray{
func newBooleanArrayLen(sz int) *booleanArray {
return &booleanArray{
BooleanArray: tsdb.BooleanArray{
Timestamps: make([]int64, sz),
Values: make([]bool, sz),
},
}
}
func (a *BooleanArray) Encode(b []byte) ([]byte, error) {
func (a *booleanArray) Encode(b []byte) ([]byte, error) {
return tsm1.EncodeBooleanArrayBlock(&a.BooleanArray, b)
}

View File

@ -1,19 +1,20 @@
package gen
import (
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/tsm1"
"github.com/influxdata/influxdb/tsdb/cursors"
)
{{range .}}
{{ $typename := print .Name "Array" }}
{{ $typename := print .name "Array" }}
{{ $tsdbname := print .Name "Array" }}
type {{$typename}} struct {
cursors.{{$typename}}
tsdb.{{$tsdbname}}
}
func New{{$typename}}Len(sz int) *{{$typename}} {
func new{{$tsdbname}}Len(sz int) *{{$typename}} {
return &{{$typename}}{
{{$typename}}: cursors.{{$typename}}{
{{$tsdbname}}: tsdb.{{$tsdbname}}{
Timestamps: make([]int64, sz),
Values: make([]{{.Type}}, sz),
},
@ -21,6 +22,6 @@ func New{{$typename}}Len(sz int) *{{$typename}} {
}
func (a *{{$typename}}) Encode(b []byte) ([]byte, error) {
return tsm1.Encode{{$typename}}Block(&a.{{$typename}}, b)
return tsm1.Encode{{$tsdbname}}Block(&a.{{$tsdbname}}, b)
}
{{end}}

View File

@ -1,3 +1,4 @@
package gen
//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@types.tmpldata arrays.gen.go.tmpl values_constant.gen.go.tmpl
//go:generate tmpl -data=@types.tmpldata arrays.gen.go.tmpl values.gen.go.tmpl values_sequence.gen.go.tmpl
//go:generate stringer -type=precision -trimprefix=precision

View File

@ -0,0 +1,144 @@
package gen
import (
"container/heap"
"math"
"github.com/influxdata/influxdb/models"
)
type mergedSeriesGenerator struct {
heap seriesGeneratorHeap
last constSeries
n int64
first bool
}
func NewMergedSeriesGenerator(s []SeriesGenerator) SeriesGenerator {
if len(s) == 0 {
return nil
} else if len(s) == 1 {
return s[0]
}
msg := &mergedSeriesGenerator{first: true, n: math.MaxInt64}
msg.heap.init(s)
return msg
}
func NewMergedSeriesGeneratorLimit(s []SeriesGenerator, n int64) SeriesGenerator {
if len(s) == 0 {
return nil
}
msg := &mergedSeriesGenerator{first: true, n: n}
msg.heap.init(s)
return msg
}
func (s *mergedSeriesGenerator) Next() bool {
if len(s.heap.items) == 0 {
return false
}
if s.n > 0 {
s.n--
if !s.first {
top := s.heap.items[0]
s.last.CopyFrom(top) // capture last key for duplicate checking
for {
if top.Next() {
if len(s.heap.items) > 1 {
heap.Fix(&s.heap, 0)
}
} else {
heap.Pop(&s.heap)
if len(s.heap.items) == 0 {
return false
}
}
top = s.heap.items[0]
if CompareSeries(&s.last, top) == 0 {
// duplicate key, get next
continue
}
return true
}
}
s.first = false
return true
}
return false
}
func (s *mergedSeriesGenerator) Key() []byte {
return s.heap.items[0].Key()
}
func (s *mergedSeriesGenerator) ID() []byte {
return s.heap.items[0].ID()
}
func (s *mergedSeriesGenerator) Tags() models.Tags {
return s.heap.items[0].Tags()
}
func (s *mergedSeriesGenerator) Field() []byte {
return s.heap.items[0].Field()
}
func (s *mergedSeriesGenerator) FieldType() models.FieldType {
return s.heap.items[0].FieldType()
}
func (s *mergedSeriesGenerator) TimeValuesGenerator() TimeValuesSequence {
return s.heap.items[0].TimeValuesGenerator()
}
type seriesGeneratorHeap struct {
items []SeriesGenerator
}
func (h *seriesGeneratorHeap) init(results []SeriesGenerator) {
if cap(h.items) < len(results) {
h.items = make([]SeriesGenerator, 0, len(results))
} else {
h.items = h.items[:0]
}
for _, rs := range results {
if rs.Next() {
h.items = append(h.items, rs)
}
}
heap.Init(h)
}
func (h *seriesGeneratorHeap) Less(i, j int) bool {
return CompareSeries(h.items[i], h.items[j]) == -1
}
func (h *seriesGeneratorHeap) Len() int {
return len(h.items)
}
func (h *seriesGeneratorHeap) Swap(i, j int) {
h.items[i], h.items[j] = h.items[j], h.items[i]
}
func (h *seriesGeneratorHeap) Push(x interface{}) {
panic("not implemented")
}
func (h *seriesGeneratorHeap) Pop() interface{} {
old := h.items
n := len(old)
item := old[n-1]
old[n-1] = nil
h.items = old[0 : n-1]
return item
}
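Editor's note: a hedged sketch of the heap-based merge above; the sg helper is the one defined in the tests that follow. Duplicate series keys across inputs are emitted only once:

gens := []SeriesGenerator{
	sg("cpu", "t", "f0", 2, 1),
	sg("cpu", "t", "f0", 2, 1), // exact duplicate of the first generator
}
m := NewMergedSeriesGenerator(gens)
for m.Next() {
	fmt.Printf("%s#!~#%s\n", m.Key(), m.Field()) // prints two unique series
}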

View File

@ -0,0 +1,228 @@
package gen
import (
"fmt"
"math"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb"
)
var (
org = influxdb.ID(0xff00ff00)
bucket = influxdb.ID(0xcc00cc00)
orgBucketID = tsdb.EncodeName(org, bucket)
)
func sg(m, prefix, field string, counts ...int) SeriesGenerator {
spec := TimeSequenceSpec{Count: 1, Start: time.Unix(0, 0), Delta: time.Second}
ts := NewTimestampSequenceFromSpec(spec)
vs := NewFloatConstantValuesSequence(1)
vg := NewTimeFloatValuesSequence(spec.Count, ts, vs)
return NewSeriesGenerator(orgBucketID, []byte(field), vg, NewTagsValuesSequenceCounts(m, field, prefix, counts))
}
func tags(sb *strings.Builder, m, prefix, f string, vals []int) {
sb.WriteByte(',')
// max tag width
tw := int(math.Ceil(math.Log10(float64(len(vals)))))
tf := fmt.Sprintf("%s%%0%dd=value%%d", prefix, tw)
tvs := make([]string, 0, len(vals)+2)
tvs = append(tvs, fmt.Sprintf("%s=%s", models.MeasurementTagKey, m))
for i := range vals {
tvs = append(tvs, fmt.Sprintf(tf, i, vals[i]))
}
tvs = append(tvs, fmt.Sprintf("%s=%s", models.FieldKeyTagKey, f))
sb.WriteString(strings.Join(tvs, ","))
}
func line(name, prefix, field string, vals ...int) string {
var sb strings.Builder
sb.Write(orgBucketID[:])
tags(&sb, name, prefix, field, vals)
sb.WriteString("#!~#")
sb.WriteString(field)
return sb.String()
}
func seriesGeneratorString(sg SeriesGenerator) []string {
var lines []string
for sg.Next() {
lines = append(lines, fmt.Sprintf("%s#!~#%s", string(sg.Key()), string(sg.Field())))
}
return lines
}
func TestNewMergedSeriesGenerator(t *testing.T) {
tests := []struct {
n string
s []SeriesGenerator
exp []string
}{
{
n: "single",
s: []SeriesGenerator{
sg("cpu", "t", "f0", 2, 1),
},
exp: []string{
line("cpu", "t", "f0", 0, 0),
line("cpu", "t", "f0", 1, 0),
},
},
{
n: "multiple,interleaved",
s: []SeriesGenerator{
sg("cpu", "t", "f0", 2, 1),
sg("cpu", "t", "f1", 2, 1),
},
exp: []string{
line("cpu", "t", "f0", 0, 0),
line("cpu", "t", "f1", 0, 0),
line("cpu", "t", "f0", 1, 0),
line("cpu", "t", "f1", 1, 0),
},
},
{
n: "multiple,sequential",
s: []SeriesGenerator{
sg("cpu", "t", "f0", 2),
sg("cpu", "u", "f0", 2, 1),
},
exp: []string{
line("cpu", "t", "f0", 0),
line("cpu", "t", "f0", 1),
line("cpu", "u", "f0", 0, 0),
line("cpu", "u", "f0", 1, 0),
},
},
{
n: "multiple,sequential",
s: []SeriesGenerator{
sg("m1", "t", "f0", 2, 1),
sg("m0", "t", "f0", 2, 1),
},
exp: []string{
line("m0", "t", "f0", 0, 0),
line("m0", "t", "f0", 1, 0),
line("m1", "t", "f0", 0, 0),
line("m1", "t", "f0", 1, 0),
},
},
{
// ensure duplicates are removed
n: "duplicates",
s: []SeriesGenerator{
sg("cpu", "t", "f0", 2, 1),
sg("cpu", "t", "f0", 2, 1),
},
exp: []string{
line("cpu", "t", "f0", 0, 0),
line("cpu", "t", "f0", 1, 0),
},
},
{
// ensure duplicates are removed, but non-dupes from same SeriesGenerator
// are still included
n: "duplicates,multiple,interleaved",
s: []SeriesGenerator{
sg("cpu", "t", "f0", 2, 1),
sg("cpu", "t", "f1", 2, 1),
sg("cpu", "t", "f0", 2, 1),
sg("cpu", "t", "f1", 3, 1),
},
exp: []string{
line("cpu", "t", "f0", 0, 0),
line("cpu", "t", "f1", 0, 0),
line("cpu", "t", "f0", 1, 0),
line("cpu", "t", "f1", 1, 0),
line("cpu", "t", "f1", 2, 0),
},
},
}
for _, tt := range tests {
t.Run(tt.n, func(t *testing.T) {
sg := NewMergedSeriesGenerator(tt.s)
if got := seriesGeneratorString(sg); !cmp.Equal(got, tt.exp) {
t.Errorf("unpexected -got/+exp\n%s", cmp.Diff(got, tt.exp))
}
})
}
}
func TestNewMergedSeriesGeneratorLimit(t *testing.T) {
tests := []struct {
n string
s []SeriesGenerator
lim int64
exp []string
}{
{
n: "single",
s: []SeriesGenerator{
sg("cpu", "t", "f0", 4, 1),
},
lim: 2,
exp: []string{
line("cpu", "t", "f0", 0, 0),
line("cpu", "t", "f0", 1, 0),
},
},
{
n: "multiple,interleaved",
s: []SeriesGenerator{
sg("cpu", "t", "f0", 2, 1),
sg("cpu", "t", "f1", 2, 1),
},
lim: 3,
exp: []string{
line("cpu", "t", "f0", 0, 0),
line("cpu", "t", "f1", 0, 0),
line("cpu", "t", "f0", 1, 0),
},
},
{
n: "multiple,sequential",
s: []SeriesGenerator{
sg("cpu", "t", "f0", 2),
sg("cpu", "u", "f0", 2, 1),
},
lim: 2,
exp: []string{
line("cpu", "t", "f0", 0),
line("cpu", "t", "f0", 1),
},
},
{
n: "multiple,sequential",
s: []SeriesGenerator{
sg("m1", "t", "f0", 2, 1),
sg("m0", "t", "f0", 2, 1),
},
lim: 4,
exp: []string{
line("m0", "t", "f0", 0, 0),
line("m0", "t", "f0", 1, 0),
line("m1", "t", "f0", 0, 0),
line("m1", "t", "f0", 1, 0),
},
},
}
for _, tt := range tests {
t.Run(tt.n, func(t *testing.T) {
sg := NewMergedSeriesGeneratorLimit(tt.s, tt.lim)
if got := seriesGeneratorString(sg); !cmp.Equal(got, tt.exp) {
t.Errorf("unpexected -got/+exp\n%s", cmp.Diff(got, tt.exp))
}
})
}
}

View File

@ -0,0 +1,16 @@
// Code generated by "stringer -type=precision -trimprefix=precision"; DO NOT EDIT.
package gen
import "strconv"
const _precision_name = "MillisecondNanosecondMicrosecondSecondMinuteHour"
var _precision_index = [...]uint8{0, 11, 21, 32, 38, 44, 48}
func (i precision) String() string {
if i >= precision(len(_precision_index)-1) {
return "precision(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _precision_name[_precision_index[i]:_precision_index[i+1]]
}

pkg/data/gen/schema.go Normal file
View File

@ -0,0 +1,257 @@
package gen
import (
"fmt"
)
type Visitor interface {
Visit(node SchemaNode) (w Visitor)
}
type SchemaNode interface {
node()
}
type Schema struct {
Title string
Version string
SeriesLimit *SeriesLimit `toml:"series-limit"`
Measurements Measurements
}
func (*Schema) node() {}
type Measurements []Measurement
func (Measurements) node() {}
type Tags []Tag
func (Tags) node() {}
type Fields []Field
func (Fields) node() {}
type Measurement struct {
Name string
SeriesLimit *SeriesLimit `toml:"series-limit"`
Sample *sample
Tags Tags
Fields Fields
}
func (*Measurement) node() {}
type TagSource interface {
fmt.Stringer
SchemaNode
tagsource()
}
type Tag struct {
Name string
Source TagSource
}
func (*Tag) node() {}
type TagArraySource struct {
Values []string
}
func (*TagArraySource) node() {}
func (*TagArraySource) tagsource() {}
func (s *TagArraySource) String() string {
return fmt.Sprintf("array, source=%#v", s.Values)
}
type TagSequenceSource struct {
Format string
Start int64
Count int64
}
func (*TagSequenceSource) node() {}
func (*TagSequenceSource) tagsource() {}
func (t *TagSequenceSource) String() string {
return fmt.Sprintf("sequence, prefix=%q, range=[%d,%d)", t.Format, t.Start, t.Start+t.Count)
}
type TagFileSource struct {
Path string
}
func (*TagFileSource) node() {}
func (*TagFileSource) tagsource() {}
func (s *TagFileSource) String() string {
return fmt.Sprintf("file, path=%s", s.Path)
}
type FieldSource interface {
fmt.Stringer
SchemaNode
fieldsource()
}
type Field struct {
Name string
Count int64
TimePrecision *precision `toml:"time-precision"` // TimePrecision determines the precision for generated timestamp values
TimeInterval *duration `toml:"time-interval"` // TimeInterval determines the duration between timestamp values
Source FieldSource
}
func (t *Field) TimeSequenceSpec() TimeSequenceSpec {
if t.TimeInterval != nil {
return TimeSequenceSpec{
Count: int(t.Count),
Delta: t.TimeInterval.Duration,
}
}
if t.TimePrecision != nil {
return TimeSequenceSpec{
Count: int(t.Count),
Precision: t.TimePrecision.ToDuration(),
}
}
panic("TimeInterval and TimePrecision are nil")
}
func (*Field) node() {}
type FieldConstantValue struct {
Value interface{}
}
func (*FieldConstantValue) node() {}
func (*FieldConstantValue) fieldsource() {}
func (f *FieldConstantValue) String() string {
return fmt.Sprintf("constant, source=%#v", f.Value)
}
type FieldArraySource struct {
Value interface{}
}
func (*FieldArraySource) node() {}
func (*FieldArraySource) fieldsource() {}
func (f *FieldArraySource) String() string {
return fmt.Sprintf("array, source=%#v", f.Value)
}
type FieldFloatRandomSource struct {
Seed int64
Min, Max float64
}
func (*FieldFloatRandomSource) node() {}
func (*FieldFloatRandomSource) fieldsource() {}
func (f *FieldFloatRandomSource) String() string {
return fmt.Sprintf("rand<float>, seed=%d, min=%f, max=%f", f.Seed, f.Max, f.Max)
}
type FieldIntegerZipfSource struct {
Seed int64
S, V float64
IMAX uint64
}
func (*FieldIntegerZipfSource) node() {}
func (*FieldIntegerZipfSource) fieldsource() {}
func (f *FieldIntegerZipfSource) String() string {
return fmt.Sprintf("rand<float>, seed=%d, s=%f, v=%f, imax=%d", f.Seed, f.S, f.V, f.IMAX)
}
type VisitorFn func(node SchemaNode) bool
func (fn VisitorFn) Visit(node SchemaNode) (w Visitor) {
if fn(node) {
return fn
}
return nil
}
// WalkDown performs a pre-order, depth-first traversal of the graph, calling v for each node.
// Pre-order starts by calling the visitor for the root and each child as it traverses down
// the graph to the leaves.
func WalkDown(v Visitor, node SchemaNode) {
walk(v, node, false)
}
// WalkUp performs a post-order, depth-first traversal of the graph, calling v for each node.
// Post-order starts by calling the visitor for the leaves then each parent as it traverses up
// the graph to the root.
func WalkUp(v Visitor, node SchemaNode) {
walk(v, node, true)
}
func walk(v Visitor, node SchemaNode, up bool) Visitor {
if v == nil {
return nil
}
if !up {
if v = v.Visit(node); v == nil {
return nil
}
}
switch n := node.(type) {
case *Schema:
walk(v, n.Measurements, up)
case Measurements:
v := v
for i := range n {
v = walk(v, &n[i], up)
}
case *Measurement:
v := v
v = walk(v, n.Tags, up)
walk(v, n.Fields, up)
case Fields:
v := v
for i := 0; i < len(n); i++ {
v = walk(v, &n[i], up)
}
case Tags:
v := v
for i := 0; i < len(n); i++ {
v = walk(v, &n[i], up)
}
case *Tag:
walk(v, n.Source, up)
case *TagArraySource, *TagSequenceSource, *TagFileSource:
// nothing to do
case *Field:
walk(v, n.Source, up)
case *FieldConstantValue, *FieldArraySource, *FieldFloatRandomSource, *FieldIntegerZipfSource:
// nothing to do
default:
panic(fmt.Sprintf("schema.Walk: unexpected node type %T", n))
}
if up && v != nil {
v = v.Visit(node)
}
return v
}
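Editor's note: a small sketch of the visitor API above, counting Field nodes with VisitorFn and WalkDown; schema is an assumed, already-decoded *Schema:

var fields int
WalkDown(VisitorFn(func(node SchemaNode) bool {
	if _, ok := node.(*Field); ok {
		fields++
	}
	return true // returning true keeps descending into children
}), schema)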

View File

@ -20,7 +20,7 @@ type CounterByteSequence struct {
nfmt string
val string
s int
v int
i int
end int
}
@ -33,7 +33,7 @@ func NewCounterByteSequence(format string, start, end int) *CounterByteSequence
format: format,
nfmt: fmt.Sprintf("%%0%dd", int(math.Ceil(math.Log10(float64(end))))),
s: start,
v: start,
i: start,
end: end,
}
s.update()
@ -41,23 +41,56 @@ func NewCounterByteSequence(format string, start, end int) *CounterByteSequence
}
func (s *CounterByteSequence) Next() bool {
s.v++
if s.v >= s.end {
s.v = s.s
s.i++
if s.i >= s.end {
s.i = s.s
}
s.update()
return true
}
func (s *CounterByteSequence) update() {
s.val = fmt.Sprintf(s.format, fmt.Sprintf(s.nfmt, s.v))
s.val = fmt.Sprintf(s.format, fmt.Sprintf(s.nfmt, s.i))
}
func (s *CounterByteSequence) Value() string { return s.val }
func (s *CounterByteSequence) Count() int { return s.end - s.s }
type ConstantStringSequence string
type StringArraySequence struct {
vals []string
c int
i int
}
func (s ConstantStringSequence) Next() bool { return true }
func (s ConstantStringSequence) Value() string { return string(s) }
func (s ConstantStringSequence) Count() int { return 1 }
func NewStringArraySequence(vals []string) *StringArraySequence {
return &StringArraySequence{vals: sortDedupStrings(vals)}
}
func (s *StringArraySequence) Next() bool {
s.i++
if s.i == len(s.vals) {
s.i = 0
}
s.c = s.i
return true
}
func (s *StringArraySequence) Value() string {
return s.vals[s.c]
}
func (s *StringArraySequence) Count() int {
return len(s.vals)
}
type StringConstantSequence struct {
val string
}
func NewStringConstantSequence(val string) *StringConstantSequence {
return &StringConstantSequence{val: val}
}
func (s *StringConstantSequence) Next() bool { return true }
func (s *StringConstantSequence) Value() string { return s.val }
func (s *StringConstantSequence) Count() int { return 1 }

pkg/data/gen/series.go Normal file
View File

@ -0,0 +1,63 @@
package gen
import (
"bytes"
)
type seriesKeyField interface {
// Key returns the series key.
// The returned value may be cached.
Key() []byte
// Field returns the name of the field.
// The returned value may be modified by a subsequent call to Next.
Field() []byte
}
type constSeries struct {
key []byte
field []byte
}
func (s *constSeries) Key() []byte { return s.key }
func (s *constSeries) Field() []byte { return s.field }
var nilSeries seriesKeyField = &constSeries{}
// Compare returns an integer comparing two SeriesGenerator instances
// lexicographically.
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
// A nil argument is equivalent to an empty SeriesGenerator.
func CompareSeries(a, b seriesKeyField) int {
if a == nil {
a = nilSeries
}
if b == nil {
b = nilSeries
}
switch res := bytes.Compare(a.Key(), b.Key()); res {
case 0:
return bytes.Compare(a.Field(), b.Field())
default:
return res
}
}
func (s *constSeries) CopyFrom(a seriesKeyField) {
key := a.Key()
if cap(s.key) < len(key) {
s.key = make([]byte, len(key))
} else {
s.key = s.key[:len(key)]
}
copy(s.key, key)
field := a.Field()
if cap(s.field) < len(field) {
s.field = make([]byte, len(field))
} else {
s.field = s.field[:len(field)]
}
copy(s.field, field)
}
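Editor's note: CompareSeries orders by series key first and field second, which is the ordering the merged series generator above relies on; a tiny illustration:

a := &constSeries{key: []byte("cpu,t0=v0"), field: []byte("f0")}
b := &constSeries{key: []byte("cpu,t0=v0"), field: []byte("f1")}
_ = CompareSeries(a, b) // -1: keys are equal, so the field breaks the tie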

View File

@ -1,6 +1,9 @@
package gen
import (
"math"
"time"
"github.com/influxdata/influxdb/models"
)
@ -8,9 +11,12 @@ type SeriesGenerator interface {
// Next advances the series generator to the next series key.
Next() bool
// Name returns the name of the measurement.
// The returned value may be modified by a subsequent call to Next.
Name() []byte
// Key returns the series key.
// The returned value may be cached.
Key() []byte
// ID returns the org and bucket identifier for the series.
ID() []byte
// Tags returns the tag set.
// The returned value may be modified by a subsequent call to Next.
@ -20,14 +26,78 @@ type SeriesGenerator interface {
// The returned value may be modified by a subsequent call to Next.
Field() []byte
// ValuesGenerator returns a values sequence for the current series.
ValuesGenerator() ValuesSequence
// FieldType returns the data type for the field.
FieldType() models.FieldType
// TimeValuesGenerator returns a values sequence for the current series.
TimeValuesGenerator() TimeValuesSequence
}
type ValuesSequence interface {
type TimeSequenceSpec struct {
// Count specifies the maximum number of values to generate.
Count int
// Start specifies the starting time for the values.
Start time.Time
// Delta specifies the interval between timestamps.
Delta time.Duration
// Precision specifies the precision of timestamp intervals.
Precision time.Duration
}
func (ts TimeSequenceSpec) ForTimeRange(tr TimeRange) TimeSequenceSpec {
// Truncate time range
if ts.Delta > 0 {
tr = tr.Truncate(ts.Delta)
} else {
tr = tr.Truncate(ts.Precision)
}
ts.Start = tr.Start
if ts.Delta > 0 {
intervals := int(tr.End.Sub(tr.Start) / ts.Delta)
if intervals > ts.Count {
// if the number of intervals in the specified time range exceeds
// the maximum count, move the start forward to limit the number of values
ts.Start = tr.End.Add(-time.Duration(ts.Count) * ts.Delta)
} else {
ts.Count = intervals
}
} else {
ts.Delta = tr.End.Sub(tr.Start) / time.Duration(ts.Count)
if ts.Delta < ts.Precision {
// count is too high for the range of time and precision
ts.Count = int(tr.End.Sub(tr.Start) / ts.Precision)
ts.Delta = ts.Precision
} else {
ts.Delta = ts.Delta.Round(ts.Precision)
}
ts.Precision = 0
}
return ts
}
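// Editor's note, a worked example of the two branches above: with Count=100
// and Delta=10s over [0s, 2000s) there are 200 intervals, more than Count, so
// Start advances to End-Count*Delta = 1000s. With Precision=10s and no Delta,
// Delta is derived as 2000s/100 = 20s and rounded to the precision; both
// cases match the tests in this package.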
type TimeRange struct {
Start time.Time
End time.Time
}
func (t TimeRange) Truncate(d time.Duration) TimeRange {
return TimeRange{
Start: t.Start.Truncate(d),
End: t.End.Truncate(d),
}
}
type TimeValuesSequence interface {
Reset()
Next() bool
Values() Values
ValueType() models.FieldType
}
type Values interface {
@ -35,3 +105,76 @@ type Values interface {
MaxTime() int64
Encode([]byte) ([]byte, error)
}
type cache struct {
key []byte
tags models.Tags
}
type seriesGenerator struct {
id idType
tags TagsSequence
field []byte
vg TimeValuesSequence
n int64
c cache
}
func NewSeriesGenerator(id idType, field []byte, vg TimeValuesSequence, tags TagsSequence) SeriesGenerator {
return NewSeriesGeneratorLimit(id, field, vg, tags, math.MaxInt64)
}
func NewSeriesGeneratorLimit(id idType, field []byte, vg TimeValuesSequence, tags TagsSequence, n int64) SeriesGenerator {
return &seriesGenerator{
id: id,
field: field,
tags: tags,
vg: vg,
n: n,
}
}
func (g *seriesGenerator) Next() bool {
if g.n > 0 {
g.n--
if g.tags.Next() {
g.c = cache{}
g.vg.Reset()
return true
}
g.n = 0
}
return false
}
func (g *seriesGenerator) Key() []byte {
if len(g.c.key) == 0 {
g.c.key = models.MakeKey(g.id[:], g.tags.Value())
}
return g.c.key
}
func (g *seriesGenerator) ID() []byte {
return g.id[:]
}
func (g *seriesGenerator) Tags() models.Tags {
if len(g.c.tags) == 0 {
g.c.tags = g.tags.Value().Clone()
}
return g.c.tags
}
func (g *seriesGenerator) Field() []byte {
return g.field
}
func (g *seriesGenerator) FieldType() models.FieldType {
return g.vg.ValueType()
}
func (g *seriesGenerator) TimeValuesGenerator() TimeValuesSequence {
return g.vg
}

View File

@ -0,0 +1,161 @@
package gen
import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
)
func TestTimeSequenceSpec_ForTimeRange(t *testing.T) {
secs := func(sec int64) time.Time {
return time.Unix(sec, 0).UTC()
}
tests := []struct {
name string
ts TimeSequenceSpec
tr TimeRange
exp TimeSequenceSpec
}{
{
// this test verifies Count is reduced
// as the range has fewer intervals than Count * Delta
name: "delta/range_fewer",
ts: TimeSequenceSpec{
Count: 100,
Delta: 10 * time.Second,
},
tr: TimeRange{
Start: secs(0),
End: secs(100),
},
exp: TimeSequenceSpec{
Count: 10,
Start: secs(0),
Delta: 10 * time.Second,
},
},
{
// this test verifies Count is not adjusted
// as the range equals Count * Delta
name: "delta/range_equal",
ts: TimeSequenceSpec{
Count: 100,
Delta: 10 * time.Second,
},
tr: TimeRange{
Start: secs(0),
End: secs(1000),
},
exp: TimeSequenceSpec{
Count: 100,
Start: secs(0),
Delta: 10 * time.Second,
},
},
{
// this test verifies the Start is adjusted to
// limit the number of intervals to Count
name: "delta/range_greater",
ts: TimeSequenceSpec{
Count: 100,
Delta: 10 * time.Second,
},
tr: TimeRange{
Start: secs(0),
End: secs(2000),
},
exp: TimeSequenceSpec{
Count: 100,
Start: secs(1000),
Delta: 10 * time.Second,
},
},
{
// this test verifies Count is reduced
// as the time range has fewer intervals than Count * Precision
name: "precision/range_fewer",
ts: TimeSequenceSpec{
Count: 100,
Precision: 10 * time.Second,
},
tr: TimeRange{
Start: secs(0),
End: secs(100),
},
exp: TimeSequenceSpec{
Count: 10,
Start: secs(0),
Delta: 10 * time.Second,
},
},
{
// this test verifies Count is unchanged and Delta is a multiple
// of Precision, given the time range has more intervals
// than Count * Precision
name: "precision/range_greater",
ts: TimeSequenceSpec{
Count: 100,
Precision: 10 * time.Second,
},
tr: TimeRange{
Start: secs(0),
End: secs(2000),
},
exp: TimeSequenceSpec{
Count: 100,
Start: secs(0),
Delta: 20 * time.Second,
},
},
{
// this test verifies Count is unchanged and Delta is equal
// to Precision, given the time range has an equal number of
// intervals as Count * Precision
name: "precision/range_equal",
ts: TimeSequenceSpec{
Count: 100,
Precision: 10 * time.Second,
},
tr: TimeRange{
Start: secs(0),
End: secs(1000),
},
exp: TimeSequenceSpec{
Count: 100,
Start: secs(0),
Delta: 10 * time.Second,
},
},
{
// this test verifies Count is reduced
// as the range has fewer intervals than Count * Delta
name: "start/rounding",
ts: TimeSequenceSpec{
Count: 100,
Delta: 10 * time.Second,
},
tr: TimeRange{
Start: secs(13),
End: secs(110),
},
exp: TimeSequenceSpec{
Count: 10,
Start: secs(10),
Delta: 10 * time.Second,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.ts.ForTimeRange(tt.tr); !cmp.Equal(got, tt.exp) {
t.Errorf("unexpected value -got/+exp\n%s", cmp.Diff(got, tt.exp))
}
})
}
}

View File

@ -0,0 +1,74 @@
package gen
import (
"testing"
"github.com/google/go-cmp/cmp"
)
func TestCompareSeries(t *testing.T) {
mk := func(k, f string) seriesKeyField {
return &constSeries{key: []byte(k), field: []byte(f)}
}
tests := []struct {
name string
a seriesKeyField
b seriesKeyField
exp int
}{
{
name: "nil a,b",
exp: 0,
},
{
name: "a(nil) < b",
a: nil,
b: mk("cpu,t0=v0", "f0"),
exp: -1,
},
{
name: "a > b(nil)",
a: mk("cpu,t0=v0", "f0"),
b: nil,
exp: 1,
},
{
name: "a = b",
a: mk("cpu,t0=v0", "f0"),
b: mk("cpu,t0=v0", "f0"),
exp: 0,
},
{
name: "a(f0) < b(f1)",
a: mk("cpu,t0=v0", "f0"),
b: mk("cpu,t0=v0", "f1"),
exp: -1,
},
{
name: "a(v0) < b(v1)",
a: mk("cpu,t0=v0", "f0"),
b: mk("cpu,t0=v1", "f0"),
exp: -1,
},
{
name: "a(f1) > b(f0)",
a: mk("cpu,t0=v0", "f1"),
b: mk("cpu,t0=v0", "f0"),
exp: 1,
},
{
name: "a(v1) > b(v0)",
a: mk("cpu,t0=v1", "f0"),
b: mk("cpu,t0=v0", "f0"),
exp: 1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := CompareSeries(tt.a, tt.b); got != tt.exp {
t.Errorf("unexpected value -got/+exp\n%s", cmp.Diff(got, tt.exp))
}
})
}
}

pkg/data/gen/specs.go Normal file
View File

@ -0,0 +1,584 @@
package gen
import (
"bufio"
"fmt"
"math/rand"
"os"
"path"
"path/filepath"
"sort"
"unicode/utf8"
"github.com/BurntSushi/toml"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb"
"github.com/pkg/errors"
)
type Spec struct {
OrgID influxdb.ID
BucketID influxdb.ID
SeriesLimit *int64
Measurements []MeasurementSpec
}
type idType [influxdb.IDLength]byte
func NewSeriesGeneratorFromSpec(s *Spec, tr TimeRange) SeriesGenerator {
id := tsdb.EncodeName(s.OrgID, s.BucketID)
sg := make([]SeriesGenerator, len(s.Measurements))
for i := range s.Measurements {
sg[i] = newSeriesGeneratorFromMeasurementSpec(id, &s.Measurements[i], tr)
}
if s.SeriesLimit == nil {
return NewMergedSeriesGenerator(sg)
}
return NewMergedSeriesGeneratorLimit(sg, *s.SeriesLimit)
}
type MeasurementSpec struct {
Name string
SeriesLimit *SeriesLimit
TagsSpec *TagsSpec
FieldValuesSpec *FieldValuesSpec
}
func newSeriesGeneratorFromMeasurementSpec(id idType, ms *MeasurementSpec, tr TimeRange) SeriesGenerator {
if ms.SeriesLimit == nil {
return NewSeriesGenerator(
id,
[]byte(ms.FieldValuesSpec.Name),
newTimeValuesSequenceFromFieldValuesSpec(ms.FieldValuesSpec, tr),
newTagsSequenceFromTagsSpec(ms.Name, ms.FieldValuesSpec.Name, ms.TagsSpec))
}
return NewSeriesGeneratorLimit(
id,
[]byte(ms.FieldValuesSpec.Name),
newTimeValuesSequenceFromFieldValuesSpec(ms.FieldValuesSpec, tr),
newTagsSequenceFromTagsSpec(ms.Name, ms.FieldValuesSpec.Name, ms.TagsSpec),
int64(*ms.SeriesLimit))
}
// NewTimeValuesSequenceFn returns a TimeValuesSequence that will generate a
// sequence of values based on the spec.
type NewTimeValuesSequenceFn func(spec TimeSequenceSpec) TimeValuesSequence
type NewTagsValuesSequenceFn func() TagsSequence
type NewCountableSequenceFn func() CountableSequence
type TagsSpec struct {
Tags []*TagValuesSpec
Sample *sample
}
func newTagsSequenceFromTagsSpec(m, f string, ts *TagsSpec) TagsSequence {
var keys []string
var vals []CountableSequence
keys = append(keys, models.MeasurementTagKey)
vals = append(vals, NewStringConstantSequence(m))
for _, spec := range ts.Tags {
keys = append(keys, spec.TagKey)
vals = append(vals, spec.Values())
}
keys = append(keys, models.FieldKeyTagKey)
vals = append(vals, NewStringConstantSequence(f))
var opts []tagsValuesOption
if ts.Sample != nil && *ts.Sample != 1.0 {
opts = append(opts, TagValuesSampleOption(float64(*ts.Sample)))
}
return NewTagsValuesSequenceKeysValues(keys, vals, opts...)
}
type TagValuesSpec struct {
TagKey string
Values NewCountableSequenceFn
}
type FieldValuesSpec struct {
TimeSequenceSpec
Name string
DataType models.FieldType
Values NewTimeValuesSequenceFn
}
func newTimeValuesSequenceFromFieldValuesSpec(fs *FieldValuesSpec, tr TimeRange) TimeValuesSequence {
return fs.Values(fs.TimeSequenceSpec.ForTimeRange(tr))
}
func NewSpecFromToml(s string) (*Spec, error) {
var out Schema
if _, err := toml.Decode(s, &out); err != nil {
return nil, err
}
return NewSpecFromSchema(&out)
}
func NewSpecFromPath(p string) (*Spec, error) {
var err error
p, err = filepath.Abs(p)
if err != nil {
return nil, err
}
var out Schema
if _, err := toml.DecodeFile(p, &out); err != nil {
return nil, err
}
return newSpecFromSchema(&out, schemaDir(path.Dir(p)))
}
func NewSchemaFromPath(path string) (*Schema, error) {
var out Schema
if _, err := toml.DecodeFile(path, &out); err != nil {
return nil, err
}
return &out, nil
}
type schemaToSpecState int
const (
stateOk schemaToSpecState = iota
stateErr
)
type schemaToSpec struct {
schemaDir string
stack []interface{}
state schemaToSpecState
spec *Spec
err error
}
func (s *schemaToSpec) push(v interface{}) {
s.stack = append(s.stack, v)
}
func (s *schemaToSpec) pop() interface{} {
tail := len(s.stack) - 1
v := s.stack[tail]
s.stack[tail] = nil
s.stack = s.stack[:tail]
return v
}
func (s *schemaToSpec) peek() interface{} {
if len(s.stack) == 0 {
return nil
}
return s.stack[len(s.stack)-1]
}
func (s *schemaToSpec) Visit(node SchemaNode) (w Visitor) {
switch s.state {
case stateOk:
if s.visit(node) {
return s
}
s.state = stateErr
case stateErr:
s.visitErr(node)
}
return nil
}
func (s *schemaToSpec) visit(node SchemaNode) bool {
switch n := node.(type) {
case *Schema:
s.spec.Measurements = s.pop().([]MeasurementSpec)
if n.SeriesLimit != nil {
sl := int64(*n.SeriesLimit)
s.spec.SeriesLimit = &sl
}
case Measurements:
// flatten measurements
var mss []MeasurementSpec
for {
if specs, ok := s.peek().([]MeasurementSpec); ok {
s.pop()
mss = append(mss, specs...)
continue
}
break
}
sort.Slice(mss, func(i, j int) bool {
return mss[i].Name < mss[j].Name
})
// validate field types are homogeneous for a single measurement
mg := make(map[string]models.FieldType)
for i := range mss {
spec := &mss[i]
key := spec.Name + "." + spec.FieldValuesSpec.Name
ft := spec.FieldValuesSpec.DataType
if dt, ok := mg[key]; !ok {
mg[key] = ft
} else if dt != ft {
s.err = fmt.Errorf("field %q data-type conflict, found %s and %s",
key,
dt,
ft)
return false
}
}
s.push(mss)
case *Measurement:
if len(n.Name) == 0 {
s.err = errors.New("missing measurement name")
return false
}
fields := s.pop().([]*FieldValuesSpec)
tagsSpec := s.pop().(*TagsSpec)
tagsSpec.Sample = n.Sample
// default: sample 50%
if n.Sample == nil {
s := sample(0.5)
tagsSpec.Sample = &s
}
if *tagsSpec.Sample <= 0.0 || *tagsSpec.Sample > 1.0 {
s.err = errors.New("invalid sample, must be 0 < sample ≤ 1.0")
return false
}
var ms []MeasurementSpec
for _, spec := range fields {
ms = append(ms, MeasurementSpec{
Name: n.Name,
SeriesLimit: n.SeriesLimit,
TagsSpec: tagsSpec,
FieldValuesSpec: spec,
})
}
// NOTE: sort by measurement name + field name to ensure series are produced
// in the correct order
sort.Slice(ms, func(i, j int) bool {
return ms[i].FieldValuesSpec.Name < ms[j].FieldValuesSpec.Name
})
s.push(ms)
case Tags:
var ts TagsSpec
for {
if spec, ok := s.peek().(*TagValuesSpec); ok {
s.pop()
ts.Tags = append(ts.Tags, spec)
continue
}
break
}
// Tag keys must be sorted to produce a valid series key sequence
sort.Slice(ts.Tags, func(i, j int) bool {
return ts.Tags[i].TagKey < ts.Tags[j].TagKey
})
for i := 1; i < len(ts.Tags); i++ {
if ts.Tags[i-1].TagKey == ts.Tags[i].TagKey {
s.err = fmt.Errorf("duplicate tag keys %q", ts.Tags[i].TagKey)
return false
}
}
s.push(&ts)
case Fields:
// combine fields
var fs []*FieldValuesSpec
for {
if spec, ok := s.peek().(*FieldValuesSpec); ok {
s.pop()
fs = append(fs, spec)
continue
}
break
}
sort.Slice(fs, func(i, j int) bool {
return fs[i].Name < fs[j].Name
})
for i := 1; i < len(fs); i++ {
if fs[i-1].Name == fs[i].Name {
s.err = fmt.Errorf("duplicate field names %q", fs[i].Name)
return false
}
}
s.push(fs)
case *Field:
fs, ok := s.peek().(*FieldValuesSpec)
if !ok {
panic(fmt.Sprintf("unexpected type %T", fs))
}
fs.TimeSequenceSpec = n.TimeSequenceSpec()
fs.Name = n.Name
case *FieldConstantValue:
var fs FieldValuesSpec
switch v := n.Value.(type) {
case float64:
fs.DataType = models.Float
fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeFloatValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewFloatConstantValuesSequence(v),
)
}
case int64:
fs.DataType = models.Integer
fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeIntegerValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewIntegerConstantValuesSequence(v),
)
}
case string:
fs.DataType = models.String
fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeStringValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewStringConstantValuesSequence(v),
)
}
case bool:
fs.DataType = models.Boolean
fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeBooleanValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewBooleanConstantValuesSequence(v),
)
}
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
s.push(&fs)
case *FieldArraySource:
var fs FieldValuesSpec
switch v := n.Value.(type) {
case []float64:
fs.DataType = models.Float
fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeFloatValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewFloatArrayValuesSequence(v),
)
}
case []int64:
fs.DataType = models.Integer
fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeIntegerValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewIntegerArrayValuesSequence(v),
)
}
case []string:
fs.DataType = models.String
fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeStringValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewStringArrayValuesSequence(v),
)
}
case []bool:
fs.DataType = models.Boolean
fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeBooleanValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewBooleanArrayValuesSequence(v),
)
}
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
s.push(&fs)
case *FieldFloatRandomSource:
var fs FieldValuesSpec
fs.DataType = models.Float
fs.Values = NewTimeValuesSequenceFn(func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeFloatValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewFloatRandomValuesSequence(n.Min, n.Max, rand.New(rand.NewSource(n.Seed))),
)
})
s.push(&fs)
case *FieldIntegerZipfSource:
var fs FieldValuesSpec
fs.DataType = models.Integer
fs.Values = NewTimeValuesSequenceFn(func(spec TimeSequenceSpec) TimeValuesSequence {
return NewTimeIntegerValuesSequence(
spec.Count,
NewTimestampSequenceFromSpec(spec),
NewIntegerZipfValuesSequence(n),
)
})
s.push(&fs)
case *Tag:
s.push(&TagValuesSpec{
TagKey: n.Name,
Values: s.pop().(NewCountableSequenceFn),
})
case *TagSequenceSource:
s.push(NewCountableSequenceFn(func() CountableSequence {
return NewCounterByteSequence(n.Format, int(n.Start), int(n.Start+n.Count))
}))
case *TagFileSource:
p, err := s.resolvePath(n.Path)
if err != nil {
s.err = err
return false
}
lines, err := s.readLines(p)
if err != nil {
s.err = err
return false
}
s.push(NewCountableSequenceFn(func() CountableSequence {
return NewStringArraySequence(lines)
}))
case *TagArraySource:
s.push(NewCountableSequenceFn(func() CountableSequence {
return NewStringArraySequence(n.Values)
}))
case nil:
default:
panic(fmt.Sprintf("unexpected type %T", node))
}
return true
}
func (s *schemaToSpec) visitErr(node SchemaNode) {
switch n := node.(type) {
case *Schema:
s.err = fmt.Errorf("error processing schema: %v", s.err)
case *Measurement:
s.err = fmt.Errorf("measurement %q: %v", n.Name, s.err)
case *Tag:
s.err = fmt.Errorf("tag %q: %v", n.Name, s.err)
case *Field:
s.err = fmt.Errorf("field %q: %v", n.Name, s.err)
}
}
func (s *schemaToSpec) resolvePath(p string) (string, error) {
fullPath := os.ExpandEnv(p)
if !filepath.IsAbs(fullPath) {
fullPath = filepath.Join(s.schemaDir, fullPath)
}
fi, err := os.Stat(fullPath)
if err != nil {
return "", fmt.Errorf("error resolving path %q: %v", p, err)
}
if fi.IsDir() {
return "", fmt.Errorf("path %q is not a file: resolved to %s", p, fullPath)
}
return fullPath, nil
}
func (s *schemaToSpec) readLines(p string) ([]string, error) {
fp, err := s.resolvePath(p)
if err != nil {
return nil, err
}
f, err := os.Open(fp)
if err != nil {
return nil, fmt.Errorf("path error: %v", err)
}
defer f.Close()
scan := bufio.NewScanner(f)
scan.Split(bufio.ScanLines)
n := 0
var lines []string
for scan.Scan() {
n++
if len(scan.Bytes()) == 0 {
// skip empty lines
continue
}
if !utf8.Valid(scan.Bytes()) {
return nil, fmt.Errorf("path %q, invalid UTF-8 on line %d", p, n)
}
lines = append(lines, scan.Text())
}
if scan.Err() != nil {
return nil, scan.Err()
}
return lines, nil
}
type option func(s *schemaToSpec)
func schemaDir(p string) option {
return func(s *schemaToSpec) {
s.schemaDir = p
}
}
func NewSpecFromSchema(root *Schema) (*Spec, error) {
return newSpecFromSchema(root)
}
func newSpecFromSchema(root *Schema, opts ...option) (*Spec, error) {
var spec Spec
vis := &schemaToSpec{spec: &spec}
for _, o := range opts {
o(vis)
}
WalkUp(vis, root)
if vis.err != nil {
return nil, vis.err
}
return &spec, nil
}
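Editor's note: a hedged end-to-end sketch of the API above; the schema literal mirrors the tests below, and tr is an assumed TimeRange:

spec, err := NewSpecFromToml(`
title = "example"
[[measurements]]
name = "cpu"
tags = [ { name = "host", source = ["a", "b"] } ]
fields = [ { name = "usage", count = 10, source = 0.5 } ]
`)
if err != nil {
	return err
}
// spec.OrgID and spec.BucketID would be set by the caller before use.
sg := NewSeriesGeneratorFromSpec(spec, tr) // tr bounds and spaces the timestamps
for sg.Next() {
	// consume sg.Key(), sg.Field(), sg.TimeValuesGenerator(), ...
}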

pkg/data/gen/specs_test.go Normal file
View File

@ -0,0 +1,220 @@
package gen
import (
"testing"
"time"
"github.com/BurntSushi/toml"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/models"
)
func countableSequenceFnCmp(a, b NewCountableSequenceFn) bool {
// these aren't comparable
return true
}
func timeValuesSequenceFnCmp(a, b NewTimeValuesSequenceFn) bool {
// these aren't comparable
return true
}
func TestSpecFromSchema(t *testing.T) {
in := `
title = "example schema"
[[measurements]]
name = "m0"
tags = [
{ name = "tag0", source = [ "host1", "host2" ] },
{ name = "tag1", source = [ "process1", "process2" ] },
{ name = "tag2", source = { type = "sequence", format = "value%s", start = 0, count = 100 } }
]
fields = [
{ name = "f0", count = 5000, source = 0.5 },
{ name = "f1", count = 5000, source = 2 },
]
[[measurements]]
name = "m1"
tags = [
{ name = "tag0", source = [ "host1", "host2" ] },
]
fields = [
{ name = "f0", count = 5000, source = 0.5 },
]
`
var out Schema
if _, err := toml.Decode(in, &out); err != nil {
t.Fatalf("unxpected error: %v", err)
}
got, err := NewSpecFromSchema(&out)
if err != nil {
t.Error(err)
}
samples := []sample{0.5}
exp := &Spec{
SeriesLimit: nil,
Measurements: []MeasurementSpec{
{
Name: "m0",
SeriesLimit: nil,
TagsSpec: &TagsSpec{
Tags: []*TagValuesSpec{
{TagKey: "tag0"},
{TagKey: "tag1"},
{TagKey: "tag2"},
},
Sample: &samples[0],
},
FieldValuesSpec: &FieldValuesSpec{
TimeSequenceSpec: TimeSequenceSpec{
Count: 5000,
Precision: time.Millisecond,
},
Name: "f0",
DataType: models.Float,
},
},
{
Name: "m0",
SeriesLimit: nil,
TagsSpec: &TagsSpec{
Tags: []*TagValuesSpec{
{TagKey: "tag0"},
{TagKey: "tag1"},
{TagKey: "tag2"},
},
Sample: &samples[0],
},
FieldValuesSpec: &FieldValuesSpec{
TimeSequenceSpec: TimeSequenceSpec{
Count: 5000,
Precision: time.Millisecond,
},
Name: "f1",
DataType: models.Integer,
},
},
{
Name: "m1",
SeriesLimit: nil,
TagsSpec: &TagsSpec{
Tags: []*TagValuesSpec{
{TagKey: "tag0"},
},
Sample: &samples[0],
},
FieldValuesSpec: &FieldValuesSpec{
TimeSequenceSpec: TimeSequenceSpec{
Count: 5000,
Precision: time.Millisecond,
},
Name: "f0",
DataType: models.Float,
},
},
},
}
// TODO(sgc): use a Spec rather than closures for NewCountableSequenceFn and NewTimeValuesSequenceFn
if !cmp.Equal(got, exp, cmp.Comparer(countableSequenceFnCmp), cmp.Comparer(timeValuesSequenceFnCmp)) {
t.Errorf("unexpected spec; -got/+exp\n%s", cmp.Diff(got, exp, cmp.Comparer(countableSequenceFnCmp), cmp.Comparer(timeValuesSequenceFnCmp)))
}
}
func TestSpecFromSchemaError(t *testing.T) {
tests := []struct {
name string
in string
decodeErr string
specErr string
}{
{
in: `
[[measurements]]
tags = [ { name = "tag0", source = [ "host1", "host2" ] } ]
fields = [ { name = "f0", count = 5000, source = 0.5 } ]
`,
specErr: "error processing schema: missing measurement name",
},
{
in: `
[[measurements]]
sample = -0.1
tags = [ { name = "tag0", source = [ "host1", "host2" ] } ]
fields = [ { name = "f0", count = 5000, source = 0.5 } ]
`,
decodeErr: "sample: must be 0 < sample ≤ 1.0",
},
{
in: `
[[measurements]]
name = "m0"
tags = [ { source = [ "host1", "host2" ] } ]
fields = [ { name = "f0", count = 5000, source = 0.5 } ]
`,
decodeErr: "tag: missing or invalid value for name",
},
{
in: `
[[measurements]]
name = "m0"
tags = [ { name = "tag0" } ]
fields = [ { name = "f0", count = 5000, source = 0.5 } ]
`,
decodeErr: `missing source for tag "tag0"`,
},
{
in: `
[[measurements]]
name = "m0"
tags = [ { name = "tag0", source = [ "host1", "host2" ] } ]
fields = [ { count = 5000, source = 0.5 } ]
`,
decodeErr: `field: missing or invalid value for name`,
},
{
in: `
[[measurements]]
name = "m0"
tags = [ { name = "tag0", source = [ "host1", "host2" ] } ]
fields = [ { name = "f0", count = 5000 } ]
`,
decodeErr: `missing source for field "f0"`,
},
}
checkErr := func(t *testing.T, err error, exp string) {
t.Helper()
if exp == "" {
if err == nil {
return
}
t.Errorf("unexpected error, got %v", err)
}
if err == nil {
t.Errorf("expected error, got nil")
} else if err.Error() != exp {
t.Errorf("unexpected error, -got/+exp\n%s", cmp.Diff(err.Error(), exp))
}
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var out Schema
_, err := toml.Decode(test.in, &out)
checkErr(t, err, test.decodeErr)
if test.decodeErr == "" {
_, err = NewSpecFromSchema(&out)
checkErr(t, err, test.specErr)
}
})
}
}

View File

@ -3,6 +3,7 @@ package gen
import (
"fmt"
"math"
"math/rand"
"sort"
"github.com/influxdata/influxdb/models"
@ -14,14 +15,42 @@ type TagsSequence interface {
Count() int
}
type TagsValuesSequence struct {
tags models.Tags
vals []CountableSequence
n int
max int
type tagsValuesSequence struct {
tags models.Tags
vals []CountableSequence
n int
count int
sample float64
src rand.Source
nextFn func(*tagsValuesSequence) bool
}
func NewTagsValuesSequenceKeysValues(keys []string, vals []CountableSequence) *TagsValuesSequence {
type tagsValuesOption func(s *tagsValuesSequence)
func TagValuesLimitOption(n int) tagsValuesOption {
return func(s *tagsValuesSequence) {
if n >= s.count {
return
}
s.src = rand.NewSource(20040409)
s.sample = float64(n) / float64(s.count)
}
}
func TagValuesSampleOption(n float64) tagsValuesOption {
return func(s *tagsValuesSequence) {
if n <= 0.0 || n > 1.0 {
panic("expect: 0.0 < n ≤ 1.0")
}
s.src = rand.NewSource(int64(float64(math.MaxInt64>>1) * n))
s.sample = n
s.nextFn = (*tagsValuesSequence).nextSample
}
}
func NewTagsValuesSequenceKeysValues(keys []string, vals []CountableSequence, opts ...tagsValuesOption) TagsSequence {
tm := make(map[string]string, len(keys))
for _, k := range keys {
tm[k] = ""
@ -35,26 +64,51 @@ func NewTagsValuesSequenceKeysValues(keys []string, vals []CountableSequence) *T
// models.Tags are ordered, so ensure vals are ordered with respect to keys
sort.Sort(keyValues{keys, vals})
return &TagsValuesSequence{
tags: models.NewTags(tm),
vals: vals,
max: count,
s := &tagsValuesSequence{
tags: models.NewTags(tm),
vals: vals,
count: count,
nextFn: (*tagsValuesSequence).next,
}
for _, o := range opts {
o(s)
}
return s
}
func NewTagsValuesSequenceValues(prefix string, vals []CountableSequence) *TagsValuesSequence {
keys := make([]string, len(vals))
func NewTagsValuesSequenceValues(m, f, prefix string, tv []CountableSequence) TagsSequence {
keys := make([]string, 0, len(tv)+2)
vals := make([]CountableSequence, 0, len(tv)+2)
keys = append(keys, models.MeasurementTagKey)
vals = append(vals, NewStringConstantSequence(m))
// max tag width
tw := int(math.Ceil(math.Log10(float64(len(vals)))))
tw := int(math.Ceil(math.Log10(float64(len(tv)))))
tf := fmt.Sprintf("%s%%0%dd", prefix, tw)
for i := range vals {
keys[i] = fmt.Sprintf(tf, i)
for i := range tv {
keys = append(keys, fmt.Sprintf(tf, i))
vals = append(vals, tv[i])
}
keys = append(keys, models.FieldKeyTagKey)
vals = append(vals, NewStringConstantSequence(f))
return NewTagsValuesSequenceKeysValues(keys, vals)
}
func (s *TagsValuesSequence) Next() bool {
if s.n >= s.max {
func NewTagsValuesSequenceCounts(m, f, prefix string, counts []int) TagsSequence {
tv := make([]CountableSequence, len(counts))
for i := range counts {
tv[i] = NewCounterByteSequenceCount(counts[i])
}
return NewTagsValuesSequenceValues(m, f, prefix, tv)
}
func (s *tagsValuesSequence) next() bool {
if s.n >= s.count {
return false
}
@ -77,8 +131,46 @@ func (s *TagsValuesSequence) Next() bool {
return true
}
func (s *TagsValuesSequence) Value() models.Tags { return s.tags }
func (s *TagsValuesSequence) Count() int { return s.max }
func (s *tagsValuesSequence) skip() bool {
// Int63()>>10 keeps 53 random bits; scaling by 1/2^53 yields a uniform
// float64 in [0, 1), which is compared against the sample rate.
return (float64(s.src.Int63()>>10))*(1.0/9007199254740992.0) > s.sample
}
func (s *tagsValuesSequence) nextSample() bool {
if s.n >= s.count {
return false
}
for i := range s.vals {
s.tags[i].Value = []byte(s.vals[i].Value())
}
for {
s.n++
i := s.n
for j := len(s.vals) - 1; j >= 0; j-- {
v := s.vals[j]
v.Next()
c := v.Count()
if r := i % c; r != 0 {
break
}
i /= c
}
if !s.skip() {
break
}
}
return true
}
func (s *tagsValuesSequence) Next() bool {
return s.nextFn(s)
}
func (s *tagsValuesSequence) Value() models.Tags { return s.tags }
func (s *tagsValuesSequence) Count() int { return s.count }
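// Editor's note, a hedged usage sketch of the sampling option above (keys and
// vals are assumed parallel slices, as accepted by
// NewTagsValuesSequenceKeysValues):
//
//	seq := NewTagsValuesSequenceKeysValues(keys, vals, TagValuesSampleOption(0.25))
//	for seq.Next() {
//		_ = seq.Value() // roughly 25% of the tag combinations, seeded source
//	}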
type keyValues struct {
keys []string

View File

@ -0,0 +1,36 @@
package gen
type TimestampSequence interface {
Reset()
Write(ts []int64)
}
type timestampSequence struct {
t int64
start int64
delta int64
}
func NewTimestampSequenceFromSpec(spec TimeSequenceSpec) TimestampSequence {
return &timestampSequence{
t: spec.Start.UnixNano(),
start: spec.Start.UnixNano(),
delta: int64(spec.Delta),
}
}
func (g *timestampSequence) Reset() {
g.t = g.start
}
func (g *timestampSequence) Write(ts []int64) {
var (
t = g.t
d = g.delta
)
for i := 0; i < len(ts); i++ {
ts[i] = t
t += d
}
g.t = t
}

pkg/data/gen/toml.go Normal file
View File

@ -0,0 +1,460 @@
package gen
import (
"errors"
"fmt"
"strings"
"time"
"github.com/spf13/cast"
)
type SeriesLimit int64
func (s *SeriesLimit) UnmarshalTOML(data interface{}) error {
v, ok := data.(int64)
if !ok {
return errors.New("series-limit: invalid value")
}
if v < 0 {
return errors.New("series-limit: must be ≥ 0")
}
*s = SeriesLimit(v)
return nil
}
type sample float64
func (s *sample) UnmarshalTOML(data interface{}) error {
v, ok := data.(float64)
if !ok {
return errors.New("sample: must be a float")
}
if v <= 0 || v > 1.0 {
return errors.New("sample: must be 0 < sample ≤ 1.0")
}
*s = sample(v)
return nil
}
type duration struct {
time.Duration
}
func (d *duration) UnmarshalTOML(data interface{}) error {
text, ok := data.(string)
if !ok {
return fmt.Errorf("invalid duration, expect a Go duration as a string: %T", data)
}
return d.UnmarshalText([]byte(text))
}
func (d *duration) UnmarshalText(text []byte) error {
s := string(text)
var err error
d.Duration, err = time.ParseDuration(s)
if err != nil {
return err
}
if d.Duration == 0 {
d.Duration, err = time.ParseDuration("1" + s)
if err != nil {
return err
}
}
if d.Duration <= 0 {
return fmt.Errorf("invalid duration, must be > 0: %s", d.Duration)
}
return nil
}
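// Editor's note: the zero-duration fallback above makes a bare unit string a
// valid shorthand, e.g. time-interval = "s" parses the same as "1s".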
type precision byte
const (
precisionMillisecond precision = iota // default
precisionNanosecond
precisionMicrosecond
precisionSecond
precisionMinute
precisionHour
)
var precisionToDuration = [...]time.Duration{
time.Millisecond,
time.Nanosecond,
time.Microsecond,
time.Second,
time.Minute,
time.Minute * 60,
time.Nanosecond, // padding: ToDuration masks with 0x7, so keep 8 entries
time.Nanosecond,
}
func (p *precision) ToDuration() time.Duration {
return precisionToDuration[*p&0x7]
}
func (p *precision) UnmarshalTOML(data interface{}) error {
d, ok := data.(string)
if !ok {
return fmt.Errorf("invalid precision, expect one of (ns, us, ms, s, m, h): %T", data)
}
d = strings.ToLower(d)
switch d {
case "ns", "nanosecond":
*p = precisionNanosecond
case "us", "microsecond", "µs":
*p = precisionMicrosecond
case "ms", "millisecond":
*p = precisionMillisecond
case "s", "second":
*p = precisionSecond
case "m", "minute":
*p = precisionMinute
case "h", "hour":
*p = precisionHour
default:
return fmt.Errorf("invalid precision, expect one of (ns, ms, s, m, h): %s", d)
}
return nil
}
func (t *Tag) UnmarshalTOML(data interface{}) error {
d, ok := data.(map[string]interface{})
if !ok {
return nil
}
if n, ok := d["name"].(string); !ok || n == "" {
return errors.New("tag: missing or invalid value for name")
} else {
t.Name = n
}
// infer source
if _, ok := d["source"]; !ok {
return fmt.Errorf("missing source for tag %q", t.Name)
}
switch v := d["source"].(type) {
case int64, string, float64, bool:
if src, err := decodeTagConstantSource(v); err != nil {
return err
} else {
t.Source = src
}
case []interface{}:
if src, err := decodeTagArraySource(v); err != nil {
return err
} else {
t.Source = src
}
case map[string]interface{}:
if src, err := decodeTagSource(v); err != nil {
return err
} else {
t.Source = src
}
default:
return fmt.Errorf("invalid source for tag %q: %T", t.Name, v)
}
return nil
}
func decodeTagConstantSource(data interface{}) (TagSource, error) {
switch data.(type) {
case int64, string, float64, bool:
if src, err := cast.ToStringE(data); err != nil {
return nil, err
} else {
return &TagArraySource{Values: []string{src}}, nil
}
}
return nil, errors.New("invalid constant tag source")
}
func decodeTagArraySource(data []interface{}) (TagSource, error) {
if len(data) == 0 {
return nil, errors.New("empty array source")
}
if src, err := cast.ToStringSliceE(data); err != nil {
return nil, err
} else {
return &TagArraySource{Values: src}, nil
}
}
func decodeTagSource(data map[string]interface{}) (TagSource, error) {
typ, ok := data["type"].(string)
if !ok {
return nil, errors.New("missing type field")
}
switch typ {
case "sequence":
return decodeTagSequenceSource(data)
case "file":
return decodeTagFileSource(data)
default:
return nil, fmt.Errorf("invalid type field %q", typ)
}
}
func decodeTagFileSource(data map[string]interface{}) (TagSource, error) {
var s TagFileSource
if v, ok := data["path"].(string); ok {
s.Path = v
} else {
return nil, errors.New("file: missing path")
}
return &s, nil
}
func decodeTagSequenceSource(data map[string]interface{}) (TagSource, error) {
var s TagSequenceSource
if v, ok := data["format"].(string); ok {
// TODO(sgc): validate format string
s.Format = v
} else {
s.Format = "value%s"
}
if v, ok := data["start"]; ok {
if v, err := cast.ToInt64E(v); err != nil {
return nil, fmt.Errorf("tag.sequence: invalid start, %v", err)
} else if v < 0 {
return nil, fmt.Errorf("tag.sequence: start must be ≥ 0")
} else {
s.Start = v
}
}
if v, ok := data["count"]; ok {
if v, err := cast.ToInt64E(v); err != nil {
return nil, fmt.Errorf("tag.sequence: invalid count, %v", err)
} else if v < 0 {
return nil, fmt.Errorf("tag.sequence: count must be > 0")
} else {
s.Count = v
}
} else {
return nil, fmt.Errorf("tag.sequence: missing count")
}
return &s, nil
}
func (t *Field) UnmarshalTOML(data interface{}) error {
d, ok := data.(map[string]interface{})
if !ok {
return nil
}
if n, ok := d["name"].(string); !ok || n == "" {
return errors.New("field: missing or invalid value for name")
} else {
t.Name = n
}
if n, ok := d["count"]; !ok {
return errors.New("field: missing value for count")
} else if count, err := cast.ToInt64E(n); err != nil {
return fmt.Errorf("field: invalid count, %v", err)
} else if count <= 0 {
return errors.New("field: count must be > 0")
} else {
t.Count = count
}
if n, ok := d["time-precision"]; ok {
var tp precision
if err := tp.UnmarshalTOML(n); err != nil {
return err
}
t.TimePrecision = &tp
}
if n, ok := d["time-interval"]; ok {
var ti duration
if err := ti.UnmarshalTOML(n); err != nil {
return err
}
t.TimeInterval = &ti
t.TimePrecision = nil
}
if t.TimePrecision == nil && t.TimeInterval == nil {
var tp precision
t.TimePrecision = &tp
}
// infer source
if _, ok := d["source"]; !ok {
return fmt.Errorf("missing source for field %q", t.Name)
}
switch v := d["source"].(type) {
case int64, string, float64, bool:
t.Source = &FieldConstantValue{v}
case []interface{}:
if src, err := decodeFieldArraySource(v); err != nil {
return err
} else {
t.Source = src
}
case map[string]interface{}:
if src, err := decodeFieldSource(v); err != nil {
return err
} else {
t.Source = src
}
default:
// unknown
return fmt.Errorf("invalid source for tag %q: %T", t.Name, v)
}
return nil
}
func decodeFieldArraySource(data []interface{}) (FieldSource, error) {
if len(data) == 0 {
return nil, errors.New("empty array")
}
var (
src interface{}
err error
)
// use first value to determine slice type
switch data[0].(type) {
case int64:
src, err = toInt64SliceE(data)
case float64:
src, err = toFloat64SliceE(data)
case string:
src, err = cast.ToStringSliceE(data)
case bool:
src, err = cast.ToBoolSliceE(data)
default:
err = fmt.Errorf("unsupported field source data type: %T", data[0])
}
if err != nil {
return nil, err
}
return &FieldArraySource{Value: src}, nil
}
func decodeFieldSource(data map[string]interface{}) (FieldSource, error) {
typ, ok := data["type"].(string)
if !ok {
return nil, errors.New("missing type field")
}
switch typ {
case "rand<float>":
return decodeFloatRandomSource(data)
case "zipf<integer>":
return decodeIntegerZipfSource(data)
default:
return nil, fmt.Errorf("invalid type field %q", typ)
}
}
func decodeFloatRandomSource(data map[string]interface{}) (FieldSource, error) {
var s FieldFloatRandomSource
if v, ok := data["seed"]; ok {
if v, err := cast.ToInt64E(v); err != nil {
return nil, fmt.Errorf("rand<float>: invalid seed, %v", err)
} else {
s.Seed = v
}
}
if v, ok := data["min"]; ok {
if v, err := cast.ToFloat64E(v); err != nil {
return nil, fmt.Errorf("rand<float>: invalid min, %v", err)
} else {
s.Min = v
}
}
if v, ok := data["max"]; ok {
if v, err := cast.ToFloat64E(v); err != nil {
return nil, fmt.Errorf("rand<float>: invalid max, %v", err)
} else {
s.Max = v
}
} else {
s.Max = 1.0
}
if !(s.Min <= s.Max) {
return nil, errors.New("rand<float>: min ≤ max")
}
return &s, nil
}
func decodeIntegerZipfSource(data map[string]interface{}) (FieldSource, error) {
var s FieldIntegerZipfSource
if v, ok := data["seed"]; ok {
if v, err := cast.ToInt64E(v); err != nil {
return nil, fmt.Errorf("zipf<integer>: invalid seed, %v", err)
} else {
s.Seed = v
}
}
if v, ok := data["s"]; ok {
if v, err := cast.ToFloat64E(v); err != nil || v <= 1.0 {
return nil, fmt.Errorf("zipf<integer>: invalid value for s (s > 1), %v", err)
} else {
s.S = v
}
} else {
return nil, fmt.Errorf("zipf<integer>: missing value for s")
}
if v, ok := data["v"]; ok {
if v, err := cast.ToFloat64E(v); err != nil || v < 1.0 {
return nil, fmt.Errorf("zipf<integer>: invalid value for v (v ≥ 1), %v", err)
} else {
s.V = v
}
} else {
return nil, fmt.Errorf("zipf<integer>: missing value for v")
}
if v, ok := data["imax"]; ok {
if v, err := cast.ToUint64E(v); err != nil {
return nil, fmt.Errorf("zipf<integer>: invalid value for imax, %v", err)
} else {
s.IMAX = v
}
} else {
return nil, fmt.Errorf("zipf<integer>: missing value for imax")
}
return &s, nil
}
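Field sources follow the same pattern: a scalar becomes a FieldConstantValue, an array is typed by its first element, and an inline table dispatches on "type". A small sketch under the constraints enforced above (s > 1, v ≥ 1; values are illustrative):

    // Array source: the first element selects the slice type ([]int64 here).
    arr, _ := decodeFieldArraySource([]interface{}{int64(5), int64(6), int64(7)})
    // Table source: a Zipf distribution over integers.
    zipf, _ := decodeFieldSource(map[string]interface{}{
        "type": "zipf<integer>", "s": 1.5, "v": 1.0, "imax": int64(1000),
    })
    _, _ = arr, zipf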

169
pkg/data/gen/toml_test.go Normal file
View File

@ -0,0 +1,169 @@
package gen
import (
"fmt"
"strings"
"testing"
"github.com/BurntSushi/toml"
"github.com/google/go-cmp/cmp"
)
func visit(root *Schema) string {
w := &strings.Builder{}
walkFn := func(node SchemaNode) bool {
switch n := node.(type) {
case *Schema:
case Measurements:
fmt.Fprintln(w, "Measurements: ")
case *Measurement:
fmt.Fprintln(w)
fmt.Fprintf(w, " Name: %s\n", n.Name)
case Tags:
fmt.Fprintln(w, " Tags:")
case Fields:
fmt.Fprintln(w, " Fields:")
case *Field:
if n.TimePrecision != nil {
fmt.Fprintf(w, " %s: %s, count=%d, time-precision=%s\n", n.Name, n.Source, n.Count, *n.TimePrecision)
} else {
fmt.Fprintf(w, " %s: %s, count=%d, time-interval=%s\n", n.Name, n.Source, n.Count, n.TimeInterval)
}
case *Tag:
fmt.Fprintf(w, " %s: %s\n", n.Name, n.Source)
}
return true
}
WalkDown(VisitorFn(walkFn), root)
return w.String()
}
func TestSchema(t *testing.T) {
in := `
title = "example schema"
series-limit = 10
[[measurements]]
name = "constant"
series-limit = 5
[[measurements.tags]]
name = "tag0"
source = [ "host1", "host2" ]
[[measurements.tags]]
name = "tag1"
source = { type = "file", path = "foo.txt" }
[[measurements.fields]]
name = "floatC"
count = 5000
source = 0.5
time-precision = "us"
[[measurements.fields]]
name = "integerC"
count = 5000
source = 3
time-precision = "hour"
[[measurements.fields]]
name = "stringC"
count = 5000
source = "hello"
time-interval = "60s"
[[measurements.fields]]
name = "stringA"
count = 5000
source = ["hello", "world"]
[[measurements.fields]]
name = "boolf"
count = 5000
source = false
[[measurements]]
name = "random"
[[measurements.tags]]
name = "tagSeq"
source = { type = "sequence", format = "value%s", start = 0, count = 100 }
[[measurements.fields]]
name = "floatR"
count = 5000
source = { type = "rand<float>", min = 0.5, max = 50.1, seed = 10 }
time-precision = "us"
[[measurements]]
name = "array"
[[measurements.tags]]
name = "tagSeq"
source = { type = "sequence", format = "value%s", start = 0, count = 100 }
[[measurements.tags]]
name = "tagFile"
source = { type = "file", path = "foo.txt" }
[[measurements.fields]]
name = "stringA"
count = 1000
source = ["this", "that"]
time-precision = "us"
[[measurements.fields]]
name = "integerA"
count = 1000
source = [5, 6, 7]
time-interval = "90s"
`
var out Schema
_, err := toml.Decode(in, &out)
if err != nil {
t.Fatalf("unxpected error: %v", err)
}
exp := `Measurements:
Name: constant
Tags:
tag0: array, source=[]string{"host1", "host2"}
tag1: file, path=foo.txt
Fields:
floatC: constant, source=0.5, count=5000, time-precision=Microsecond
integerC: constant, source=3, count=5000, time-precision=Hour
stringC: constant, source="hello", count=5000, time-interval=1m0s
stringA: array, source=[]string{"hello", "world"}, count=5000, time-precision=Millisecond
boolf: constant, source=false, count=5000, time-precision=Millisecond
Name: random
Tags:
tagSeq: sequence, prefix="value%s", range=[0,100)
Fields:
floatR: rand<float>, seed=10, min=0.500000, max=50.100000, count=5000, time-precision=Microsecond
Name: array
Tags:
tagSeq: sequence, prefix="value%s", range=[0,100)
tagFile: file, path=foo.txt
Fields:
stringA: array, source=[]string{"this", "that"}, count=1000, time-precision=Microsecond
integerA: array, source=[]int64{5, 6, 7}, count=1000, time-interval=1m30s
`
if got := visit(&out); !cmp.Equal(got, exp) {
t.Errorf("unexpected value, -got/+exp\n%s", cmp.Diff(got, exp))
}
}

View File

@ -2,17 +2,20 @@
{
"Name":"Float",
"name":"float",
"Type":"float64"
"Type":"float64",
"Rand":"Float64"
},
{
"Name":"Integer",
"name":"integer",
"Type":"int64"
"Type":"int64",
"Rand":"Int64"
},
{
"Name":"Unsigned",
"name":"unsigned",
"Type":"uint64"
"Type":"uint64",
"Rand":"Uint64"
},
{
"Name":"String",

View File

@ -1,8 +1,87 @@
package gen
import (
"fmt"
"reflect"
"sort"
"github.com/spf13/cast"
)
func min(a, b int) int {
if a < b {
return a
}
return b
}
func sortDedupStrings(in []string) []string {
if len(in) == 0 {
// Avoid the out-of-range in[:j+1] below on empty input.
return in
}
sort.Strings(in)
j := 0
for i := 1; i < len(in); i++ {
if in[j] == in[i] {
continue
}
j++
in[j] = in[i]
}
return in[:j+1]
}
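For reference, a quick illustration of the in-place behavior (note the input slice is reordered and truncated):

    hosts := sortDedupStrings([]string{"b", "a", "b"})
    // hosts == []string{"a", "b"}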
// toInt64SliceE casts an interface to a []int64 type.
func toInt64SliceE(i interface{}) ([]int64, error) {
if i == nil {
return []int64{}, fmt.Errorf("unable to cast %#v of type %T to []int64", i, i)
}
switch v := i.(type) {
case []int64:
return v, nil
}
kind := reflect.TypeOf(i).Kind()
switch kind {
case reflect.Slice, reflect.Array:
s := reflect.ValueOf(i)
a := make([]int64, s.Len())
for j := 0; j < s.Len(); j++ {
val, err := cast.ToInt64E(s.Index(j).Interface())
if err != nil {
return []int64{}, fmt.Errorf("unable to cast %#v of type %T to []int64", i, i)
}
a[j] = val
}
return a, nil
default:
return []int64{}, fmt.Errorf("unable to cast %#v of type %T to []int64", i, i)
}
}
// toFloat64SliceE casts an interface to a []float64 type.
func toFloat64SliceE(i interface{}) ([]float64, error) {
if i == nil {
return []float64{}, fmt.Errorf("unable to cast %#v of type %T to []float64", i, i)
}
switch v := i.(type) {
case []float64:
return v, nil
}
kind := reflect.TypeOf(i).Kind()
switch kind {
case reflect.Slice, reflect.Array:
s := reflect.ValueOf(i)
a := make([]float64, s.Len())
for j := 0; j < s.Len(); j++ {
val, err := cast.ToFloat64E(s.Index(j).Interface())
if err != nil {
return []float64{}, fmt.Errorf("unable to cast %#v of type %T to []float64", i, i)
}
a[j] = val
}
return a, nil
default:
return []float64{}, fmt.Errorf("unable to cast %#v of type %T to []float64", i, i)
}
}

252
pkg/data/gen/values.gen.go Normal file
View File

@ -0,0 +1,252 @@
// Generated by tmpl
// https://github.com/benbjohnson/tmpl
//
// DO NOT EDIT!
// Source: values.gen.go.tmpl
package gen
type floatConstantValuesSequence struct {
v float64
}
func NewFloatConstantValuesSequence(v float64) FloatValuesSequence {
return &floatConstantValuesSequence{
v: v,
}
}
func (g *floatConstantValuesSequence) Reset() {
}
func (g *floatConstantValuesSequence) Write(vs []float64) {
for i := 0; i < len(vs); i++ {
vs[i] = g.v
}
}
type integerConstantValuesSequence struct {
v int64
}
func NewIntegerConstantValuesSequence(v int64) IntegerValuesSequence {
return &integerConstantValuesSequence{
v: v,
}
}
func (g *integerConstantValuesSequence) Reset() {
}
func (g *integerConstantValuesSequence) Write(vs []int64) {
for i := 0; i < len(vs); i++ {
vs[i] = g.v
}
}
type unsignedConstantValuesSequence struct {
v uint64
}
func NewUnsignedConstantValuesSequence(v uint64) UnsignedValuesSequence {
return &unsignedConstantValuesSequence{
v: v,
}
}
func (g *unsignedConstantValuesSequence) Reset() {
}
func (g *unsignedConstantValuesSequence) Write(vs []uint64) {
for i := 0; i < len(vs); i++ {
vs[i] = g.v
}
}
type stringConstantValuesSequence struct {
v string
}
func NewStringConstantValuesSequence(v string) StringValuesSequence {
return &stringConstantValuesSequence{
v: v,
}
}
func (g *stringConstantValuesSequence) Reset() {
}
func (g *stringConstantValuesSequence) Write(vs []string) {
for i := 0; i < len(vs); i++ {
vs[i] = g.v
}
}
type booleanConstantValuesSequence struct {
v bool
}
func NewBooleanConstantValuesSequence(v bool) BooleanValuesSequence {
return &booleanConstantValuesSequence{
v: v,
}
}
func (g *booleanConstantValuesSequence) Reset() {
}
func (g *booleanConstantValuesSequence) Write(vs []bool) {
for i := 0; i < len(vs); i++ {
vs[i] = g.v
}
}
type floatArrayValuesSequence struct {
v []float64
vi int
}
func NewFloatArrayValuesSequence(v []float64) FloatValuesSequence {
return &floatArrayValuesSequence{
v: v,
}
}
func (g *floatArrayValuesSequence) Reset() {
g.vi = 0
}
func (g *floatArrayValuesSequence) Write(vs []float64) {
var (
v = g.v
vi = g.vi
)
for i := 0; i < len(vs); i++ {
if vi >= len(v) {
vi = 0
}
vs[i] = v[vi]
vi += 1
}
g.vi = vi
}
type integerArrayValuesSequence struct {
v []int64
vi int
}
func NewIntegerArrayValuesSequence(v []int64) IntegerValuesSequence {
return &integerArrayValuesSequence{
v: v,
}
}
func (g *integerArrayValuesSequence) Reset() {
g.vi = 0
}
func (g *integerArrayValuesSequence) Write(vs []int64) {
var (
v = g.v
vi = g.vi
)
for i := 0; i < len(vs); i++ {
if vi >= len(v) {
vi = 0
}
vs[i] = v[vi]
vi += 1
}
g.vi = vi
}
type unsignedArrayValuesSequence struct {
v []uint64
vi int
}
func NewUnsignedArrayValuesSequence(v []uint64) UnsignedValuesSequence {
return &unsignedArrayValuesSequence{
v: v,
}
}
func (g *unsignedArrayValuesSequence) Reset() {
g.vi = 0
}
func (g *unsignedArrayValuesSequence) Write(vs []uint64) {
var (
v = g.v
vi = g.vi
)
for i := 0; i < len(vs); i++ {
if vi >= len(v) {
vi = 0
}
vs[i] = v[vi]
vi += 1
}
g.vi = vi
}
type stringArrayValuesSequence struct {
v []string
vi int
}
func NewStringArrayValuesSequence(v []string) StringValuesSequence {
return &stringArrayValuesSequence{
v: v,
}
}
func (g *stringArrayValuesSequence) Reset() {
g.vi = 0
}
func (g *stringArrayValuesSequence) Write(vs []string) {
var (
v = g.v
vi = g.vi
)
for i := 0; i < len(vs); i++ {
if vi >= len(v) {
vi = 0
}
vs[i] = v[vi]
vi += 1
}
g.vi = vi
}
type booleanArrayValuesSequence struct {
v []bool
vi int
}
func NewBooleanArrayValuesSequence(v []bool) BooleanValuesSequence {
return &booleanArrayValuesSequence{
v: v,
}
}
func (g *booleanArrayValuesSequence) Reset() {
g.vi = 0
}
func (g *booleanArrayValuesSequence) Write(vs []bool) {
var (
v = g.v
vi = g.vi
)
for i := 0; i < len(vs); i++ {
if vi >= len(v) {
vi = 0
}
vs[i] = v[vi]
vi += 1
}
g.vi = vi
}
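The array sequences cycle through their backing slice and keep their position between calls, so consecutive Write calls continue where the previous one stopped; Reset rewinds to the first element. A small sketch:

    seq := NewFloatArrayValuesSequence([]float64{1, 2, 3})
    buf := make([]float64, 5)
    seq.Write(buf) // buf = [1 2 3 1 2]
    seq.Write(buf) // buf = [3 1 2 3 1], continuing from the saved index
    seq.Reset()    // the next Write starts at 1 again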

View File

@ -0,0 +1,54 @@
package gen
{{range .}}
type {{.name}}ConstantValuesSequence struct {
v {{.Type}}
}
func New{{.Name}}ConstantValuesSequence(v {{.Type}}) {{.Name}}ValuesSequence {
return &{{.name}}ConstantValuesSequence{
v: v,
}
}
func (g *{{.name}}ConstantValuesSequence) Reset() {
}
func (g *{{.name}}ConstantValuesSequence) Write(vs []{{.Type}}) {
for i := 0; i < len(vs); i++ {
vs[i] = g.v
}
}
{{end}}
{{range .}}
type {{.name}}ArrayValuesSequence struct {
v []{{.Type}}
vi int
}
func New{{.Name}}ArrayValuesSequence(v []{{.Type}}) {{.Name}}ValuesSequence {
return &{{.name}}ArrayValuesSequence{
v: v,
}
}
func (g *{{.name}}ArrayValuesSequence) Reset() {
g.vi = 0
}
func (g *{{.name}}ArrayValuesSequence) Write(vs []{{.Type}}) {
var (
v = g.v
vi = g.vi
)
for i := 0; i < len(vs); i++ {
if vi >= len(v) {
vi = 0
}
vs[i] = v[vi]
vi += 1
}
g.vi = vi
}
{{end}}

46
pkg/data/gen/values.go Normal file
View File

@ -0,0 +1,46 @@
package gen
import (
"math/rand"
)
type floatRandomValuesSequence struct {
r *rand.Rand
a float64
b float64
}
func NewFloatRandomValuesSequence(min, max float64, r *rand.Rand) FloatValuesSequence {
return &floatRandomValuesSequence{r: r, a: max - min, b: min}
}
func (g *floatRandomValuesSequence) Reset() {}
func (g *floatRandomValuesSequence) Write(vs []float64) {
var (
a = g.a
b = g.b
)
for i := 0; i < len(vs); i++ {
vs[i] = a*g.r.Float64() + b // ax + b
}
}
type integerRandomValuesSequence struct {
r *rand.Zipf
}
// NewIntegerZipfValuesSequence produces int64 values using a Zipfian distribution
// described by s.
func NewIntegerZipfValuesSequence(s *FieldIntegerZipfSource) IntegerValuesSequence {
r := rand.New(rand.NewSource(s.Seed))
return &integerRandomValuesSequence{r: rand.NewZipf(r, s.S, s.V, s.IMAX)}
}
func (g *integerRandomValuesSequence) Reset() {}
func (g *integerRandomValuesSequence) Write(vs []int64) {
for i := 0; i < len(vs); i++ {
vs[i] = int64(g.r.Uint64())
}
}
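A short sketch of constructing these sequences directly (in package gen, with math/rand imported; the FieldIntegerZipfSource values are illustrative and must satisfy the decoder's constraints, s > 1 and v ≥ 1):

    r := rand.New(rand.NewSource(10))
    floats := NewFloatRandomValuesSequence(0.5, 50.1, r) // uniform values in [0.5, 50.1)
    fbuf := make([]float64, 4)
    floats.Write(fbuf)

    ints := NewIntegerZipfValuesSequence(&FieldIntegerZipfSource{Seed: 10, S: 1.5, V: 1.0, IMAX: 1000})
    ibuf := make([]int64, 4)
    ints.Write(ibuf)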

View File

@ -1,308 +0,0 @@
// Generated by tmpl
// https://github.com/benbjohnson/tmpl
//
// DO NOT EDIT!
// Source: values_constant.gen.go.tmpl
package gen
import (
"time"
"github.com/influxdata/influxdb/tsdb/cursors"
)
type FloatConstantValuesSequence struct {
vals FloatArray
n int
t int64
state struct {
n int
t int64
d int64
v float64
}
}
func NewFloatConstantValuesSequence(n int, start time.Time, delta time.Duration, v float64) *FloatConstantValuesSequence {
g := &FloatConstantValuesSequence{
vals: *NewFloatArrayLen(cursors.DefaultMaxPointsPerBlock),
}
g.state.n = n
g.state.t = start.UnixNano()
g.state.d = int64(delta)
g.state.v = v
g.Reset()
return g
}
func (g *FloatConstantValuesSequence) Reset() {
g.n = g.state.n
g.t = g.state.t
}
func (g *FloatConstantValuesSequence) Next() bool {
if g.n == 0 {
return false
}
c := min(g.n, cursors.DefaultMaxPointsPerBlock)
g.n -= c
g.vals.Timestamps = g.vals.Timestamps[:c]
g.vals.Values = g.vals.Values[:c]
var (
t = g.t
ts = g.vals.Timestamps
vs = g.vals.Values
d = g.state.d
)
for i := 0; i < len(ts) && i < len(vs); i++ {
ts[i] = g.t
vs[i] = g.state.v
t += d
}
g.t = t
return true
}
func (g *FloatConstantValuesSequence) Values() Values {
return &g.vals
}
type IntegerConstantValuesSequence struct {
vals IntegerArray
n int
t int64
state struct {
n int
t int64
d int64
v int64
}
}
func NewIntegerConstantValuesSequence(n int, start time.Time, delta time.Duration, v int64) *IntegerConstantValuesSequence {
g := &IntegerConstantValuesSequence{
vals: *NewIntegerArrayLen(cursors.DefaultMaxPointsPerBlock),
}
g.state.n = n
g.state.t = start.UnixNano()
g.state.d = int64(delta)
g.state.v = v
g.Reset()
return g
}
func (g *IntegerConstantValuesSequence) Reset() {
g.n = g.state.n
g.t = g.state.t
}
func (g *IntegerConstantValuesSequence) Next() bool {
if g.n == 0 {
return false
}
c := min(g.n, cursors.DefaultMaxPointsPerBlock)
g.n -= c
g.vals.Timestamps = g.vals.Timestamps[:c]
g.vals.Values = g.vals.Values[:c]
var (
t = g.t
ts = g.vals.Timestamps
vs = g.vals.Values
d = g.state.d
)
for i := 0; i < len(ts) && i < len(vs); i++ {
ts[i] = g.t
vs[i] = g.state.v
t += d
}
g.t = t
return true
}
func (g *IntegerConstantValuesSequence) Values() Values {
return &g.vals
}
type UnsignedConstantValuesSequence struct {
vals UnsignedArray
n int
t int64
state struct {
n int
t int64
d int64
v uint64
}
}
func NewUnsignedConstantValuesSequence(n int, start time.Time, delta time.Duration, v uint64) *UnsignedConstantValuesSequence {
g := &UnsignedConstantValuesSequence{
vals: *NewUnsignedArrayLen(cursors.DefaultMaxPointsPerBlock),
}
g.state.n = n
g.state.t = start.UnixNano()
g.state.d = int64(delta)
g.state.v = v
g.Reset()
return g
}
func (g *UnsignedConstantValuesSequence) Reset() {
g.n = g.state.n
g.t = g.state.t
}
func (g *UnsignedConstantValuesSequence) Next() bool {
if g.n == 0 {
return false
}
c := min(g.n, cursors.DefaultMaxPointsPerBlock)
g.n -= c
g.vals.Timestamps = g.vals.Timestamps[:c]
g.vals.Values = g.vals.Values[:c]
var (
t = g.t
ts = g.vals.Timestamps
vs = g.vals.Values
d = g.state.d
)
for i := 0; i < len(ts) && i < len(vs); i++ {
ts[i] = g.t
vs[i] = g.state.v
t += d
}
g.t = t
return true
}
func (g *UnsignedConstantValuesSequence) Values() Values {
return &g.vals
}
type StringConstantValuesSequence struct {
vals StringArray
n int
t int64
state struct {
n int
t int64
d int64
v string
}
}
func NewStringConstantValuesSequence(n int, start time.Time, delta time.Duration, v string) *StringConstantValuesSequence {
g := &StringConstantValuesSequence{
vals: *NewStringArrayLen(cursors.DefaultMaxPointsPerBlock),
}
g.state.n = n
g.state.t = start.UnixNano()
g.state.d = int64(delta)
g.state.v = v
g.Reset()
return g
}
func (g *StringConstantValuesSequence) Reset() {
g.n = g.state.n
g.t = g.state.t
}
func (g *StringConstantValuesSequence) Next() bool {
if g.n == 0 {
return false
}
c := min(g.n, cursors.DefaultMaxPointsPerBlock)
g.n -= c
g.vals.Timestamps = g.vals.Timestamps[:c]
g.vals.Values = g.vals.Values[:c]
var (
t = g.t
ts = g.vals.Timestamps
vs = g.vals.Values
d = g.state.d
)
for i := 0; i < len(ts) && i < len(vs); i++ {
ts[i] = g.t
vs[i] = g.state.v
t += d
}
g.t = t
return true
}
func (g *StringConstantValuesSequence) Values() Values {
return &g.vals
}
type BooleanConstantValuesSequence struct {
vals BooleanArray
n int
t int64
state struct {
n int
t int64
d int64
v bool
}
}
func NewBooleanConstantValuesSequence(n int, start time.Time, delta time.Duration, v bool) *BooleanConstantValuesSequence {
g := &BooleanConstantValuesSequence{
vals: *NewBooleanArrayLen(cursors.DefaultMaxPointsPerBlock),
}
g.state.n = n
g.state.t = start.UnixNano()
g.state.d = int64(delta)
g.state.v = v
g.Reset()
return g
}
func (g *BooleanConstantValuesSequence) Reset() {
g.n = g.state.n
g.t = g.state.t
}
func (g *BooleanConstantValuesSequence) Next() bool {
if g.n == 0 {
return false
}
c := min(g.n, cursors.DefaultMaxPointsPerBlock)
g.n -= c
g.vals.Timestamps = g.vals.Timestamps[:c]
g.vals.Values = g.vals.Values[:c]
var (
t = g.t
ts = g.vals.Timestamps
vs = g.vals.Values
d = g.state.d
)
for i := 0; i < len(ts) && i < len(vs); i++ {
ts[i] = g.t
vs[i] = g.state.v
t += d
}
g.t = t
return true
}
func (g *BooleanConstantValuesSequence) Values() Values {
return &g.vals
}

View File

@ -1,68 +0,0 @@
package gen
import (
"time"
"github.com/influxdata/influxdb/tsdb/cursors"
)
{{range .}}
type {{.Name}}ConstantValuesSequence struct {
vals {{.Name}}Array
n int
t int64
state struct {
n int
t int64
d int64
v {{.Type}}
}
}
func New{{.Name}}ConstantValuesSequence(n int, start time.Time, delta time.Duration, v {{.Type}}) *{{.Name}}ConstantValuesSequence {
g := &{{.Name}}ConstantValuesSequence{
vals: *New{{.Name}}ArrayLen(cursors.DefaultMaxPointsPerBlock),
}
g.state.n = n
g.state.t = start.UnixNano()
g.state.d = int64(delta)
g.state.v = v
g.Reset()
return g
}
func (g *{{.Name}}ConstantValuesSequence) Reset() {
g.n = g.state.n
g.t = g.state.t
}
func (g *{{.Name}}ConstantValuesSequence) Next() bool {
if g.n == 0 {
return false
}
c := min(g.n, cursors.DefaultMaxPointsPerBlock)
g.n -= c
g.vals.Timestamps = g.vals.Timestamps[:c]
g.vals.Values = g.vals.Values[:c]
var (
t = g.t
ts = g.vals.Timestamps
vs = g.vals.Values
d = g.state.d
)
for i := 0; i < len(ts) && i < len(vs); i++ {
ts[i] = g.t
vs[i] = g.state.v
t += d
}
g.t = t
return true
}
func (g *{{.Name}}ConstantValuesSequence) Values() Values {
return &g.vals
}
{{end}}

View File

@ -0,0 +1,272 @@
// Generated by tmpl
// https://github.com/benbjohnson/tmpl
//
// DO NOT EDIT!
// Source: values_sequence.gen.go.tmpl
package gen
import (
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb/cursors"
)
type FloatValuesSequence interface {
Reset()
Write(v []float64)
}
type timeFloatValuesSequence struct {
vals floatArray
ts TimestampSequence
vs FloatValuesSequence
count int
n int
}
func NewTimeFloatValuesSequence(count int, ts TimestampSequence, vs FloatValuesSequence) TimeValuesSequence {
return &timeFloatValuesSequence{
vals: *newFloatArrayLen(cursors.DefaultMaxPointsPerBlock),
ts: ts,
vs: vs,
count: count,
n: count,
}
}
func (s *timeFloatValuesSequence) Reset() {
s.ts.Reset()
s.vs.Reset()
s.n = s.count
}
func (s *timeFloatValuesSequence) Next() bool {
if s.n > 0 {
c := min(s.n, cursors.DefaultMaxPointsPerBlock)
s.n -= c
s.vals.Timestamps = s.vals.Timestamps[:c]
s.vals.Values = s.vals.Values[:c]
s.ts.Write(s.vals.Timestamps)
s.vs.Write(s.vals.Values)
return true
}
return false
}
func (s *timeFloatValuesSequence) Values() Values {
return &s.vals
}
func (s *timeFloatValuesSequence) ValueType() models.FieldType {
return models.Float
}
type IntegerValuesSequence interface {
Reset()
Write(v []int64)
}
type timeIntegerValuesSequence struct {
vals integerArray
ts TimestampSequence
vs IntegerValuesSequence
count int
n int
}
func NewTimeIntegerValuesSequence(count int, ts TimestampSequence, vs IntegerValuesSequence) TimeValuesSequence {
return &timeIntegerValuesSequence{
vals: *newIntegerArrayLen(cursors.DefaultMaxPointsPerBlock),
ts: ts,
vs: vs,
count: count,
n: count,
}
}
func (s *timeIntegerValuesSequence) Reset() {
s.ts.Reset()
s.vs.Reset()
s.n = s.count
}
func (s *timeIntegerValuesSequence) Next() bool {
if s.n > 0 {
c := min(s.n, cursors.DefaultMaxPointsPerBlock)
s.n -= c
s.vals.Timestamps = s.vals.Timestamps[:c]
s.vals.Values = s.vals.Values[:c]
s.ts.Write(s.vals.Timestamps)
s.vs.Write(s.vals.Values)
return true
}
return false
}
func (s *timeIntegerValuesSequence) Values() Values {
return &s.vals
}
func (s *timeIntegerValuesSequence) ValueType() models.FieldType {
return models.Integer
}
type UnsignedValuesSequence interface {
Reset()
Write(v []uint64)
}
type timeUnsignedValuesSequence struct {
vals unsignedArray
ts TimestampSequence
vs UnsignedValuesSequence
count int
n int
}
func NewTimeUnsignedValuesSequence(count int, ts TimestampSequence, vs UnsignedValuesSequence) TimeValuesSequence {
return &timeUnsignedValuesSequence{
vals: *newUnsignedArrayLen(cursors.DefaultMaxPointsPerBlock),
ts: ts,
vs: vs,
count: count,
n: count,
}
}
func (s *timeUnsignedValuesSequence) Reset() {
s.ts.Reset()
s.vs.Reset()
s.n = s.count
}
func (s *timeUnsignedValuesSequence) Next() bool {
if s.n > 0 {
c := min(s.n, cursors.DefaultMaxPointsPerBlock)
s.n -= c
s.vals.Timestamps = s.vals.Timestamps[:c]
s.vals.Values = s.vals.Values[:c]
s.ts.Write(s.vals.Timestamps)
s.vs.Write(s.vals.Values)
return true
}
return false
}
func (s *timeUnsignedValuesSequence) Values() Values {
return &s.vals
}
func (s *timeUnsignedValuesSequence) ValueType() models.FieldType {
return models.Unsigned
}
type StringValuesSequence interface {
Reset()
Write(v []string)
}
type timeStringValuesSequence struct {
vals stringArray
ts TimestampSequence
vs StringValuesSequence
count int
n int
}
func NewTimeStringValuesSequence(count int, ts TimestampSequence, vs StringValuesSequence) TimeValuesSequence {
return &timeStringValuesSequence{
vals: *newStringArrayLen(cursors.DefaultMaxPointsPerBlock),
ts: ts,
vs: vs,
count: count,
n: count,
}
}
func (s *timeStringValuesSequence) Reset() {
s.ts.Reset()
s.vs.Reset()
s.n = s.count
}
func (s *timeStringValuesSequence) Next() bool {
if s.n > 0 {
c := min(s.n, cursors.DefaultMaxPointsPerBlock)
s.n -= c
s.vals.Timestamps = s.vals.Timestamps[:c]
s.vals.Values = s.vals.Values[:c]
s.ts.Write(s.vals.Timestamps)
s.vs.Write(s.vals.Values)
return true
}
return false
}
func (s *timeStringValuesSequence) Values() Values {
return &s.vals
}
func (s *timeStringValuesSequence) ValueType() models.FieldType {
return models.String
}
type BooleanValuesSequence interface {
Reset()
Write(v []bool)
}
type timeBooleanValuesSequence struct {
vals booleanArray
ts TimestampSequence
vs BooleanValuesSequence
count int
n int
}
func NewTimeBooleanValuesSequence(count int, ts TimestampSequence, vs BooleanValuesSequence) TimeValuesSequence {
return &timeBooleanValuesSequence{
vals: *newBooleanArrayLen(cursors.DefaultMaxPointsPerBlock),
ts: ts,
vs: vs,
count: count,
n: count,
}
}
func (s *timeBooleanValuesSequence) Reset() {
s.ts.Reset()
s.vs.Reset()
s.n = s.count
}
func (s *timeBooleanValuesSequence) Next() bool {
if s.n > 0 {
c := min(s.n, cursors.DefaultMaxPointsPerBlock)
s.n -= c
s.vals.Timestamps = s.vals.Timestamps[:c]
s.vals.Values = s.vals.Values[:c]
s.ts.Write(s.vals.Timestamps)
s.vs.Write(s.vals.Values)
return true
}
return false
}
func (s *timeBooleanValuesSequence) Values() Values {
return &s.vals
}
func (s *timeBooleanValuesSequence) ValueType() models.FieldType {
return models.Boolean
}
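Each time<Type>ValuesSequence pairs a TimestampSequence with a values sequence and emits blocks of at most cursors.DefaultMaxPointsPerBlock points. A sketch, assuming the TimestampSequence interface is just the Reset/Write pair used above and with time imported; the fixed-step stub below is hypothetical, for illustration only:

    type stepTimestamps struct{ t, d int64 }

    func (s *stepTimestamps) Reset() {}
    func (s *stepTimestamps) Write(ts []int64) {
        for i := range ts {
            ts[i] = s.t
            s.t += s.d
        }
    }

    // Emits 3 float points, one second apart, in a single block.
    vs := NewTimeFloatValuesSequence(3, &stepTimestamps{d: int64(time.Second)}, NewFloatConstantValuesSequence(0.5))
    for vs.Next() {
        _ = vs.Values()
    }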

View File

@ -0,0 +1,60 @@
package gen
import (
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb/cursors"
)
{{range .}}
type {{.Name}}ValuesSequence interface {
Reset()
Write(v []{{.Type}})
}
type time{{.Name}}ValuesSequence struct {
vals {{.name}}Array
ts TimestampSequence
vs {{.Name}}ValuesSequence
count int
n int
}
func NewTime{{.Name}}ValuesSequence(count int, ts TimestampSequence, vs {{.Name}}ValuesSequence) TimeValuesSequence {
return &time{{.Name}}ValuesSequence{
vals: *new{{.Name}}ArrayLen(cursors.DefaultMaxPointsPerBlock),
ts: ts,
vs: vs,
count: count,
n: count,
}
}
func (s *time{{.Name}}ValuesSequence) Reset() {
s.ts.Reset()
s.vs.Reset()
s.n = s.count
}
func (s *time{{.Name}}ValuesSequence) Next() bool {
if s.n > 0 {
c := min(s.n, cursors.DefaultMaxPointsPerBlock)
s.n -= c
s.vals.Timestamps = s.vals.Timestamps[:c]
s.vals.Values = s.vals.Values[:c]
s.ts.Write(s.vals.Timestamps)
s.vs.Write(s.vals.Values)
return true
}
return false
}
func (s *time{{.Name}}ValuesSequence) Values() Values {
return &s.vals
}
func (s *time{{.Name}}ValuesSequence) ValueType() models.FieldType {
return models.{{.Name}}
}
{{end}}

View File

@ -1,60 +0,0 @@
package gen
import (
"math/rand"
"time"
"github.com/influxdata/influxdb/tsdb/cursors"
)
type FloatRandomValuesSequence struct {
buf FloatArray
vals FloatArray
n int
t int64
state struct {
n int
t int64
d int64
scale float64
}
}
func NewFloatRandomValuesSequence(n int, start time.Time, delta time.Duration, scale float64) *FloatRandomValuesSequence {
g := &FloatRandomValuesSequence{
buf: *NewFloatArrayLen(cursors.DefaultMaxPointsPerBlock),
}
g.state.n = n
g.state.t = start.UnixNano()
g.state.d = int64(delta)
g.state.scale = scale
g.Reset()
return g
}
func (g *FloatRandomValuesSequence) Reset() {
g.n = g.state.n
g.t = g.state.t
}
func (g *FloatRandomValuesSequence) Next() bool {
if g.n == 0 {
return false
}
c := min(g.n, cursors.DefaultMaxPointsPerBlock)
g.n -= c
g.vals.Timestamps = g.buf.Timestamps[:0]
g.vals.Values = g.buf.Values[:0]
for i := 0; i < c; i++ {
g.vals.Timestamps = append(g.vals.Timestamps, g.t)
g.vals.Values = append(g.vals.Values, rand.Float64()*g.state.scale)
g.t += g.state.d
}
return true
}
func (g *FloatRandomValuesSequence) Values() Values {
return &g.vals
}

30
pkg/pointer/pointer.go Normal file
View File

@ -0,0 +1,30 @@
// Package pointer provides utilities for pointer handling that aren't available in Go.
// Feel free to add more pointerification functions for more types as you need them.
package pointer
import "time"
// Duration returns a pointer to its argument.
func Duration(d time.Duration) *time.Duration {
return &d
}
// Int returns a pointer to its argument.
func Int(i int) *int {
return &i
}
// Int64 returns a pointer to its argument.
func Int64(i int64) *int64 {
return &i
}
// String returns a pointer to its argument.
func String(s string) *string {
return &s
}
// Time returns a pointer to its argument.
func Time(t time.Time) *time.Time {
return &t
}
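A typical use, as in the task options change later in this diff, is populating optional struct fields without declaring throwaway variables. A sketch:

    opts := options.Options{
        Concurrency: pointer.Int64(1),
        Offset:      pointer.Duration(10 * time.Second),
    }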

View File

@ -7,6 +7,7 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/csv"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/kit/check"
"github.com/influxdata/influxdb/kit/tracing"
)
@ -23,6 +24,12 @@ func (b QueryServiceBridge) Query(ctx context.Context, req *Request) (flux.Resul
return flux.NewResultIteratorFromQuery(query), nil
}
// Check returns the status of this query service. Since this bridge consumes an AsyncQueryService,
// which is not available over the network, this check always passes.
func (QueryServiceBridge) Check(context.Context) check.Response {
return check.Response{Name: "Query Service", Status: check.StatusPass}
}
// QueryServiceProxyBridge implements QueryService while consuming a ProxyQueryService interface.
type QueryServiceProxyBridge struct {
ProxyQueryService ProxyQueryService
@ -36,34 +43,42 @@ func (b QueryServiceProxyBridge) Query(ctx context.Context, req *Request) (flux.
}
r, w := io.Pipe()
statsChan := make(chan flux.Statistics, 1)
asri := &asyncStatsResultIterator{statsReady: make(chan struct{})}
go func() {
stats, err := b.ProxyQueryService.Query(ctx, w, preq)
_ = w.CloseWithError(err)
statsChan <- stats
asri.stats = stats
close(asri.statsReady)
}()
dec := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{})
ri, err := dec.Decode(r)
return asyncStatsResultIterator{
ResultIterator: ri,
statsChan: statsChan,
}, err
asri.ResultIterator = ri
return asri, err
}
func (b QueryServiceProxyBridge) Check(ctx context.Context) check.Response {
return b.ProxyQueryService.Check(ctx)
}
type asyncStatsResultIterator struct {
flux.ResultIterator
statsChan chan flux.Statistics
stats flux.Statistics
// Channel that is closed when stats have been written.
statsReady chan struct{}
// Statistics gathered from calling the proxy query service.
// This field must not be read until statsReady is closed.
stats flux.Statistics
}
func (i asyncStatsResultIterator) Release() {
func (i *asyncStatsResultIterator) Release() {
i.ResultIterator.Release()
i.stats = <-i.statsChan
}
func (i asyncStatsResultIterator) Statistics() flux.Statistics {
func (i *asyncStatsResultIterator) Statistics() flux.Statistics {
<-i.statsReady
return i.stats
}
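The practical consequence for callers: Statistics now blocks until the proxy-query goroutine has stored its stats, so it should be called only after consuming and releasing the iterator. A sketch of the expected call order, assuming the usual flux.ResultIterator consume loop:

    it, err := bridge.Query(ctx, req)
    if err != nil {
        return err
    }
    for it.More() {
        _ = it.Next() // consume results
    }
    it.Release()
    stats := it.Statistics() // safe: waits on statsReady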
@ -94,6 +109,12 @@ func (b ProxyQueryServiceAsyncBridge) Query(ctx context.Context, w io.Writer, re
return stats, nil
}
// Check returns the status of this query service. Since this bridge consumes an AsyncQueryService,
// which is not available over the network, this check always passes.
func (ProxyQueryServiceAsyncBridge) Check(context.Context) check.Response {
return check.Response{Name: "Query Service", Status: check.StatusPass}
}
// REPLQuerier implements the repl.Querier interface while consuming a QueryService
type REPLQuerier struct {
// Authorization is the authorization to provide for all requests

View File

@ -7,6 +7,7 @@ import (
"time"
"github.com/influxdata/flux"
"github.com/influxdata/influxdb/kit/check"
"github.com/influxdata/influxdb/kit/tracing"
)
@ -62,3 +63,7 @@ func (s *LoggingServiceBridge) Query(ctx context.Context, w io.Writer, req *Prox
}
return stats, nil
}
func (s *LoggingServiceBridge) Check(ctx context.Context) check.Response {
return s.QueryService.Check(ctx)
}

View File

@ -6,6 +6,7 @@ import (
"sync"
"github.com/influxdata/flux"
"github.com/influxdata/influxdb/kit/check"
"github.com/influxdata/influxdb/query"
)
@ -19,6 +20,10 @@ func (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.P
return s.QueryF(ctx, w, req)
}
func (s *ProxyQueryService) Check(ctx context.Context) check.Response {
return check.Response{Name: "Mock Proxy Query Service", Status: check.StatusPass}
}
// QueryService mocks the idep QueryService for testing.
type QueryService struct {
QueryF func(ctx context.Context, req *query.Request) (flux.ResultIterator, error)
@ -29,6 +34,10 @@ func (s *QueryService) Query(ctx context.Context, req *query.Request) (flux.Resu
return s.QueryF(ctx, req)
}
func (s *QueryService) Check(ctx context.Context) check.Response {
return check.Response{Name: "Mock Query Service", Status: check.StatusPass}
}
// AsyncQueryService mocks the idep QueryService for testing.
type AsyncQueryService struct {
QueryF func(ctx context.Context, req *query.Request) (flux.Query, error)

View File

@ -5,10 +5,13 @@ import (
"io"
"github.com/influxdata/flux"
"github.com/influxdata/influxdb/kit/check"
)
// QueryService represents a type capable of performing queries.
type QueryService interface {
check.Checker
// Query submits a query for execution returning a results iterator.
// Cancel must be called on any returned results to free resources.
Query(ctx context.Context, req *Request) (flux.ResultIterator, error)
@ -24,6 +27,8 @@ type AsyncQueryService interface {
// ProxyQueryService performs queries and encodes the result into a writer.
// The results are opaque to a ProxyQueryService.
type ProxyQueryService interface {
check.Checker
// Query performs the requested query and encodes the results into w.
// The number of bytes written to w is returned __independent__ of any error.
Query(ctx context.Context, w io.Writer, req *ProxyRequest) (flux.Statistics, error)

View File

@ -589,6 +589,9 @@ func writeTable(t *ToTransformation, tbl flux.Table) error {
}
}
points, err = tsdb.ExplodePoints(*orgID, *bucketID, points)
if err != nil {
return err
}
return d.PointsWriter.WritePoints(context.TODO(), points)
})
}

View File

@ -4,14 +4,12 @@ import (
"bytes"
"context"
"fmt"
"github.com/influxdata/influxdb/kit/tracing"
"sort"
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/storage/reads/datatypes"
"github.com/influxdata/influxdb/tsdb/cursors"
"github.com/opentracing/opentracing-go"
"go.uber.org/zap"
)
type groupResultSet struct {
@ -113,30 +111,14 @@ func (g *groupResultSet) Next() GroupCursor {
}
func (g *groupResultSet) sort() (int, error) {
log := logger.LoggerFromContext(g.ctx)
if log != nil {
var f func()
log, f = logger.NewOperation(log, "Sort", "group.sort", zap.String("group_type", g.req.Group.String()))
defer f()
}
span := opentracing.SpanFromContext(g.ctx)
if span != nil {
span = opentracing.StartSpan(
"group.sort",
opentracing.ChildOf(span.Context()),
opentracing.Tag{Key: "group_type", Value: g.req.Group.String()})
defer span.Finish()
}
span, _ := tracing.StartSpanFromContext(g.ctx)
defer span.Finish()
span.LogKV("group_type", g.req.Group.String())
n, err := g.sortFn(g)
if span != nil {
span.SetTag("rows", n)
}
if log != nil {
log.Info("Sort completed", zap.Int("rows", n))
if err != nil {
span.LogKV("rows", n)
}
return n, err

View File

@ -345,7 +345,7 @@ func BenchmarkNewGroupResultSet_GroupBy(b *testing.B) {
vals[i] = gen.NewCounterByteSequenceCount(card[i])
}
tags := gen.NewTagsValuesSequenceValues("tag", vals)
tags := gen.NewTagsValuesSequenceValues("m0", "f0", "tag", vals)
rows := make([]reads.SeriesRow, tags.Count())
for i := range rows {
tags.Next()

View File

@ -91,6 +91,12 @@ func (t *floatTable) Do(f func(flux.ColReader) error) error {
}
func (t *floatTable) advance() bool {
for _, cb := range t.colBufs {
if cb != nil {
cb.Release()
}
}
a := t.cur.Next()
t.l = a.Len()
if t.l == 0 {
@ -320,6 +326,12 @@ func (t *integerTable) Do(f func(flux.ColReader) error) error {
}
func (t *integerTable) advance() bool {
for _, cb := range t.colBufs {
if cb != nil {
cb.Release()
}
}
a := t.cur.Next()
t.l = a.Len()
if t.l == 0 {
@ -549,6 +561,12 @@ func (t *unsignedTable) Do(f func(flux.ColReader) error) error {
}
func (t *unsignedTable) advance() bool {
for _, cb := range t.colBufs {
if cb != nil {
cb.Release()
}
}
a := t.cur.Next()
t.l = a.Len()
if t.l == 0 {
@ -778,6 +796,12 @@ func (t *stringTable) Do(f func(flux.ColReader) error) error {
}
func (t *stringTable) advance() bool {
for _, cb := range t.colBufs {
if cb != nil {
cb.Release()
}
}
a := t.cur.Next()
t.l = a.Len()
if t.l == 0 {
@ -1007,6 +1031,12 @@ func (t *booleanTable) Do(f func(flux.ColReader) error) error {
}
func (t *booleanTable) advance() bool {
for _, cb := range t.colBufs {
if cb != nil {
cb.Release()
}
}
a := t.cur.Next()
t.l = a.Len()
if t.l == 0 {

View File

@ -85,6 +85,12 @@ func (t *{{.name}}Table) Do(f func(flux.ColReader) error) error {
}
func (t *{{.name}}Table) advance() bool {
for _, cb := range t.colBufs {
if cb != nil {
cb.Release()
}
}
a := t.cur.Next()
t.l = a.Len()
if t.l == 0 {

42
task.go
View File

@ -149,11 +149,11 @@ func (t *TaskUpdate) UnmarshalJSON(data []byte) error {
// Offset represents a delay before execution.
// It gets marshalled from a string duration, e.g. "10s" is 10 seconds
Offset flux.Duration `json:"offset,omitempty"`
Offset *flux.Duration `json:"offset,omitempty"`
Concurrency int64 `json:"concurrency,omitempty"`
Concurrency *int64 `json:"concurrency,omitempty"`
Retry int64 `json:"retry,omitempty"`
Retry *int64 `json:"retry,omitempty"`
Token string `json:"token,omitempty"`
}{}
@ -164,7 +164,10 @@ func (t *TaskUpdate) UnmarshalJSON(data []byte) error {
t.Options.Name = jo.Name
t.Options.Cron = jo.Cron
t.Options.Every = time.Duration(jo.Every)
t.Options.Offset = time.Duration(jo.Offset)
if jo.Offset != nil {
offset := time.Duration(*jo.Offset)
t.Options.Offset = &offset
}
t.Options.Concurrency = jo.Concurrency
t.Options.Retry = jo.Retry
t.Flux = jo.Flux
@ -187,18 +190,21 @@ func (t TaskUpdate) MarshalJSON() ([]byte, error) {
Every flux.Duration `json:"every,omitempty"`
// Offset represents a delay before execution.
Offset flux.Duration `json:"offset,omitempty"`
Offset *flux.Duration `json:"offset,omitempty"`
Concurrency int64 `json:"concurrency,omitempty"`
Concurrency *int64 `json:"concurrency,omitempty"`
Retry int64 `json:"retry,omitempty"`
Retry *int64 `json:"retry,omitempty"`
Token string `json:"token,omitempty"`
}{}
jo.Name = t.Options.Name
jo.Cron = t.Options.Cron
jo.Every = flux.Duration(t.Options.Every)
jo.Offset = flux.Duration(t.Options.Offset)
if t.Options.Offset != nil {
offset := flux.Duration(*t.Options.Offset)
jo.Offset = &offset
}
jo.Concurrency = t.Options.Concurrency
jo.Retry = t.Options.Retry
jo.Flux = t.Flux
@ -225,6 +231,7 @@ func (t *TaskUpdate) UpdateFlux(oldFlux string) error {
if t.Flux != nil && *t.Flux != "" {
oldFlux = *t.Flux
}
toDelete := map[string]struct{}{}
parsedPKG := parser.ParseSource(oldFlux)
if ast.Check(parsedPKG) > 0 {
return ast.GetError(parsedPKG)
@ -245,11 +252,15 @@ func (t *TaskUpdate) UpdateFlux(oldFlux string) error {
if t.Options.Cron != "" {
op["cron"] = &ast.StringLiteral{Value: t.Options.Cron}
}
if t.Options.Offset != 0 {
d := ast.Duration{Magnitude: int64(t.Options.Offset), Unit: "ns"}
op["offset"] = &ast.DurationLiteral{Values: []ast.Duration{d}}
if t.Options.Offset != nil {
if *t.Options.Offset != 0 {
d := ast.Duration{Magnitude: int64(*t.Options.Offset), Unit: "ns"}
op["offset"] = &ast.DurationLiteral{Values: []ast.Duration{d}}
} else {
toDelete["offset"] = struct{}{}
}
}
if len(op) > 0 {
if len(op) > 0 || len(toDelete) > 0 {
editFunc := func(opt *ast.OptionStatement) (ast.Expression, error) {
a, ok := opt.Assignment.(*ast.VariableAssignment)
if !ok {
@ -260,8 +271,11 @@ func (t *TaskUpdate) UpdateFlux(oldFlux string) error {
return nil, fmt.Errorf("value is is %s, not an object expression", a.Init.Type())
}
// modify in the keys and values that already are in the ast
for _, p := range obj.Properties {
for i, p := range obj.Properties {
k := p.Key.Key()
if _, ok := toDelete[k]; ok {
obj.Properties = append(obj.Properties[:i], obj.Properties[i+1:]...)
}
switch k {
case "name":
if name, ok := op["name"]; ok && t.Options.Name != "" {
@ -269,7 +283,7 @@ func (t *TaskUpdate) UpdateFlux(oldFlux string) error {
p.Value = name
}
case "offset":
if offset, ok := op["offset"]; ok && t.Options.Offset != 0 {
if offset, ok := op["offset"]; ok && t.Options.Offset != nil {
delete(op, "offset")
p.Value = offset
}

View File

@ -53,6 +53,11 @@ func (c *Coordinator) claimExistingTasks() {
for len(tasks) > 0 {
for _, task := range tasks {
if task.Meta.Status != string(backend.TaskActive) {
// Don't claim inactive tasks at startup.
continue
}
t := task // Copy to avoid mistaken closure around task value.
if err := c.sch.ClaimTask(&t.Task, &t.Meta); err != nil {
c.logger.Error("failed claim task", zap.Error(err))

View File

@ -166,11 +166,16 @@ func TestCoordinator_ClaimExistingTasks(t *testing.T) {
createChan := sched.TaskCreateChan()
const numTasks = 110 // One page of listed tasks should be 100, so pick more than that.
const numTasks = 110 // One page of listed tasks should be 100, so pick more than that.
const inactiveTaskIndex = 13 // One arbitrary task is set to inactive.
createdIDs := make([]platform.ID, numTasks)
for i := 0; i < numTasks; i++ {
id, err := st.CreateTask(context.Background(), backend.CreateTaskRequest{Org: 1, AuthorizationID: 3, Script: script})
ctr := backend.CreateTaskRequest{Org: 1, AuthorizationID: 3, Script: script}
if i == inactiveTaskIndex {
ctr.Status = backend.TaskInactive
}
id, err := st.CreateTask(context.Background(), ctr)
if err != nil {
t.Fatal(err)
}
@ -179,15 +184,21 @@ func TestCoordinator_ClaimExistingTasks(t *testing.T) {
coordinator.New(zaptest.NewLogger(t), sched, st)
for i := 0; i < numTasks; i++ {
const expectedCreatedTasks = numTasks - 1 // -1 to skip the single inactive task.
for i := 0; i < expectedCreatedTasks; i++ {
_, err := timeoutSelector(createChan)
if err != nil {
t.Fatal(err)
}
}
for _, id := range createdIDs {
if task := sched.TaskFor(id); task == nil {
for i, id := range createdIDs {
task := sched.TaskFor(id)
if i == inactiveTaskIndex {
if task != nil {
t.Fatalf("inactive task with id %s claimed by coordinator at startup", id)
}
} else if task == nil {
t.Fatalf("did not find created task with ID %s", id)
}
}

View File

@ -63,7 +63,7 @@ type syncRunPromise struct {
ctx context.Context
cancel context.CancelFunc
logger *zap.Logger
logEnd func()
logEnd func() // Called to log the end of the run operation.
finishOnce sync.Once // Ensure we set the values only once.
ready chan struct{} // Closed inside finish. Indicates Wait will no longer block.
@ -169,6 +169,10 @@ func (p *syncRunPromise) doQuery(wg *sync.WaitGroup) {
}
}
// Must call Release to ensure Statistics are ready.
// It's safe for Release to be called multiple times.
it.Release()
// Is it okay to assume it.Err will be set if the query context is canceled?
p.finish(&runResult{err: it.Err(), statistics: it.Statistics()}, nil)
}
@ -245,7 +249,7 @@ type asyncRunPromise struct {
q flux.Query
logger *zap.Logger
logEnd func()
logEnd func() // Called to log the end of the run operation.
finishOnce sync.Once // Ensure we set the values only once.
ready chan struct{} // Closed inside finish. Indicates Wait will no longer block.
@ -327,8 +331,9 @@ func (p *asyncRunPromise) followQuery(wg *sync.WaitGroup) {
wg.Wait()
// Otherwise, query was successful.
// TODO(mr): collect query statistics, once RunResult interface supports them?
p.finish(new(runResult), nil)
// Must call query.Done before collecting statistics. It's safe to call multiple times.
p.q.Done()
p.finish(&runResult{statistics: p.q.Statistics()}, nil)
}
}

View File

@ -16,14 +16,18 @@ import (
// NewStoreTaskMeta returns a new StoreTaskMeta based on the given request and parsed options.
func NewStoreTaskMeta(req CreateTaskRequest, o options.Options) StoreTaskMeta {
stm := StoreTaskMeta{
MaxConcurrency: int32(o.Concurrency),
Status: string(req.Status),
LatestCompleted: req.ScheduleAfter,
CreatedAt: time.Now().Unix(),
EffectiveCron: o.EffectiveCronString(),
Offset: int32(o.Offset / time.Second),
AuthorizationID: uint64(req.AuthorizationID),
}
if o.Concurrency != nil {
stm.MaxConcurrency = int32(*o.Concurrency)
}
if o.Offset != nil {
stm.Offset = int32(*o.Offset / time.Second)
}
if stm.Status == "" {
stm.Status = string(DefaultTaskStatus)

View File

@ -348,6 +348,23 @@ func (s *TickScheduler) UpdateTask(task *StoreTask, meta *StoreTaskMeta) error {
ts.nextDue = next
ts.nextDueMu.Unlock()
// check the concurrency
// todo(lh): In the near future we may not be using the scheduler to manage concurrency.
maxC := int(meta.MaxConcurrency)
if maxC != len(ts.runners) {
ts.runningMu.Lock()
if maxC < len(ts.runners) {
ts.runners = ts.runners[:maxC]
}
if maxC > len(ts.runners) {
delta := maxC - len(ts.runners)
for i := 0; i < delta; i++ {
ts.runners = append(ts.runners, newRunner(s.ctx, ts.wg, s.logger, task, s.desiredState, s.executor, s.logWriter, ts))
}
}
ts.runningMu.Unlock()
}
if now := atomic.LoadInt64(&s.now); now >= next || hasQueue {
ts.Work()
}

View File

@ -2,12 +2,15 @@ package backend_test
import (
"context"
"encoding/json"
"errors"
"fmt"
"reflect"
"strings"
"testing"
"time"
"github.com/influxdata/flux"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/kit/prom"
"github.com/influxdata/influxdb/kit/prom/promtest"
@ -221,6 +224,78 @@ func TestScheduler_CreateNextRunOnTick(t *testing.T) {
}
}
func TestScheduler_LogStatisticsOnSuccess(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
e := mock.NewExecutor()
rl := backend.NewInMemRunReaderWriter()
o := backend.NewScheduler(d, e, rl, 5, backend.WithLogger(zaptest.NewLogger(t)))
o.Start(context.Background())
defer o.Stop()
const taskID = 0x12345
const orgID = 0x54321
task := &backend.StoreTask{
ID: taskID,
Org: orgID,
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 1,
EffectiveCron: "@every 1s",
LatestCompleted: 5,
}
d.SetTaskMeta(taskID, *meta)
if err := o.ClaimTask(task, meta); err != nil {
t.Fatal(err)
}
o.Tick(6)
p, err := e.PollForNumberRunning(taskID, 1)
if err != nil {
t.Fatal(err)
}
rr := mock.NewRunResult(nil, false)
rr.Stats = flux.Statistics{Metadata: flux.Metadata{"foo": []interface{}{"bar"}}}
p[0].Finish(rr, nil)
runID := p[0].Run().RunID
if _, err := e.PollForNumberRunning(taskID, 0); err != nil {
t.Fatal(err)
}
logs, err := rl.ListLogs(context.Background(), orgID, platform.LogFilter{Task: taskID, Run: &runID})
if err != nil {
t.Fatal(err)
}
// For now, assume the stats line is the only line beginning with "{".
var statJSON string
for _, log := range logs {
if len(log.Message) > 0 && log.Message[0] == '{' {
statJSON = log.Message
break
}
}
if statJSON == "" {
t.Fatal("could not find log message that looked like statistics")
}
var stats flux.Statistics
if err := json.Unmarshal([]byte(statJSON), &stats); err != nil {
t.Fatal(err)
}
foo := stats.Metadata["foo"]
if !reflect.DeepEqual(foo, []interface{}{"bar"}) {
t.Fatalf("query statistics were not encoded correctly into logs. expected metadata.foo=[bar], got: %#v", stats)
}
}
func TestScheduler_Release(t *testing.T) {
t.Parallel()
@ -260,7 +335,6 @@ func TestScheduler_Release(t *testing.T) {
}
func TestScheduler_UpdateTask(t *testing.T) {
t.Skip("flaky test: https://github.com/influxdata/influxdb/issues/12667")
t.Parallel()
d := mock.NewDesiredState()
@ -465,7 +539,7 @@ func pollForRunStatus(t *testing.T, r backend.LogReader, taskID, orgID platform.
t.FailNow()
}
func TestScheduler_RunLog(t *testing.T) {
func TestScheduler_RunStatus(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()

View File

@ -460,7 +460,7 @@ func listLogsTest(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFunc)
if logs[0].Time != fmtTimelog {
t.Fatalf("expected: %q, got: %q", fmtTimelog, logs[0].Time)
}
if "log4" != logs[0].Message {
if logs[0].Message != "log4" {
t.Fatalf("expected: %q, got: %q", "log4", logs[0].Message)
}

31
task/backend/task.go Normal file
View File

@ -0,0 +1,31 @@
package backend
import (
"context"
"time"
"github.com/influxdata/influxdb"
)
// TaskControlService is a low-level controller interface, intended to be passed to
// task executors and schedulers, that allows creation, completion, and status updates of runs.
type TaskControlService interface {
// CreateNextRun attempts to create a new run.
// The new run's ScheduledFor is assigned the earliest possible time according to task's cron,
// that is later than any in-progress run and LatestCompleted run.
// If the run's ScheduledFor would be later than the passed-in now, CreateNextRun returns a RunNotYetDueError.
CreateNextRun(ctx context.Context, taskID influxdb.ID, now int64) (RunCreation, error)
// FinishRun removes runID from the list of running tasks and, if its `ScheduledFor` is later than the last completed time, updates it.
FinishRun(ctx context.Context, taskID, runID influxdb.ID) (*influxdb.Run, error)
// NextDueRun returns the Unix timestamp of when the next call to CreateNextRun will be ready.
// The returned timestamp reflects the task's offset, so it does not necessarily exactly match the schedule time.
NextDueRun(ctx context.Context, taskID influxdb.ID) (int64, error)
// UpdateRunState sets the run state at the respective time.
UpdateRunState(ctx context.Context, taskID, runID influxdb.ID, when time.Time, state RunStatus) error
// AddRunLog adds a log line to the run.
AddRunLog(ctx context.Context, taskID, runID influxdb.ID, when time.Time, log string) error
}

View File

@ -5,11 +5,11 @@ import (
"context"
"errors"
"fmt"
"github.com/influxdata/flux"
"strings"
"sync"
"time"
"github.com/influxdata/flux"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/task/backend"
scheduler "github.com/influxdata/influxdb/task/backend"
@ -453,7 +453,10 @@ func (p *RunPromise) Finish(r backend.RunResult, err error) {
type RunResult struct {
err error
isRetryable bool
stats flux.Statistics
// Most tests don't care about statistics.
// If your test does care, adjust it after the call to NewRunResult.
Stats flux.Statistics
}
var _ backend.RunResult = (*RunResult)(nil)
@ -471,5 +474,5 @@ func (rr *RunResult) IsRetryable() bool {
}
func (rr *RunResult) Statistics() flux.Statistics {
return rr.stats
return rr.Stats
}

View File

@ -10,6 +10,8 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
"github.com/influxdata/influxdb/pkg/pointer"
cron "gopkg.in/robfig/cron.v2"
)
@ -42,11 +44,11 @@ type Options struct {
// Offset represents a delay before execution.
// This can be unmarshaled from JSON as a string, e.g. "1d" will unmarshal as 1 day
Offset time.Duration `json:"offset,omitempty"`
Offset *time.Duration `json:"offset,omitempty"`
Concurrency int64 `json:"concurrency,omitempty"`
Concurrency *int64 `json:"concurrency,omitempty"`
Retry int64 `json:"retry,omitempty"`
Retry *int64 `json:"retry,omitempty"`
}
// Clear clears out all options in the options struct; it is useful if you wish to reuse it.
@ -54,20 +56,30 @@ func (o *Options) Clear() {
o.Name = ""
o.Cron = ""
o.Every = 0
o.Offset = 0
o.Concurrency = 0
o.Retry = 0
o.Offset = nil
o.Concurrency = nil
o.Retry = nil
}
func (o *Options) IsZero() bool {
return o.Name == "" &&
o.Cron == "" &&
o.Every == 0 &&
o.Offset == 0 &&
o.Concurrency == 0 &&
o.Retry == 0
o.Offset == nil &&
o.Concurrency == nil &&
o.Retry == nil
}
// All the task option names we accept.
const (
optName = "name"
optCron = "cron"
optEvery = "every"
optOffset = "offset"
optConcurrency = "concurrency"
optRetry = "retry"
)
// FromScript extracts Options from a Flux script.
func FromScript(script string) (Options, error) {
if optionCache != nil {
@ -79,8 +91,7 @@ func FromScript(script string) (Options, error) {
return opt, nil
}
}
opt := Options{Retry: 1, Concurrency: 1}
opt := Options{Retry: pointer.Int64(1), Concurrency: pointer.Int64(1)}
_, scope, err := flux.Eval(script)
if err != nil {
@ -93,7 +104,11 @@ func FromScript(script string) (Options, error) {
return opt, errors.New("missing required option: 'task'")
}
optObject := task.Object()
nameVal, ok := optObject.Get("name")
if err := validateOptionNames(optObject); err != nil {
return opt, err
}
nameVal, ok := optObject.Get(optName)
if !ok {
return opt, errors.New("missing name in task options")
}
@ -102,8 +117,8 @@ func FromScript(script string) (Options, error) {
return opt, err
}
opt.Name = nameVal.Str()
crVal, cronOK := optObject.Get("cron")
everyVal, everyOK := optObject.Get("every")
crVal, cronOK := optObject.Get(optCron)
everyVal, everyOK := optObject.Get(optEvery)
if cronOK && everyOK {
return opt, errors.New("cannot use both cron and every in task options")
}
@ -126,25 +141,25 @@ func FromScript(script string) (Options, error) {
opt.Every = everyVal.Duration().Duration()
}
if offsetVal, ok := optObject.Get("offset"); ok {
if offsetVal, ok := optObject.Get(optOffset); ok {
if err := checkNature(offsetVal.PolyType().Nature(), semantic.Duration); err != nil {
return opt, err
}
opt.Offset = offsetVal.Duration().Duration()
opt.Offset = pointer.Duration(offsetVal.Duration().Duration())
}
if concurrencyVal, ok := optObject.Get("concurrency"); ok {
if concurrencyVal, ok := optObject.Get(optConcurrency); ok {
if err := checkNature(concurrencyVal.PolyType().Nature(), semantic.Int); err != nil {
return opt, err
}
opt.Concurrency = concurrencyVal.Int()
opt.Concurrency = pointer.Int64(concurrencyVal.Int())
}
if retryVal, ok := optObject.Get("retry"); ok {
if retryVal, ok := optObject.Get(optRetry); ok {
if err := checkNature(retryVal.PolyType().Nature(), semantic.Int); err != nil {
return opt, err
}
opt.Retry = retryVal.Int()
opt.Retry = pointer.Int64(retryVal.Int())
}
if err := opt.Validate(); err != nil {
@ -185,21 +200,23 @@ func (o *Options) Validate() error {
}
}
if o.Offset.Truncate(time.Second) != o.Offset {
if o.Offset != nil && o.Offset.Truncate(time.Second) != *o.Offset {
// For now, allowing negative offset delays. Maybe they're useful for forecasting?
errs = append(errs, "offset option must be expressible as whole seconds")
}
if o.Concurrency < 1 {
errs = append(errs, "concurrency must be at least 1")
} else if o.Concurrency > maxConcurrency {
errs = append(errs, fmt.Sprintf("concurrency exceeded max of %d", maxConcurrency))
if o.Concurrency != nil {
if *o.Concurrency < 1 {
errs = append(errs, "concurrency must be at least 1")
} else if *o.Concurrency > maxConcurrency {
errs = append(errs, fmt.Sprintf("concurrency exceeded max of %d", maxConcurrency))
}
}
if o.Retry < 1 {
errs = append(errs, "retry must be at least 1")
} else if o.Retry > maxRetry {
errs = append(errs, fmt.Sprintf("retry exceeded max of %d", maxRetry))
if o.Retry != nil {
if *o.Retry < 1 {
errs = append(errs, "retry must be at least 1")
} else if *o.Retry > maxRetry {
errs = append(errs, fmt.Sprintf("retry exceeded max of %d", maxRetry))
}
}
if len(errs) == 0 {
@ -231,3 +248,25 @@ func checkNature(got, exp semantic.Nature) error {
}
return nil
}
// validateOptionNames returns an error if any keys in the option object o
// do not match an expected option name.
func validateOptionNames(o values.Object) error {
var unexpected []string
o.Range(func(name string, _ values.Value) {
switch name {
case optName, optCron, optEvery, optOffset, optConcurrency, optRetry:
// Known option. Nothing to do.
default:
unexpected = append(unexpected, name)
}
})
if len(unexpected) > 0 {
u := strings.Join(unexpected, ", ")
v := strings.Join([]string{optName, optCron, optEvery, optOffset, optConcurrency, optRetry}, ", ")
return fmt.Errorf("unknown task option(s): %s. valid options are %s", u, v)
}
return nil
}
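End to end, the pointer-valued options let callers distinguish unset from zero, and validateOptionNames rejects misspelled keys instead of silently ignoring them. A sketch of extracting options from a task script (script contents illustrative):

    script := `option task = {name: "hourly", cron: "0 * * * *", offset: 10s, concurrency: 2}
from(bucket: "b") |> range(start: -1h)`

    opt, err := FromScript(script)
    // err is non-nil for an unknown key such as "offst".
    // opt.Offset and opt.Concurrency are non-nil here; opt.Retry keeps its default of pointer.Int64(1).
    _, _ = opt, err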

Some files were not shown because too many files have changed in this diff.