Merge branch 'master' into flux-staging

pull/13050/head
Jonathan A. Sternberg 2019-04-01 10:59:43 -05:00
commit 72726cbe85
No known key found for this signature in database
GPG Key ID: 4A0C1200CB8B9D2E
254 changed files with 6121 additions and 8659 deletions

View File

@ -1,4 +1,12 @@
## v2.0.0-alpha.7 [unreleased]
## v2.0.0-alpha.8 [unreleased]
### Features
### Bug Fixes
### UI Improvements
## v2.0.0-alpha.7 [2019-03-28]
### Features
@ -15,6 +23,7 @@
1. [12888](https://github.com/influxdata/influxdb/pull/12888): Add the ability to delete a template
1. [12901](https://github.com/influxdata/influxdb/pull/12901): Save user preference for variable control bar visibility and default to visible
1. [12910](https://github.com/influxdata/influxdb/pull/12910): Add the ability to clone a template
1. [12958](https://github.com/influxdata/influxdb/pull/12958): Add the ability to import a variable
### Bug Fixes
@ -25,11 +34,14 @@
1. [12793](https://github.com/influxdata/influxdb/pull/12793): Fix task creation error when switching schedule types.
1. [12805](https://github.com/influxdata/influxdb/pull/12805): Fix hidden horizontal scrollbars in flux raw data view
1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View
1. [12961](https://github.com/influxdata/influxdb/pull/12961): Fix scroll clipping in graph legends & dropdown menus
1. [12959](https://github.com/influxdata/influxdb/pull/12959): Fix routing loop
### UI Improvements
1. [12782](https://github.com/influxdata/influxdb/pull/12782): Move bucket selection in the query builder to the first card in the list
1. [12850](https://github.com/influxdata/influxdb/pull/12850): Ensure editor is automatically focused in note editor
1. [12915](https://github.com/influxdata/influxdb/pull/12915): Add ability to edit a template's name.
## v2.0.0-alpha.6 [2019-03-15]

13
auth.go
View File

@ -27,6 +27,12 @@ type Authorization struct {
Permissions []Permission `json:"permissions"`
}
// AuthorizationUpdate is the authorization update request.
// Only non-nil fields are applied by the receiving service; a nil field
// leaves the corresponding authorization attribute unchanged.
type AuthorizationUpdate struct {
// Status, when set, replaces the authorization's active/inactive state.
Status *Status `json:"status,omitempty"`
// Description, when set, replaces the authorization's description.
Description *string `json:"description,omitempty"`
}
// Valid ensures that the authorization is valid.
func (a *Authorization) Valid() error {
for _, p := range a.Permissions {
@ -78,7 +84,7 @@ const (
OpFindAuthorizationByToken = "FindAuthorizationByToken"
OpFindAuthorizations = "FindAuthorizations"
OpCreateAuthorization = "CreateAuthorization"
OpSetAuthorizationStatus = "SetAuthorizationStatus"
OpUpdateAuthorization = "UpdateAuthorization"
OpDeleteAuthorization = "DeleteAuthorization"
)
@ -97,9 +103,8 @@ type AuthorizationService interface {
// Creates a new authorization and sets a.Token and a.UserID with the new identifier.
CreateAuthorization(ctx context.Context, a *Authorization) error
// SetAuthorizationStatus updates the status of the authorization. Useful
// for setting an authorization to inactive or active.
SetAuthorizationStatus(ctx context.Context, id ID, status Status) error
// UpdateAuthorization updates the status and description if available.
UpdateAuthorization(ctx context.Context, id ID, udp *AuthorizationUpdate) error
// Removes a authorization by token.
DeleteAuthorization(ctx context.Context, id ID) error

View File

@ -143,8 +143,8 @@ func VerifyPermissions(ctx context.Context, ps []influxdb.Permission) error {
return nil
}
// SetAuthorizationStatus checks to see if the authorizer on context has write access to the authorization provided.
func (s *AuthorizationService) SetAuthorizationStatus(ctx context.Context, id influxdb.ID, st influxdb.Status) error {
// UpdateAuthorization checks to see if the authorizer on context has write access to the authorization provided.
func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) error {
a, err := s.s.FindAuthorizationByID(ctx, id)
if err != nil {
return err
@ -154,7 +154,7 @@ func (s *AuthorizationService) SetAuthorizationStatus(ctx context.Context, id in
return err
}
return s.s.SetAuthorizationStatus(ctx, id, st)
return s.s.UpdateAuthorization(ctx, id, upd)
}
// DeleteAuthorization checks to see if the authorizer on context has write access to the authorization provided.

View File

@ -244,7 +244,7 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) {
m.DeleteAuthorizationFn = func(ctx context.Context, id influxdb.ID) error {
return nil
}
m.SetAuthorizationStatusFn = func(ctx context.Context, id influxdb.ID, s influxdb.Status) error {
m.UpdateAuthorizationFn = func(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) error {
return nil
}
s := authorizer.NewAuthorizationService(m)
@ -257,8 +257,8 @@ func TestAuthorizationService_WriteAuthorization(t *testing.T) {
influxdbtesting.ErrorsEqual(t, err, tt.wants.err)
})
t.Run("set authorization status", func(t *testing.T) {
err := s.SetAuthorizationStatus(ctx, 10, influxdb.Active)
t.Run("update authorization", func(t *testing.T) {
err := s.UpdateAuthorization(ctx, 10, &influxdb.AuthorizationUpdate{Status: influxdb.Active.Ptr()})
influxdbtesting.ErrorsEqual(t, err, tt.wants.err)
})

View File

@ -4,7 +4,7 @@ import (
"context"
"encoding/json"
"github.com/coreos/bbolt"
bolt "github.com/coreos/bbolt"
platform "github.com/influxdata/influxdb"
)
@ -382,27 +382,32 @@ func (c *Client) deleteAuthorization(ctx context.Context, tx *bolt.Tx, id platfo
return nil
}
// SetAuthorizationStatus updates the status of the authorization. Useful
// for setting an authorization to inactive or active.
func (c *Client) SetAuthorizationStatus(ctx context.Context, id platform.ID, status platform.Status) error {
// UpdateAuthorization updates the status and description if available.
func (c *Client) UpdateAuthorization(ctx context.Context, id platform.ID, upd *platform.AuthorizationUpdate) error {
return c.db.Update(func(tx *bolt.Tx) error {
if pe := c.updateAuthorization(ctx, tx, id, status); pe != nil {
if pe := c.updateAuthorization(ctx, tx, id, upd); pe != nil {
return &platform.Error{
Err: pe,
Op: platform.OpSetAuthorizationStatus,
Op: platform.OpUpdateAuthorization,
}
}
return nil
})
}
func (c *Client) updateAuthorization(ctx context.Context, tx *bolt.Tx, id platform.ID, status platform.Status) *platform.Error {
func (c *Client) updateAuthorization(ctx context.Context, tx *bolt.Tx, id platform.ID, upd *platform.AuthorizationUpdate) *platform.Error {
a, pe := c.findAuthorizationByID(ctx, tx, id)
if pe != nil {
return pe
}
a.Status = status
if upd.Status != nil {
a.Status = *upd.Status
}
if upd.Description != nil {
a.Description = *upd.Description
}
b, err := encodeAuthorization(a)
if err != nil {
return &platform.Error{

View File

@ -133,11 +133,6 @@ func (c *Client) initialize(ctx context.Context) error {
return err
}
// Always create Views bucket.
if err := c.initializeViews(ctx, tx); err != nil {
return err
}
// Always create Variables bucket.
if err := c.initializeVariables(ctx, tx); err != nil {
return err

View File

@ -28,11 +28,6 @@ func initDashboardService(f platformtesting.DashboardFields, t *testing.T) (plat
t.Fatalf("failed to populate dashboards")
}
}
for _, b := range f.Views {
if err := c.PutView(ctx, b); err != nil {
t.Fatalf("failed to populate views")
}
}
return c, bolt.OpPrefix, func() {
defer closeFn()
for _, b := range f.Dashboards {
@ -40,11 +35,6 @@ func initDashboardService(f platformtesting.DashboardFields, t *testing.T) (plat
t.Logf("failed to remove dashboard: %v", err)
}
}
for _, b := range f.Views {
if err := c.DeleteView(ctx, b.ID); err != nil {
t.Logf("failed to remove view: %v", err)
}
}
}
}

View File

@ -1,320 +0,0 @@
package bolt
import (
"context"
"encoding/json"
"sync"
bolt "github.com/coreos/bbolt"
platform "github.com/influxdata/influxdb"
)
var (
viewBucket = []byte("viewsv2")
)
// initializeViews ensures the views bucket exists in the bolt store,
// creating it if necessary. CreateBucketIfNotExists is idempotent, so
// this is safe to run on every startup.
func (c *Client) initializeViews(ctx context.Context, tx *bolt.Tx) error {
	// viewBucket is already declared as []byte; the previous
	// []byte(viewBucket) conversion was a redundant no-op.
	if _, err := tx.CreateBucketIfNotExists(viewBucket); err != nil {
		return err
	}
	return nil
}
// FindViewByID retrieves a view by id.
func (c *Client) FindViewByID(ctx context.Context, id platform.ID) (*platform.View, error) {
var d *platform.View
err := c.db.View(func(tx *bolt.Tx) error {
dash, err := c.findViewByID(ctx, tx, id)
if err != nil {
return &platform.Error{
Err: err,
Op: getOp(platform.OpFindViewByID),
}
}
d = dash
return nil
})
return d, err
}
func (c *Client) findViewByID(ctx context.Context, tx *bolt.Tx, id platform.ID) (*platform.View, *platform.Error) {
var d platform.View
encodedID, err := id.Encode()
if err != nil {
return nil, &platform.Error{
Err: err,
}
}
v := tx.Bucket(viewBucket).Get(encodedID)
if len(v) == 0 {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: platform.ErrViewNotFound,
}
}
if err := json.Unmarshal(v, &d); err != nil {
return nil, &platform.Error{
Err: err,
}
}
return &d, nil
}
// FindView retrieves a view using an arbitrary view filter.
func (c *Client) FindView(ctx context.Context, filter platform.ViewFilter) (*platform.View, error) {
if filter.ID != nil {
return c.FindViewByID(ctx, *filter.ID)
}
var d *platform.View
err := c.db.View(func(tx *bolt.Tx) error {
filterFn := filterViewsFn(filter)
return c.forEachView(ctx, tx, func(dash *platform.View) bool {
if filterFn(dash) {
d = dash
return false
}
return true
})
})
if err != nil {
return nil, &platform.Error{
Err: err,
}
}
if d == nil {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: platform.ErrViewNotFound,
}
}
return d, nil
}
// filterViewsFn converts a ViewFilter into a predicate over views.
// Precedence: an ID filter wins over a Types filter; with neither set,
// the returned predicate matches every view.
func filterViewsFn(filter platform.ViewFilter) func(v *platform.View) bool {
if filter.ID != nil {
return func(v *platform.View) bool {
return v.ID == *filter.ID
}
}
if len(filter.Types) > 0 {
// NOTE(review): sync.Map is used as the set container even though the
// map is written only here and read afterwards by the closure; a plain
// map keyed on the concrete element type of filter.Types would likely
// suffice — confirm that type before simplifying.
var sm sync.Map
for _, t := range filter.Types {
sm.Store(t, true)
}
return func(v *platform.View) bool {
// Membership test only; the stored value (true) is ignored.
_, ok := sm.Load(v.Properties.GetType())
return ok
}
}
return func(v *platform.View) bool { return true }
}
// FindViews retrieves all views that match an arbitrary view filter.
// It returns the matching views together with a count.
func (c *Client) FindViews(ctx context.Context, filter platform.ViewFilter) ([]*platform.View, int, error) {
ds := []*platform.View{}
op := getOp(platform.OpFindViews)
if filter.ID != nil {
// Fast path: a lookup by ID yields at most one view. A not-found
// error is deliberately swallowed so the caller gets an empty slice.
d, err := c.FindViewByID(ctx, *filter.ID)
if err != nil && platform.ErrorCode(err) != platform.ENotFound {
return nil, 0, &platform.Error{
Err: err,
Op: op,
}
}
if d != nil {
ds = append(ds, d)
}
// NOTE(review): the count is hard-coded to 1 even when the view was
// not found and ds is empty — confirm whether callers depend on this
// before changing it to len(ds).
return ds, 1, nil
}
err := c.db.View(func(tx *bolt.Tx) error {
dashs, err := c.findViews(ctx, tx, filter)
if err != nil {
return &platform.Error{
Err: err,
Op: op,
}
}
ds = dashs
return nil
})
return ds, len(ds), err
}
func (c *Client) findViews(ctx context.Context, tx *bolt.Tx, filter platform.ViewFilter) ([]*platform.View, error) {
ds := []*platform.View{}
filterFn := filterViewsFn(filter)
err := c.forEachView(ctx, tx, func(d *platform.View) bool {
if filterFn(d) {
ds = append(ds, d)
}
return true
})
if err != nil {
return nil, err
}
return ds, nil
}
// CreateView creates a platform view and sets d.ID.
func (c *Client) CreateView(ctx context.Context, d *platform.View) error {
return c.db.Update(func(tx *bolt.Tx) error {
if pe := c.createView(ctx, tx, d); pe != nil {
return &platform.Error{
Op: getOp(platform.OpCreateView),
Err: pe,
}
}
return nil
})
}
func (c *Client) createView(ctx context.Context, tx *bolt.Tx, d *platform.View) *platform.Error {
d.ID = c.IDGenerator.ID()
return c.putView(ctx, tx, d)
}
// PutView will put a view without setting an ID.
// The caller is responsible for d.ID being populated; an existing record
// with the same ID is overwritten.
func (c *Client) PutView(ctx context.Context, d *platform.View) error {
return c.db.Update(func(tx *bolt.Tx) error {
// The explicit nil check matters: returning c.putView(...) directly
// would wrap a nil *platform.Error in a non-nil error interface
// (the typed-nil pitfall).
if pe := c.putView(ctx, tx, d); pe != nil {
return pe
}
return nil
})
}
func (c *Client) putView(ctx context.Context, tx *bolt.Tx, d *platform.View) *platform.Error {
v, err := json.Marshal(d)
if err != nil {
return &platform.Error{
Err: err,
}
}
encodedID, err := d.ID.Encode()
if err != nil {
return &platform.Error{
Err: err,
}
}
if err := tx.Bucket(viewBucket).Put(encodedID, v); err != nil {
return &platform.Error{
Err: err,
}
}
return nil
}
// forEachView will iterate through all views while fn returns true.
func (c *Client) forEachView(ctx context.Context, tx *bolt.Tx, fn func(*platform.View) bool) error {
cur := tx.Bucket(viewBucket).Cursor()
for k, v := cur.First(); k != nil; k, v = cur.Next() {
d := &platform.View{}
if err := json.Unmarshal(v, d); err != nil {
return err
}
if !fn(d) {
break
}
}
return nil
}
// UpdateView updates a view according the parameters set on upd.
func (c *Client) UpdateView(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
var d *platform.View
err := c.db.Update(func(tx *bolt.Tx) error {
dash, pe := c.updateView(ctx, tx, id, upd)
if pe != nil {
return &platform.Error{
Err: pe,
Op: getOp(platform.OpUpdateView),
}
}
d = dash
return nil
})
return d, err
}
func (c *Client) updateView(ctx context.Context, tx *bolt.Tx, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
d, err := c.findViewByID(ctx, tx, id)
if err != nil {
return nil, err
}
if upd.Name != nil {
d.Name = *upd.Name
}
if upd.Properties != nil {
d.Properties = upd.Properties
}
if err := c.putView(ctx, tx, d); err != nil {
return nil, err
}
return d, nil
}
// DeleteView deletes a view and prunes it from the index.
func (c *Client) DeleteView(ctx context.Context, id platform.ID) error {
return c.db.Update(func(tx *bolt.Tx) error {
if pe := c.deleteView(ctx, tx, id); pe != nil {
return &platform.Error{
Err: pe,
Op: getOp(platform.OpDeleteView),
}
}
return nil
})
}
// deleteView removes the view record for id inside an open write
// transaction, along with any user-resource mappings that reference it.
func (c *Client) deleteView(ctx context.Context, tx *bolt.Tx, id platform.ID) *platform.Error {
// Look the view up first so deleting a missing ID reports not-found
// instead of silently succeeding.
_, pe := c.findViewByID(ctx, tx, id)
if pe != nil {
return pe
}
encodedID, err := id.Encode()
if err != nil {
return &platform.Error{
Err: err,
}
}
if err := tx.Bucket(viewBucket).Delete(encodedID); err != nil {
return &platform.Error{
Err: err,
}
}
// NOTE(review): the mappings are filtered with DashboardsResourceType
// rather than a view-specific resource type — confirm this is intended
// (views may be tracked under the dashboards resource).
if err := c.deleteUserResourceMappings(ctx, tx, platform.UserResourceMappingFilter{
ResourceID: id,
ResourceType: platform.DashboardsResourceType,
}); err != nil {
return &platform.Error{
Err: err,
}
}
return nil
}

View File

@ -1,52 +0,0 @@
package bolt_test
import (
"context"
"testing"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/bolt"
platformtesting "github.com/influxdata/influxdb/testing"
)
// initViewService builds a bolt-backed ViewService pre-populated with
// f.Views for the shared platformtesting conformance suite. It returns
// the service, the bolt op prefix, and a cleanup func that removes the
// seeded views and closes the client.
func initViewService(f platformtesting.ViewFields, t *testing.T) (platform.ViewService, string, func()) {
c, closeFn, err := NewTestClient()
if err != nil {
t.Fatalf("failed to create new bolt client: %v", err)
}
c.IDGenerator = f.IDGenerator
ctx := context.TODO()
for _, b := range f.Views {
if err := c.PutView(ctx, b); err != nil {
t.Fatalf("failed to populate cells")
}
}
return c, bolt.OpPrefix, func() {
// Close the client only after the seeded views are deleted.
defer closeFn()
for _, b := range f.Views {
if err := c.DeleteView(ctx, b.ID); err != nil {
t.Logf("failed to remove cell: %v", err)
}
}
}
}
func TestViewService_CreateView(t *testing.T) {
platformtesting.CreateView(initViewService, t)
}
func TestViewService_FindViewByID(t *testing.T) {
platformtesting.FindViewByID(initViewService, t)
}
func TestViewService_FindViews(t *testing.T) {
platformtesting.FindViews(initViewService, t)
}
func TestViewService_DeleteView(t *testing.T) {
platformtesting.DeleteView(initViewService, t)
}
func TestViewService_UpdateView(t *testing.T) {
platformtesting.UpdateView(initViewService, t)
}

View File

@ -5,10 +5,8 @@ import (
"os"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/bolt"
"github.com/influxdata/influxdb/cmd/influx/internal"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/internal/fs"
"github.com/spf13/cobra"
)
@ -289,17 +287,7 @@ func init() {
func newAuthorizationService(f Flags) (platform.AuthorizationService, error) {
if flags.local {
boltFile, err := fs.BoltFile()
if err != nil {
return nil, err
}
c := bolt.NewClient()
c.Path = boltFile
if err := c.Open(context.Background()); err != nil {
return nil, err
}
return c, nil
return newLocalKVService()
}
return &http.AuthorizationService{
Addr: flags.host,
@ -473,7 +461,9 @@ func authorizationActiveF(cmd *cobra.Command, args []string) error {
return err
}
if err := s.SetAuthorizationStatus(context.Background(), id, platform.Active); err != nil {
if err := s.UpdateAuthorization(context.Background(), id, &platform.AuthorizationUpdate{
Status: platform.Active.Ptr(),
}); err != nil {
return err
}
@ -542,7 +532,9 @@ func authorizationInactiveF(cmd *cobra.Command, args []string) error {
return err
}
if err := s.SetAuthorizationStatus(ctx, id, platform.Inactive); err != nil {
if err := s.UpdateAuthorization(context.Background(), id, &platform.AuthorizationUpdate{
Status: platform.Inactive.Ptr(),
}); err != nil {
return err
}

View File

@ -7,10 +7,8 @@ import (
"time"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/bolt"
"github.com/influxdata/influxdb/cmd/influx/internal"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/internal/fs"
"github.com/spf13/cobra"
)
@ -53,17 +51,7 @@ func init() {
func newBucketService(f Flags) (platform.BucketService, error) {
if flags.local {
boltFile, err := fs.BoltFile()
if err != nil {
return nil, err
}
c := bolt.NewClient()
c.Path = boltFile
if err := c.Open(context.Background()); err != nil {
return nil, err
}
return c, nil
return newLocalKVService()
}
return &http.BucketService{
Addr: flags.host,

View File

@ -8,9 +8,11 @@ import (
"path/filepath"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/bolt"
"github.com/influxdata/influxdb/cmd/influx/internal"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/internal/fs"
"github.com/influxdata/influxdb/kv"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@ -164,3 +166,17 @@ func Execute() {
os.Exit(1)
}
}
func newLocalKVService() (*kv.Service, error) {
boltFile, err := fs.BoltFile()
if err != nil {
return nil, err
}
store := bolt.NewKVStore(boltFile)
if err := store.Open(context.Background()); err != nil {
return nil, err
}
return kv.NewService(store), nil
}

View File

@ -6,10 +6,8 @@ import (
"os"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/bolt"
"github.com/influxdata/influxdb/cmd/influx/internal"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/internal/fs"
"github.com/spf13/cobra"
)
@ -47,17 +45,7 @@ func init() {
func newOrganizationService(f Flags) (platform.OrganizationService, error) {
if flags.local {
boltFile, err := fs.BoltFile()
if err != nil {
return nil, err
}
c := bolt.NewClient()
c.Path = boltFile
if err := c.Open(context.Background()); err != nil {
return nil, err
}
return c, nil
return newLocalKVService()
}
return &http.OrganizationService{
Addr: flags.host,

View File

@ -5,10 +5,8 @@ import (
"os"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/bolt"
"github.com/influxdata/influxdb/cmd/influx/internal"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/internal/fs"
"github.com/spf13/cobra"
)
@ -44,17 +42,7 @@ func init() {
func newUserService(f Flags) (platform.UserService, error) {
if flags.local {
boltFile, err := fs.BoltFile()
if err != nil {
return nil, err
}
c := bolt.NewClient()
c.Path = boltFile
if err := c.Open(context.Background()); err != nil {
return nil, err
}
return c, nil
return newLocalKVService()
}
return &http.UserService{
Addr: flags.host,
@ -64,17 +52,7 @@ func newUserService(f Flags) (platform.UserService, error) {
func newUserResourceMappingService(f Flags) (platform.UserResourceMappingService, error) {
if flags.local {
boltFile, err := fs.BoltFile()
if err != nil {
return nil, err
}
c := bolt.NewClient()
c.Path = boltFile
if err := c.Open(context.Background()); err != nil {
return nil, err
}
return c, nil
return newLocalKVService()
}
return &http.UserResourceMappingService{
Addr: flags.host,

View File

@ -547,12 +547,13 @@ func (m *Launcher) run(ctx context.Context) (err error) {
executor := taskexecutor.NewAsyncQueryServiceExecutor(m.logger.With(zap.String("service", "task-executor")), m.queryController, authSvc, store)
lw := taskbackend.NewPointLogWriter(pointsWriter)
m.scheduler = taskbackend.NewScheduler(store, executor, lw, time.Now().UTC().Unix(), taskbackend.WithTicker(ctx, 100*time.Millisecond), taskbackend.WithLogger(m.logger))
queryService := query.QueryServiceBridge{AsyncQueryService: m.queryController}
lr := taskbackend.NewQueryLogReader(queryService)
taskControlService := taskbackend.TaskControlAdaptor(store, lw, lr)
m.scheduler = taskbackend.NewScheduler(taskControlService, executor, time.Now().UTC().Unix(), taskbackend.WithTicker(ctx, 100*time.Millisecond), taskbackend.WithLogger(m.logger))
m.scheduler.Start(ctx)
m.reg.MustRegister(m.scheduler.PrometheusCollectors()...)
queryService := query.QueryServiceBridge{AsyncQueryService: m.queryController}
lr := taskbackend.NewQueryLogReader(queryService)
taskSvc = task.PlatformAdapter(coordinator.New(m.logger.With(zap.String("service", "task-coordinator")), m.scheduler, store), lr, m.scheduler, authSvc, userResourceSvc, orgSvc)
taskSvc = task.NewValidator(m.logger.With(zap.String("service", "task-authz-validator")), taskSvc, bucketSvc)
m.taskStore = store

View File

@ -0,0 +1,381 @@
package launcher
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
nethttp "net/http"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
"github.com/influxdata/flux"
"github.com/influxdata/flux/lang"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/bolt"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/query"
)
// TestLauncher is a test wrapper for launcher.Launcher.
type TestLauncher struct {
*Launcher
// Root temporary directory for all data.
Path string
// Initialized after calling the Setup() helper.
User *platform.User
Org *platform.Organization
Bucket *platform.Bucket
Auth *platform.Authorization
// Standard in/out/err buffers.
Stdin bytes.Buffer
Stdout bytes.Buffer
Stderr bytes.Buffer
}
// NewTestLauncher returns a new instance of TestLauncher.
func NewTestLauncher() *TestLauncher {
l := &TestLauncher{Launcher: NewLauncher()}
l.Launcher.Stdin = &l.Stdin
l.Launcher.Stdout = &l.Stdout
l.Launcher.Stderr = &l.Stderr
if testing.Verbose() {
l.Launcher.Stdout = io.MultiWriter(l.Launcher.Stdout, os.Stdout)
l.Launcher.Stderr = io.MultiWriter(l.Launcher.Stderr, os.Stderr)
}
path, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
l.Path = path
return l
}
// RunTestLauncherOrFail initializes and starts a TestLauncher with the
// given extra arguments, failing the test immediately on error.
func RunTestLauncherOrFail(tb testing.TB, ctx context.Context, args ...string) *TestLauncher {
tb.Helper()
l := NewTestLauncher()
if err := l.Run(ctx, args...); err != nil {
tb.Fatal(err)
}
return l
}
// Run executes the program with additional arguments to set paths and ports.
func (tl *TestLauncher) Run(ctx context.Context, args ...string) error {
args = append(args, "--bolt-path", filepath.Join(tl.Path, "influxd.bolt"))
args = append(args, "--protos-path", filepath.Join(tl.Path, "protos"))
args = append(args, "--engine-path", filepath.Join(tl.Path, "engine"))
args = append(args, "--http-bind-address", "127.0.0.1:0")
args = append(args, "--log-level", "debug")
return tl.Launcher.Run(ctx, args...)
}
// Shutdown stops the program and cleans up temporary paths.
func (tl *TestLauncher) Shutdown(ctx context.Context) error {
tl.Cancel()
tl.Launcher.Shutdown(ctx)
return os.RemoveAll(tl.Path)
}
// ShutdownOrFail stops the program and cleans up temporary paths. Fail on error.
func (tl *TestLauncher) ShutdownOrFail(tb testing.TB, ctx context.Context) {
tb.Helper()
if err := tl.Shutdown(ctx); err != nil {
tb.Fatal(err)
}
}
// SetupOrFail creates a new user, bucket, org, and auth token. Fail on error.
func (tl *TestLauncher) SetupOrFail(tb testing.TB) {
results := tl.OnBoardOrFail(tb, &platform.OnboardingRequest{
User: "USER",
Password: "PASSWORD",
Org: "ORG",
Bucket: "BUCKET",
})
tl.User = results.User
tl.Org = results.Org
tl.Bucket = results.Bucket
tl.Auth = results.Auth
}
// OnBoardOrFail attempts an on-boarding request or fails on error.
// The on-boarding status is also reset to allow multiple user/org/buckets to be created.
func (tl *TestLauncher) OnBoardOrFail(tb testing.TB, req *platform.OnboardingRequest) *platform.OnboardingResults {
tb.Helper()
res, err := tl.KeyValueService().Generate(context.Background(), req)
if err != nil {
tb.Fatal(err)
}
err = tl.KeyValueService().PutOnboardingStatus(context.Background(), false)
if err != nil {
tb.Fatal(err)
}
return res
}
// WriteOrFail attempts a write to the organization and bucket identified by to or fails if there is an error.
func (tl *TestLauncher) WriteOrFail(tb testing.TB, to *platform.OnboardingResults, data string) {
tb.Helper()
resp, err := nethttp.DefaultClient.Do(tl.NewHTTPRequestOrFail(tb, "POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", to.Org.ID, to.Bucket.ID), to.Auth.Token, data))
if err != nil {
tb.Fatal(err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
tb.Fatal(err)
}
if err := resp.Body.Close(); err != nil {
tb.Fatal(err)
}
if resp.StatusCode != nethttp.StatusNoContent {
tb.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header)
}
}
// WritePointsOrFail attempts a write to the organization and bucket used during setup or fails if there is an error.
func (tl *TestLauncher) WritePointsOrFail(tb testing.TB, data string) {
tb.Helper()
resp, err := nethttp.DefaultClient.Do(
tl.NewHTTPRequestOrFail(
tb,
"POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", tl.Org.ID, tl.Bucket.ID),
tl.Auth.Token,
data))
if err != nil {
tb.Fatal(err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
tb.Fatal(err)
}
if err := resp.Body.Close(); err != nil {
tb.Fatal(err)
}
// /api/v2/write responds 204 No Content on success; anything else fails.
if resp.StatusCode != nethttp.StatusNoContent {
tb.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header)
}
}
// MustExecuteQuery executes the provided query panicking if an error is encountered.
// Callers of MustExecuteQuery must call Done on the returned QueryResults.
func (tl *TestLauncher) MustExecuteQuery(query string) *QueryResults {
results, err := tl.ExecuteQuery(query)
if err != nil {
panic(err)
}
return results
}
// ExecuteQuery executes the provided Flux query against the launcher's
// query controller using the authorization and organization created
// during setup. Callers of ExecuteQuery must call Done on the returned
// QueryResults.
func (tl *TestLauncher) ExecuteQuery(q string) (*QueryResults, error) {
	fq, err := tl.QueryController().Query(context.Background(), &query.Request{
		Authorization:  tl.Auth,
		OrganizationID: tl.Auth.OrgID,
		Compiler: lang.FluxCompiler{
			Query: q,
		}})
	if err != nil {
		return nil, err
	}
	// Surface any error raised while the query started; return the
	// already-captured err instead of calling fq.Err() a second time.
	if err := fq.Err(); err != nil {
		return nil, err
	}
	return &QueryResults{
		Results: <-fq.Ready(),
		Query:   fq,
	}, nil
}
// QueryAndConsume queries InfluxDB using the request provided. It uses a function to consume the results obtained.
// It returns the first error encountered when requesting the query, consuming the results, or executing the query.
func (tl *TestLauncher) QueryAndConsume(ctx context.Context, req *query.Request, fn func(r flux.Result) error) error {
res, err := tl.FluxQueryService().Query(ctx, req)
if err != nil {
return err
}
// iterate over results to populate res.Err()
var gotErr error
for res.More() {
if err := fn(res.Next()); gotErr == nil {
gotErr = err
}
}
if gotErr != nil {
return gotErr
}
return res.Err()
}
// QueryAndNopConsume does the same as QueryAndConsume but consumes results with a nop function.
func (tl *TestLauncher) QueryAndNopConsume(ctx context.Context, req *query.Request) error {
return tl.QueryAndConsume(ctx, req, func(r flux.Result) error {
return r.Tables().Do(func(table flux.Table) error {
return nil
})
})
}
// FluxQueryOrFail performs a query to the specified organization and returns the results
// or fails if there is an error.
func (tl *TestLauncher) FluxQueryOrFail(tb testing.TB, org *platform.Organization, token string, query string) string {
tb.Helper()
b, err := http.SimpleQuery(tl.URL(), query, org.Name, token)
if err != nil {
tb.Fatal(err)
}
return string(b)
}
// MustNewHTTPRequest returns a new nethttp.Request with base URL and auth attached. Fail on error.
func (tl *TestLauncher) MustNewHTTPRequest(method, rawurl, body string) *nethttp.Request {
req, err := nethttp.NewRequest(method, tl.URL()+rawurl, strings.NewReader(body))
if err != nil {
panic(err)
}
req.Header.Set("Authorization", "Token "+tl.Auth.Token)
return req
}
// NewHTTPRequestOrFail returns a new nethttp.Request with base URL and the
// provided auth token attached. Fail on error.
func (tl *TestLauncher) NewHTTPRequestOrFail(tb testing.TB, method, rawurl, token string, body string) *nethttp.Request {
tb.Helper()
req, err := nethttp.NewRequest(method, tl.URL()+rawurl, strings.NewReader(body))
if err != nil {
tb.Fatal(err)
}
req.Header.Set("Authorization", "Token "+token)
return req
}
// Services
func (tl *TestLauncher) FluxService() *http.FluxService {
return &http.FluxService{Addr: tl.URL(), Token: tl.Auth.Token}
}
func (tl *TestLauncher) FluxQueryService() *http.FluxQueryService {
return &http.FluxQueryService{Addr: tl.URL(), Token: tl.Auth.Token}
}
func (tl *TestLauncher) BucketService() *http.BucketService {
return &http.BucketService{Addr: tl.URL(), Token: tl.Auth.Token, OpPrefix: bolt.OpPrefix}
}
func (tl *TestLauncher) AuthorizationService() *http.AuthorizationService {
return &http.AuthorizationService{Addr: tl.URL(), Token: tl.Auth.Token}
}
func (tl *TestLauncher) TaskService() *http.TaskService {
return &http.TaskService{Addr: tl.URL(), Token: tl.Auth.Token}
}
// QueryResult wraps a single flux.Result with some helper methods.
type QueryResult struct {
t *testing.T
q flux.Result
}
// HasTablesWithCols checks if the desired number of tables and columns
// exist, ignoring any system columns. want holds one entry per expected
// table: the number of non-system columns that table should have, in
// iteration order.
//
// If the result is not as expected then the testing.T fails.
func (r *QueryResult) HasTablesWithCols(want []int) {
r.t.Helper()
// Implicit system columns on every table: _start, _stop, _time, _f
systemCols := 4
got := []int{}
if err := r.q.Tables().Do(func(b flux.Table) error {
got = append(got, len(b.Cols())-systemCols)
// Drain the table so the underlying result is fully consumed.
b.Do(func(c flux.ColReader) error { return nil })
return nil
}); err != nil {
r.t.Fatal(err)
}
if !reflect.DeepEqual(got, want) {
r.t.Fatalf("got %v, expected %v", got, want)
}
}
// TablesN returns the number of tables for the result.
func (r *QueryResult) TablesN() int {
var total int
r.q.Tables().Do(func(b flux.Table) error {
total++
b.Do(func(c flux.ColReader) error { return nil })
return nil
})
return total
}
// QueryResults wraps a set of query results with some helper methods.
type QueryResults struct {
Results map[string]flux.Result
Query flux.Query
}
func (r *QueryResults) Done() {
r.Query.Done()
}
// First returns the first QueryResult. When there are not exactly 1 table First
// will fail.
func (r *QueryResults) First(t *testing.T) *QueryResult {
r.HasTableCount(t, 1)
for _, result := range r.Results {
return &QueryResult{t: t, q: result}
}
return nil
}
// HasTableCount asserts that there are n tables in the result.
func (r *QueryResults) HasTableCount(t *testing.T, n int) {
if got, exp := len(r.Results), n; got != exp {
t.Fatalf("result has %d tables, expected %d. Tables: %s", got, exp, r.Names())
}
}
// Names returns the set of result names for the query results, in map
// iteration (i.e. unspecified) order; see SortedNames for a deterministic
// ordering. It returns nil when there are no results.
func (r *QueryResults) Names() []string {
	if len(r.Results) == 0 {
		return nil
	}
	// BUG FIX: this was make([]string, len(r.Results), 0), which panics at
	// runtime because the requested length exceeds the capacity; the intent
	// was an empty slice with pre-sized capacity.
	names := make([]string, 0, len(r.Results))
	for k := range r.Results {
		names = append(names, k)
	}
	return names
}
// SortedNames returns the result names for the query results in
// lexicographic order.
func (r *QueryResults) SortedNames() []string {
	out := r.Names()
	sort.Strings(out)
	return out
}

View File

@ -1,15 +1,10 @@
package launcher_test
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
nethttp "net/http"
"os"
"path/filepath"
"strings"
"testing"
platform "github.com/influxdata/influxdb"
@ -22,7 +17,7 @@ import (
var ctx = context.Background()
func TestLauncher_Setup(t *testing.T) {
l := NewLauncher()
l := launcher.NewTestLauncher()
if err := l.Run(ctx); err != nil {
t.Fatal(err)
}
@ -50,7 +45,7 @@ func TestLauncher_Setup(t *testing.T) {
// This is to mimic chronograf using cookies as sessions
// rather than authorizations
func TestLauncher_SetupWithUsers(t *testing.T) {
l := RunLauncherOrFail(t, ctx)
l := launcher.RunTestLauncherOrFail(t, ctx)
l.SetupOrFail(t)
defer l.ShutdownOrFail(t, ctx)
@ -144,147 +139,3 @@ func TestLauncher_SetupWithUsers(t *testing.T) {
t.Fatalf("unexpected 2 users: %#+v", exp)
}
}
// Launcher is a test wrapper for launcher.Launcher. It captures stdio in
// buffers, provisions a temporary data directory, and remembers the
// entities created during setup for use by tests.
type Launcher struct {
	*launcher.Launcher

	// Root temporary directory for all data.
	Path string

	// Initialized after calling the Setup() helper.
	User   *platform.User
	Org    *platform.Organization
	Bucket *platform.Bucket
	Auth   *platform.Authorization

	// Standard in/out/err buffers.
	Stdin  bytes.Buffer
	Stdout bytes.Buffer
	Stderr bytes.Buffer
}
// NewLauncher returns a new instance of Launcher wired to in-memory stdio
// buffers and a fresh temporary data directory.
func NewLauncher() *Launcher {
	l := &Launcher{Launcher: launcher.NewLauncher()}
	l.Launcher.Stdin = &l.Stdin
	l.Launcher.Stdout = &l.Stdout
	l.Launcher.Stderr = &l.Stderr
	// In verbose test runs, mirror output to the test process's own
	// stdout/stderr so it is visible live.
	if testing.Verbose() {
		l.Launcher.Stdout = io.MultiWriter(l.Launcher.Stdout, os.Stdout)
		l.Launcher.Stderr = io.MultiWriter(l.Launcher.Stderr, os.Stderr)
	}

	path, err := ioutil.TempDir("", "")
	if err != nil {
		// Test helper: an unusable temp dir is unrecoverable.
		panic(err)
	}
	l.Path = path
	return l
}
// RunLauncherOrFail initializes and starts the server, failing tb if the
// launcher cannot be run.
func RunLauncherOrFail(tb testing.TB, ctx context.Context, args ...string) *Launcher {
	tb.Helper()

	tl := NewLauncher()
	if err := tl.Run(ctx, args...); err != nil {
		tb.Fatal(err)
	}
	return tl
}
// Run executes the program with additional arguments to set paths and ports.
// Caller-supplied args come first, followed by the test defaults.
func (l *Launcher) Run(ctx context.Context, args ...string) error {
	defaults := []string{
		"--bolt-path", filepath.Join(l.Path, "influxd.bolt"),
		"--protos-path", filepath.Join(l.Path, "protos"),
		"--engine-path", filepath.Join(l.Path, "engine"),
		"--http-bind-address", "127.0.0.1:0", // port 0: pick any free port
		"--log-level", "debug",
	}
	return l.Launcher.Run(ctx, append(args, defaults...)...)
}
// Shutdown stops the program and cleans up temporary paths.
func (l *Launcher) Shutdown(ctx context.Context) error {
	l.Cancel()
	// NOTE(review): any failure reported by the underlying launcher
	// shutdown is not propagated; only the temp-dir removal error is
	// returned. Confirm whether launcher.Launcher.Shutdown reports
	// errors worth surfacing to tests.
	l.Launcher.Shutdown(ctx)
	return os.RemoveAll(l.Path)
}
// ShutdownOrFail stops the program and cleans up temporary paths. Fail on
// error. Intended to be deferred right after a successful Run/setup.
func (l *Launcher) ShutdownOrFail(tb testing.TB, ctx context.Context) {
	tb.Helper()
	if err := l.Shutdown(ctx); err != nil {
		tb.Fatal(err)
	}
}
// SetupOrFail creates a new user, bucket, org, and auth token. Fail on error.
// The created entities are stored on the Launcher for later use by tests.
func (l *Launcher) SetupOrFail(tb testing.TB) {
	// Fixed placeholder credentials; tests access them via l.User, l.Org,
	// l.Bucket and l.Auth rather than by name.
	results := l.OnBoardOrFail(tb, &platform.OnboardingRequest{
		User:     "USER",
		Password: "PASSWORD",
		Org:      "ORG",
		Bucket:   "BUCKET",
	})

	l.User = results.User
	l.Org = results.Org
	l.Bucket = results.Bucket
	l.Auth = results.Auth
}
// OnBoardOrFail attempts an on-boarding request or fails on error.
// The on-boarding status is also reset to allow multiple user/org/buckets to be created.
func (l *Launcher) OnBoardOrFail(tb testing.TB, req *platform.OnboardingRequest) *platform.OnboardingResults {
	tb.Helper()

	ctx := context.Background()
	svc := l.KeyValueService()

	res, err := svc.Generate(ctx, req)
	if err != nil {
		tb.Fatal(err)
	}

	if err := svc.PutOnboardingStatus(ctx, false); err != nil {
		tb.Fatal(err)
	}
	return res
}
// FluxService returns an HTTP flux client bound to this launcher's URL,
// authenticated with the token created by SetupOrFail.
func (l *Launcher) FluxService() *http.FluxService {
	return &http.FluxService{Addr: l.URL(), Token: l.Auth.Token}
}
// BucketService returns an HTTP bucket client bound to this launcher's URL,
// authenticated with the token created by SetupOrFail.
func (l *Launcher) BucketService() *http.BucketService {
	return &http.BucketService{Addr: l.URL(), Token: l.Auth.Token}
}
// AuthorizationService returns an HTTP authorization client bound to this
// launcher's URL, authenticated with the token created by SetupOrFail.
func (l *Launcher) AuthorizationService() *http.AuthorizationService {
	return &http.AuthorizationService{Addr: l.URL(), Token: l.Auth.Token}
}
// TaskService returns an HTTP task client bound to this launcher's URL,
// authenticated with the token created by SetupOrFail.
func (l *Launcher) TaskService() *http.TaskService {
	return &http.TaskService{Addr: l.URL(), Token: l.Auth.Token}
}
// MustNewHTTPRequest returns a new nethttp.Request with base URL and auth attached. Fail on error.
func (l *Launcher) MustNewHTTPRequest(method, rawurl, body string) *nethttp.Request {
	req, err := nethttp.NewRequest(method, l.URL()+rawurl, strings.NewReader(body))
	if err != nil {
		// Test helper: a malformed request is programmer error.
		panic(err)
	}

	// Authenticate with the token stored by SetupOrFail.
	req.Header.Set("Authorization", "Token "+l.Auth.Token)
	return req
}
// NewHTTPRequestOrFail returns a new nethttp.Request with base URL and the
// given token attached. Fails tb on error.
func (l *Launcher) NewHTTPRequestOrFail(tb testing.TB, method, rawurl, token string, body string) *nethttp.Request {
	tb.Helper()
	req, err := nethttp.NewRequest(method, l.URL()+rawurl, strings.NewReader(body))
	if err != nil {
		tb.Fatal(err)
	}
	req.Header.Set("Authorization", "Token "+token)
	return req
}

View File

@ -6,20 +6,19 @@ import (
"fmt"
"io"
nethttp "net/http"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/influxdata/flux"
"github.com/influxdata/flux/lang"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/cmd/influxd/launcher"
phttp "github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/query"
)
func TestPipeline_Write_Query_FieldKey(t *testing.T) {
be := RunLauncherOrFail(t, ctx)
be := launcher.RunTestLauncherOrFail(t, ctx)
be.SetupOrFail(t)
defer be.ShutdownOrFail(t, ctx)
@ -55,7 +54,7 @@ mem,server=b value=45.2`))
// _measurement=cpu,region=west,server=a,_field=v0
// _measurement=cpu,region=west,server=b,_field=v0
//
results := be.MustExecuteQuery(be.Org.ID, rawQ, be.Auth)
results := be.MustExecuteQuery(rawQ)
defer results.Done()
results.First(t).HasTablesWithCols([]int{5, 4, 4})
}
@ -64,9 +63,7 @@ mem,server=b value=45.2`))
// and checks that the queried results contain the expected number of tables
// and expected number of columns.
func TestPipeline_WriteV2_Query(t *testing.T) {
t.Parallel()
be := RunLauncherOrFail(t, ctx)
be := launcher.RunTestLauncherOrFail(t, ctx)
be.SetupOrFail(t)
defer be.ShutdownOrFail(t, ctx)
@ -97,127 +94,52 @@ func TestPipeline_WriteV2_Query(t *testing.T) {
t.Fatalf("exp status %d; got %d, body: %s", nethttp.StatusNoContent, resp.StatusCode, buf.String())
}
res := be.MustExecuteQuery(
be.Org.ID,
fmt.Sprintf(`from(bucket:"%s") |> range(start:-5m)`, be.Bucket.Name),
be.Auth)
res := be.MustExecuteQuery(fmt.Sprintf(`from(bucket:"%s") |> range(start:-5m)`, be.Bucket.Name))
defer res.Done()
res.HasTableCount(t, 1)
}
// QueryResult wraps a single flux.Result with some helper methods.
type QueryResult struct {
t *testing.T
q flux.Result
}
// This test initializes a default launcher; writes some data; queries the data (success);
// sets memory limits to the same read query; checks that the query fails because limits are exceeded.
func TestPipeline_QueryMemoryLimits(t *testing.T) {
l := launcher.RunTestLauncherOrFail(t, ctx)
l.SetupOrFail(t)
defer l.ShutdownOrFail(t, ctx)
// HasTableWithCols checks if the desired number of tables and columns exist,
// ignoring any system columns.
//
// If the result is not as expected then the testing.T fails.
func (r *QueryResult) HasTablesWithCols(want []int) {
r.t.Helper()
// _start, _stop, _time, _f
systemCols := 4
got := []int{}
if err := r.q.Tables().Do(func(b flux.Table) error {
got = append(got, len(b.Cols())-systemCols)
b.Do(func(c flux.ColReader) error { return nil })
return nil
}); err != nil {
r.t.Fatal(err)
// write some points
for i := 0; i < 100; i++ {
l.WritePointsOrFail(t, fmt.Sprintf(`m,k=v1 f=%di %d`, i*100, time.Now().UnixNano()))
}
if !reflect.DeepEqual(got, want) {
r.t.Fatalf("got %v, expected %v", got, want)
}
}
// TablesN returns the number of tables for the result.
func (r *QueryResult) TablesN() int {
var total int
r.q.Tables().Do(func(b flux.Table) error {
total++
b.Do(func(c flux.ColReader) error { return nil })
return nil
})
return total
}
// MustExecuteQuery executes the provided query panicking if an error is encountered.
// Callers of MustExecuteQuery must call Done on the returned QueryResults.
func (p *Launcher) MustExecuteQuery(orgID platform.ID, query string, auth *platform.Authorization) *QueryResults {
results, err := p.ExecuteQuery(orgID, query, auth)
// compile a from query and get the spec
spec, err := flux.Compile(context.Background(), fmt.Sprintf(`from(bucket:"%s") |> range(start:-5m)`, l.Bucket.Name), time.Now())
if err != nil {
panic(err)
t.Fatal(err)
}
return results
}
// ExecuteQuery executes the provided query against the ith query node.
// Callers of ExecuteQuery must call Done on the returned QueryResults.
func (p *Launcher) ExecuteQuery(orgID platform.ID, q string, auth *platform.Authorization) (*QueryResults, error) {
fq, err := p.QueryController().Query(context.Background(), &query.Request{
Authorization: auth,
OrganizationID: orgID,
Compiler: lang.FluxCompiler{
Query: q,
}})
if err != nil {
return nil, err
// we expect this request to succeed
req := &query.Request{
Authorization: l.Auth,
OrganizationID: l.Org.ID,
Compiler: lang.SpecCompiler{
Spec: spec,
},
}
if err = fq.Err(); err != nil {
return nil, fq.Err()
if err := l.QueryAndNopConsume(context.Background(), req); err != nil {
t.Fatal(err)
}
return &QueryResults{
Results: <-fq.Ready(),
Query: fq,
}, nil
}
// QueryResults wraps a set of query results with some helper methods.
type QueryResults struct {
Results map[string]flux.Result
Query flux.Query
}
func (r *QueryResults) Done() {
r.Query.Done()
}
// First returns the first QueryResult. When there are not exactly 1 table First
// will fail.
func (r *QueryResults) First(t *testing.T) *QueryResult {
r.HasTableCount(t, 1)
for _, result := range r.Results {
return &QueryResult{t: t, q: result}
// ok, the first request went well, let's add memory limits:
// this query should error.
spec.Resources = flux.ResourceManagement{
MemoryBytesQuota: 100,
}
return nil
}
// HasTableCount asserts that there are n tables in the result.
func (r *QueryResults) HasTableCount(t *testing.T, n int) {
if got, exp := len(r.Results), n; got != exp {
t.Fatalf("result has %d tables, expected %d. Tables: %s", got, exp, r.Names())
if err := l.QueryAndNopConsume(context.Background(), req); err != nil {
if !strings.Contains(err.Error(), "allocation limit reached") {
t.Fatalf("query errored with unexpected error: %v", err)
}
} else {
t.Fatal("expected error, got successful query execution")
}
}
// Names returns the set of result names for the query results. Ordering is
// unspecified (map iteration order); use SortedNames for deterministic
// output.
func (r *QueryResults) Names() []string {
	if len(r.Results) == 0 {
		return nil
	}
	// BUG FIX: was make([]string, len(r.Results), 0), which panics with
	// "makeslice: cap out of range" whenever at least one result exists
	// (length may not exceed capacity).
	names := make([]string, 0, len(r.Results))
	for k := range r.Results {
		names = append(names, k)
	}
	return names
}
// SortedNames returns the sorted set of table names for the query results.
func (r *QueryResults) SortedNames() []string {
names := r.Names()
sort.Strings(names)
return names
}

View File

@ -10,13 +10,14 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/cmd/influxd/launcher"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/toml"
"github.com/influxdata/influxdb/tsdb/tsm1"
)
func TestStorage_WriteAndQuery(t *testing.T) {
l := RunLauncherOrFail(t, ctx)
l := launcher.RunTestLauncherOrFail(t, ctx)
org1 := l.OnBoardOrFail(t, &influxdb.OnboardingRequest{
User: "USER-1",
@ -53,7 +54,7 @@ func TestStorage_WriteAndQuery(t *testing.T) {
}
func TestLauncher_WriteAndQuery(t *testing.T) {
l := RunLauncherOrFail(t, ctx)
l := launcher.RunTestLauncherOrFail(t, ctx)
l.SetupOrFail(t)
defer l.ShutdownOrFail(t, ctx)
@ -91,7 +92,7 @@ func TestLauncher_WriteAndQuery(t *testing.T) {
}
func TestLauncher_BucketDelete(t *testing.T) {
l := RunLauncherOrFail(t, ctx)
l := launcher.RunTestLauncherOrFail(t, ctx)
l.SetupOrFail(t)
defer l.ShutdownOrFail(t, ctx)
@ -157,7 +158,7 @@ func TestLauncher_BucketDelete(t *testing.T) {
}
func TestStorage_CacheSnapshot_Size(t *testing.T) {
l := NewLauncher()
l := launcher.NewTestLauncher()
l.StorageConfig.Engine.Cache.SnapshotMemorySize = 10
l.StorageConfig.Engine.Cache.SnapshotAgeDuration = toml.Duration(time.Hour)
defer l.ShutdownOrFail(t, ctx)
@ -203,7 +204,7 @@ func TestStorage_CacheSnapshot_Size(t *testing.T) {
}
func TestStorage_CacheSnapshot_Age(t *testing.T) {
l := NewLauncher()
l := launcher.NewTestLauncher()
l.StorageConfig.Engine.Cache.SnapshotAgeDuration = toml.Duration(time.Second)
defer l.ShutdownOrFail(t, ctx)
@ -246,38 +247,3 @@ func TestStorage_CacheSnapshot_Age(t *testing.T) {
t.Fatalf("got %d series in TSM files, expected %d", got, exp)
}
}
// WriteOrFail attempts a write to the organization and bucket identified by to or fails if there is an error.
func (l *Launcher) WriteOrFail(tb testing.TB, to *influxdb.OnboardingResults, data string) {
tb.Helper()
resp, err := nethttp.DefaultClient.Do(l.NewHTTPRequestOrFail(tb, "POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", to.Org.ID, to.Bucket.ID), to.Auth.Token, data))
if err != nil {
tb.Fatal(err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
tb.Fatal(err)
}
if err := resp.Body.Close(); err != nil {
tb.Fatal(err)
}
if resp.StatusCode != nethttp.StatusNoContent {
tb.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header)
}
}
// FluxQueryOrFail performs a query to the specified organization and returns the results
// or fails if there is an error.
func (l *Launcher) FluxQueryOrFail(tb testing.TB, org *influxdb.Organization, token string, query string) string {
tb.Helper()
b, err := http.SimpleQuery(l.URL(), query, org.Name, token)
if err != nil {
tb.Fatal(err)
}
return string(b)
}

View File

@ -13,12 +13,13 @@ import (
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute/executetest"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/cmd/influxd/launcher"
pctx "github.com/influxdata/influxdb/context"
"github.com/influxdata/influxdb/task/backend"
)
func TestLauncher_Task(t *testing.T) {
be := RunLauncherOrFail(t, ctx)
be := launcher.RunTestLauncherOrFail(t, ctx)
be.SetupOrFail(t)
defer be.ShutdownOrFail(t, ctx)
@ -95,7 +96,7 @@ stuff f=-123.456,b=true,s="hello"
}
from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, bOut.Name, be.Org.Name),
}
created, err := be.TaskService().CreateTask(pctx.SetAuthorizer(ctx, be.Auth), create)
created, err := be.TaskService().CreateTask(ctx, create)
if err != nil {
t.Fatal(err)
}
@ -160,7 +161,7 @@ from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, b
// Explicitly set the now option so want and got have the same _start and _end values.
nowOpt := fmt.Sprintf("option now = () => %s\n", time.Unix(now, 0).UTC().Format(time.RFC3339))
res := be.MustExecuteQuery(org.ID, nowOpt+`from(bucket:"my_bucket_in") |> range(start:-5m)`, be.Auth)
res := be.MustExecuteQuery(nowOpt + `from(bucket:"my_bucket_in") |> range(start:-5m)`)
defer res.Done()
if len(res.Results) < 1 {
t.Fail()
@ -185,7 +186,7 @@ from(bucket:"my_bucket_in") |> range(start:-5m) |> to(bucket:"%s", org:"%s")`, b
for _, w := range want {
executetest.NormalizeTables(w)
}
res = be.MustExecuteQuery(org.ID, nowOpt+`from(bucket:"my_bucket_out") |> range(start:-5m)`, be.Auth)
res = be.MustExecuteQuery(nowOpt + `from(bucket:"my_bucket_out") |> range(start:-5m)`)
defer res.Done()
got := make(map[string][]*executetest.Table)
for k, v := range res.Results {

View File

@ -2,6 +2,8 @@ package influxdb
import (
"context"
"encoding/json"
"fmt"
"net/url"
"sort"
"time"
@ -13,6 +15,9 @@ const ErrDashboardNotFound = "dashboard not found"
// ErrCellNotFound is the error msg for a missing cell.
const ErrCellNotFound = "cell not found"
// ErrViewNotFound is the error msg for a missing View.
const ErrViewNotFound = "view not found"
// ops for dashboard service.
const (
OpFindDashboardByID = "FindDashboardByID"
@ -229,3 +234,503 @@ func (u CellUpdate) Valid() *Error {
return nil
}
// ViewUpdate is a struct for updating Views.
type ViewUpdate struct {
	ViewContentsUpdate
	// Properties replaces the view's visualization properties when non-nil.
	Properties ViewProperties
}
// Valid validates the update struct. It expects minimal values to be set:
// at least a new name or non-empty properties.
func (u ViewUpdate) Valid() *Error {
	if _, isEmpty := u.Properties.(EmptyViewProperties); isEmpty && u.Name == nil {
		return &Error{
			Code: EInvalid,
			Msg:  "expected at least one attribute to be updated",
		}
	}

	return nil
}
// Apply updates a view with the view update's properties, validating the
// update first and applying only the fields that are set.
func (u ViewUpdate) Apply(v *View) error {
	if err := u.Valid(); err != nil {
		return err
	}

	if name := u.Name; name != nil {
		v.Name = *name
	}
	if props := u.Properties; props != nil {
		v.Properties = props
	}

	return nil
}
// ViewContentsUpdate is a struct for updating the non properties content of a View.
type ViewContentsUpdate struct {
	// Name, when non-nil, is the new name for the view.
	Name *string `json:"name"`
}
// ViewFilter represents a set of filter that restrict the returned results.
type ViewFilter struct {
ID *ID
Types []string
}
// View holds positional and visual information for a View.
type View struct {
ViewContents
Properties ViewProperties
}
// ViewContents is the id and name of a specific view.
type ViewContents struct {
ID ID `json:"id,omitempty"`
Name string `json:"name"`
}
// ViewProperties is used to mark other structures as conforming to a View.
type ViewProperties interface {
	viewProperties() // unexported marker: only types in this package can implement
	GetType() string
}

// EmptyViewProperties is visualization that has no values.
type EmptyViewProperties struct{}

func (v EmptyViewProperties) viewProperties() {}

// GetType returns the empty string: an empty visualization has no type.
func (v EmptyViewProperties) GetType() string { return "" }
// UnmarshalViewPropertiesJSON unmarshals JSON bytes into a ViewProperties.
//
// The bytes are expected to contain a "properties" object whose "shape"
// field (and, for shape "chronograf-v2", its "type" field) selects the
// concrete ViewProperties implementation to decode into.
//
// NOTE(review): for shape "chronograf-v2" with an unrecognized "type",
// vis is never assigned and the function returns (nil, nil) — callers get
// a nil ViewProperties with no error. Confirm whether that is intended or
// should fail like the unknown-shape case below.
func UnmarshalViewPropertiesJSON(b []byte) (ViewProperties, error) {
	var v struct {
		B json.RawMessage `json:"properties"`
	}

	if err := json.Unmarshal(b, &v); err != nil {
		return nil, err
	}

	if len(v.B) == 0 {
		// Then there wasn't any visualization field, so there's no need unmarshal it
		return EmptyViewProperties{}, nil
	}

	// Peek at the discriminator fields only; the full payload is decoded
	// again below into the selected concrete type.
	var t struct {
		Shape string `json:"shape"`
		Type  string `json:"type"`
	}
	if err := json.Unmarshal(v.B, &t); err != nil {
		return nil, err
	}

	var vis ViewProperties
	switch t.Shape {
	case "chronograf-v2":
		switch t.Type {
		case "xy":
			var xyv XYViewProperties
			if err := json.Unmarshal(v.B, &xyv); err != nil {
				return nil, err
			}
			vis = xyv
		case "single-stat":
			var ssv SingleStatViewProperties
			if err := json.Unmarshal(v.B, &ssv); err != nil {
				return nil, err
			}
			vis = ssv
		case "gauge":
			var gv GaugeViewProperties
			if err := json.Unmarshal(v.B, &gv); err != nil {
				return nil, err
			}
			vis = gv
		case "table":
			var tv TableViewProperties
			if err := json.Unmarshal(v.B, &tv); err != nil {
				return nil, err
			}
			vis = tv
		case "markdown":
			var mv MarkdownViewProperties
			if err := json.Unmarshal(v.B, &mv); err != nil {
				return nil, err
			}
			vis = mv
		case "log-viewer": // happens in log viewer stays in log viewer.
			var lv LogViewProperties
			if err := json.Unmarshal(v.B, &lv); err != nil {
				return nil, err
			}
			vis = lv
		case "line-plus-single-stat":
			var lv LinePlusSingleStatProperties
			if err := json.Unmarshal(v.B, &lv); err != nil {
				return nil, err
			}
			vis = lv
		case "histogram":
			var hv HistogramViewProperties
			if err := json.Unmarshal(v.B, &hv); err != nil {
				return nil, err
			}
			vis = hv
		}
	case "empty":
		var ev EmptyViewProperties
		if err := json.Unmarshal(v.B, &ev); err != nil {
			return nil, err
		}
		vis = ev
	default:
		// NOTE(review): the message says "type" but prints the shape value.
		return nil, fmt.Errorf("unknown type %v", t.Shape)
	}

	return vis, nil
}
// MarshalViewPropertiesJSON encodes a view into JSON bytes.
//
// Each concrete ViewProperties type is wrapped in an anonymous struct that
// adds the "shape" discriminator consumed by UnmarshalViewPropertiesJSON.
// Any type not listed here — including EmptyViewProperties itself — is
// serialized as the "empty" shape with no values.
func MarshalViewPropertiesJSON(v ViewProperties) ([]byte, error) {
	var s interface{}
	switch vis := v.(type) {
	case SingleStatViewProperties:
		s = struct {
			Shape string `json:"shape"`
			SingleStatViewProperties
		}{
			Shape: "chronograf-v2",
			SingleStatViewProperties: vis,
		}
	case TableViewProperties:
		s = struct {
			Shape string `json:"shape"`
			TableViewProperties
		}{
			Shape: "chronograf-v2",
			TableViewProperties: vis,
		}
	case GaugeViewProperties:
		s = struct {
			Shape string `json:"shape"`
			GaugeViewProperties
		}{
			Shape: "chronograf-v2",
			GaugeViewProperties: vis,
		}
	case XYViewProperties:
		s = struct {
			Shape string `json:"shape"`
			XYViewProperties
		}{
			Shape: "chronograf-v2",
			XYViewProperties: vis,
		}
	case LinePlusSingleStatProperties:
		s = struct {
			Shape string `json:"shape"`
			LinePlusSingleStatProperties
		}{
			Shape: "chronograf-v2",
			LinePlusSingleStatProperties: vis,
		}
	case HistogramViewProperties:
		s = struct {
			Shape string `json:"shape"`
			HistogramViewProperties
		}{
			Shape: "chronograf-v2",
			HistogramViewProperties: vis,
		}
	case MarkdownViewProperties:
		s = struct {
			Shape string `json:"shape"`
			MarkdownViewProperties
		}{
			Shape: "chronograf-v2",
			MarkdownViewProperties: vis,
		}
	case LogViewProperties:
		s = struct {
			Shape string `json:"shape"`
			LogViewProperties
		}{
			Shape: "chronograf-v2",
			LogViewProperties: vis,
		}
	default:
		// Unknown or empty properties fall back to the "empty" shape.
		s = struct {
			Shape string `json:"shape"`
			EmptyViewProperties
		}{
			Shape: "empty",
			EmptyViewProperties: EmptyViewProperties{},
		}
	}
	return json.Marshal(s)
}
// MarshalJSON encodes a view to JSON bytes.
func (c View) MarshalJSON() ([]byte, error) {
	// Encode the polymorphic properties first so they can be embedded as a
	// raw "properties" member alongside the flat view contents.
	vis, err := MarshalViewPropertiesJSON(c.Properties)
	if err != nil {
		return nil, err
	}

	return json.Marshal(struct {
		ViewContents
		ViewProperties json.RawMessage `json:"properties"`
	}{
		ViewContents:   c.ViewContents,
		ViewProperties: vis,
	})
}
// UnmarshalJSON decodes JSON bytes into the corresponding view type (those that implement ViewProperties).
func (c *View) UnmarshalJSON(b []byte) error {
	// The flat contents (id, name) decode directly...
	if err := json.Unmarshal(b, &c.ViewContents); err != nil {
		return err
	}

	// ...while the polymorphic "properties" member needs the shape/type
	// dispatch in UnmarshalViewPropertiesJSON.
	v, err := UnmarshalViewPropertiesJSON(b)
	if err != nil {
		return err
	}
	c.Properties = v
	return nil
}
// UnmarshalJSON decodes JSON bytes into the corresponding view update type (those that implement ViewProperties).
func (u *ViewUpdate) UnmarshalJSON(b []byte) error {
	// Decode the flat update fields (name) directly...
	if err := json.Unmarshal(b, &u.ViewContentsUpdate); err != nil {
		return err
	}

	// ...then dispatch the polymorphic "properties" member.
	v, err := UnmarshalViewPropertiesJSON(b)
	if err != nil {
		return err
	}
	u.Properties = v
	return nil
}
// MarshalJSON encodes a view update to JSON bytes.
func (u ViewUpdate) MarshalJSON() ([]byte, error) {
	// Encode the polymorphic properties with their shape discriminator,
	// then embed them as a raw "properties" member.
	vis, err := MarshalViewPropertiesJSON(u.Properties)
	if err != nil {
		return nil, err
	}

	return json.Marshal(struct {
		ViewContentsUpdate
		ViewProperties json.RawMessage `json:"properties,omitempty"`
	}{
		ViewContentsUpdate: u.ViewContentsUpdate,
		ViewProperties:     vis,
	})
}
// LinePlusSingleStatProperties represents options for line plus single stat view in Chronograf
type LinePlusSingleStatProperties struct {
Queries []DashboardQuery `json:"queries"`
Axes map[string]Axis `json:"axes"`
Type string `json:"type"`
Legend Legend `json:"legend"`
ViewColors []ViewColor `json:"colors"`
Prefix string `json:"prefix"`
Suffix string `json:"suffix"`
DecimalPlaces DecimalPlaces `json:"decimalPlaces"`
Note string `json:"note"`
ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
}
// XYViewProperties represents options for line, bar, step, or stacked view in Chronograf
type XYViewProperties struct {
Queries []DashboardQuery `json:"queries"`
Axes map[string]Axis `json:"axes"`
Type string `json:"type"`
Legend Legend `json:"legend"`
Geom string `json:"geom"` // Either "line", "step", "stacked", or "bar"
ViewColors []ViewColor `json:"colors"`
Note string `json:"note"`
ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
}
// SingleStatViewProperties represents options for single stat view in Chronograf
type SingleStatViewProperties struct {
Type string `json:"type"`
Queries []DashboardQuery `json:"queries"`
Prefix string `json:"prefix"`
Suffix string `json:"suffix"`
ViewColors []ViewColor `json:"colors"`
DecimalPlaces DecimalPlaces `json:"decimalPlaces"`
Note string `json:"note"`
ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
}
// HistogramViewProperties represents options for histogram view in Chronograf.
type HistogramViewProperties struct {
	Type        string           `json:"type"`
	Queries     []DashboardQuery `json:"queries"`
	ViewColors  []ViewColor      `json:"colors"`
	XColumn     string           `json:"xColumn"`
	FillColumns []string         `json:"fillColumns"`
	// BUG FIX: the tag option must be the all-lowercase "omitempty";
	// the previous "omitEmpty" is not recognized by encoding/json, so an
	// empty xDomain was always emitted as null instead of being omitted.
	XDomain           []float64 `json:"xDomain,omitempty"`
	XAxisLabel        string    `json:"xAxisLabel"`
	Position          string    `json:"position"`
	BinCount          int       `json:"binCount"`
	Note              string    `json:"note"`
	ShowNoteWhenEmpty bool      `json:"showNoteWhenEmpty"`
}
// GaugeViewProperties represents options for gauge view in Chronograf
type GaugeViewProperties struct {
Type string `json:"type"`
Queries []DashboardQuery `json:"queries"`
Prefix string `json:"prefix"`
Suffix string `json:"suffix"`
ViewColors []ViewColor `json:"colors"`
DecimalPlaces DecimalPlaces `json:"decimalPlaces"`
Note string `json:"note"`
ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
}
// TableViewProperties represents options for table view in Chronograf
type TableViewProperties struct {
Type string `json:"type"`
Queries []DashboardQuery `json:"queries"`
ViewColors []ViewColor `json:"colors"`
TableOptions TableOptions `json:"tableOptions"`
FieldOptions []RenamableField `json:"fieldOptions"`
TimeFormat string `json:"timeFormat"`
DecimalPlaces DecimalPlaces `json:"decimalPlaces"`
Note string `json:"note"`
ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"`
}
// MarkdownViewProperties represents options for the markdown view in Chronograf.
type MarkdownViewProperties struct {
	Type string `json:"type"`
	Note string `json:"note"`
}
// LogViewProperties represents options for log viewer in Chronograf.
type LogViewProperties struct {
Type string `json:"type"`
Columns []LogViewerColumn `json:"columns"`
}
// LogViewerColumn represents a specific column in a Log Viewer.
type LogViewerColumn struct {
Name string `json:"name"`
Position int32 `json:"position"`
Settings []LogColumnSetting `json:"settings"`
}
// LogColumnSetting represent the settings for a specific column of a Log Viewer.
type LogColumnSetting struct {
Type string `json:"type"`
Value string `json:"value"`
Name string `json:"name,omitempty"`
}
// viewProperties is the marker method making each concrete visualization
// type satisfy the ViewProperties interface.
func (XYViewProperties) viewProperties()             {}
func (LinePlusSingleStatProperties) viewProperties() {}
func (SingleStatViewProperties) viewProperties()     {}
func (HistogramViewProperties) viewProperties()      {}
func (GaugeViewProperties) viewProperties()          {}
func (TableViewProperties) viewProperties()          {}
func (MarkdownViewProperties) viewProperties()       {}
func (LogViewProperties) viewProperties()            {}
// GetType returns each property struct's JSON "type" field, which serves
// as the discriminator within the "chronograf-v2" shape.
func (v XYViewProperties) GetType() string             { return v.Type }
func (v LinePlusSingleStatProperties) GetType() string { return v.Type }
func (v SingleStatViewProperties) GetType() string     { return v.Type }
func (v HistogramViewProperties) GetType() string      { return v.Type }
func (v GaugeViewProperties) GetType() string          { return v.Type }
func (v TableViewProperties) GetType() string          { return v.Type }
func (v MarkdownViewProperties) GetType() string       { return v.Type }
func (v LogViewProperties) GetType() string            { return v.Type }
/////////////////////////////
// Old Chronograf Types
/////////////////////////////
// DashboardQuery represents a query used in a dashboard cell
type DashboardQuery struct {
Text string `json:"text"`
EditMode string `json:"editMode"` // Either "builder" or "advanced"
Name string `json:"name"` // Term or phrase that refers to the query
BuilderConfig BuilderConfig `json:"builderConfig"`
}
// BuilderConfig carries the query-builder state attached to a
// DashboardQuery — presumably mirroring the UI builder's selected buckets,
// tag filters, and functions (confirm against the frontend consumers).
type BuilderConfig struct {
	Buckets []string `json:"buckets"`
	Tags    []struct {
		Key    string   `json:"key"`
		Values []string `json:"values"`
	} `json:"tags"`
	Functions []struct {
		Name string `json:"name"`
	} `json:"functions"`
}
// Axis represents the visible extents of a visualization
type Axis struct {
Bounds []string `json:"bounds"` // bounds are an arbitrary list of client-defined strings that specify the viewport for a View
LegacyBounds [2]int64 `json:"-"` // legacy bounds are for testing a migration from an earlier version of axis
Label string `json:"label"` // label is a description of this Axis
Prefix string `json:"prefix"` // Prefix represents a label prefix for formatting axis values
Suffix string `json:"suffix"` // Suffix represents a label suffix for formatting axis values
Base string `json:"base"` // Base represents the radix for formatting axis values
Scale string `json:"scale"` // Scale is the axis formatting scale. Supported: "log", "linear"
}
// ViewColor represents the encoding of data into visualizations
type ViewColor struct {
ID string `json:"id"` // ID is the unique id of the View color
Type string `json:"type"` // Type is how the color is used. Accepted (min,max,threshold)
Hex string `json:"hex"` // Hex is the hex number of the color
Name string `json:"name"` // Name is the user-facing name of the hex color
Value float64 `json:"value"` // Value is the data value mapped to this color
}
// Legend represents the encoding of data into a legend
type Legend struct {
Type string `json:"type,omitempty"`
Orientation string `json:"orientation,omitempty"`
}
// TableOptions is a type of options for a DashboardView with type Table
type TableOptions struct {
VerticalTimeAxis bool `json:"verticalTimeAxis"`
SortBy RenamableField `json:"sortBy"`
Wrapping string `json:"wrapping"`
FixFirstColumn bool `json:"fixFirstColumn"`
}
// RenamableField is a column/row field in a DashboardView of type Table
type RenamableField struct {
InternalName string `json:"internalName"`
DisplayName string `json:"displayName"`
Visible bool `json:"visible"`
}
// DecimalPlaces indicates whether decimal places should be enforced, and how many digits it should show.
type DecimalPlaces struct {
IsEnforced bool `json:"isEnforced"`
Digits int32 `json:"digits"`
}

View File

@ -72,7 +72,6 @@ type APIBackend struct {
ChronografService *server.Service
ProtoService influxdb.ProtoService
OrgLookupService authorizer.OrganizationService
ViewService influxdb.ViewService
DocumentService influxdb.DocumentService
}

View File

@ -65,7 +65,7 @@ func NewAuthorizationHandler(b *AuthorizationBackend) *AuthorizationHandler {
h.HandlerFunc("POST", "/api/v2/authorizations", h.handlePostAuthorization)
h.HandlerFunc("GET", "/api/v2/authorizations", h.handleGetAuthorizations)
h.HandlerFunc("GET", "/api/v2/authorizations/:id", h.handleGetAuthorization)
h.HandlerFunc("PATCH", "/api/v2/authorizations/:id", h.handleSetAuthorizationStatus)
h.HandlerFunc("PATCH", "/api/v2/authorizations/:id", h.handleUpdateAuthorization)
h.HandlerFunc("DELETE", "/api/v2/authorizations/:id", h.handleDeleteAuthorization)
return h
}
@ -459,11 +459,11 @@ func decodeGetAuthorizationRequest(ctx context.Context, r *http.Request) (*getAu
}, nil
}
// handleSetAuthorizationStatus is the HTTP handler for the PATCH /api/v2/authorizations/:id route that updates the authorization's status.
func (h *AuthorizationHandler) handleSetAuthorizationStatus(w http.ResponseWriter, r *http.Request) {
// handleUpdateAuthorization is the HTTP handler for the PATCH /api/v2/authorizations/:id route that updates the authorization's status and desc.
func (h *AuthorizationHandler) handleUpdateAuthorization(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := decodeSetAuthorizationStatusRequest(ctx, r)
req, err := decodeUpdateAuthorizationRequest(ctx, r)
if err != nil {
h.Logger.Info("failed to decode request", zap.String("handler", "updateAuthorization"), zap.Error(err))
EncodeError(ctx, err, w)
@ -476,13 +476,10 @@ func (h *AuthorizationHandler) handleSetAuthorizationStatus(w http.ResponseWrite
return
}
if req.Status != a.Status {
a.Status = req.Status
if err := h.AuthorizationService.SetAuthorizationStatus(ctx, a.ID, a.Status); err != nil {
if err := h.AuthorizationService.UpdateAuthorization(ctx, a.ID, req.AuthorizationUpdate); err != nil {
EncodeError(ctx, err, w)
return
}
}
o, err := h.OrganizationService.FindOrganizationByID(ctx, a.OrgID)
if err != nil {
@ -510,10 +507,10 @@ func (h *AuthorizationHandler) handleSetAuthorizationStatus(w http.ResponseWrite
type updateAuthorizationRequest struct {
ID platform.ID
Status platform.Status
*platform.AuthorizationUpdate
}
func decodeSetAuthorizationStatusRequest(ctx context.Context, r *http.Request) (*updateAuthorizationRequest, error) {
func decodeUpdateAuthorizationRequest(ctx context.Context, r *http.Request) (*updateAuthorizationRequest, error) {
params := httprouter.ParamsFromContext(ctx)
id := params.ByName("id")
if id == "" {
@ -528,14 +525,14 @@ func decodeSetAuthorizationStatusRequest(ctx context.Context, r *http.Request) (
return nil, err
}
a := &setAuthorizationStatusRequest{}
if err := json.NewDecoder(r.Body).Decode(a); err != nil {
upd := &platform.AuthorizationUpdate{}
if err := json.NewDecoder(r.Body).Decode(upd); err != nil {
return nil, err
}
return &updateAuthorizationRequest{
ID: i,
Status: a.Status,
AuthorizationUpdate: upd,
}, nil
}
@ -745,20 +742,14 @@ func (s *AuthorizationService) CreateAuthorization(ctx context.Context, a *platf
return nil
}
type setAuthorizationStatusRequest struct {
Status platform.Status `json:"status"`
}
// SetAuthorizationStatus updates an authorization's status.
func (s *AuthorizationService) SetAuthorizationStatus(ctx context.Context, id platform.ID, status platform.Status) error {
// UpdateAuthorization updates the status and description if available.
func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *platform.AuthorizationUpdate) error {
u, err := newURL(s.Addr, authorizationIDPath(id))
if err != nil {
return err
}
b, err := json.Marshal(setAuthorizationStatusRequest{
Status: status,
})
b, err := json.Marshal(upd)
if err != nil {
return err
}

View File

@ -776,8 +776,8 @@ func TestAuthorizationService_DeleteAuthorization(t *testing.T) {
platformtesting.DeleteAuthorization(initAuthorizationService, t)
}
func TestAuthorizationService_UpdateAuthorizationStatus(t *testing.T) {
platformtesting.UpdateAuthorizationStatus(initAuthorizationService, t)
func TestAuthorizationService_UpdateAuthorization(t *testing.T) {
platformtesting.UpdateAuthorization(initAuthorizationService, t)
}
func MustMarshal(o interface{}) []byte {

View File

@ -120,7 +120,6 @@ func NewBucketHandler(b *BucketBackend) *BucketHandler {
h.HandlerFunc("GET", bucketsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", bucketsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", bucketsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", bucketsIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}

View File

@ -125,7 +125,6 @@ func NewDashboardHandler(b *DashboardBackend) *DashboardHandler {
h.HandlerFunc("GET", dashboardsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", dashboardsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", dashboardsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", dashboardsIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}
@ -230,8 +229,34 @@ func newDashboardCellsResponse(dashboardID platform.ID, cs []*platform.Cell) das
return res
}
func newDashboardCellViewResponse(dashID, cellID platform.ID, v *platform.View) viewResponse {
return viewResponse{
type viewLinks struct {
Self string `json:"self"`
}
type dashboardCellViewResponse struct {
platform.View
Links viewLinks `json:"links"`
}
func (r dashboardCellViewResponse) MarshalJSON() ([]byte, error) {
props, err := platform.MarshalViewPropertiesJSON(r.Properties)
if err != nil {
return nil, err
}
return json.Marshal(struct {
platform.ViewContents
Links viewLinks `json:"links"`
Properties json.RawMessage `json:"properties"`
}{
ViewContents: r.ViewContents,
Links: r.Links,
Properties: props,
})
}
func newDashboardCellViewResponse(dashID, cellID platform.ID, v *platform.View) dashboardCellViewResponse {
return dashboardCellViewResponse{
Links: viewLinks{
Self: fmt.Sprintf("/api/v2/dashboards/%s/cells/%s", dashID, cellID),
},
@ -1322,7 +1347,7 @@ func (s *DashboardService) GetDashboardCellView(ctx context.Context, dashboardID
return nil, err
}
res := viewResponse{}
res := dashboardCellViewResponse{}
if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
return nil, err
}
@ -1362,7 +1387,7 @@ func (s *DashboardService) UpdateDashboardCellView(ctx context.Context, dashboar
return nil, err
}
res := viewResponse{}
res := dashboardCellViewResponse{}
if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
return nil, err
}

View File

@ -13,11 +13,14 @@ import (
"go.uber.org/zap"
"github.com/google/go-cmp/cmp"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/inmem"
"github.com/influxdata/influxdb/mock"
platformtesting "github.com/influxdata/influxdb/testing"
"github.com/julienschmidt/httprouter"
"github.com/yudai/gojsondiff"
"github.com/yudai/gojsondiff/formatter"
)
// NewMockDashboardBackend returns a DashboardBackend with mock services.
@ -1265,11 +1268,6 @@ func initDashboardService(f platformtesting.DashboardFields, t *testing.T) (plat
t.Fatalf("failed to populate dashboard")
}
}
for _, b := range f.Views {
if err := svc.PutView(ctx, b); err != nil {
t.Fatalf("failed to populate views")
}
}
dashboardBackend := NewMockDashboardBackend()
dashboardBackend.DashboardService = svc
@ -1386,3 +1384,39 @@ func TestService_handlePostDashboardLabel(t *testing.T) {
})
}
}
func jsonEqual(s1, s2 string) (eq bool, diff string, err error) {
var o1, o2 interface{}
if s1 == s2 {
return true, "", nil
}
if s1 == "" {
return false, s2, fmt.Errorf("s1 is empty")
}
if s2 == "" {
return false, s1, fmt.Errorf("s2 is empty")
}
if err = json.Unmarshal([]byte(s1), &o1); err != nil {
return
}
if err = json.Unmarshal([]byte(s2), &o2); err != nil {
return
}
differ := gojsondiff.New()
d, err := differ.Compare([]byte(s1), []byte(s2))
if err != nil {
return
}
config := formatter.AsciiFormatterConfig{}
formatter := formatter.NewAsciiFormatter(o1, config)
diff, err = formatter.Format(d)
return cmp.Equal(o1, o2), diff, err
}

View File

@ -432,30 +432,6 @@ func decodePostLabelMappingRequest(ctx context.Context, r *http.Request, rt plat
return req, nil
}
// newPatchLabelHandler returns a handler func for a PATCH to /labels endpoints
func newPatchLabelHandler(b *LabelBackend) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := decodePatchLabelRequest(ctx, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
label, err := b.LabelService.UpdateLabel(ctx, req.LabelID, req.Update)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newLabelResponse(label)); err != nil {
logEncodingError(b.Logger, r, err)
return
}
}
}
// newDeleteLabelHandler returns a handler func for a DELETE to /labels endpoints
func newDeleteLabelHandler(b *LabelBackend) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {

View File

@ -127,7 +127,6 @@ func NewOrgHandler(b *OrgBackend) *OrgHandler {
h.HandlerFunc("GET", organizationsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", organizationsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", organizationsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", organizationsIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}

View File

@ -1262,6 +1262,98 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/variables/{variableID}/labels':
get:
tags:
- Variables
summary: list all labels for a variable
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: variableID
schema:
type: string
required: true
description: ID of the variable
responses:
'200':
description: a list of all labels for a variable
content:
application/json:
schema:
$ref: "#/components/schemas/LabelsResponse"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
post:
tags:
- Variables
summary: add a label to a variable
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: variableID
schema:
type: string
required: true
description: ID of the variable
requestBody:
description: label to add
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/LabelMapping"
responses:
'200':
description: a list of all labels for a variable
content:
application/json:
schema:
$ref: "#/components/schemas/LabelsResponse"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/variables/{variableID}/labels/{labelID}':
delete:
tags:
- Variables
summary: delete a label from a variable
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
name: variableID
schema:
type: string
required: true
description: ID of the variable
- in: path
name: labelID
schema:
type: string
required: true
description: the label id to delete
responses:
'204':
description: delete has been accepted
'404':
description: variable not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/write:
post:
tags:
@ -2700,7 +2792,7 @@ paths:
content:
application/json:
schema:
$ref: "#/components/schemas/Authorization"
$ref: "#/components/schemas/AuthorizationUpdateRequest"
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: path
@ -4677,12 +4769,8 @@ paths:
schema:
$ref: "#/components/schemas/PasswordResetBody"
responses:
'200':
description: user details
content:
application/json:
schema:
$ref: "#/components/schemas/User"
'204':
description: password successfully updated
default:
description: unsuccessful authentication
content:
@ -5346,12 +5434,8 @@ components:
type: string
nullable: true
description: optional name of the organization of the organization with orgID.
Authorization:
required: [orgID, permissions]
AuthorizationUpdateRequest:
properties:
orgID:
type: string
description: ID of org that authorization is scoped to.
status:
description: if inactive the token is inactive and requests using the token will be rejected.
default: active
@ -5362,6 +5446,15 @@ components:
description:
type: string
description: A description of the token.
Authorization:
required: [orgID, permissions]
allOf:
- $ref: "#/components/schemas/AuthorizationUpdateRequest"
- type: object
properties:
orgID:
type: string
description: ID of org that authorization is scoped to.
permissions:
type: array
minLength: 1
@ -6435,6 +6528,8 @@ components:
enum: ["map"]
values:
type: object
additionalProperties:
type: string
QueryVariableProperties:
properties:
type:
@ -6461,6 +6556,12 @@ components:
self:
type: string
format: uri
org:
type: string
format: uri
labels:
type: string
format: uri
id:
readOnly: true
type: string
@ -6472,6 +6573,8 @@ components:
type: array
items:
type: string
labels:
$ref: "#/components/schemas/Labels"
arguments:
type: object
oneOf:
@ -6724,10 +6827,10 @@ components:
properties:
createdAt:
type: string
format: date
format: date-time
updatedAt:
type: string
format: date
format: date-time
cells:
$ref: "#/components/schemas/Cells"
labels:
@ -6892,6 +6995,15 @@ components:
type: object
labels:
$ref: "#/components/schemas/Labels"
links:
type: object
readOnly: true
example:
self: "/api/v2/documents/templates/1"
properties:
self:
description: URL for this document
$ref: "#/components/schemas/Link"
required:
- id
- meta
@ -6934,6 +7046,15 @@ components:
$ref: "#/components/schemas/DocumentMeta"
labels:
$ref: "#/components/schemas/Labels"
links:
type: object
readOnly: true
example:
self: "/api/v2/documents/templates/1"
properties:
self:
description: URL for this document
$ref: "#/components/schemas/Link"
required:
- id
- meta

View File

@ -145,7 +145,6 @@ func NewTaskHandler(b *TaskBackend) *TaskHandler {
h.HandlerFunc("GET", tasksIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", tasksIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", tasksIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", tasksIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}

View File

@ -71,7 +71,7 @@ func httpTaskServiceFactory(t *testing.T) (*servicetest.System, context.CancelFu
}
return &servicetest.System{
TaskControlService: servicetest.TaskControlAdaptor(store, rrw, rrw),
TaskControlService: backend.TaskControlAdaptor(store, rrw, rrw),
TaskService: taskService,
Ctx: ctx,
I: i,

View File

@ -111,7 +111,6 @@ func NewTelegrafHandler(b *TelegrafBackend) *TelegrafHandler {
h.HandlerFunc("GET", telegrafsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", telegrafsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", telegrafsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
h.HandlerFunc("PATCH", telegrafsIDLabelsIDPath, newPatchLabelHandler(labelBackend))
return h
}

View File

@ -22,12 +22,15 @@ const (
type VariableBackend struct {
Logger *zap.Logger
VariableService platform.VariableService
LabelService platform.LabelService
}
// NewVariableBackend creates a backend used by the variable handler.
func NewVariableBackend(b *APIBackend) *VariableBackend {
return &VariableBackend{
Logger: b.Logger.With(zap.String("handler", "variable")),
VariableService: b.VariableService,
LabelService: b.LabelService,
}
}
@ -38,6 +41,7 @@ type VariableHandler struct {
Logger *zap.Logger
VariableService platform.VariableService
LabelService platform.LabelService
}
// NewVariableHandler creates a new VariableHandler
@ -47,9 +51,12 @@ func NewVariableHandler(b *VariableBackend) *VariableHandler {
Logger: b.Logger,
VariableService: b.VariableService,
LabelService: b.LabelService,
}
entityPath := fmt.Sprintf("%s/:id", variablePath)
entityLabelsPath := fmt.Sprintf("%s/labels", entityPath)
entityLabelsIDPath := fmt.Sprintf("%s/:lid", entityLabelsPath)
h.HandlerFunc("GET", variablePath, h.handleGetVariables)
h.HandlerFunc("POST", variablePath, h.handlePostVariable)
@ -58,6 +65,15 @@ func NewVariableHandler(b *VariableBackend) *VariableHandler {
h.HandlerFunc("PUT", entityPath, h.handlePutVariable)
h.HandlerFunc("DELETE", entityPath, h.handleDeleteVariable)
labelBackend := &LabelBackend{
Logger: b.Logger.With(zap.String("handler", "label")),
LabelService: b.LabelService,
ResourceType: platform.DashboardsResourceType,
}
h.HandlerFunc("GET", entityLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", entityLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", entityLabelsIDPath, newDeleteLabelHandler(labelBackend))
return h
}
@ -74,7 +90,7 @@ func (r getVariablesResponse) ToPlatform() []*platform.Variable {
return variables
}
func newGetVariablesResponse(variables []*platform.Variable, f platform.VariableFilter, opts platform.FindOptions) getVariablesResponse {
func newGetVariablesResponse(ctx context.Context, variables []*platform.Variable, f platform.VariableFilter, opts platform.FindOptions, labelService platform.LabelService) getVariablesResponse {
num := len(variables)
resp := getVariablesResponse{
Variables: make([]variableResponse, 0, num),
@ -82,7 +98,8 @@ func newGetVariablesResponse(variables []*platform.Variable, f platform.Variable
}
for _, variable := range variables {
resp.Variables = append(resp.Variables, newVariableResponse(variable))
labels, _ := labelService.FindResourceLabels(ctx, platform.LabelMappingFilter{ResourceID: variable.ID})
resp.Variables = append(resp.Variables, newVariableResponse(variable, labels))
}
return resp
@ -138,7 +155,7 @@ func (h *VariableHandler) handleGetVariables(w http.ResponseWriter, r *http.Requ
return
}
err = encodeResponse(ctx, w, http.StatusOK, newGetVariablesResponse(variables, req.filter, req.opts))
err = encodeResponse(ctx, w, http.StatusOK, newGetVariablesResponse(ctx, variables, req.filter, req.opts, h.LabelService))
if err != nil {
logEncodingError(h.Logger, r, err)
return
@ -181,7 +198,13 @@ func (h *VariableHandler) handleGetVariable(w http.ResponseWriter, r *http.Reque
return
}
err = encodeResponse(ctx, w, http.StatusOK, newVariableResponse(variable))
labels, err := h.LabelService.FindResourceLabels(ctx, platform.LabelMappingFilter{ResourceID: variable.ID})
if err != nil {
EncodeError(ctx, err, w)
return
}
err = encodeResponse(ctx, w, http.StatusOK, newVariableResponse(variable, labels))
if err != nil {
logEncodingError(h.Logger, r, err)
return
@ -190,22 +213,32 @@ func (h *VariableHandler) handleGetVariable(w http.ResponseWriter, r *http.Reque
type variableLinks struct {
Self string `json:"self"`
Labels string `json:"labels"`
Org string `json:"org"`
}
type variableResponse struct {
*platform.Variable
Labels []platform.Label `json:"labels"`
Links variableLinks `json:"links"`
}
func newVariableResponse(m *platform.Variable) variableResponse {
return variableResponse{
func newVariableResponse(m *platform.Variable, labels []*platform.Label) variableResponse {
res := variableResponse{
Variable: m,
Labels: []platform.Label{},
Links: variableLinks{
Self: fmt.Sprintf("/api/v2/variables/%s", m.ID),
Labels: fmt.Sprintf("/api/v2/variables/%s/labels", m.ID),
Org: fmt.Sprintf("/api/v2/orgs/%s", m.OrganizationID),
},
}
for _, l := range labels {
res.Labels = append(res.Labels, *l)
}
return res
}
func (h *VariableHandler) handlePostVariable(w http.ResponseWriter, r *http.Request) {
@ -223,8 +256,7 @@ func (h *VariableHandler) handlePostVariable(w http.ResponseWriter, r *http.Requ
return
}
err = encodeResponse(ctx, w, http.StatusCreated, newVariableResponse(req.variable))
if err != nil {
if err := encodeResponse(ctx, w, http.StatusCreated, newVariableResponse(req.variable, []*platform.Label{})); err != nil {
logEncodingError(h.Logger, r, err)
return
}
@ -278,7 +310,13 @@ func (h *VariableHandler) handlePatchVariable(w http.ResponseWriter, r *http.Req
return
}
err = encodeResponse(ctx, w, http.StatusOK, newVariableResponse(variable))
labels, err := h.LabelService.FindResourceLabels(ctx, platform.LabelMappingFilter{ResourceID: variable.ID})
if err != nil {
EncodeError(ctx, err, w)
return
}
err = encodeResponse(ctx, w, http.StatusOK, newVariableResponse(variable, labels))
if err != nil {
logEncodingError(h.Logger, r, err)
return
@ -340,7 +378,13 @@ func (h *VariableHandler) handlePutVariable(w http.ResponseWriter, r *http.Reque
return
}
err = encodeResponse(ctx, w, http.StatusOK, newVariableResponse(req.variable))
labels, err := h.LabelService.FindResourceLabels(ctx, platform.LabelMappingFilter{ResourceID: req.variable.ID})
if err != nil {
EncodeError(ctx, err, w)
return
}
err = encodeResponse(ctx, w, http.StatusOK, newVariableResponse(req.variable, labels))
if err != nil {
logEncodingError(h.Logger, r, err)
return

View File

@ -3,6 +3,7 @@ package http
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
@ -23,12 +24,14 @@ func NewMockVariableBackend() *VariableBackend {
return &VariableBackend{
Logger: zap.NewNop().With(zap.String("handler", "variable")),
VariableService: mock.NewVariableService(),
LabelService: mock.NewLabelService(),
}
}
func TestVariableService_handleGetVariables(t *testing.T) {
type fields struct {
VariableService platform.VariableService
LabelService platform.LabelService
}
type args struct {
queryParams map[string][]string
@ -74,11 +77,25 @@ func TestVariableService_handleGetVariables(t *testing.T) {
}, nil
},
},
&mock.LabelService{
FindResourceLabelsFn: func(ctx context.Context, f platform.LabelMappingFilter) ([]*platform.Label, error) {
labels := []*platform.Label{
{
ID: platformtesting.MustIDBase16("fc3dc670a4be9b9a"),
Name: "label",
Properties: map[string]string{
"color": "fff000",
},
},
}
return labels, nil
},
},
},
wants: wants{
statusCode: http.StatusOK,
contentType: "application/json; charset=utf-8",
body: `{"variables":[{"id":"6162207574726f71","orgID":"0000000000000001","name":"variable-a","selected":["b"],"arguments":{"type":"constant","values":["a","b"]},"links":{"self":"/api/v2/variables/6162207574726f71","org": "/api/v2/orgs/0000000000000001"}},{"id":"61726920617a696f","orgID":"0000000000000001","name":"variable-b","selected":["c"],"arguments":{"type":"map","values":{"a":"b","c":"d"}},"links":{"self":"/api/v2/variables/61726920617a696f","org": "/api/v2/orgs/0000000000000001"}}],"links":{"self":"/api/v2/variables?descending=false&limit=20&offset=0"}}`,
body: `{"variables":[{"id":"6162207574726f71","orgID":"0000000000000001","name":"variable-a","selected":["b"],"arguments":{"type":"constant","values":["a","b"]},"labels":[{"id":"fc3dc670a4be9b9a","name":"label","properties":{"color":"fff000"}}],"links":{"self":"/api/v2/variables/6162207574726f71","labels":"/api/v2/variables/6162207574726f71/labels","org":"/api/v2/orgs/0000000000000001"}},{"id":"61726920617a696f","orgID":"0000000000000001","name":"variable-b","selected":["c"],"arguments":{"type":"map","values":{"a":"b","c":"d"}},"labels":[{"id":"fc3dc670a4be9b9a","name":"label","properties":{"color":"fff000"}}],"links":{"self":"/api/v2/variables/61726920617a696f","labels":"/api/v2/variables/61726920617a696f/labels","org": "/api/v2/orgs/0000000000000001"}}],"links":{"self":"/api/v2/variables?descending=false&limit=20&offset=0"}}`,
},
},
{
@ -89,6 +106,11 @@ func TestVariableService_handleGetVariables(t *testing.T) {
return []*platform.Variable{}, nil
},
},
&mock.LabelService{
FindResourceLabelsFn: func(ctx context.Context, f platform.LabelMappingFilter) ([]*platform.Label, error) {
return []*platform.Label{}, nil
},
},
},
args: args{
map[string][]string{
@ -120,6 +142,20 @@ func TestVariableService_handleGetVariables(t *testing.T) {
}, nil
},
},
&mock.LabelService{
FindResourceLabelsFn: func(ctx context.Context, f platform.LabelMappingFilter) ([]*platform.Label, error) {
labels := []*platform.Label{
{
ID: platformtesting.MustIDBase16("fc3dc670a4be9b9a"),
Name: "label",
Properties: map[string]string{
"color": "fff000",
},
},
}
return labels, nil
},
},
},
args: args{
map[string][]string{
@ -129,7 +165,7 @@ func TestVariableService_handleGetVariables(t *testing.T) {
wants: wants{
statusCode: http.StatusOK,
contentType: "application/json; charset=utf-8",
body: `{"variables":[{"id":"6162207574726f71","orgID":"0000000000000001","name":"variable-a","selected":["b"],"arguments":{"type":"constant","values":["a","b"]},"links":{"self":"/api/v2/variables/6162207574726f71","org":"/api/v2/orgs/0000000000000001"}}],"links":{"self":"/api/v2/variables?descending=false&limit=20&offset=0&orgID=0000000000000001"}}`,
body: `{"variables":[{"id":"6162207574726f71","orgID":"0000000000000001","name":"variable-a","selected":["b"],"arguments":{"type":"constant","values":["a","b"]},"labels":[{"id":"fc3dc670a4be9b9a","name":"label","properties":{"color": "fff000"}}],"links":{"self":"/api/v2/variables/6162207574726f71","org":"/api/v2/orgs/0000000000000001","labels":"/api/v2/variables/6162207574726f71/labels"}}],"links":{"self":"/api/v2/variables?descending=false&limit=20&offset=0&orgID=0000000000000001"}}`,
},
},
}
@ -137,10 +173,12 @@ func TestVariableService_handleGetVariables(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
variableBackend := NewMockVariableBackend()
variableBackend.LabelService = tt.fields.LabelService
variableBackend.VariableService = tt.fields.VariableService
h := NewVariableHandler(variableBackend)
r := httptest.NewRequest("GET", "http://howdy.tld", nil)
qp := r.URL.Query()
for k, vs := range tt.args.queryParams {
for _, v := range vs {
@ -213,7 +251,7 @@ func TestVariableService_handleGetVariable(t *testing.T) {
wants: wants{
statusCode: 200,
contentType: "application/json; charset=utf-8",
body: `{"id":"75650d0a636f6d70","orgID":"0000000000000001","name":"variable-a","selected":["b"],"arguments":{"type":"constant","values":["a","b"]},"links":{"self":"/api/v2/variables/75650d0a636f6d70","org":"/api/v2/orgs/0000000000000001"}}
body: `{"id":"75650d0a636f6d70","orgID":"0000000000000001","name":"variable-a","selected":["b"],"arguments":{"type":"constant","values":["a","b"]},"labels":[],"links":{"self":"/api/v2/variables/75650d0a636f6d70","labels":"/api/v2/variables/75650d0a636f6d70/labels","org":"/api/v2/orgs/0000000000000001"}}
`,
},
},
@ -291,7 +329,6 @@ func TestVariableService_handleGetVariable(t *testing.T) {
if body != tt.wants.body {
t.Errorf("got = %v, want %v", body, tt.wants.body)
}
})
}
}
@ -347,7 +384,7 @@ func TestVariableService_handlePostVariable(t *testing.T) {
wants: wants{
statusCode: 201,
contentType: "application/json; charset=utf-8",
body: `{"id":"75650d0a636f6d70","orgID":"0000000000000001","name":"my-great-variable","selected":["'foo'"],"arguments":{"type":"constant","values":["bar","foo"]},"links":{"self":"/api/v2/variables/75650d0a636f6d70","org":"/api/v2/orgs/0000000000000001"}}
body: `{"id":"75650d0a636f6d70","orgID":"0000000000000001","name":"my-great-variable","selected":["'foo'"],"arguments":{"type":"constant","values":["bar","foo"]},"labels":[],"links":{"self":"/api/v2/variables/75650d0a636f6d70","labels":"/api/v2/variables/75650d0a636f6d70/labels","org":"/api/v2/orgs/0000000000000001"}}
`,
},
},
@ -464,7 +501,7 @@ func TestVariableService_handlePatchVariable(t *testing.T) {
wants: wants{
statusCode: 200,
contentType: "application/json; charset=utf-8",
body: `{"id":"75650d0a636f6d70","orgID":"0000000000000002","name":"new-name","selected":[],"arguments":{"type":"constant","values":[]},"links":{"self":"/api/v2/variables/75650d0a636f6d70","org":"/api/v2/orgs/0000000000000002"}}
body: `{"id":"75650d0a636f6d70","orgID":"0000000000000002","name":"new-name","selected":[],"arguments":{"type":"constant","values":[]},"labels":[],"links":{"self":"/api/v2/variables/75650d0a636f6d70","labels":"/api/v2/variables/75650d0a636f6d70/labels","org":"/api/v2/orgs/0000000000000002"}}
`,
},
},
@ -604,6 +641,104 @@ func TestVariableService_handleDeleteVariable(t *testing.T) {
}
}
func TestService_handlePostVariableLabel(t *testing.T) {
type fields struct {
LabelService platform.LabelService
}
type args struct {
labelMapping *platform.LabelMapping
variableID platform.ID
}
type wants struct {
statusCode int
contentType string
body string
}
tests := []struct {
name string
fields fields
args args
wants wants
}{
{
name: "add label to variable",
fields: fields{
LabelService: &mock.LabelService{
FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*platform.Label, error) {
return &platform.Label{
ID: 1,
Name: "label",
Properties: map[string]string{
"color": "fff000",
},
}, nil
},
CreateLabelMappingFn: func(ctx context.Context, m *platform.LabelMapping) error { return nil },
},
},
args: args{
labelMapping: &platform.LabelMapping{
ResourceID: 100,
LabelID: 1,
},
variableID: 100,
},
wants: wants{
statusCode: http.StatusCreated,
contentType: "application/json; charset=utf-8",
body: `
{
"label": {
"id": "0000000000000001",
"name": "label",
"properties": {
"color": "fff000"
}
},
"links": {
"self": "/api/v2/labels/0000000000000001"
}
}
`,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
variableBackend := NewMockVariableBackend()
variableBackend.LabelService = tt.fields.LabelService
h := NewVariableHandler(variableBackend)
b, err := json.Marshal(tt.args.labelMapping)
if err != nil {
t.Fatalf("failed to unmarshal label mapping: %v", err)
}
url := fmt.Sprintf("http://localhost:9999/api/v2/variables/%s/labels", tt.args.variableID)
r := httptest.NewRequest("POST", url, bytes.NewReader(b))
w := httptest.NewRecorder()
h.ServeHTTP(w, r)
res := w.Result()
content := res.Header.Get("Content-Type")
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != tt.wants.statusCode {
t.Errorf("got %v, want %v", res.StatusCode, tt.wants.statusCode)
}
if tt.wants.contentType != "" && content != tt.wants.contentType {
t.Errorf("got %v, want %v", content, tt.wants.contentType)
}
if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
t.Errorf("Diff\n%s", diff)
}
})
}
}
func initVariableService(f platformtesting.VariableFields, t *testing.T) (platform.VariableService, string, func()) {
t.Helper()
svc := inmem.NewService()

View File

@ -1,407 +0,0 @@
// NOTE: This service has been deprecated and should not be used.
// Views are now resources that belong to dashboards. The reason for
// this is due to how we authorize operations against views.
package http
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/influxdata/influxdb"
"github.com/julienschmidt/httprouter"
"go.uber.org/zap"
)
// ViewBackend is all services and associated parameters required to construct
// the ScraperHandler.
type ViewBackend struct {
Logger *zap.Logger
ViewService influxdb.ViewService
UserService influxdb.UserService
UserResourceMappingService influxdb.UserResourceMappingService
LabelService influxdb.LabelService
}
// NewViewBackend returns a new instance of ViewBackend.
func NewViewBackend(b *APIBackend) *ViewBackend {
return &ViewBackend{
Logger: b.Logger.With(zap.String("handler", "scraper")),
ViewService: b.ViewService,
UserService: b.UserService,
LabelService: b.LabelService,
}
}
// ViewHandler is the handler for the view service
type ViewHandler struct {
*httprouter.Router
Logger *zap.Logger
ViewService influxdb.ViewService
UserService influxdb.UserService
UserResourceMappingService influxdb.UserResourceMappingService
LabelService influxdb.LabelService
}
const (
viewsPath = "/api/v2/views"
viewsIDPath = "/api/v2/views/:id"
viewsIDMembersPath = "/api/v2/views/:id/members"
viewsIDMembersIDPath = "/api/v2/views/:id/members/:userID"
viewsIDOwnersPath = "/api/v2/views/:id/owners"
viewsIDOwnersIDPath = "/api/v2/views/:id/owners/:userID"
viewsIDLabelsPath = "/api/v2/views/:id/labels"
viewsIDLabelsIDPath = "/api/v2/views/:id/labels/:lid"
)
// NewViewHandler returns a new instance of ViewHandler.
func NewViewHandler(b *ViewBackend) *ViewHandler {
h := &ViewHandler{
Router: NewRouter(),
Logger: b.Logger,
ViewService: b.ViewService,
UserResourceMappingService: b.UserResourceMappingService,
LabelService: b.LabelService,
UserService: b.UserService,
}
h.HandlerFunc("POST", viewsPath, h.handlePostViews)
h.HandlerFunc("GET", viewsPath, h.handleGetViews)
h.HandlerFunc("GET", viewsIDPath, h.handleGetView)
h.HandlerFunc("DELETE", viewsIDPath, h.handleDeleteView)
h.HandlerFunc("PATCH", viewsIDPath, h.handlePatchView)
memberBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: influxdb.ViewsResourceType,
UserType: influxdb.Member,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", viewsIDMembersPath, newPostMemberHandler(memberBackend))
h.HandlerFunc("GET", viewsIDMembersPath, newGetMembersHandler(memberBackend))
h.HandlerFunc("DELETE", viewsIDMembersIDPath, newDeleteMemberHandler(memberBackend))
ownerBackend := MemberBackend{
Logger: b.Logger.With(zap.String("handler", "member")),
ResourceType: influxdb.ViewsResourceType,
UserType: influxdb.Owner,
UserResourceMappingService: b.UserResourceMappingService,
UserService: b.UserService,
}
h.HandlerFunc("POST", viewsIDOwnersPath, newPostMemberHandler(ownerBackend))
h.HandlerFunc("GET", viewsIDOwnersPath, newGetMembersHandler(ownerBackend))
h.HandlerFunc("DELETE", viewsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend))
labelBackend := &LabelBackend{
Logger: b.Logger.With(zap.String("handler", "label")),
LabelService: b.LabelService,
}
h.HandlerFunc("GET", viewsIDLabelsPath, newGetLabelsHandler(labelBackend))
h.HandlerFunc("POST", viewsIDLabelsPath, newPostLabelHandler(labelBackend))
h.HandlerFunc("DELETE", viewsIDLabelsIDPath, newDeleteLabelHandler(labelBackend))
return h
}
type viewLinks struct {
Self string `json:"self"`
Labels string `json:"labels"`
}
type viewResponse struct {
influxdb.View
Links viewLinks `json:"links"`
}
func (r viewResponse) MarshalJSON() ([]byte, error) {
props, err := influxdb.MarshalViewPropertiesJSON(r.Properties)
if err != nil {
return nil, err
}
return json.Marshal(struct {
influxdb.ViewContents
Links viewLinks `json:"links"`
Properties json.RawMessage `json:"properties"`
}{
ViewContents: r.ViewContents,
Links: r.Links,
Properties: props,
})
}
func newViewResponse(c *influxdb.View) viewResponse {
return viewResponse{
Links: viewLinks{
Self: fmt.Sprintf("/api/v2/views/%s", c.ID),
Labels: fmt.Sprintf("/api/v2/views/%s/labels", c.ID),
},
View: *c,
}
}
// handleGetViews returns all views within the store.
func (h *ViewHandler) handleGetViews(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := decodeGetViewsRequest(ctx, r)
views, _, err := h.ViewService.FindViews(ctx, req.filter)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newGetViewsResponse(views)); err != nil {
logEncodingError(h.Logger, r, err)
return
}
}
type getViewsRequest struct {
filter influxdb.ViewFilter
}
// decodeGetViewsRequest builds the view filter from the request's query
// string; "type" may be supplied multiple times.
func decodeGetViewsRequest(ctx context.Context, r *http.Request) *getViewsRequest {
	query := r.URL.Query()

	req := &getViewsRequest{}
	req.filter.Types = query["type"]
	return req
}
// getViewsLinks holds the hyperlinks for the view collection response.
type getViewsLinks struct {
	Self string `json:"self"` // URL of the views collection
}
// getViewsResponse is the JSON envelope for a list of views.
type getViewsResponse struct {
	Links getViewsLinks  `json:"links"`
	Views []viewResponse `json:"views"`
}
// newGetViewsResponse builds the list envelope for a set of views. The
// Views slice is always non-nil so an empty result encodes as [] rather
// than null.
func newGetViewsResponse(views []*influxdb.View) getViewsResponse {
	items := make([]viewResponse, 0, len(views))
	for _, v := range views {
		items = append(items, newViewResponse(v))
	}

	return getViewsResponse{
		Links: getViewsLinks{Self: "/api/v2/views"},
		Views: items,
	}
}
// handlePostViews creates a new view from the JSON request body and
// responds with the created view and a 201 status.
func (h *ViewHandler) handlePostViews(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	request, err := decodePostViewRequest(ctx, r)
	if err != nil {
		EncodeError(ctx, err, w)
		return
	}

	if err = h.ViewService.CreateView(ctx, request.View); err != nil {
		EncodeError(ctx, err, w)
		return
	}

	if err = encodeResponse(ctx, w, http.StatusCreated, newViewResponse(request.View)); err != nil {
		logEncodingError(h.Logger, r, err)
	}
}
// postViewRequest carries the decoded body of a create-view request.
type postViewRequest struct {
	View *influxdb.View
}
// decodePostViewRequest parses the JSON body into a view; a malformed
// body yields an EInvalid error.
func decodePostViewRequest(ctx context.Context, r *http.Request) (*postViewRequest, error) {
	view := &influxdb.View{}
	if err := json.NewDecoder(r.Body).Decode(view); err != nil {
		return nil, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg:  err.Error(),
		}
	}
	return &postViewRequest{View: view}, nil
}
// handleGetView retrieves a single view by the ID in the URL path.
func (h *ViewHandler) handleGetView(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	request, err := decodeGetViewRequest(ctx, r)
	if err != nil {
		EncodeError(ctx, err, w)
		return
	}

	view, err := h.ViewService.FindViewByID(ctx, request.ViewID)
	if err != nil {
		EncodeError(ctx, err, w)
		return
	}

	if err := encodeResponse(ctx, w, http.StatusOK, newViewResponse(view)); err != nil {
		logEncodingError(h.Logger, r, err)
	}
}
// getViewRequest carries the decoded ID for a get-view request.
type getViewRequest struct {
	ViewID influxdb.ID
}
// decodeGetViewRequest extracts and validates the view ID from the URL
// path.
//
// It returns an EInvalid error when the "id" path parameter is missing or
// does not decode as an influxdb ID. Wrapping the decode failure as
// EInvalid matches decodePatchViewRequest, so a malformed ID consistently
// surfaces as a 400 instead of whatever the raw decode error maps to.
func decodeGetViewRequest(ctx context.Context, r *http.Request) (*getViewRequest, error) {
	params := httprouter.ParamsFromContext(ctx)
	id := params.ByName("id")
	if id == "" {
		return nil, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg:  "url missing id",
		}
	}

	var i influxdb.ID
	if err := i.DecodeFromString(id); err != nil {
		return nil, &influxdb.Error{
			Code: influxdb.EInvalid,
			Err:  err,
		}
	}

	return &getViewRequest{
		ViewID: i,
	}, nil
}
// handleDeleteView removes the view identified in the URL path and
// responds with 204 No Content on success.
func (h *ViewHandler) handleDeleteView(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	request, err := decodeDeleteViewRequest(ctx, r)
	if err != nil {
		EncodeError(ctx, err, w)
		return
	}

	if err := h.ViewService.DeleteView(ctx, request.ViewID); err != nil {
		EncodeError(ctx, err, w)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}
// deleteViewRequest carries the decoded ID for a delete-view request.
type deleteViewRequest struct {
	ViewID influxdb.ID
}
// decodeDeleteViewRequest extracts and validates the view ID from the URL
// path.
//
// It returns an EInvalid error when the "id" path parameter is missing or
// does not decode as an influxdb ID. Wrapping the decode failure as
// EInvalid matches decodePatchViewRequest, so a malformed ID consistently
// surfaces as a 400 instead of whatever the raw decode error maps to.
func decodeDeleteViewRequest(ctx context.Context, r *http.Request) (*deleteViewRequest, error) {
	params := httprouter.ParamsFromContext(ctx)
	id := params.ByName("id")
	if id == "" {
		return nil, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg:  "url missing id",
		}
	}

	var i influxdb.ID
	if err := i.DecodeFromString(id); err != nil {
		return nil, &influxdb.Error{
			Code: influxdb.EInvalid,
			Err:  err,
		}
	}

	return &deleteViewRequest{
		ViewID: i,
	}, nil
}
// handlePatchView applies a partial update (name and/or properties) to a
// view and responds with the updated view.
func (h *ViewHandler) handlePatchView(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	request, pe := decodePatchViewRequest(ctx, r)
	if pe != nil {
		EncodeError(ctx, pe, w)
		return
	}

	updated, err := h.ViewService.UpdateView(ctx, request.ViewID, request.Upd)
	if err != nil {
		EncodeError(ctx, err, w)
		return
	}

	if err := encodeResponse(ctx, w, http.StatusOK, newViewResponse(updated)); err != nil {
		logEncodingError(h.Logger, r, err)
	}
}
// patchViewRequest carries the decoded ID and update payload for a
// patch-view request.
type patchViewRequest struct {
	ViewID influxdb.ID
	Upd    influxdb.ViewUpdate
}
// decodePatchViewRequest parses the JSON update body and the "id" path
// parameter. It returns an EInvalid error for a malformed body, a missing
// or malformed ID, or an otherwise invalid request (see Valid). The body
// is decoded before the ID is examined, so a bad body is reported first.
func decodePatchViewRequest(ctx context.Context, r *http.Request) (*patchViewRequest, *influxdb.Error) {
	var upd influxdb.ViewUpdate
	if err := json.NewDecoder(r.Body).Decode(&upd); err != nil {
		return nil, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg:  err.Error(),
		}
	}

	id := httprouter.ParamsFromContext(ctx).ByName("id")
	if id == "" {
		return nil, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg:  "url missing id",
		}
	}

	var viewID influxdb.ID
	if err := viewID.DecodeFromString(id); err != nil {
		return nil, &influxdb.Error{
			Code: influxdb.EInvalid,
			Err:  err,
		}
	}

	req := &patchViewRequest{ViewID: viewID, Upd: upd}
	if err := req.Valid(); err != nil {
		return nil, &influxdb.Error{
			Err: err,
		}
	}
	return req, nil
}
// Valid reports whether the request targets a non-zero view ID and the
// update payload itself is valid; it returns an EInvalid error otherwise.
func (r *patchViewRequest) Valid() *influxdb.Error {
	if r.ViewID.Valid() {
		return r.Upd.Valid()
	}
	return &influxdb.Error{
		Code: influxdb.EInvalid,
		Msg:  "missing view ID",
	}
}

View File

@ -1,736 +0,0 @@
package http
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/mock"
influxdbtesting "github.com/influxdata/influxdb/testing"
"github.com/julienschmidt/httprouter"
"github.com/yudai/gojsondiff"
"github.com/yudai/gojsondiff/formatter"
"go.uber.org/zap"
)
// NewMockViewBackend returns a ViewBackend with mock services.
func NewMockViewBackend() *ViewBackend {
return &ViewBackend{
Logger: zap.NewNop().With(zap.String("handler", "view")),
ViewService: &mock.ViewService{},
UserService: mock.NewUserService(),
UserResourceMappingService: &mock.UserResourceMappingService{},
LabelService: mock.NewLabelService(),
}
}
func TestService_handleGetViews(t *testing.T) {
type fields struct {
ViewService influxdb.ViewService
}
type args struct {
queryParams map[string][]string
}
type wants struct {
statusCode int
contentType string
body string
}
tests := []struct {
name string
fields fields
args args
wants wants
}{
{
name: "get all views",
fields: fields{
&mock.ViewService{
FindViewsF: func(ctx context.Context, filter influxdb.ViewFilter) ([]*influxdb.View, int, error) {
return []*influxdb.View{
{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("7365637465747572"),
Name: "hello",
},
Properties: influxdb.XYViewProperties{
Type: "xy",
},
},
{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("6167697474697320"),
Name: "example",
},
},
}, 2, nil
},
},
},
args: args{},
wants: wants{
statusCode: http.StatusOK,
contentType: "application/json; charset=utf-8",
body: `
{
"links": {
"self": "/api/v2/views"
},
"views": [
{
"id": "7365637465747572",
"name": "hello",
"links": {
"labels": "/api/v2/views/7365637465747572/labels",
"self": "/api/v2/views/7365637465747572"
},
"properties": {
"shape": "chronograf-v2",
"queries": null,
"axes": null,
"type": "xy",
"colors": null,
"legend": {},
"geom": "",
"note": "",
"showNoteWhenEmpty": false
}
},
{
"id": "6167697474697320",
"name": "example",
"links": {
"labels": "/api/v2/views/6167697474697320/labels",
"self": "/api/v2/views/6167697474697320"
},
"properties": {
"shape": "empty"
}
}
]
}`,
},
},
{
name: "get all views when there are none",
fields: fields{
&mock.ViewService{
FindViewsF: func(ctx context.Context, filter influxdb.ViewFilter) ([]*influxdb.View, int, error) {
return []*influxdb.View{}, 0, nil
},
},
},
args: args{},
wants: wants{
statusCode: http.StatusOK,
contentType: "application/json; charset=utf-8",
body: `
{
"links": {
"self": "/api/v2/views"
},
"views": []
}`,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
qp := r.URL.Query()
for k, vs := range tt.args.queryParams {
for _, v := range vs {
qp.Add(k, v)
}
}
r.URL.RawQuery = qp.Encode()
w := httptest.NewRecorder()
h.handleGetViews(w, r)
res := w.Result()
content := res.Header.Get("Content-Type")
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != tt.wants.statusCode {
t.Errorf("%q. handleGetViews() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode)
}
if tt.wants.contentType != "" && content != tt.wants.contentType {
t.Errorf("%q. handleGetViews() = %v, want %v", tt.name, content, tt.wants.contentType)
}
if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
t.Errorf("%q. handleGetViews() = ***%s***", tt.name, diff)
}
})
}
}
func TestService_handleGetView(t *testing.T) {
type fields struct {
ViewService influxdb.ViewService
}
type args struct {
id string
}
type wants struct {
statusCode int
contentType string
body string
}
tests := []struct {
name string
fields fields
args args
wants wants
}{
{
name: "get a view by id",
fields: fields{
&mock.ViewService{
FindViewByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.View, error) {
return &influxdb.View{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("020f755c3c082000"),
Name: "example",
},
}, nil
},
},
},
args: args{
id: "020f755c3c082000",
},
wants: wants{
statusCode: http.StatusOK,
contentType: "application/json; charset=utf-8",
body: `
{
"id": "020f755c3c082000",
"name": "example",
"links": {
"labels": "/api/v2/views/020f755c3c082000/labels",
"self": "/api/v2/views/020f755c3c082000"
},
"properties": {
"shape": "empty"
}
}
`,
},
},
{
name: "not found",
fields: fields{
&mock.ViewService{
FindViewByIDF: func(ctx context.Context, id influxdb.ID) (*influxdb.View, error) {
return nil, &influxdb.Error{
Code: influxdb.ENotFound,
Msg: influxdb.ErrViewNotFound,
}
},
},
},
args: args{
id: "020f755c3c082000",
},
wants: wants{
statusCode: http.StatusNotFound,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
r = r.WithContext(context.WithValue(
context.TODO(),
httprouter.ParamsKey,
httprouter.Params{
{
Key: "id",
Value: tt.args.id,
},
}))
w := httptest.NewRecorder()
h.handleGetView(w, r)
res := w.Result()
content := res.Header.Get("Content-Type")
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != tt.wants.statusCode {
t.Errorf("%q. handleGetView() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode)
}
if tt.wants.contentType != "" && content != tt.wants.contentType {
t.Errorf("%q. handleGetView() = %v, want %v", tt.name, content, tt.wants.contentType)
}
if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
t.Errorf("%q. handleGetView() = ***%s***", tt.name, diff)
}
})
}
}
func TestService_handlePostViews(t *testing.T) {
type fields struct {
ViewService influxdb.ViewService
}
type args struct {
view *influxdb.View
}
type wants struct {
statusCode int
contentType string
body string
}
tests := []struct {
name string
fields fields
args args
wants wants
}{
{
name: "create a new view",
fields: fields{
&mock.ViewService{
CreateViewF: func(ctx context.Context, c *influxdb.View) error {
c.ID = influxdbtesting.MustIDBase16("020f755c3c082000")
return nil
},
},
},
args: args{
view: &influxdb.View{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("020f755c3c082000"),
Name: "hello",
},
Properties: influxdb.XYViewProperties{
Type: "xy",
},
},
},
wants: wants{
statusCode: http.StatusCreated,
contentType: "application/json; charset=utf-8",
body: `
{
"id": "020f755c3c082000",
"name": "hello",
"links": {
"labels": "/api/v2/views/020f755c3c082000/labels",
"self": "/api/v2/views/020f755c3c082000"
},
"properties": {
"shape": "chronograf-v2",
"queries": null,
"axes": null,
"type": "xy",
"colors": null,
"legend": {},
"geom": "",
"note": "",
"showNoteWhenEmpty": false
}
}
`,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
b, err := json.Marshal(tt.args.view)
if err != nil {
t.Fatalf("failed to unmarshal view: %v", err)
}
r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b))
w := httptest.NewRecorder()
h.handlePostViews(w, r)
res := w.Result()
content := res.Header.Get("Content-Type")
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != tt.wants.statusCode {
t.Errorf("%q. handlePostViews() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode)
}
if tt.wants.contentType != "" && content != tt.wants.contentType {
t.Errorf("%q. handlePostViews() = %v, want %v", tt.name, content, tt.wants.contentType)
}
if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
t.Errorf("%q. handlePostViews() = ***%s***", tt.name, diff)
}
})
}
}
func TestService_handleDeleteView(t *testing.T) {
type fields struct {
ViewService influxdb.ViewService
}
type args struct {
id string
}
type wants struct {
statusCode int
contentType string
body string
}
tests := []struct {
name string
fields fields
args args
wants wants
}{
{
name: "remove a view by id",
fields: fields{
&mock.ViewService{
DeleteViewF: func(ctx context.Context, id influxdb.ID) error {
if id == influxdbtesting.MustIDBase16("020f755c3c082000") {
return nil
}
return fmt.Errorf("wrong id")
},
},
},
args: args{
id: "020f755c3c082000",
},
wants: wants{
statusCode: http.StatusNoContent,
},
},
{
name: "view not found",
fields: fields{
&mock.ViewService{
DeleteViewF: func(ctx context.Context, id influxdb.ID) error {
return &influxdb.Error{
Code: influxdb.ENotFound,
Msg: influxdb.ErrViewNotFound,
}
},
},
},
args: args{
id: "020f755c3c082000",
},
wants: wants{
statusCode: http.StatusNotFound,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
r := httptest.NewRequest("GET", "http://any.url", nil)
r = r.WithContext(context.WithValue(
context.TODO(),
httprouter.ParamsKey,
httprouter.Params{
{
Key: "id",
Value: tt.args.id,
},
}))
w := httptest.NewRecorder()
h.handleDeleteView(w, r)
res := w.Result()
content := res.Header.Get("Content-Type")
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != tt.wants.statusCode {
t.Errorf("%q. handleDeleteView() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode)
}
if tt.wants.contentType != "" && content != tt.wants.contentType {
t.Errorf("%q. handleDeleteView() = %v, want %v", tt.name, content, tt.wants.contentType)
}
if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
t.Errorf("%q. handleDeleteView() = ***%s***", tt.name, diff)
}
})
}
}
func TestService_handlePatchView(t *testing.T) {
type fields struct {
ViewService influxdb.ViewService
}
type args struct {
id string
name string
properties influxdb.ViewProperties
}
type wants struct {
statusCode int
contentType string
body string
}
tests := []struct {
name string
fields fields
args args
wants wants
}{
{
name: "update a view",
fields: fields{
&mock.ViewService{
UpdateViewF: func(ctx context.Context, id influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) {
if id == influxdbtesting.MustIDBase16("020f755c3c082000") {
return &influxdb.View{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("020f755c3c082000"),
Name: "example",
},
Properties: influxdb.XYViewProperties{
Type: "xy",
},
}, nil
}
return nil, fmt.Errorf("not found")
},
},
},
args: args{
id: "020f755c3c082000",
name: "example",
},
wants: wants{
statusCode: http.StatusOK,
contentType: "application/json; charset=utf-8",
body: `
{
"id": "020f755c3c082000",
"name": "example",
"links": {
"labels": "/api/v2/views/020f755c3c082000/labels",
"self": "/api/v2/views/020f755c3c082000"
},
"properties": {
"shape": "chronograf-v2",
"queries": null,
"axes": null,
"type": "xy",
"colors": null,
"legend": {},
"geom": "",
"note": "",
"showNoteWhenEmpty": false
}
}
`,
},
},
{
name: "update a view with empty request body",
fields: fields{
&mock.ViewService{
UpdateViewF: func(ctx context.Context, id influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) {
if id == influxdbtesting.MustIDBase16("020f755c3c082000") {
return &influxdb.View{
ViewContents: influxdb.ViewContents{
ID: influxdbtesting.MustIDBase16("020f755c3c082000"),
Name: "example",
},
Properties: influxdb.XYViewProperties{
Type: "xy",
},
}, nil
}
return nil, fmt.Errorf("not found")
},
},
},
args: args{
id: "020f755c3c082000",
},
wants: wants{
statusCode: http.StatusBadRequest,
},
},
{
name: "view not found",
fields: fields{
&mock.ViewService{
UpdateViewF: func(ctx context.Context, id influxdb.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) {
return nil, &influxdb.Error{
Code: influxdb.ENotFound,
Msg: influxdb.ErrViewNotFound,
}
},
},
},
args: args{
id: "020f755c3c082000",
name: "hello",
},
wants: wants{
statusCode: http.StatusNotFound,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
viewBackend := NewMockViewBackend()
viewBackend.ViewService = tt.fields.ViewService
h := NewViewHandler(viewBackend)
upd := influxdb.ViewUpdate{}
if tt.args.name != "" {
upd.Name = &tt.args.name
}
if tt.args.properties != nil {
upd.Properties = tt.args.properties
}
b, err := json.Marshal(upd)
if err != nil {
t.Fatalf("failed to unmarshal view update: %v", err)
}
r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b))
r = r.WithContext(context.WithValue(
context.TODO(),
httprouter.ParamsKey,
httprouter.Params{
{
Key: "id",
Value: tt.args.id,
},
}))
w := httptest.NewRecorder()
h.handlePatchView(w, r)
res := w.Result()
content := res.Header.Get("Content-Type")
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != tt.wants.statusCode {
t.Errorf("%q. handlePatchView() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode)
}
if tt.wants.contentType != "" && content != tt.wants.contentType {
t.Errorf("%q. handlePatchView() = %v, want %v", tt.name, content, tt.wants.contentType)
}
if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
t.Errorf("%q. handlePatchView() = ***%s***", tt.name, diff)
}
})
}
}
func jsonEqual(s1, s2 string) (eq bool, diff string, err error) {
var o1, o2 interface{}
if s1 == s2 {
return true, "", nil
}
if s1 == "" {
return false, s2, fmt.Errorf("s1 is empty")
}
if s2 == "" {
return false, s1, fmt.Errorf("s2 is empty")
}
if err = json.Unmarshal([]byte(s1), &o1); err != nil {
return
}
if err = json.Unmarshal([]byte(s2), &o2); err != nil {
return
}
differ := gojsondiff.New()
d, err := differ.Compare([]byte(s1), []byte(s2))
if err != nil {
return
}
config := formatter.AsciiFormatterConfig{}
formatter := formatter.NewAsciiFormatter(o1, config)
diff, err = formatter.Format(d)
return cmp.Equal(o1, o2), diff, err
}
/* todo
func initViewService(f influxdbtesting.ViewFields, t *testing.T) (influxdb.ViewService, func()) {
t.Helper()
svc := inmem.NewService()
svc.IDGenerator = f.IDGenerator
ctx := context.Background()
for _, b := range f.Views {
if err := s.PutView(ctx, b); err != nil {
t.Fatalf("failed to populate Views")
}
}
handler := NewViewHandler()
handler.ViewService = svc
server := httptest.NewServer(handler)
client := ViewService{
Addr: server.URL,
}
done := server.Close
return &client, done
}
func TestViewService(t *testing.T) {
influxdbtesting.ViewService(initViewService, t)
}
*/

View File

@ -192,9 +192,9 @@ func (s *Service) DeleteAuthorization(ctx context.Context, id platform.ID) error
return nil
}
// SetAuthorizationStatus updates the status of an authorization associated with id.
func (s *Service) SetAuthorizationStatus(ctx context.Context, id platform.ID, status platform.Status) error {
op := OpPrefix + platform.OpSetAuthorizationStatus
// UpdateAuthorization updates the status and description if available.
func (s *Service) UpdateAuthorization(ctx context.Context, id platform.ID, upd *platform.AuthorizationUpdate) error {
op := OpPrefix + platform.OpUpdateAuthorization
a, err := s.FindAuthorizationByID(ctx, id)
if err != nil {
return &platform.Error{
@ -203,6 +203,8 @@ func (s *Service) SetAuthorizationStatus(ctx context.Context, id platform.ID, st
}
}
if upd.Status != nil {
status := *upd.Status
switch status {
case platform.Active, platform.Inactive:
default:
@ -212,11 +214,12 @@ func (s *Service) SetAuthorizationStatus(ctx context.Context, id platform.ID, st
Op: op,
}
}
if a.Status == status {
return nil
a.Status = status
}
if upd.Description != nil {
a.Description = *upd.Description
}
a.Status = status
return s.PutAuthorization(ctx, a)
}

View File

@ -303,7 +303,10 @@ func (s *Service) RemoveDashboardCell(ctx context.Context, dashboardID platform.
}
if err := s.DeleteView(ctx, d.Cells[idx].ID); err != nil {
return err
return &platform.Error{
Err: err,
Op: op,
}
}
d.Cells = append(d.Cells[:idx], d.Cells[idx+1:]...)
@ -442,3 +445,50 @@ func (s *Service) UpdateDashboardCellView(ctx context.Context, dashboardID, cell
return v, nil
}
func (s *Service) loadView(ctx context.Context, id platform.ID) (*platform.View, *platform.Error) {
i, ok := s.viewKV.Load(id.String())
if !ok {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: "view not found",
}
}
d, ok := i.(*platform.View)
if !ok {
return nil, &platform.Error{
Code: platform.EInvalid,
Msg: fmt.Sprintf("type %T is not a view", i),
}
}
return d, nil
}
// FindViewByID returns a single view by ID.
func (s *Service) FindViewByID(ctx context.Context, id platform.ID) (*platform.View, error) {
v, pe := s.loadView(ctx, id)
if pe != nil {
return nil, pe
}
return v, nil
}
// PutView sets view with the current ID.
func (s *Service) PutView(ctx context.Context, c *platform.View) error {
if c.Properties == nil {
c.Properties = platform.EmptyViewProperties{}
}
s.viewKV.Store(c.ID.String(), c)
return nil
}
// DeleteView removes a view by ID.
func (s *Service) DeleteView(ctx context.Context, id platform.ID) error {
if _, err := s.FindViewByID(ctx, id); err != nil {
return err
}
s.viewKV.Delete(id.String())
return nil
}

View File

@ -18,11 +18,6 @@ func initDashboardService(f platformtesting.DashboardFields, t *testing.T) (plat
t.Fatalf("failed to populate Dashboards")
}
}
for _, b := range f.Views {
if err := s.PutView(ctx, b); err != nil {
t.Fatalf("failed to populate views")
}
}
return s, OpPrefix, func() {}
}

View File

@ -125,7 +125,7 @@ func (s *Service) UpdateSource(ctx context.Context, id platform.ID, upd platform
if err != nil {
return nil, &platform.Error{
Err: err,
Op: OpPrefix + platform.OpUpdateView,
Op: OpPrefix + platform.OpUpdateSource,
}
}
@ -139,7 +139,7 @@ func (s *Service) DeleteSource(ctx context.Context, id platform.ID) error {
if _, err := s.FindSourceByID(ctx, id); err != nil {
return &platform.Error{
Err: err,
Op: OpPrefix + platform.OpDeleteView,
Op: OpPrefix + platform.OpDeleteSource,
}
}
s.sourceKV.Delete(id.String())

View File

@ -1,139 +0,0 @@
package inmem
import (
"context"
"fmt"
platform "github.com/influxdata/influxdb"
)
func (s *Service) loadView(ctx context.Context, id platform.ID) (*platform.View, *platform.Error) {
i, ok := s.viewKV.Load(id.String())
if !ok {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: "view not found",
}
}
d, ok := i.(*platform.View)
if !ok {
return nil, &platform.Error{
Code: platform.EInvalid,
Msg: fmt.Sprintf("type %T is not a view", i),
}
}
return d, nil
}
// FindViewByID returns a single view by ID.
func (s *Service) FindViewByID(ctx context.Context, id platform.ID) (*platform.View, error) {
v, pe := s.loadView(ctx, id)
if pe != nil {
return nil, &platform.Error{
Err: pe,
Op: OpPrefix + platform.OpFindViewByID,
}
}
return v, nil
}
func filterViewFn(filter platform.ViewFilter) func(d *platform.View) bool {
if filter.ID != nil {
return func(d *platform.View) bool {
return d.ID == *filter.ID
}
}
return func(d *platform.View) bool { return true }
}
// FindViews implements platform.ViewService interface.
func (s *Service) FindViews(ctx context.Context, filter platform.ViewFilter) ([]*platform.View, int, error) {
var ds []*platform.View
if filter.ID != nil {
d, err := s.FindViewByID(ctx, *filter.ID)
if err != nil && platform.ErrorCode(err) != platform.ENotFound {
return nil, 0, &platform.Error{
Err: err,
Op: OpPrefix + platform.OpFindViews,
}
}
if d != nil {
ds = append(ds, d)
}
return ds, len(ds), nil
}
var err error
filterF := filterViewFn(filter)
s.viewKV.Range(func(k, v interface{}) bool {
d, ok := v.(*platform.View)
if !ok {
return false
}
if filterF(d) {
ds = append(ds, d)
}
return true
})
return ds, len(ds), err
}
// CreateView implements platform.ViewService interface.
func (s *Service) CreateView(ctx context.Context, c *platform.View) error {
c.ID = s.IDGenerator.ID()
if err := s.PutView(ctx, c); err != nil {
return &platform.Error{
Err: err,
Op: OpPrefix + platform.OpCreateView,
}
}
return nil
}
// PutView implements platform.ViewService interface.
func (s *Service) PutView(ctx context.Context, c *platform.View) error {
if c.Properties == nil {
c.Properties = platform.EmptyViewProperties{}
}
s.viewKV.Store(c.ID.String(), c)
return nil
}
// UpdateView implements platform.ViewService interface.
func (s *Service) UpdateView(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
c, err := s.FindViewByID(ctx, id)
if err != nil {
return nil, &platform.Error{
Err: err,
Op: OpPrefix + platform.OpUpdateView,
}
}
if upd.Name != nil {
c.Name = *upd.Name
}
if upd.Properties != nil {
c.Properties = upd.Properties
}
s.viewKV.Store(c.ID.String(), c)
return c, nil
}
// DeleteView implements platform.ViewService interface.
func (s *Service) DeleteView(ctx context.Context, id platform.ID) error {
if _, err := s.FindViewByID(ctx, id); err != nil {
return &platform.Error{
Err: err,
Op: OpPrefix + platform.OpDeleteView,
}
}
s.viewKV.Delete(id.String())
return nil
}

View File

@ -1,40 +0,0 @@
package inmem
import (
"context"
"testing"
platform "github.com/influxdata/influxdb"
platformtesting "github.com/influxdata/influxdb/testing"
)
func initViewService(f platformtesting.ViewFields, t *testing.T) (platform.ViewService, string, func()) {
s := NewService()
s.IDGenerator = f.IDGenerator
ctx := context.TODO()
for _, b := range f.Views {
if err := s.PutView(ctx, b); err != nil {
t.Fatalf("failed to populate Views")
}
}
return s, OpPrefix, func() {}
}
func TestViewService_CreateView(t *testing.T) {
platformtesting.CreateView(initViewService, t)
}
func TestViewService_FindViewByID(t *testing.T) {
platformtesting.FindViewByID(initViewService, t)
}
func TestViewService_FindViews(t *testing.T) {
platformtesting.FindViews(initViewService, t)
}
func TestViewService_DeleteView(t *testing.T) {
platformtesting.DeleteView(initViewService, t)
}
func TestViewService_UpdateView(t *testing.T) {
platformtesting.UpdateView(initViewService, t)
}

View File

@ -416,17 +416,32 @@ func (s *Service) deleteAuthorization(ctx context.Context, tx Tx, id influxdb.ID
// for setting an authorization to inactive or active.
func (s *Service) SetAuthorizationStatus(ctx context.Context, id influxdb.ID, status influxdb.Status) error {
return s.kv.Update(ctx, func(tx Tx) error {
return s.updateAuthorization(ctx, tx, id, status)
return s.updateAuthorization(ctx, tx, id, &influxdb.AuthorizationUpdate{
Status: &status,
})
})
}
func (s *Service) updateAuthorization(ctx context.Context, tx Tx, id influxdb.ID, status influxdb.Status) error {
// UpdateAuthorization updates the status and description if available.
func (s *Service) UpdateAuthorization(ctx context.Context, id influxdb.ID, upd *influxdb.AuthorizationUpdate) error {
return s.kv.Update(ctx, func(tx Tx) error {
return s.updateAuthorization(ctx, tx, id, upd)
})
}
func (s *Service) updateAuthorization(ctx context.Context, tx Tx, id influxdb.ID, upd *influxdb.AuthorizationUpdate) error {
a, err := s.findAuthorizationByID(ctx, tx, id)
if err != nil {
return err
}
a.Status = status
if upd.Status != nil {
a.Status = *upd.Status
}
if upd.Description != nil {
a.Description = *upd.Description
}
v, err := encodeAuthorization(a)
if err != nil {
return &influxdb.Error{

View File

@ -168,7 +168,13 @@ func (i *DocumentIndex) FindLabelByName(name string) (influxdb.ID, error) {
if err != nil {
return influxdb.InvalidID(), err
}
if len(ls) != 1 {
if len(ls) == 0 {
return influxdb.InvalidID(), &influxdb.Error{
Code: influxdb.ENotFound,
Msg: "label not found",
}
}
if len(ls) > 1 {
return influxdb.InvalidID(), &influxdb.Error{
Code: influxdb.EInternal,
Msg: "found multiple labels matching the name provided",

View File

@ -21,7 +21,7 @@ type AuthorizationService struct {
FindAuthorizationsFn func(context.Context, platform.AuthorizationFilter, ...platform.FindOptions) ([]*platform.Authorization, int, error)
CreateAuthorizationFn func(context.Context, *platform.Authorization) error
DeleteAuthorizationFn func(context.Context, platform.ID) error
SetAuthorizationStatusFn func(context.Context, platform.ID, platform.Status) error
UpdateAuthorizationFn func(context.Context, platform.ID, *platform.AuthorizationUpdate) error
}
// NewAuthorizationService returns a mock AuthorizationService where its methods will return
@ -35,7 +35,7 @@ func NewAuthorizationService() *AuthorizationService {
},
CreateAuthorizationFn: func(context.Context, *platform.Authorization) error { return nil },
DeleteAuthorizationFn: func(context.Context, platform.ID) error { return nil },
SetAuthorizationStatusFn: func(context.Context, platform.ID, platform.Status) error { return nil },
UpdateAuthorizationFn: func(context.Context, platform.ID, *platform.AuthorizationUpdate) error { return nil },
}
}
@ -63,6 +63,7 @@ func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id platf
return s.DeleteAuthorizationFn(ctx, id)
}
func (s *AuthorizationService) SetAuthorizationStatus(ctx context.Context, id platform.ID, status platform.Status) error {
return s.SetAuthorizationStatusFn(ctx, id, status)
// UpdateAuthorization updates the status and description if available.
func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *platform.AuthorizationUpdate) error {
return s.UpdateAuthorizationFn(ctx, id, upd)
}

View File

@ -1,37 +0,0 @@
package mock
import (
"context"
platform "github.com/influxdata/influxdb"
)
var _ platform.ViewService = &ViewService{}
type ViewService struct {
CreateViewF func(context.Context, *platform.View) error
FindViewByIDF func(context.Context, platform.ID) (*platform.View, error)
FindViewsF func(context.Context, platform.ViewFilter) ([]*platform.View, int, error)
UpdateViewF func(context.Context, platform.ID, platform.ViewUpdate) (*platform.View, error)
DeleteViewF func(context.Context, platform.ID) error
}
func (s *ViewService) FindViewByID(ctx context.Context, id platform.ID) (*platform.View, error) {
return s.FindViewByIDF(ctx, id)
}
func (s *ViewService) FindViews(ctx context.Context, filter platform.ViewFilter) ([]*platform.View, int, error) {
return s.FindViewsF(ctx, filter)
}
func (s *ViewService) CreateView(ctx context.Context, b *platform.View) error {
return s.CreateViewF(ctx, b)
}
func (s *ViewService) UpdateView(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
return s.UpdateViewF(ctx, id, upd)
}
func (s *ViewService) DeleteView(ctx context.Context, id platform.ID) error {
return s.DeleteViewF(ctx, id)
}

View File

@ -2,7 +2,9 @@
// Feel free to add more pointerification functions for more types as you need them.
package pointer
import "time"
import (
"time"
)
// Duration returns a pointer to its argument.
func Duration(d time.Duration) *time.Duration {

View File

@ -109,9 +109,8 @@ func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id platf
return s.AuthorizationService.DeleteAuthorization(ctx, id)
}
// SetAuthorizationStatus updates the status of the authorization. Useful
// for setting an authorization to inactive or active.
func (s *AuthorizationService) SetAuthorizationStatus(ctx context.Context, id platform.ID, status platform.Status) (err error) {
// UpdateAuthorization updates the status and description.
func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *platform.AuthorizationUpdate) (err error) {
defer func(start time.Time) {
labels := prometheus.Labels{
"method": "setAuthorizationStatus",
@ -121,7 +120,7 @@ func (s *AuthorizationService) SetAuthorizationStatus(ctx context.Context, id pl
s.requestDuration.With(labels).Observe(time.Since(start).Seconds())
}(time.Now())
return s.AuthorizationService.SetAuthorizationStatus(ctx, id, status)
return s.AuthorizationService.UpdateAuthorization(ctx, id, upd)
}
// PrometheusCollectors returns all authorization service prometheus collectors.

View File

@ -38,7 +38,7 @@ func (a *authzSvc) DeleteAuthorization(context.Context, platform.ID) error {
return a.Err
}
func (a *authzSvc) SetAuthorizationStatus(context.Context, platform.ID, platform.Status) error {
func (a *authzSvc) UpdateAuthorization(context.Context, platform.ID, *platform.AuthorizationUpdate) error {
return a.Err
}

View File

@ -4,11 +4,6 @@ import (
"bufio"
"bytes"
"context"
"io"
"io/ioutil"
nethttp "net/http"
"os"
"path/filepath"
"strings"
"testing"
@ -20,9 +15,7 @@ import (
"github.com/influxdata/flux/stdlib"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/bolt"
"github.com/influxdata/influxdb/cmd/influxd/launcher"
"github.com/influxdata/influxdb/http"
"github.com/influxdata/influxdb/query"
_ "github.com/influxdata/flux/stdlib" // Import the built-in functions
@ -147,7 +140,7 @@ func BenchmarkFluxEndToEnd(b *testing.B) {
}
func runEndToEnd(t *testing.T, pkgs []*ast.Package) {
l := RunMainOrFail(t, ctx)
l := launcher.RunTestLauncherOrFail(t, ctx)
l.SetupOrFail(t)
defer l.ShutdownOrFail(t, ctx)
for _, pkg := range pkgs {
@ -163,7 +156,7 @@ func runEndToEnd(t *testing.T, pkgs []*ast.Package) {
}
func benchEndToEnd(b *testing.B, pkgs []*ast.Package) {
l := RunMainOrFail(b, ctx)
l := launcher.RunTestLauncherOrFail(b, ctx)
l.SetupOrFail(b)
defer l.ShutdownOrFail(b, ctx)
for _, pkg := range pkgs {
@ -203,7 +196,7 @@ func init() {
optionsAST = pkg.Files[0]
}
func testFlux(t testing.TB, l *Launcher, pkg *ast.Package) {
func testFlux(t testing.TB, l *launcher.TestLauncher, pkg *ast.Package) {
// Query server to ensure write persists.
@ -245,7 +238,7 @@ func testFlux(t testing.TB, l *Launcher, pkg *ast.Package) {
OrganizationID: l.Org.ID,
Compiler: lang.ASTCompiler{AST: pkg},
}
if r, err := l.FluxService().Query(ctx, req); err != nil {
if r, err := l.FluxQueryService().Query(ctx, req); err != nil {
t.Fatal(err)
} else {
for r.More() {
@ -264,7 +257,7 @@ func testFlux(t testing.TB, l *Launcher, pkg *ast.Package) {
// this time we use a call to `run` so that the assertion error is triggered
runCalls := stdlib.TestingRunCalls(pkg)
pkg.Files[len(pkg.Files)-1] = runCalls
r, err := l.FluxService().Query(ctx, req)
r, err := l.FluxQueryService().Query(ctx, req)
if err != nil {
t.Fatal(err)
}
@ -281,7 +274,7 @@ func testFlux(t testing.TB, l *Launcher, pkg *ast.Package) {
t.Error(err)
// Replace the testing.run calls with testing.inspect calls.
pkg.Files[len(pkg.Files)-1] = inspectCalls
r, err := l.FluxService().Query(ctx, req)
r, err := l.FluxQueryService().Query(ctx, req)
if err != nil {
t.Fatal(err)
}
@ -306,118 +299,3 @@ func testFlux(t testing.TB, l *Launcher, pkg *ast.Package) {
}
}
}
// Launcher is a test wrapper for main.Launcher.
type Launcher struct {
*launcher.Launcher
// Root temporary directory for all data.
Path string
// Initialized after calling the Setup() helper.
User *platform.User
Org *platform.Organization
Bucket *platform.Bucket
Auth *platform.Authorization
// Standard in/out/err buffers.
Stdin bytes.Buffer
Stdout bytes.Buffer
Stderr bytes.Buffer
}
// NewLauncher returns a new instance of Launcher.
func NewLauncher() *Launcher {
l := &Launcher{Launcher: launcher.NewLauncher()}
l.Launcher.Stdin = &l.Stdin
l.Launcher.Stdout = &l.Stdout
l.Launcher.Stderr = &l.Stderr
if testing.Verbose() {
l.Launcher.Stdout = io.MultiWriter(l.Launcher.Stdout, os.Stdout)
l.Launcher.Stderr = io.MultiWriter(l.Launcher.Stderr, os.Stderr)
}
path, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
l.Path = path
return l
}
// RunMainOrFail initializes and starts the server.
func RunMainOrFail(tb testing.TB, ctx context.Context, args ...string) *Launcher {
tb.Helper()
l := NewLauncher()
if err := l.Run(ctx, args...); err != nil {
tb.Fatal(err)
}
return l
}
// Run executes the program with additional arguments to set paths and ports.
func (l *Launcher) Run(ctx context.Context, args ...string) error {
args = append(args, "--bolt-path", filepath.Join(l.Path, "influxd.bolt"))
args = append(args, "--protos-path", filepath.Join(l.Path, "protos"))
args = append(args, "--engine-path", filepath.Join(l.Path, "engine"))
args = append(args, "--http-bind-address", "127.0.0.1:0")
args = append(args, "--log-level", "debug")
return l.Launcher.Run(ctx, args...)
}
// Shutdown stops the program and cleans up temporary paths.
func (l *Launcher) Shutdown(ctx context.Context) error {
l.Cancel()
l.Launcher.Shutdown(ctx)
return os.RemoveAll(l.Path)
}
// ShutdownOrFail stops the program and cleans up temporary paths. Fail on error.
func (l *Launcher) ShutdownOrFail(tb testing.TB, ctx context.Context) {
tb.Helper()
if err := l.Shutdown(ctx); err != nil {
tb.Fatal(err)
}
}
// SetupOrFail creates a new user, bucket, org, and auth token. Fail on error.
func (l *Launcher) SetupOrFail(tb testing.TB) {
svc := &http.SetupService{Addr: l.URL()}
results, err := svc.Generate(ctx, &platform.OnboardingRequest{
User: "USER",
Password: "PASSWORD",
Org: "ORG",
Bucket: "BUCKET",
})
if err != nil {
tb.Fatal(err)
}
l.User = results.User
l.Org = results.Org
l.Bucket = results.Bucket
l.Auth = results.Auth
}
func (l *Launcher) FluxService() *http.FluxQueryService {
return &http.FluxQueryService{Addr: l.URL(), Token: l.Auth.Token}
}
func (l *Launcher) BucketService() *http.BucketService {
return &http.BucketService{
Addr: l.URL(),
Token: l.Auth.Token,
OpPrefix: bolt.OpPrefix,
}
}
// MustNewHTTPRequest returns a new nethttp.Request with base URL and auth attached. Fail on error.
func (l *Launcher) MustNewHTTPRequest(method, rawurl, body string) *nethttp.Request {
req, err := nethttp.NewRequest(method, l.URL()+rawurl, strings.NewReader(body))
if err != nil {
panic(err)
}
req.Header.Set("Authorization", "Token "+l.Auth.Token)
return req
}

View File

@ -24,3 +24,8 @@ func (s Status) Valid() error {
}
}
}
// Ptr returns the pointer of that status.
func (s Status) Ptr() *Status {
return &s
}

59
task.go
View File

@ -6,9 +6,7 @@ import (
"errors"
"fmt"
"strconv"
"time"
"github.com/influxdata/flux"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/ast/edit"
"github.com/influxdata/flux/parser"
@ -40,6 +38,21 @@ type Task struct {
UpdatedAt string `json:"updatedAt,omitempty"`
}
// EffectiveCron returns the effective cron string of the options.
// If the cron option was specified, it is returned.
// If the every option was specified, it is converted into a cron string using "@every".
// Otherwise, the empty string is returned.
// The value of the offset option is not considered.
func (t *Task) EffectiveCron() string {
if t.Cron != "" {
return t.Cron
}
if t.Every != "" {
return "@every " + t.Every
}
return ""
}
// Run is a record created when a run of a task is scheduled.
type Run struct {
ID ID `json:"id,omitempty"`
@ -145,11 +158,11 @@ func (t *TaskUpdate) UnmarshalJSON(data []byte) error {
// Every represents a fixed period to repeat execution.
// It gets marshalled from a string duration, i.e.: "10s" is 10 seconds
Every flux.Duration `json:"every,omitempty"`
Every options.Duration `json:"every,omitempty"`
// Offset represents a delay before execution.
// It gets marshalled from a string duration, i.e.: "10s" is 10 seconds
Offset *flux.Duration `json:"offset,omitempty"`
Offset *options.Duration `json:"offset,omitempty"`
Concurrency *int64 `json:"concurrency,omitempty"`
@ -163,9 +176,9 @@ func (t *TaskUpdate) UnmarshalJSON(data []byte) error {
}
t.Options.Name = jo.Name
t.Options.Cron = jo.Cron
t.Options.Every = time.Duration(jo.Every)
t.Options.Every = jo.Every
if jo.Offset != nil {
offset := time.Duration(*jo.Offset)
offset := *jo.Offset
t.Options.Offset = &offset
}
t.Options.Concurrency = jo.Concurrency
@ -187,10 +200,10 @@ func (t TaskUpdate) MarshalJSON() ([]byte, error) {
Cron string `json:"cron,omitempty"`
// Every represents a fixed period to repeat execution.
Every flux.Duration `json:"every,omitempty"`
Every options.Duration `json:"every,omitempty"`
// Offset represents a delay before execution.
Offset *flux.Duration `json:"offset,omitempty"`
Offset *options.Duration `json:"offset,omitempty"`
Concurrency *int64 `json:"concurrency,omitempty"`
@ -200,9 +213,9 @@ func (t TaskUpdate) MarshalJSON() ([]byte, error) {
}{}
jo.Name = t.Options.Name
jo.Cron = t.Options.Cron
jo.Every = flux.Duration(t.Options.Every)
jo.Every = t.Options.Every
if t.Options.Offset != nil {
offset := flux.Duration(*t.Options.Offset)
offset := *t.Options.Offset
jo.Offset = &offset
}
jo.Concurrency = t.Options.Concurrency
@ -215,7 +228,7 @@ func (t TaskUpdate) MarshalJSON() ([]byte, error) {
func (t TaskUpdate) Validate() error {
switch {
case t.Options.Every != 0 && t.Options.Cron != "":
case !t.Options.Every.IsZero() && t.Options.Cron != "":
return errors.New("cannot specify both every and cron")
case t.Flux == nil && t.Status == nil && t.Options.IsZero() && t.Token == "":
return errors.New("cannot update task without content")
@ -237,25 +250,23 @@ func (t *TaskUpdate) UpdateFlux(oldFlux string) error {
return ast.GetError(parsedPKG)
}
parsed := parsedPKG.Files[0]
if t.Options.Every != 0 && t.Options.Cron != "" {
return errors.New("cannot specify both every and cron")
if !t.Options.Every.IsZero() && t.Options.Cron != "" {
return errors.New("cannot specify both cron and every")
}
op := make(map[string]ast.Expression, 4)
if t.Options.Name != "" {
op["name"] = &ast.StringLiteral{Value: t.Options.Name}
}
if t.Options.Every != 0 {
d := ast.Duration{Magnitude: int64(t.Options.Every), Unit: "ns"}
op["every"] = &ast.DurationLiteral{Values: []ast.Duration{d}}
if !t.Options.Every.IsZero() {
op["every"] = &t.Options.Every.Node
}
if t.Options.Cron != "" {
op["cron"] = &ast.StringLiteral{Value: t.Options.Cron}
}
if t.Options.Offset != nil {
if *t.Options.Offset != 0 {
d := ast.Duration{Magnitude: int64(*t.Options.Offset), Unit: "ns"}
op["offset"] = &ast.DurationLiteral{Values: []ast.Duration{d}}
if !t.Options.Offset.IsZero() {
op["offset"] = &t.Options.Offset.Node
} else {
toDelete["offset"] = struct{}{}
}
@ -285,12 +296,12 @@ func (t *TaskUpdate) UpdateFlux(oldFlux string) error {
case "offset":
if offset, ok := op["offset"]; ok && t.Options.Offset != nil {
delete(op, "offset")
p.Value = offset
p.Value = offset.Copy().(*ast.DurationLiteral)
}
case "every":
if every, ok := op["every"]; ok && t.Options.Every != 0 {
if every, ok := op["every"]; ok && !t.Options.Every.IsZero() {
p.Value = every.Copy().(*ast.DurationLiteral)
delete(op, "every")
p.Value = every
} else if cron, ok := op["cron"]; ok && t.Options.Cron != "" {
delete(op, "cron")
p.Value = cron
@ -300,10 +311,10 @@ func (t *TaskUpdate) UpdateFlux(oldFlux string) error {
if cron, ok := op["cron"]; ok && t.Options.Cron != "" {
delete(op, "cron")
p.Value = cron
} else if every, ok := op["every"]; ok && t.Options.Every != 0 {
} else if every, ok := op["every"]; ok && !t.Options.Every.IsZero() {
delete(op, "every")
p.Key = &ast.Identifier{Name: "every"}
p.Value = every
p.Value = every.Copy().(*ast.DurationLiteral)
}
}
}

View File

@ -58,8 +58,13 @@ func (c *Coordinator) claimExistingTasks() {
continue
}
t := task // Copy to avoid mistaken closure around task value.
if err := c.sch.ClaimTask(&t.Task, &t.Meta); err != nil {
t, err := backend.ToInfluxTask(&task.Task, &task.Meta)
if err != nil {
continue
}
// I may need a context with an auth here
if err := c.sch.ClaimTask(context.Background(), t); err != nil {
c.logger.Error("failed claim task", zap.Error(err))
continue
}
@ -84,8 +89,11 @@ func (c *Coordinator) CreateTask(ctx context.Context, req backend.CreateTaskRequ
if err != nil {
return id, err
}
if err := c.sch.ClaimTask(task, meta); err != nil {
t, err := backend.ToInfluxTask(task, meta)
if err != nil {
return id, err
}
if err := c.sch.ClaimTask(ctx, t); err != nil {
_, delErr := c.Store.DeleteTask(ctx, id)
if delErr != nil {
return id, fmt.Errorf("schedule task failed: %s\n\tcleanup also failed: %s", err, delErr)
@ -114,13 +122,18 @@ func (c *Coordinator) UpdateTask(ctx context.Context, req backend.UpdateTaskRequ
}
}
if err := c.sch.UpdateTask(task, meta); err != nil && err != backend.ErrTaskNotClaimed {
t, err := backend.ToInfluxTask(task, meta)
if err != nil {
return res, err
}
if err := c.sch.UpdateTask(ctx, t); err != nil && err != backend.ErrTaskNotClaimed {
return res, err
}
// If enabling the task, claim it after modifying the script.
if req.Status == backend.TaskActive {
if err := c.sch.ClaimTask(task, meta); err != nil && err != backend.ErrTaskAlreadyClaimed {
if err := c.sch.ClaimTask(ctx, t); err != nil && err != backend.ErrTaskAlreadyClaimed {
return res, err
}
}
@ -162,9 +175,15 @@ func (c *Coordinator) ManuallyRunTimeRange(ctx context.Context, taskID platform.
if err != nil {
return r, err
}
t, m, err := c.Store.FindTaskByIDWithMeta(ctx, taskID)
task, meta, err := c.Store.FindTaskByIDWithMeta(ctx, taskID)
if err != nil {
return nil, err
}
return r, c.sch.UpdateTask(t, m)
t, err := backend.ToInfluxTask(task, meta)
if err != nil {
return nil, err
}
return r, c.sch.UpdateTask(ctx, t)
}

View File

@ -15,11 +15,11 @@ import (
"go.uber.org/zap/zaptest"
)
func timeoutSelector(ch <-chan *mock.Task) (*mock.Task, error) {
func timeoutSelector(ch <-chan *platform.Task) (*platform.Task, error) {
select {
case task := <-ch:
return task, nil
case <-time.After(time.Second):
case <-time.After(10 * time.Second):
return nil, errors.New("timeout on select")
}
}
@ -47,7 +47,7 @@ func TestCoordinator(t *testing.T) {
t.Fatal(err)
}
if task.Script != script {
if task.Flux != script {
t.Fatal("task sent to scheduler doesnt match task created")
}
@ -65,7 +65,7 @@ func TestCoordinator(t *testing.T) {
t.Fatal(err)
}
if task.Script != script {
if task.Flux != script {
t.Fatal("task sent to scheduler doesnt match task created")
}
@ -102,7 +102,7 @@ func TestCoordinator(t *testing.T) {
t.Fatal(err)
}
if task.Script != script {
if task.Flux != script {
t.Fatal("task sent to scheduler doesnt match task created")
}
@ -115,7 +115,7 @@ func TestCoordinator(t *testing.T) {
t.Fatal(err)
}
if task.Script != script {
if task.Flux != script {
t.Fatal("task sent to scheduler doesnt match task created")
}
@ -129,7 +129,7 @@ func TestCoordinator(t *testing.T) {
t.Fatal(err)
}
if task.Script != newScript {
if task.Flux != newScript {
t.Fatal("task sent to scheduler doesnt match task created")
}
}

View File

@ -14,6 +14,7 @@ import (
// This file contains helper methods for the StoreTaskMeta type defined in protobuf.
// NewStoreTaskMeta returns a new StoreTaskMeta based on the given request and parsed options.
// Do not call this without validating the request and options first.
func NewStoreTaskMeta(req CreateTaskRequest, o options.Options) StoreTaskMeta {
stm := StoreTaskMeta{
Status: string(req.Status),
@ -26,7 +27,8 @@ func NewStoreTaskMeta(req CreateTaskRequest, o options.Options) StoreTaskMeta {
stm.MaxConcurrency = int32(*o.Concurrency)
}
if o.Offset != nil {
stm.Offset = int32(*o.Offset / time.Second)
offset, _ := o.Offset.DurationFrom(time.Unix(req.ScheduleAfter, 0)) // we can do this because it is validated already.
stm.Offset = offset.String()
}
if stm.Status == "" {
@ -43,20 +45,29 @@ func (stm *StoreTaskMeta) AlignLatestCompleted() {
if strings.HasPrefix(stm.EffectiveCron, "@every ") {
everyString := strings.TrimPrefix(stm.EffectiveCron, "@every ")
every, err := time.ParseDuration(everyString)
every := options.Duration{}
err := every.Parse(everyString)
if err != nil {
// We cannot align a invalid time
return
}
t := time.Unix(stm.LatestCompleted, 0).Truncate(every).Unix()
if t == stm.LatestCompleted {
t := time.Unix(stm.LatestCompleted, 0)
everyDur, err := every.DurationFrom(t)
if err != nil {
return
}
t = t.Truncate(everyDur)
if t.Unix() == stm.LatestCompleted {
// For example, every 1m truncates to exactly on the minute.
// But the input request is schedule after, not "on or after".
// Add one interval.
t += int64(every / time.Second)
tafter, err := every.Add(t)
if err != nil {
return
}
stm.LatestCompleted = t
t = tafter
}
stm.LatestCompleted = t.Truncate(time.Second).Unix()
}
}
@ -123,15 +134,23 @@ func (stm *StoreTaskMeta) CreateNextRun(now int64, makeID func() (platform.ID, e
latest = cr.Now
}
}
nowTime := time.Unix(now, 0)
nextScheduled := sch.Next(time.Unix(latest, 0))
nextScheduledUnix := nextScheduled.Unix()
if dueAt := nextScheduledUnix + int64(stm.Offset); dueAt > now {
offset := &options.Duration{}
if err := offset.Parse(stm.Offset); err != nil {
return RunCreation{}, err
}
dueAt, err := offset.Add(nextScheduled)
if err != nil {
return RunCreation{}, err
}
if dueAt.After(nowTime) {
// Can't schedule yet.
if len(stm.ManualRuns) > 0 {
return stm.createNextRunFromQueue(now, dueAt, sch, makeID)
return stm.createNextRunFromQueue(now, dueAt.Unix(), sch, makeID)
}
return RunCreation{}, RunNotYetDueError{DueAt: dueAt}
return RunCreation{}, RunNotYetDueError{DueAt: dueAt.Unix()}
}
id, err := makeID()
@ -145,12 +164,16 @@ func (stm *StoreTaskMeta) CreateNextRun(now int64, makeID func() (platform.ID, e
RunID: uint64(id),
})
nextDue, err := offset.Add(sch.Next(nextScheduled))
if err != nil {
return RunCreation{}, err
}
return RunCreation{
Created: QueuedRun{
RunID: id,
Now: nextScheduledUnix,
},
NextDue: sch.Next(nextScheduled).Unix() + int64(stm.Offset),
NextDue: nextDue.Unix(),
HasQueue: len(stm.ManualRuns) > 0,
}, nil
}
@ -229,8 +252,15 @@ func (stm *StoreTaskMeta) NextDueRun() (int64, error) {
latest = cr.Now
}
}
return sch.Next(time.Unix(latest, 0)).Unix() + int64(stm.Offset), nil
offset := &options.Duration{}
if err := offset.Parse(stm.Offset); err != nil {
return 0, err
}
nextDue, err := offset.Add(sch.Next(time.Unix(latest, 0)))
if err != nil {
return 0, err
}
return nextDue.Unix(), nil
}
// ManuallyRunTimeRange requests a manual run covering the approximate range specified by the Unix timestamps start and end.

View File

@ -35,7 +35,7 @@ type StoreTaskMeta struct {
// effective_cron is the effective cron string as reported by the task's options.
EffectiveCron string `protobuf:"bytes,5,opt,name=effective_cron,json=effectiveCron,proto3" json:"effective_cron,omitempty"`
// Task's configured delay, in seconds.
Offset int32 `protobuf:"varint,6,opt,name=offset,proto3" json:"offset,omitempty"`
Offset string `protobuf:"bytes,6,opt,name=offset,proto3" json:"offset,omitempty"`
CreatedAt int64 `protobuf:"varint,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
UpdatedAt int64 `protobuf:"varint,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
// The Authorization ID associated with the task.
@ -47,7 +47,7 @@ func (m *StoreTaskMeta) Reset() { *m = StoreTaskMeta{} }
func (m *StoreTaskMeta) String() string { return proto.CompactTextString(m) }
func (*StoreTaskMeta) ProtoMessage() {}
func (*StoreTaskMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_meta_841ef32afee093f0, []int{0}
return fileDescriptor_meta_b8385560be3db2c8, []int{0}
}
func (m *StoreTaskMeta) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -111,11 +111,11 @@ func (m *StoreTaskMeta) GetEffectiveCron() string {
return ""
}
func (m *StoreTaskMeta) GetOffset() int32 {
func (m *StoreTaskMeta) GetOffset() string {
if m != nil {
return m.Offset
}
return 0
return ""
}
func (m *StoreTaskMeta) GetCreatedAt() int64 {
@ -164,7 +164,7 @@ func (m *StoreTaskMetaRun) Reset() { *m = StoreTaskMetaRun{} }
func (m *StoreTaskMetaRun) String() string { return proto.CompactTextString(m) }
func (*StoreTaskMetaRun) ProtoMessage() {}
func (*StoreTaskMetaRun) Descriptor() ([]byte, []int) {
return fileDescriptor_meta_841ef32afee093f0, []int{1}
return fileDescriptor_meta_b8385560be3db2c8, []int{1}
}
func (m *StoreTaskMetaRun) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -254,7 +254,7 @@ func (m *StoreTaskMetaManualRun) Reset() { *m = StoreTaskMetaManualRun{}
func (m *StoreTaskMetaManualRun) String() string { return proto.CompactTextString(m) }
func (*StoreTaskMetaManualRun) ProtoMessage() {}
func (*StoreTaskMetaManualRun) Descriptor() ([]byte, []int) {
return fileDescriptor_meta_841ef32afee093f0, []int{2}
return fileDescriptor_meta_b8385560be3db2c8, []int{2}
}
func (m *StoreTaskMetaManualRun) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -372,10 +372,11 @@ func (m *StoreTaskMeta) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintMeta(dAtA, i, uint64(len(m.EffectiveCron)))
i += copy(dAtA[i:], m.EffectiveCron)
}
if m.Offset != 0 {
dAtA[i] = 0x30
if len(m.Offset) > 0 {
dAtA[i] = 0x32
i++
i = encodeVarintMeta(dAtA, i, uint64(m.Offset))
i = encodeVarintMeta(dAtA, i, uint64(len(m.Offset)))
i += copy(dAtA[i:], m.Offset)
}
if m.CreatedAt != 0 {
dAtA[i] = 0x38
@ -535,8 +536,9 @@ func (m *StoreTaskMeta) Size() (n int) {
if l > 0 {
n += 1 + l + sovMeta(uint64(l))
}
if m.Offset != 0 {
n += 1 + sovMeta(uint64(m.Offset))
l = len(m.Offset)
if l > 0 {
n += 1 + l + sovMeta(uint64(l))
}
if m.CreatedAt != 0 {
n += 1 + sovMeta(uint64(m.CreatedAt))
@ -777,10 +779,10 @@ func (m *StoreTaskMeta) Unmarshal(dAtA []byte) error {
m.EffectiveCron = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 0 {
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
}
m.Offset = 0
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMeta
@ -790,11 +792,21 @@ func (m *StoreTaskMeta) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
m.Offset |= (int32(b) & 0x7F) << shift
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMeta
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Offset = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
@ -1318,42 +1330,42 @@ var (
ErrIntOverflowMeta = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("meta.proto", fileDescriptor_meta_841ef32afee093f0) }
func init() { proto.RegisterFile("meta.proto", fileDescriptor_meta_b8385560be3db2c8) }
var fileDescriptor_meta_841ef32afee093f0 = []byte{
// 543 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x41, 0x6f, 0xd3, 0x30,
0x14, 0xc7, 0x1b, 0xd2, 0x74, 0xab, 0x4b, 0xd7, 0x60, 0xa6, 0x29, 0x02, 0x91, 0x66, 0x15, 0x88,
0x72, 0x09, 0x12, 0x48, 0x9c, 0x10, 0x52, 0x57, 0x38, 0xec, 0xb0, 0x8b, 0xc7, 0x09, 0x09, 0x45,
0x5e, 0xe2, 0x94, 0xa8, 0x89, 0x5d, 0x9c, 0x67, 0x68, 0xf9, 0x14, 0x7c, 0x14, 0xae, 0x7c, 0x03,
0x8e, 0x3b, 0x72, 0x9a, 0x50, 0xfb, 0x35, 0x38, 0x20, 0x3b, 0x69, 0xd9, 0x46, 0x0f, 0x68, 0xb7,
0xe7, 0xdf, 0x8b, 0x9f, 0xdf, 0xff, 0xff, 0x5e, 0x10, 0x2a, 0x18, 0xd0, 0x70, 0x26, 0x05, 0x08,
0xfc, 0x30, 0x16, 0x45, 0x98, 0xf1, 0x34, 0x57, 0xf3, 0x84, 0x6a, 0x9a, 0x53, 0x48, 0x85, 0x2c,
0x42, 0xa0, 0xe5, 0x34, 0x3c, 0xa3, 0xf1, 0x94, 0xf1, 0xe4, 0xde, 0xfe, 0x44, 0x4c, 0x84, 0xb9,
0xf0, 0x54, 0x47, 0xd5, 0xdd, 0xc1, 0x6f, 0x1b, 0x75, 0x4f, 0x41, 0x48, 0xf6, 0x96, 0x96, 0xd3,
0x13, 0x06, 0x14, 0x3f, 0x46, 0xbd, 0x82, 0xce, 0xa3, 0x58, 0xf0, 0x58, 0x49, 0xc9, 0x78, 0xbc,
0xf0, 0xac, 0xc0, 0x1a, 0x3a, 0x64, 0xaf, 0xa0, 0xf3, 0xf1, 0x5f, 0x8a, 0x9f, 0x20, 0x37, 0xa7,
0xc0, 0x4a, 0x88, 0x62, 0x51, 0xcc, 0x72, 0x06, 0x2c, 0xf1, 0x6e, 0x05, 0xd6, 0xd0, 0x26, 0xbd,
0x8a, 0x8f, 0xd7, 0x18, 0x1f, 0xa0, 0x56, 0x09, 0x14, 0x54, 0xe9, 0xd9, 0x81, 0x35, 0x6c, 0x93,
0xfa, 0x84, 0x63, 0x74, 0xa7, 0x2a, 0x07, 0xf9, 0x22, 0x92, 0x8a, 0xf3, 0x8c, 0x4f, 0xbc, 0x66,
0x60, 0x0f, 0x3b, 0xcf, 0x5e, 0x84, 0xff, 0xa3, 0x2a, 0xbc, 0xd2, 0x3b, 0x51, 0x9c, 0xb8, 0x9b,
0x82, 0xa4, 0xaa, 0x87, 0x1f, 0xa1, 0x3d, 0x96, 0xa6, 0x2c, 0x86, 0xec, 0x13, 0x8b, 0x62, 0x29,
0xb8, 0xe7, 0x98, 0x26, 0xba, 0x1b, 0x3a, 0x96, 0x82, 0xeb, 0x1e, 0x45, 0x9a, 0x96, 0x0c, 0xbc,
0x96, 0x91, 0x5b, 0x9f, 0xf0, 0x03, 0x84, 0x62, 0xc9, 0x28, 0xb0, 0x24, 0xa2, 0xe0, 0xed, 0x18,
0x81, 0xed, 0x9a, 0x8c, 0x4c, 0x5a, 0xcd, 0x92, 0x75, 0x7a, 0xb7, 0x4a, 0xd7, 0x64, 0x04, 0xf8,
0x15, 0x72, 0xa9, 0x82, 0x0f, 0x42, 0x66, 0x5f, 0x28, 0x64, 0x82, 0x47, 0x59, 0xe2, 0xb5, 0x03,
0x6b, 0xd8, 0x3c, 0xba, 0xbb, 0xbc, 0xe8, 0xf7, 0x46, 0x97, 0x73, 0xc7, 0xaf, 0x49, 0xef, 0xca,
0xc7, 0xc7, 0x09, 0x7e, 0x8f, 0x3a, 0x05, 0xe5, 0x8a, 0xe6, 0xda, 0x9e, 0xd2, 0x73, 0x8d, 0x37,
0x2f, 0x6f, 0xe0, 0xcd, 0x89, 0xa9, 0xa2, 0x1d, 0x42, 0xc5, 0x3a, 0x2c, 0x07, 0xdf, 0x2d, 0xe4,
0x5e, 0xb7, 0x10, 0xbb, 0xc8, 0xe6, 0xe2, 0xb3, 0x99, 0xba, 0x4d, 0x74, 0xa8, 0x09, 0xc8, 0x85,
0x99, 0x6e, 0x97, 0xe8, 0x10, 0x07, 0xa8, 0x25, 0x95, 0x51, 0x63, 0x1b, 0x35, 0xed, 0xe5, 0x45,
0xdf, 0x21, 0x4a, 0x6b, 0x70, 0xa4, 0xd2, 0x9d, 0xf7, 0x51, 0x47, 0x52, 0x3e, 0x61, 0x51, 0x09,
0x54, 0x82, 0xd7, 0x34, 0xd5, 0x90, 0x41, 0xa7, 0x9a, 0xe0, 0xfb, 0xa8, 0x5d, 0x7d, 0xc0, 0x78,
0x62, 0x46, 0x62, 0x93, 0x5d, 0x03, 0xde, 0xf0, 0x04, 0x1f, 0xa2, 0xdb, 0x92, 0x7d, 0x54, 0xac,
0xac, 0x8d, 0x6d, 0x99, 0x7c, 0x67, 0xc3, 0x46, 0x30, 0xf8, 0x66, 0xa1, 0x83, 0xed, 0x12, 0xf1,
0x3e, 0x72, 0xaa, 0x57, 0x2b, 0x0d, 0xd5, 0x41, 0xab, 0xd0, 0x4f, 0x55, 0x3b, 0xaa, 0xc3, 0xad,
0x2b, 0x6c, 0x6f, 0x5f, 0xe1, 0xeb, 0x0d, 0x35, 0xff, 0x69, 0xe8, 0x92, 0x27, 0xce, 0x76, 0x4f,
0x8e, 0x0e, 0x7f, 0x2c, 0x7d, 0xeb, 0x7c, 0xe9, 0x5b, 0xbf, 0x96, 0xbe, 0xf5, 0x75, 0xe5, 0x37,
0xce, 0x57, 0x7e, 0xe3, 0xe7, 0xca, 0x6f, 0xbc, 0xdb, 0xa9, 0x87, 0x76, 0xd6, 0x32, 0xff, 0xe5,
0xf3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x74, 0xb0, 0xab, 0x79, 0xe1, 0x03, 0x00, 0x00,
var fileDescriptor_meta_b8385560be3db2c8 = []byte{
// 544 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xc1, 0x6e, 0xd3, 0x4c,
0x10, 0xae, 0x7f, 0xc7, 0x69, 0xb3, 0xf9, 0xd3, 0x98, 0xa5, 0xaa, 0x2c, 0x10, 0x8e, 0x1b, 0x81,
0x08, 0x17, 0x23, 0x81, 0xc4, 0x09, 0x21, 0xa5, 0x81, 0x43, 0x0f, 0xbd, 0x6c, 0x39, 0x21, 0x21,
0x6b, 0x6b, 0xaf, 0x83, 0x15, 0x7b, 0x37, 0xac, 0x67, 0x21, 0xe1, 0x29, 0x78, 0x14, 0xae, 0xbc,
0x01, 0xc7, 0x1e, 0x39, 0x55, 0x28, 0x79, 0x0d, 0x0e, 0x68, 0x77, 0x93, 0xd0, 0x96, 0x1c, 0x10,
0xb7, 0x99, 0x6f, 0x76, 0x67, 0xbf, 0xef, 0x9b, 0x59, 0x84, 0x2a, 0x06, 0x34, 0x9e, 0x4a, 0x01,
0x02, 0xdf, 0x4f, 0x45, 0x15, 0x17, 0x3c, 0x2f, 0xd5, 0x2c, 0xa3, 0x1a, 0x2d, 0x29, 0xe4, 0x42,
0x56, 0x31, 0xd0, 0x7a, 0x12, 0x9f, 0xd3, 0x74, 0xc2, 0x78, 0x76, 0xe7, 0x60, 0x2c, 0xc6, 0xc2,
0x5c, 0x78, 0xac, 0x23, 0x7b, 0xb7, 0xff, 0xd3, 0x45, 0x9d, 0x33, 0x10, 0x92, 0xbd, 0xa6, 0xf5,
0xe4, 0x94, 0x01, 0xc5, 0x0f, 0x51, 0xb7, 0xa2, 0xb3, 0x24, 0x15, 0x3c, 0x55, 0x52, 0x32, 0x9e,
0xce, 0x03, 0x27, 0x72, 0x06, 0x1e, 0xd9, 0xaf, 0xe8, 0x6c, 0xf4, 0x1b, 0xc5, 0x8f, 0x90, 0x5f,
0x52, 0x60, 0x35, 0x24, 0xa9, 0xa8, 0xa6, 0x25, 0x03, 0x96, 0x05, 0xff, 0x45, 0xce, 0xc0, 0x25,
0x5d, 0x8b, 0x8f, 0xd6, 0x30, 0x3e, 0x44, 0xcd, 0x1a, 0x28, 0xa8, 0x3a, 0x70, 0x23, 0x67, 0xd0,
0x22, 0xab, 0x0c, 0xa7, 0xe8, 0x96, 0x6d, 0x07, 0xe5, 0x3c, 0x91, 0x8a, 0xf3, 0x82, 0x8f, 0x83,
0x46, 0xe4, 0x0e, 0xda, 0x4f, 0x9e, 0xc5, 0x7f, 0xa3, 0x2a, 0xbe, 0xc6, 0x9d, 0x28, 0x4e, 0xfc,
0x4d, 0x43, 0x62, 0xfb, 0xe1, 0x07, 0x68, 0x9f, 0xe5, 0x39, 0x4b, 0xa1, 0xf8, 0xc0, 0x92, 0x54,
0x0a, 0x1e, 0x78, 0x86, 0x44, 0x67, 0x83, 0x8e, 0xa4, 0xe0, 0x9a, 0xa3, 0xc8, 0xf3, 0x9a, 0x41,
0xd0, 0xb4, 0x1c, 0x6d, 0x86, 0xef, 0x21, 0x94, 0x4a, 0x46, 0x81, 0x65, 0x09, 0x85, 0x60, 0xd7,
0x08, 0x6c, 0xad, 0x90, 0xa1, 0x29, 0xab, 0x69, 0xb6, 0x2e, 0xef, 0xd9, 0xf2, 0x0a, 0x19, 0x02,
0x7e, 0x81, 0x7c, 0xaa, 0xe0, 0x9d, 0x90, 0xc5, 0x27, 0x0a, 0x85, 0xe0, 0x49, 0x91, 0x05, 0xad,
0xc8, 0x19, 0x34, 0x8e, 0x6f, 0x2f, 0x2e, 0x7b, 0xdd, 0xe1, 0xd5, 0xda, 0xc9, 0x4b, 0xd2, 0xbd,
0x76, 0xf8, 0x24, 0xc3, 0x6f, 0x51, 0xbb, 0xa2, 0x5c, 0xd1, 0x52, 0xdb, 0x53, 0x07, 0xbe, 0xf1,
0xe6, 0xf9, 0x3f, 0x78, 0x73, 0x6a, 0xba, 0x68, 0x87, 0x50, 0xb5, 0x0e, 0xeb, 0xfe, 0x57, 0x07,
0xf9, 0x37, 0x2d, 0xc4, 0x3e, 0x72, 0xb9, 0xf8, 0x68, 0xa6, 0xee, 0x12, 0x1d, 0x6a, 0x04, 0xe4,
0xdc, 0x4c, 0xb7, 0x43, 0x74, 0x88, 0x23, 0xd4, 0x94, 0xca, 0xa8, 0x71, 0x8d, 0x9a, 0xd6, 0xe2,
0xb2, 0xe7, 0x11, 0xa5, 0x35, 0x78, 0x52, 0x69, 0xe6, 0x3d, 0xd4, 0x96, 0x94, 0x8f, 0x59, 0x52,
0x03, 0x95, 0x10, 0x34, 0x4c, 0x37, 0x64, 0xa0, 0x33, 0x8d, 0xe0, 0xbb, 0xa8, 0x65, 0x0f, 0x30,
0x9e, 0x99, 0x91, 0xb8, 0x64, 0xcf, 0x00, 0xaf, 0x78, 0x86, 0x8f, 0xd0, 0xff, 0x92, 0xbd, 0x57,
0xac, 0x5e, 0x19, 0xdb, 0x34, 0xf5, 0xf6, 0x06, 0x1b, 0x42, 0xff, 0x8b, 0x83, 0x0e, 0xb7, 0x4b,
0xc4, 0x07, 0xc8, 0xb3, 0xaf, 0x5a, 0x0d, 0x36, 0xd1, 0x2a, 0xf4, 0x53, 0x76, 0x47, 0x75, 0xb8,
0x75, 0x85, 0xdd, 0xed, 0x2b, 0x7c, 0x93, 0x50, 0xe3, 0x0f, 0x42, 0x57, 0x3c, 0xf1, 0xb6, 0x7b,
0x72, 0x7c, 0xf4, 0x6d, 0x11, 0x3a, 0x17, 0x8b, 0xd0, 0xf9, 0xb1, 0x08, 0x9d, 0xcf, 0xcb, 0x70,
0xe7, 0x62, 0x19, 0xee, 0x7c, 0x5f, 0x86, 0x3b, 0x6f, 0x76, 0x57, 0x43, 0x3b, 0x6f, 0x9a, 0x7f,
0xf9, 0xf4, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xca, 0x64, 0xf6, 0x93, 0xe1, 0x03, 0x00, 0x00,
}

View File

@ -25,7 +25,7 @@ message StoreTaskMeta {
string effective_cron = 5;
// Task's configured delay, in seconds.
int32 offset = 6;
string offset = 6;
int64 created_at = 7;
int64 updated_at = 8;

View File

@ -190,7 +190,7 @@ func TestMeta_CreateNextRun_Delay(t *testing.T) {
MaxConcurrency: 2,
Status: "enabled",
EffectiveCron: "* * * * *", // Every minute.
Offset: 5,
Offset: "5s",
LatestCompleted: 30, // Arbitrary non-overlap starting point.
}
@ -219,7 +219,7 @@ func TestMeta_ManuallyRunTimeRange(t *testing.T) {
MaxConcurrency: 2,
Status: "enabled",
EffectiveCron: "* * * * *", // Every minute.
Offset: 5,
Offset: "5s",
LatestCompleted: 30, // Arbitrary non-overlap starting point.
}

View File

@ -11,12 +11,12 @@ import (
"time"
"github.com/influxdata/flux"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/kit/tracing"
"github.com/influxdata/influxdb/task/options"
opentracing "github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/kit/tracing"
)
var (
@ -30,21 +30,6 @@ var (
ErrTaskAlreadyClaimed = errors.New("task already claimed")
)
// DesiredState persists the desired state of a run.
type DesiredState interface {
// CreateNextRun requests the next run from the desired state, delegating to (*StoreTaskMeta).CreateNextRun.
// This allows the scheduler to be "dumb" and just tell DesiredState what time the scheduler thinks it is,
// and the DesiredState will create the appropriate run according to the task's cron schedule,
// and according to what's in progress and what's been finished.
//
// If a Run is requested and the cron schedule says the schedule isn't ready, a RunNotYetDueError is returned.
CreateNextRun(ctx context.Context, taskID platform.ID, now int64) (RunCreation, error)
// FinishRun indicates that the given run is no longer intended to be executed.
// This may be called after a successful or failed execution, or upon cancellation.
FinishRun(ctx context.Context, taskID, runID platform.ID) error
}
// Executor handles execution of a run.
type Executor interface {
// Execute attempts to begin execution of a run.
@ -114,10 +99,10 @@ type Scheduler interface {
Stop()
// ClaimTask begins control of task execution in this scheduler.
ClaimTask(task *StoreTask, meta *StoreTaskMeta) error
ClaimTask(authCtx context.Context, task *platform.Task) error
// UpdateTask will update the concurrency and the runners for a task
UpdateTask(task *StoreTask, meta *StoreTaskMeta) error
UpdateTask(authCtx context.Context, task *platform.Task) error
// ReleaseTask immediately cancels any in-progress runs for the given task ID,
// and releases any resources related to management of that task.
@ -166,11 +151,10 @@ func WithLogger(logger *zap.Logger) TickSchedulerOption {
}
// NewScheduler returns a new scheduler with the given desired state and the given now UTC timestamp.
func NewScheduler(desiredState DesiredState, executor Executor, lw LogWriter, now int64, opts ...TickSchedulerOption) *TickScheduler {
func NewScheduler(taskControlService TaskControlService, executor Executor, now int64, opts ...TickSchedulerOption) *TickScheduler {
o := &TickScheduler{
desiredState: desiredState,
taskControlService: taskControlService,
executor: executor,
logWriter: lw,
now: now,
taskSchedulers: make(map[platform.ID]*taskScheduler),
logger: zap.NewNop(),
@ -186,9 +170,8 @@ func NewScheduler(desiredState DesiredState, executor Executor, lw LogWriter, no
}
type TickScheduler struct {
desiredState DesiredState
taskControlService TaskControlService
executor Executor
logWriter LogWriter
now int64
logger *zap.Logger
@ -286,7 +269,7 @@ func (s *TickScheduler) Stop() {
s.executor.Wait()
}
func (s *TickScheduler) ClaimTask(task *StoreTask, meta *StoreTaskMeta) (err error) {
func (s *TickScheduler) ClaimTask(authCtx context.Context, task *platform.Task) (err error) {
s.schedulerMu.Lock()
defer s.schedulerMu.Unlock()
if s.ctx == nil {
@ -302,7 +285,7 @@ func (s *TickScheduler) ClaimTask(task *StoreTask, meta *StoreTaskMeta) (err err
defer s.metrics.ClaimTask(err == nil)
ts, err := newTaskScheduler(s.ctx, s.wg, s, task, meta, s.metrics)
ts, err := newTaskScheduler(s.ctx, authCtx, s.wg, s, task, s.metrics)
if err != nil {
return err
}
@ -314,8 +297,13 @@ func (s *TickScheduler) ClaimTask(task *StoreTask, meta *StoreTaskMeta) (err err
s.taskSchedulers[task.ID] = ts
if len(meta.CurrentlyRunning) > 0 {
if err := ts.WorkCurrentlyRunning(meta); err != nil {
// pickup any runs that are still "running from a previous failure"
runs, err := s.taskControlService.CurrentlyRunning(authCtx, task.ID)
if err != nil {
return err
}
if len(runs) > 0 {
if err := ts.WorkCurrentlyRunning(runs); err != nil {
return err
}
}
@ -327,7 +315,12 @@ func (s *TickScheduler) ClaimTask(task *StoreTask, meta *StoreTaskMeta) (err err
return nil
}
func (s *TickScheduler) UpdateTask(task *StoreTask, meta *StoreTaskMeta) error {
func (s *TickScheduler) UpdateTask(authCtx context.Context, task *platform.Task) error {
opt, err := options.FromScript(task.Flux)
if err != nil {
return err
}
s.schedulerMu.Lock()
defer s.schedulerMu.Unlock()
@ -337,20 +330,29 @@ func (s *TickScheduler) UpdateTask(task *StoreTask, meta *StoreTaskMeta) error {
}
ts.task = task
next, err := meta.NextDueRun()
next, err := s.taskControlService.NextDueRun(authCtx, task.ID)
if err != nil {
return err
}
hasQueue := len(meta.ManualRuns) > 0
runs, err := s.taskControlService.ManualRuns(authCtx, task.ID)
if err != nil {
return err
}
hasQueue := len(runs) > 0
// update the queued information
ts.nextDueMu.Lock()
ts.hasQueue = hasQueue
ts.nextDue = next
ts.authCtx = authCtx
ts.nextDueMu.Unlock()
// check the concurrency
// todo(lh): In the near future we may not be using the scheduler to manage concurrency.
maxC := int(meta.MaxConcurrency)
maxC := len(ts.runners)
if opt.Concurrency != nil {
maxC = int(*opt.Concurrency)
}
if maxC != len(ts.runners) {
ts.runningMu.Lock()
if maxC < len(ts.runners) {
@ -360,7 +362,7 @@ func (s *TickScheduler) UpdateTask(task *StoreTask, meta *StoreTaskMeta) error {
if maxC > len(ts.runners) {
delta := maxC - len(ts.runners)
for i := 0; i < delta; i++ {
ts.runners = append(ts.runners, newRunner(s.ctx, ts.wg, s.logger, task, s.desiredState, s.executor, s.logWriter, ts))
ts.runners = append(ts.runners, newRunner(s.ctx, ts.wg, s.logger, task, s.taskControlService, s.executor, ts))
}
}
ts.runningMu.Unlock()
@ -404,7 +406,10 @@ type taskScheduler struct {
now *int64
// Task we are scheduling for.
task *StoreTask
task *platform.Task
// Authorization context for using the TaskControlService
authCtx context.Context
// CancelFunc for context passed to runners, to enable Cancel method.
cancel context.CancelFunc
@ -427,13 +432,26 @@ type taskScheduler struct {
func newTaskScheduler(
ctx context.Context,
authCtx context.Context,
wg *sync.WaitGroup,
s *TickScheduler,
task *StoreTask,
meta *StoreTaskMeta,
task *platform.Task,
metrics *schedulerMetrics,
) (*taskScheduler, error) {
firstDue, err := meta.NextDueRun()
firstDue, err := s.taskControlService.NextDueRun(authCtx, task.ID)
if err != nil {
return nil, err
}
opt, err := options.FromScript(task.Flux)
if err != nil {
return nil, err
}
maxC := 1
if opt.Concurrency != nil {
maxC = int(*opt.Concurrency)
}
runs, err := s.taskControlService.ManualRuns(authCtx, task.ID)
if err != nil {
return nil, err
}
@ -442,20 +460,21 @@ func newTaskScheduler(
ts := &taskScheduler{
now: &s.now,
task: task,
authCtx: authCtx,
cancel: cancel,
wg: wg,
runners: make([]*runner, meta.MaxConcurrency),
running: make(map[platform.ID]runCtx, meta.MaxConcurrency),
runners: make([]*runner, maxC),
running: make(map[platform.ID]runCtx, maxC),
logger: s.logger.With(zap.String("task_id", task.ID.String())),
metrics: s.metrics,
nextDue: firstDue,
nextDueSource: math.MinInt64,
hasQueue: len(meta.ManualRuns) > 0,
hasQueue: len(runs) > 0,
}
for i := range ts.runners {
logger := ts.logger.With(zap.Int("run_slot", i))
ts.runners[i] = newRunner(ctx, wg, logger, task, s.desiredState, s.executor, s.logWriter, ts)
ts.runners[i] = newRunner(ctx, wg, logger, task, s.taskControlService, s.executor, ts)
}
return ts, nil
@ -473,11 +492,15 @@ func (ts *taskScheduler) Work() {
}
}
func (ts *taskScheduler) WorkCurrentlyRunning(meta *StoreTaskMeta) error {
for _, cr := range meta.CurrentlyRunning {
func (ts *taskScheduler) WorkCurrentlyRunning(runs []*platform.Run) error {
for _, cr := range runs {
foundWorker := false
for _, r := range ts.runners {
qr := QueuedRun{TaskID: ts.task.ID, RunID: platform.ID(cr.RunID), Now: cr.Now}
time, err := time.Parse(time.RFC3339, cr.ScheduledFor)
if err != nil {
return err
}
qr := QueuedRun{TaskID: ts.task.ID, RunID: platform.ID(cr.ID), Now: time.Unix()}
if r.RestartRun(qr) {
foundWorker = true
break
@ -523,11 +546,10 @@ type runner struct {
ctx context.Context
wg *sync.WaitGroup
task *StoreTask
task *platform.Task
desiredState DesiredState
taskControlService TaskControlService
executor Executor
logWriter LogWriter
// Parent taskScheduler.
ts *taskScheduler
@ -539,10 +561,9 @@ func newRunner(
ctx context.Context,
wg *sync.WaitGroup,
logger *zap.Logger,
task *StoreTask,
desiredState DesiredState,
task *platform.Task,
taskControlService TaskControlService,
executor Executor,
logWriter LogWriter,
ts *taskScheduler,
) *runner {
return &runner{
@ -550,9 +571,8 @@ func newRunner(
wg: wg,
state: new(uint32),
task: task,
desiredState: desiredState,
taskControlService: taskControlService,
executor: executor,
logWriter: logWriter,
ts: ts,
logger: logger,
}
@ -624,7 +644,7 @@ func (r *runner) startFromWorking(now int64) {
defer span.Finish()
ctx, cancel := context.WithCancel(ctx)
rc, err := r.desiredState.CreateNextRun(ctx, r.task.ID, now)
rc, err := r.taskControlService.CreateNextRun(ctx, r.task.ID, now)
if err != nil {
r.logger.Info("Failed to create run", zap.Error(err))
atomic.StoreUint32(r.state, runnerIdle)
@ -658,13 +678,7 @@ func (r *runner) clearRunning(id platform.ID) {
// fail sets r's state to failed, and marks this runner as idle.
func (r *runner) fail(qr QueuedRun, runLogger *zap.Logger, stage string, reason error) {
rlb := RunLogBase{
Task: r.task,
RunID: qr.RunID,
RunScheduledFor: qr.Now,
RequestedAt: qr.RequestedAt,
}
if err := r.logWriter.AddRunLog(r.ctx, rlb, time.Now(), stage+": "+reason.Error()); err != nil {
if err := r.taskControlService.AddRunLog(r.ts.authCtx, r.task.ID, qr.RunID, time.Now(), stage+": "+reason.Error()); err != nil {
runLogger.Info("Failed to update run log", zap.Error(err))
}
@ -674,6 +688,16 @@ func (r *runner) fail(qr QueuedRun, runLogger *zap.Logger, stage string, reason
func (r *runner) executeAndWait(ctx context.Context, qr QueuedRun, runLogger *zap.Logger) {
defer r.wg.Done()
errMsg := "Failed to finish run"
defer func() {
if _, err := r.taskControlService.FinishRun(r.ctx, qr.TaskID, qr.RunID); err != nil {
// TODO(mr): Need to figure out how to reconcile this error, on the next run, if it happens.
runLogger.Error(errMsg, zap.Error(err))
atomic.StoreUint32(r.state, runnerIdle)
}
}()
sp, spCtx := tracing.StartSpanFromContext(ctx)
defer sp.Finish()
@ -681,11 +705,7 @@ func (r *runner) executeAndWait(ctx context.Context, qr QueuedRun, runLogger *za
rp, err := r.executor.Execute(spCtx, qr)
if err != nil {
runLogger.Info("Failed to begin run execution", zap.Error(err))
if err := r.desiredState.FinishRun(r.ctx, qr.TaskID, qr.RunID); err != nil {
// TODO(mr): Need to figure out how to reconcile this error, on the next run, if it happens.
runLogger.Error("Beginning run execution failed, and desired state update failed", zap.Error(err))
}
errMsg = "Beginning run execution failed, " + errMsg
// TODO(mr): retry?
r.fail(qr, runLogger, "Run failed to begin execution", err)
return
@ -713,19 +733,14 @@ func (r *runner) executeAndWait(ctx context.Context, qr QueuedRun, runLogger *za
close(ready)
if err != nil {
if err == ErrRunCanceled {
_ = r.desiredState.FinishRun(r.ctx, qr.TaskID, qr.RunID)
r.updateRunState(qr, RunCanceled, runLogger)
errMsg = "Waiting for execution result failed, " + errMsg
// Move on to the next execution, for a canceled run.
r.startFromWorking(atomic.LoadInt64(r.ts.now))
return
}
runLogger.Info("Failed to wait for execution result", zap.Error(err))
if err := r.desiredState.FinishRun(r.ctx, qr.TaskID, qr.RunID); err != nil {
// TODO(mr): Need to figure out how to reconcile this error, on the next run, if it happens.
runLogger.Error("Waiting for execution result failed, and desired state update failed", zap.Error(err))
}
// TODO(mr): retry?
r.fail(qr, runLogger, "Waiting for execution result", err)
@ -733,34 +748,22 @@ func (r *runner) executeAndWait(ctx context.Context, qr QueuedRun, runLogger *za
}
if err := rr.Err(); err != nil {
runLogger.Info("Run failed to execute", zap.Error(err))
if err := r.desiredState.FinishRun(r.ctx, qr.TaskID, qr.RunID); err != nil {
// TODO(mr): Need to figure out how to reconcile this error, on the next run, if it happens.
runLogger.Error("Run failed to execute, and desired state update failed", zap.Error(err))
}
errMsg = "Run failed to execute, " + errMsg
// TODO(mr): retry?
r.fail(qr, runLogger, "Run failed to execute", err)
return
}
if err := r.desiredState.FinishRun(r.ctx, qr.TaskID, qr.RunID); err != nil {
runLogger.Info("Failed to finish run", zap.Error(err))
// TODO(mr): retry?
// Need to think about what it means if there was an error finishing a run.
atomic.StoreUint32(r.state, runnerIdle)
r.updateRunState(qr, RunFail, runLogger)
return
}
rlb := RunLogBase{
Task: r.task,
RunID: qr.RunID,
RunScheduledFor: qr.Now,
RequestedAt: qr.RequestedAt,
}
stats := rr.Statistics()
b, err := json.Marshal(stats)
if err == nil {
r.logWriter.AddRunLog(r.ctx, rlb, time.Now(), string(b))
// authctx can be updated mid process
r.ts.nextDueMu.RLock()
authCtx := r.ts.authCtx
r.ts.nextDueMu.RUnlock()
r.taskControlService.AddRunLog(authCtx, r.task.ID, qr.RunID, time.Now(), string(b))
}
r.updateRunState(qr, RunSuccess, runLogger)
runLogger.Info("Execution succeeded")
@ -770,26 +773,19 @@ func (r *runner) executeAndWait(ctx context.Context, qr QueuedRun, runLogger *za
}
func (r *runner) updateRunState(qr QueuedRun, s RunStatus, runLogger *zap.Logger) {
rlb := RunLogBase{
Task: r.task,
RunID: qr.RunID,
RunScheduledFor: qr.Now,
RequestedAt: qr.RequestedAt,
}
switch s {
case RunStarted:
r.ts.metrics.StartRun(r.task.ID.String())
r.logWriter.AddRunLog(r.ctx, rlb, time.Now(), fmt.Sprintf("Started task from script: %q", r.task.Script))
r.taskControlService.AddRunLog(r.ts.authCtx, r.task.ID, qr.RunID, time.Now(), fmt.Sprintf("Started task from script: %q", r.task.Flux))
case RunSuccess:
r.ts.metrics.FinishRun(r.task.ID.String(), true)
r.logWriter.AddRunLog(r.ctx, rlb, time.Now(), "Completed successfully")
r.taskControlService.AddRunLog(r.ts.authCtx, r.task.ID, qr.RunID, time.Now(), "Completed successfully")
case RunFail:
r.ts.metrics.FinishRun(r.task.ID.String(), false)
r.logWriter.AddRunLog(r.ctx, rlb, time.Now(), "Failed")
r.taskControlService.AddRunLog(r.ts.authCtx, r.task.ID, qr.RunID, time.Now(), "Failed")
case RunCanceled:
r.ts.metrics.FinishRun(r.task.ID.String(), false)
r.logWriter.AddRunLog(r.ctx, rlb, time.Now(), "Canceled")
r.taskControlService.AddRunLog(r.ts.authCtx, r.task.ID, qr.RunID, time.Now(), "Canceled")
default: // We are deliberately not handling RunQueued yet.
// There is not really a notion of being queued in this runner architecture.
runLogger.Warn("Unhandled run state", zap.Stringer("state", s))
@ -799,7 +795,7 @@ func (r *runner) updateRunState(qr QueuedRun, s RunStatus, runLogger *zap.Logger
// If we start seeing errors from this, we know the time limit is too short or the system is overloaded.
ctx, cancel := context.WithTimeout(r.ctx, 10*time.Millisecond)
defer cancel()
if err := r.logWriter.UpdateRunState(ctx, rlb, time.Now(), s); err != nil {
if err := r.taskControlService.UpdateRunState(ctx, r.task.ID, qr.RunID, time.Now(), s); err != nil {
runLogger.Info("Error updating run state", zap.Stringer("state", s), zap.Error(err))
}
}

View File

@ -7,6 +7,7 @@ import (
"fmt"
"reflect"
"strings"
"sync"
"testing"
"time"
@ -23,55 +24,46 @@ import (
func TestScheduler_Cancelation(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
e.WithHanging(100 * time.Millisecond)
rl := backend.NewInMemRunReaderWriter()
o := backend.NewScheduler(d, e, rl, 5, backend.WithLogger(zaptest.NewLogger(t)))
o := backend.NewScheduler(tcs, e, 5, backend.WithLogger(zaptest.NewLogger(t)))
o.Start(context.Background())
defer o.Stop()
const orgID = 2
task := &backend.StoreTask{
task := &platform.Task{
ID: platform.ID(1),
Org: orgID,
OrganizationID: orgID,
Every: "1s",
LatestCompleted: "1970-01-01T00:00:04Z",
Flux: `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 1,
EffectiveCron: "@every 1s",
LatestCompleted: 4,
}
d.SetTaskMeta(task.ID, *meta)
if err := o.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := o.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
runs, err := rl.ListRuns(context.Background(), orgID, platform.RunFilter{Task: task.ID})
runs, err := tcs.CurrentlyRunning(context.Background(), task.ID)
if err != nil {
t.Fatal(err)
}
if err = o.CancelRun(context.Background(), task.ID, runs[0].ID); err != nil {
run := runs[0]
if err = o.CancelRun(context.Background(), task.ID, run.ID); err != nil {
t.Fatal(err)
}
time.Sleep(10 * time.Millisecond) // we have to do this because the storage system we are using for the logs is eventually consistent.
runs, err = rl.ListRuns(context.Background(), orgID, platform.RunFilter{Task: task.ID})
time.Sleep(20 * time.Millisecond) // we have to do this because the storage system we are using for the logs is eventually consistent.
runs, err = tcs.CurrentlyRunning(context.Background(), task.ID)
if err != nil {
t.Fatal(err)
}
if runs[0].Status != "canceled" {
t.Fatalf("Run not logged as canceled, but is %s", runs[0].Status)
}
// check to make sure it is really canceling, and that the status doesn't get changed to something else after it would have finished
time.Sleep(500 * time.Millisecond)
runs, err = rl.ListRuns(context.Background(), orgID, platform.RunFilter{Task: task.ID})
if err != nil {
t.Fatal(err)
}
if runs[0].Status != "canceled" {
t.Fatalf("Run not actually canceled, but is %s", runs[0].Status)
if len(runs) != 0 {
t.Fatal("canceled run still running")
}
// check for when we cancel something already canceled
if err = o.CancelRun(context.Background(), task.ID, runs[0].ID); err != backend.ErrRunNotFound {
time.Sleep(500 * time.Millisecond)
if err = o.CancelRun(context.Background(), task.ID, run.ID); err != backend.ErrRunNotFound {
t.Fatalf("expected ErrRunNotFound but got %s", err)
}
}
@ -79,55 +71,47 @@ func TestScheduler_Cancelation(t *testing.T) {
func TestScheduler_StartScriptOnClaim(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5, backend.WithLogger(zaptest.NewLogger(t)))
o := backend.NewScheduler(tcs, e, 5, backend.WithLogger(zaptest.NewLogger(t)))
o.Start(context.Background())
defer o.Stop()
task := &backend.StoreTask{
task := &platform.Task{
ID: platform.ID(1),
Cron: "* * * * *",
LatestCompleted: "1970-01-01T00:00:03Z",
Flux: `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 1,
EffectiveCron: "* * * * *",
LatestCompleted: 3,
}
d.SetTaskMeta(task.ID, *meta)
if err := o.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := o.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
// No valid timestamps between 3 and 5 for every minute.
if n := len(d.CreatedFor(task.ID)); n > 0 {
if n := len(tcs.CreatedFor(task.ID)); n > 0 {
t.Fatalf("expected no runs queued, but got %d", n)
}
// For every second, can queue for timestamps 4 and 5.
task = &backend.StoreTask{
task = &platform.Task{
ID: platform.ID(2),
Every: "1s",
LatestCompleted: "1970-01-01T00:00:03Z",
Flux: `option task = {concurrency: 99, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
meta = &backend.StoreTaskMeta{
MaxConcurrency: 99,
EffectiveCron: "@every 1s",
LatestCompleted: 3,
CurrentlyRunning: []*backend.StoreTaskMetaRun{
&backend.StoreTaskMetaRun{
Now: 4,
RunID: uint64(10),
},
},
}
d.SetTaskMeta(task.ID, *meta)
if err := o.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := o.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
if n := len(d.CreatedFor(task.ID)); n != 1 {
if n := len(tcs.CreatedFor(task.ID)); n != 2 {
t.Fatalf("expected 2 runs queued for 'every 1s' script, but got %d", n)
}
if x, err := d.PollForNumberCreated(task.ID, 1); err != nil {
if x, err := tcs.PollForNumberCreated(task.ID, 2); err != nil {
t.Fatalf("expected 1 runs queued, but got %d", len(x))
}
@ -143,7 +127,7 @@ func TestScheduler_StartScriptOnClaim(t *testing.T) {
rp.Finish(mock.NewRunResult(nil, false), nil)
}
if x, err := d.PollForNumberCreated(task.ID, 0); err != nil {
if x, err := tcs.PollForNumberCreated(task.ID, 0); err != nil {
t.Fatalf("expected 1 runs queued, but got %d", len(x))
}
@ -155,32 +139,30 @@ func TestScheduler_StartScriptOnClaim(t *testing.T) {
func TestScheduler_CreateNextRunOnTick(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5)
o := backend.NewScheduler(tcs, e, 5)
o.Start(context.Background())
defer o.Stop()
task := &backend.StoreTask{
task := &platform.Task{
ID: platform.ID(1),
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 2,
EffectiveCron: "@every 1s",
LatestCompleted: 5,
Every: "1s",
LatestCompleted: "1970-01-01T00:00:05Z",
Flux: `option task = {concurrency: 2, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
d.SetTaskMeta(task.ID, *meta)
if err := o.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := o.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
if x, err := d.PollForNumberCreated(task.ID, 0); err != nil {
if x, err := tcs.PollForNumberCreated(task.ID, 0); err != nil {
t.Fatalf("expected no runs queued, but got %d", len(x))
}
o.Tick(6)
if x, err := d.PollForNumberCreated(task.ID, 1); err != nil {
if x, err := tcs.PollForNumberCreated(task.ID, 1); err != nil {
t.Fatalf("expected 1 run queued, but got %d", len(x))
}
running, err := e.PollForNumberRunning(task.ID, 1)
@ -193,7 +175,7 @@ func TestScheduler_CreateNextRunOnTick(t *testing.T) {
}
o.Tick(7)
if x, err := d.PollForNumberCreated(task.ID, 2); err != nil {
if x, err := tcs.PollForNumberCreated(task.ID, 2); err != nil {
t.Fatalf("expected 2 runs queued, but got %d", len(x))
}
running, err = e.PollForNumberRunning(task.ID, 2)
@ -212,7 +194,7 @@ func TestScheduler_CreateNextRunOnTick(t *testing.T) {
}
o.Tick(8) // Can't exceed concurrency of 2.
if x, err := d.PollForNumberCreated(task.ID, 2); err != nil {
if x, err := tcs.PollForNumberCreated(task.ID, 2); err != nil {
t.Fatalf("expected 2 runs queued, but got %d", len(x))
}
run6.Cancel() // 7 and 8 should be running.
@ -227,28 +209,26 @@ func TestScheduler_CreateNextRunOnTick(t *testing.T) {
func TestScheduler_LogStatisticsOnSuccess(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
rl := backend.NewInMemRunReaderWriter()
o := backend.NewScheduler(d, e, rl, 5, backend.WithLogger(zaptest.NewLogger(t)))
o := backend.NewScheduler(tcs, e, 5, backend.WithLogger(zaptest.NewLogger(t)))
o.Start(context.Background())
defer o.Stop()
const taskID = 0x12345
const orgID = 0x54321
task := &backend.StoreTask{
task := &platform.Task{
ID: taskID,
Org: orgID,
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 1,
EffectiveCron: "@every 1s",
LatestCompleted: 5,
OrganizationID: orgID,
Every: "1s",
LatestCompleted: "1970-01-01T00:00:05Z",
Flux: `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
d.SetTaskMeta(taskID, *meta)
if err := o.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := o.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
@ -269,14 +249,11 @@ func TestScheduler_LogStatisticsOnSuccess(t *testing.T) {
t.Fatal(err)
}
logs, err := rl.ListLogs(context.Background(), orgID, platform.LogFilter{Task: taskID, Run: &runID})
if err != nil {
t.Fatal(err)
}
run := tcs.FinishedRun(runID)
// For now, assume the stats line is the only line beginning with "{".
var statJSON string
for _, log := range logs {
for _, log := range run.Log {
if len(log.Message) > 0 && log.Message[0] == '{' {
statJSON = log.Message
break
@ -299,29 +276,27 @@ func TestScheduler_LogStatisticsOnSuccess(t *testing.T) {
func TestScheduler_Release(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5)
o := backend.NewScheduler(tcs, e, 5)
o.Start(context.Background())
defer o.Stop()
task := &backend.StoreTask{
task := &platform.Task{
ID: platform.ID(1),
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 99,
EffectiveCron: "@every 1s",
LatestCompleted: 5,
Every: "1s",
LatestCompleted: "1970-01-01T00:00:05Z",
Flux: `option task = {concurrency: 99, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
d.SetTaskMeta(task.ID, *meta)
if err := o.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := o.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
o.Tick(6)
o.Tick(7)
if n := len(d.CreatedFor(task.ID)); n != 2 {
if n := len(tcs.CreatedFor(task.ID)); n != 2 {
t.Fatalf("expected 2 runs queued, but got %d", n)
}
@ -329,7 +304,7 @@ func TestScheduler_Release(t *testing.T) {
t.Fatal(err)
}
if _, err := d.PollForNumberCreated(task.ID, 0); err != nil {
if _, err := tcs.PollForNumberCreated(task.ID, 0); err != nil {
t.Fatal(err)
}
}
@ -337,23 +312,21 @@ func TestScheduler_Release(t *testing.T) {
func TestScheduler_UpdateTask(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
s := backend.NewScheduler(d, e, backend.NopLogWriter{}, 3059, backend.WithLogger(zaptest.NewLogger(t)))
s := backend.NewScheduler(tcs, e, 3059, backend.WithLogger(zaptest.NewLogger(t)))
s.Start(context.Background())
defer s.Stop()
task := &backend.StoreTask{
task := &platform.Task{
ID: platform.ID(1),
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 1,
EffectiveCron: "* * * * *", // Every minute.
LatestCompleted: 3000,
Cron: "* * * * *",
LatestCompleted: "1970-01-01T00:50:00Z",
Flux: `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
d.SetTaskMeta(task.ID, *meta)
if err := s.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := s.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
@ -365,11 +338,11 @@ func TestScheduler_UpdateTask(t *testing.T) {
p[0].Finish(mock.NewRunResult(nil, false), nil)
meta.EffectiveCron = "0 * * * *"
meta.MaxConcurrency = 30
d.SetTaskMeta(task.ID, *meta)
task.Cron = "0 * * * *"
task.Flux = `option task = {concurrency: 50, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`
tcs.SetTask(task)
if err := s.UpdateTask(task, meta); err != nil {
if err := s.UpdateTask(context.Background(), task); err != nil {
t.Fatal(err)
}
@ -390,30 +363,41 @@ func TestScheduler_UpdateTask(t *testing.T) {
func TestScheduler_Queue(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 3059, backend.WithLogger(zaptest.NewLogger(t)))
o := backend.NewScheduler(tcs, e, 3059, backend.WithLogger(zaptest.NewLogger(t)))
o.Start(context.Background())
defer o.Stop()
task := &backend.StoreTask{
task := &platform.Task{
ID: platform.ID(1),
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 1,
EffectiveCron: "* * * * *", // Every minute.
LatestCompleted: 3000,
ManualRuns: []*backend.StoreTaskMetaManualRun{
{Start: 120, End: 240, LatestCompleted: 119, RequestedAt: 3001},
},
Cron: "* * * * *",
LatestCompleted: "1970-01-01T00:50:00Z",
Flux: `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
d.SetTaskMeta(task.ID, *meta)
if err := o.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
tcs.SetManualRuns([]*platform.Run{
&platform.Run{
ID: platform.ID(10),
TaskID: task.ID,
ScheduledFor: "1970-01-01T00:02:00Z",
},
&platform.Run{
ID: platform.ID(11),
TaskID: task.ID,
ScheduledFor: "1970-01-01T00:03:00Z",
}, &platform.Run{
ID: platform.ID(12),
TaskID: task.ID,
ScheduledFor: "1970-01-01T00:04:00Z",
},
})
if err := o.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
cs, err := d.PollForNumberCreated(task.ID, 1)
cs, err := tcs.PollForNumberCreated(task.ID, 1)
if err != nil {
t.Fatal(err)
}
@ -456,40 +440,64 @@ func TestScheduler_Queue(t *testing.T) {
// Tick the scheduler so the next natural run will happen once 180 finishes.
o.Tick(3062)
// Cancel 180. Next run should be 3060, the next natural schedule.
e.RunningFor(task.ID)[0].Cancel()
pollForRun(3060)
// Cancel the 3060 run; 240 should pick up.
// Cancel 180. Next run should be 240, manual runs get priority.
e.RunningFor(task.ID)[0].Cancel()
pollForRun(240)
// Cancel 240; jobs should be idle.
// Cancel the 240 run; 3060 should pick up.
e.RunningFor(task.ID)[0].Cancel()
pollForRun(3060)
// Cancel 3060; jobs should be idle.
e.RunningFor(task.ID)[0].Cancel()
if _, err := e.PollForNumberRunning(task.ID, 0); err != nil {
t.Fatal(err)
}
}
func pollForRunLog(t *testing.T, r backend.LogReader, taskID, runID, orgID platform.ID, exp string) {
// LogListener allows us to act as a middleware and see if specific logs have been written
type logListener struct {
mu sync.Mutex
backend.TaskControlService
logs map[string][]string
}
func newLogListener(tcs backend.TaskControlService) *logListener {
return &logListener{
TaskControlService: tcs,
logs: make(map[string][]string),
}
}
func (l *logListener) AddRunLog(ctx context.Context, taskID, runID platform.ID, when time.Time, log string) error {
l.mu.Lock()
defer l.mu.Unlock()
logs := l.logs[taskID.String()+runID.String()]
logs = append(logs, log)
l.logs[taskID.String()+runID.String()] = logs
return l.TaskControlService.AddRunLog(ctx, taskID, runID, when, log)
}
func pollForRunLog(t *testing.T, ll *logListener, taskID, runID platform.ID, exp string) {
t.Helper()
var logs []platform.Log
var err error
var logs []string
const maxAttempts = 50
for i := 0; i < maxAttempts; i++ {
if i != 0 {
time.Sleep(10 * time.Millisecond)
}
logs, err = r.ListLogs(context.Background(), orgID, platform.LogFilter{Task: taskID, Run: &runID})
if err != nil {
t.Fatal(err)
}
ll.mu.Lock()
logs = ll.logs[taskID.String()+runID.String()]
ll.mu.Unlock()
for _, log := range logs {
if log.Message == exp {
if log == exp {
return
}
}
@ -497,33 +505,75 @@ func pollForRunLog(t *testing.T, r backend.LogReader, taskID, runID, orgID platf
t.Logf("Didn't find message %q in logs:", exp)
for _, log := range logs {
t.Logf("\t%s", log.Message)
t.Logf("\t%s", log)
}
t.FailNow()
}
// runListener allows us to act as a middleware and see if specific states are updated
type runListener struct {
mu sync.Mutex
backend.TaskControlService
rs map[platform.ID][]*platform.Run
}
func newRunListener(tcs backend.TaskControlService) *runListener {
return &runListener{
TaskControlService: tcs,
rs: make(map[platform.ID][]*platform.Run),
}
}
func (l *runListener) UpdateRunState(ctx context.Context, taskID, runID platform.ID, when time.Time, state backend.RunStatus) error {
l.mu.Lock()
defer l.mu.Unlock()
runs, ok := l.rs[taskID]
if !ok {
runs = []*platform.Run{}
}
found := false
for _, run := range runs {
if run.ID == runID {
found = true
run.Status = state.String()
}
}
if !found {
runs = append(runs, &platform.Run{ID: runID, Status: state.String()})
}
l.rs[taskID] = runs
return l.TaskControlService.UpdateRunState(ctx, taskID, runID, when, state)
}
// pollForRunStatus tries a few times to find runs matching supplied conditions, before failing.
func pollForRunStatus(t *testing.T, r backend.LogReader, taskID, orgID platform.ID, expCount, expIndex int, expStatus string) {
func pollForRunStatus(t *testing.T, r *runListener, taskID platform.ID, expCount, expIndex int, expStatus string) {
t.Helper()
var runs []*platform.Run
var err error
const maxAttempts = 50
for i := 0; i < maxAttempts; i++ {
if i != 0 {
time.Sleep(10 * time.Millisecond)
}
runs, err = r.ListRuns(context.Background(), orgID, platform.RunFilter{Task: taskID})
if err != nil {
t.Fatal(err)
}
r.mu.Lock()
runs = r.rs[taskID]
r.mu.Unlock()
if len(runs) != expCount {
continue
}
// make sure we dont panic
if len(runs) < expIndex {
continue
}
if runs[expIndex].Status != expStatus {
continue
}
@ -542,26 +592,24 @@ func pollForRunStatus(t *testing.T, r backend.LogReader, taskID, orgID platform.
func TestScheduler_RunStatus(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
rl := backend.NewInMemRunReaderWriter()
s := backend.NewScheduler(d, e, rl, 5, backend.WithLogger(zaptest.NewLogger(t)))
rl := newRunListener(tcs)
s := backend.NewScheduler(rl, e, 5, backend.WithLogger(zaptest.NewLogger(t)))
s.Start(context.Background())
defer s.Stop()
// Claim a task that starts later.
task := &backend.StoreTask{
task := &platform.Task{
ID: platform.ID(1),
Org: 2,
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 99,
EffectiveCron: "@every 1s",
LatestCompleted: 5,
OrganizationID: platform.ID(2),
Every: "1s",
LatestCompleted: "1970-01-01T00:00:05Z",
Flux: `option task = {concurrency: 99, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
d.SetTaskMeta(task.ID, *meta)
if err := s.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := s.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
@ -571,7 +619,7 @@ func TestScheduler_RunStatus(t *testing.T) {
t.Fatal(err)
}
runs, err := rl.ListRuns(context.Background(), task.Org, platform.RunFilter{Task: task.ID})
runs, err := tcs.CurrentlyRunning(context.Background(), task.ID)
if err != nil {
t.Fatal(err)
}
@ -589,7 +637,7 @@ func TestScheduler_RunStatus(t *testing.T) {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, task.Org, 1, 0, backend.RunSuccess.String())
pollForRunStatus(t, rl, task.ID, 1, 0, backend.RunSuccess.String())
// Create a new run, but fail this time.
s.Tick(7)
@ -598,7 +646,7 @@ func TestScheduler_RunStatus(t *testing.T) {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, task.Org, 2, 1, backend.RunStarted.String())
pollForRunStatus(t, rl, task.ID, 2, 1, backend.RunStarted.String())
// Finish with failure to create the run.
promises[0].Finish(nil, errors.New("forced failure"))
@ -606,7 +654,7 @@ func TestScheduler_RunStatus(t *testing.T) {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, task.Org, 2, 1, backend.RunFail.String())
pollForRunStatus(t, rl, task.ID, 2, 1, backend.RunFail.String())
// Create a new run that starts but fails.
s.Tick(8)
@ -615,12 +663,12 @@ func TestScheduler_RunStatus(t *testing.T) {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, task.Org, 3, 2, backend.RunStarted.String())
pollForRunStatus(t, rl, task.ID, 3, 2, backend.RunStarted.String())
promises[0].Finish(mock.NewRunResult(errors.New("started but failed to finish properly"), false), nil)
if _, err := e.PollForNumberRunning(task.ID, 0); err != nil {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, task.Org, 3, 2, backend.RunFail.String())
pollForRunStatus(t, rl, task.ID, 3, 2, backend.RunFail.String())
// One more run, but cancel this time.
s.Tick(9)
@ -629,7 +677,7 @@ func TestScheduler_RunStatus(t *testing.T) {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, task.Org, 4, 3, backend.RunStarted.String())
pollForRunStatus(t, rl, task.ID, 4, 3, backend.RunStarted.String())
// Finish with failure.
promises[0].Cancel()
@ -637,31 +685,29 @@ func TestScheduler_RunStatus(t *testing.T) {
t.Fatal(err)
}
pollForRunStatus(t, rl, task.ID, task.Org, 4, 3, backend.RunCanceled.String())
pollForRunStatus(t, rl, task.ID, 4, 3, backend.RunCanceled.String())
}
func TestScheduler_RunFailureCleanup(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
rl := backend.NewInMemRunReaderWriter()
s := backend.NewScheduler(d, e, rl, 5, backend.WithLogger(zaptest.NewLogger(t)))
ll := newLogListener(tcs)
s := backend.NewScheduler(ll, e, 5, backend.WithLogger(zaptest.NewLogger(t)))
s.Start(context.Background())
defer s.Stop()
// Task with concurrency 1 should continue after one run fails.
task := &backend.StoreTask{
task := &platform.Task{
ID: platform.ID(1),
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 1,
EffectiveCron: "@every 1s",
LatestCompleted: 5,
Every: "1s",
LatestCompleted: "1970-01-01T00:00:05Z",
Flux: `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
d.SetTaskMeta(task.ID, *meta)
if err := s.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := s.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
@ -676,7 +722,7 @@ func TestScheduler_RunFailureCleanup(t *testing.T) {
if _, err := e.PollForNumberRunning(task.ID, 0); err != nil {
t.Fatal(err)
}
pollForRunLog(t, rl, task.ID, promises[0].Run().RunID, task.Org, "Waiting for execution result: forced failure")
pollForRunLog(t, ll, task.ID, promises[0].Run().RunID, "Waiting for execution result: forced failure")
// Should continue even if max concurrency == 1.
// This run will start and then fail.
@ -690,10 +736,10 @@ func TestScheduler_RunFailureCleanup(t *testing.T) {
if _, err := e.PollForNumberRunning(task.ID, 0); err != nil {
t.Fatal(err)
}
pollForRunLog(t, rl, task.ID, promises[0].Run().RunID, task.Org, "Run failed to execute: started but failed to finish properly")
pollForRunLog(t, ll, task.ID, promises[0].Run().RunID, "Run failed to execute: started but failed to finish properly")
// Fail to execute next run.
if n := d.TotalRunsCreatedForTask(task.ID); n != 2 {
if n := tcs.TotalRunsCreatedForTask(task.ID); n != 2 {
t.Fatalf("should have created 2 runs so far, got %d", n)
}
e.FailNextCallToExecute(errors.New("forced failure on Execute"))
@ -702,7 +748,7 @@ func TestScheduler_RunFailureCleanup(t *testing.T) {
const attempts = 50
for i := 0; i < attempts; i++ {
time.Sleep(2 * time.Millisecond)
n := d.TotalRunsCreatedForTask(task.ID)
n := tcs.TotalRunsCreatedForTask(task.ID)
if n == 3 {
break
}
@ -712,11 +758,11 @@ func TestScheduler_RunFailureCleanup(t *testing.T) {
}
}
// We don't have a good hook to get the run ID right now, so list the runs and assume the final one is ours.
runs, err := rl.ListRuns(context.Background(), task.Org, platform.RunFilter{Task: task.ID})
runs := tcs.FinishedRuns()
if err != nil {
t.Fatal(err)
}
pollForRunLog(t, rl, task.ID, runs[len(runs)-1].ID, task.Org, "Run failed to begin execution: forced failure on Execute")
pollForRunLog(t, ll, task.ID, runs[len(runs)-1].ID, "Run failed to begin execution: forced failure on Execute")
// One more tick just to ensure that we can keep going after this type of failure too.
s.Tick(9)
@ -729,9 +775,9 @@ func TestScheduler_RunFailureCleanup(t *testing.T) {
func TestScheduler_Metrics(t *testing.T) {
t.Parallel()
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
s := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5)
s := backend.NewScheduler(tcs, e, 5)
s.Start(context.Background())
defer s.Stop()
@ -741,17 +787,15 @@ func TestScheduler_Metrics(t *testing.T) {
reg.MustRegister(s.PrometheusCollectors()...)
// Claim a task that starts later.
task := &backend.StoreTask{
task := &platform.Task{
ID: platform.ID(1),
}
meta := &backend.StoreTaskMeta{
MaxConcurrency: 99,
EffectiveCron: "@every 1s",
LatestCompleted: 5,
Every: "1s",
LatestCompleted: "1970-01-01T00:00:05Z",
Flux: `option task = {concurrency: 99, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
}
d.SetTaskMeta(task.ID, *meta)
if err := s.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := s.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
@ -871,7 +915,7 @@ func TestScheduler_Stop(t *testing.T) {
t.Parallel()
e := &fakeWaitExecutor{wait: make(chan struct{})}
o := backend.NewScheduler(mock.NewDesiredState(), e, backend.NopLogWriter{}, 4, backend.WithLogger(zaptest.NewLogger(t)))
o := backend.NewScheduler(mock.NewTaskControlService(), e, 4, backend.WithLogger(zaptest.NewLogger(t)))
o.Start(context.Background())
stopped := make(chan struct{})
@ -904,33 +948,30 @@ func TestScheduler_WithTicker(t *testing.T) {
defer cancel()
tickFreq := 100 * time.Millisecond
d := mock.NewDesiredState()
tcs := mock.NewTaskControlService()
e := mock.NewExecutor()
o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5, backend.WithLogger(zaptest.NewLogger(t)), backend.WithTicker(ctx, tickFreq))
o := backend.NewScheduler(tcs, e, 5, backend.WithLogger(zaptest.NewLogger(t)), backend.WithTicker(ctx, tickFreq))
o.Start(ctx)
defer o.Stop()
task := &backend.StoreTask{
createdAt := time.Now()
task := &platform.Task{
ID: platform.ID(1),
}
createdAt := time.Now().Unix()
meta := &backend.StoreTaskMeta{
MaxConcurrency: 5,
EffectiveCron: "@every 1s",
LatestCompleted: createdAt,
Every: "1s",
Flux: `option task = {concurrency: 5, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
LatestCompleted: createdAt.Format(time.RFC3339Nano),
}
d.SetTaskMeta(task.ID, *meta)
if err := o.ClaimTask(task, meta); err != nil {
tcs.SetTask(task)
if err := o.ClaimTask(context.Background(), task); err != nil {
t.Fatal(err)
}
for time.Now().Unix() == createdAt {
for time.Now().Unix() == createdAt.Unix() {
time.Sleep(tickFreq + 10*time.Millisecond)
}
if x, err := d.PollForNumberCreated(task.ID, 1); err != nil {
if x, err := tcs.PollForNumberCreated(task.ID, 1); err != nil {
t.Fatalf("expected 1 run queued, but got %d", len(x))
}
}

View File

@ -12,6 +12,7 @@ import (
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/snowflake"
"github.com/influxdata/influxdb/task/backend"
"github.com/influxdata/influxdb/task/options"
)
var idGen = snowflake.NewIDGenerator()
@ -522,8 +523,15 @@ from(bucket:"test") |> range(start:-1h)`
if meta.EffectiveCron != "* * * * *" {
t.Fatalf("unexpected cron stored in meta: %q", meta.EffectiveCron)
}
if time.Duration(meta.Offset)*time.Second != 5*time.Second {
duration := options.Duration{}
if err := duration.Parse(meta.Offset); err != nil {
t.Fatal(err)
}
dur, err := duration.DurationFrom(time.Now()) // is time.Now() the best option here
if err != nil {
t.Fatal(err)
}
if dur != 5*time.Second {
t.Fatalf("unexpected delay stored in meta: %v", meta.Offset)
}
@ -683,7 +691,12 @@ from(bucket:"test") |> range(start:-1h)`
t.Fatalf("unexpected cron stored in meta: %q", meta.EffectiveCron)
}
if time.Duration(meta.Offset)*time.Second != 5*time.Second {
duration := options.Duration{}
if err := duration.Parse(meta.Offset); err != nil {
t.Fatal(err)
}
if duration.String() != "5s" {
t.Fatalf("unexpected delay stored in meta: %v", meta.Offset)
}

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/task/options"
)
// TaskControlService is a low-level controller interface, intended to be passed to
@ -16,6 +17,9 @@ type TaskControlService interface {
// If the run's ScheduledFor would be later than the passed-in now, CreateNextRun returns a RunNotYetDueError.
CreateNextRun(ctx context.Context, taskID influxdb.ID, now int64) (RunCreation, error)
CurrentlyRunning(ctx context.Context, taskID influxdb.ID) ([]*influxdb.Run, error)
ManualRuns(ctx context.Context, taskID influxdb.ID) ([]*influxdb.Run, error)
// FinishRun removes runID from the list of running tasks and if its `ScheduledFor` is later then last completed update it.
FinishRun(ctx context.Context, taskID, runID influxdb.ID) (*influxdb.Run, error)
@ -29,3 +33,213 @@ type TaskControlService interface {
// AddRunLog adds a log line to the run.
AddRunLog(ctx context.Context, taskID, runID influxdb.ID, when time.Time, log string) error
}
// TaskControlAdaptor creates a TaskControlService for the older TaskStore system.
// TODO(lh): remove task control adaptor when we transition away from Store.
func TaskControlAdaptor(s Store, lw LogWriter, lr LogReader) TaskControlService {
	return &taskControlAdaptor{
		s:  s,
		lw: lw,
		lr: lr,
	}
}
// taskControlAdaptor adapts a Store and log readers and writers to implement the task control service.
type taskControlAdaptor struct {
	s  Store     // old-style task store backing task/meta lookups and run creation
	lw LogWriter // destination for run state updates and run logs
	lr LogReader // used to look up existing runs by ID
}
// CreateNextRun delegates directly to the underlying Store.
func (tcs *taskControlAdaptor) CreateNextRun(ctx context.Context, taskID influxdb.ID, now int64) (RunCreation, error) {
	return tcs.s.CreateNextRun(ctx, taskID, now)
}
// FinishRun marks the run as finished in the underlying Store.
// It always returns a nil *influxdb.Run: the old Store API does not surface
// the finished run, so callers cannot inspect it through this adaptor.
func (tcs *taskControlAdaptor) FinishRun(ctx context.Context, taskID, runID influxdb.ID) (*influxdb.Run, error) {
	// Once we completely switch over to the new system we can look at the returned run in the tests.
	task, err := tcs.s.FindTaskByID(ctx, taskID)
	if err != nil {
		return nil, err
	}
	// Result and error deliberately discarded; this lookup only exercises the
	// log-reader path while the old system is still in place.
	// NOTE(review): confirm the discard is intentional before removing the call.
	tcs.lr.FindRunByID(ctx, task.Org, runID)
	return nil, tcs.s.FinishRun(ctx, taskID, runID)
}
// CurrentlyRunning converts the task meta's CurrentlyRunning entries into
// influxdb.Run values with RFC3339 UTC timestamps.
func (tcs *taskControlAdaptor) CurrentlyRunning(ctx context.Context, taskID influxdb.ID) ([]*influxdb.Run, error) {
	t, m, err := tcs.s.FindTaskByIDWithMeta(ctx, taskID)
	if err != nil {
		return nil, err
	}
	runs := make([]*influxdb.Run, len(m.CurrentlyRunning))
	for i, cr := range m.CurrentlyRunning {
		run := &influxdb.Run{
			ID:           influxdb.ID(cr.RunID),
			TaskID:       t.ID,
			ScheduledFor: time.Unix(cr.Now, 0).UTC().Format(time.RFC3339),
		}
		// A zero RequestedAt means the run was not manually requested.
		if cr.RequestedAt != 0 {
			run.RequestedAt = time.Unix(cr.RequestedAt, 0).UTC().Format(time.RFC3339)
		}
		runs[i] = run
	}
	return runs, nil
}
// ManualRuns converts the task meta's queued ManualRuns entries into
// influxdb.Run values with RFC3339 UTC timestamps.
func (tcs *taskControlAdaptor) ManualRuns(ctx context.Context, taskID influxdb.ID) ([]*influxdb.Run, error) {
	t, m, err := tcs.s.FindTaskByIDWithMeta(ctx, taskID)
	if err != nil {
		return nil, err
	}
	var rtn = make([]*influxdb.Run, len(m.ManualRuns))
	for i, cr := range m.ManualRuns {
		rtn[i] = &influxdb.Run{
			ID:           influxdb.ID(cr.RunID),
			TaskID:       t.ID,
			ScheduledFor: time.Unix(cr.Start, 0).UTC().Format(time.RFC3339),
		}
		if cr.RequestedAt != 0 {
			// Format in UTC for consistency with CurrentlyRunning; previously
			// this used local time, making the output host-dependent.
			rtn[i].RequestedAt = time.Unix(cr.RequestedAt, 0).UTC().Format(time.RFC3339)
		}
	}
	return rtn, nil
}
// NextDueRun returns the next due run time computed from the task's stored meta.
func (tcs *taskControlAdaptor) NextDueRun(ctx context.Context, taskID influxdb.ID) (int64, error) {
	m, err := tcs.s.FindTaskMetaByID(ctx, taskID)
	if err != nil {
		return 0, err
	}
	return m.NextDueRun()
}
// UpdateRunState records a run state change by writing through the LogWriter.
// The run's scheduled/requested times are resolved from the log store when the
// run is already recorded there; otherwise it falls back to the task meta's
// CurrentlyRunning entries (the old system may not have the run in the log
// store until after the first state update).
func (tcs *taskControlAdaptor) UpdateRunState(ctx context.Context, taskID, runID influxdb.ID, when time.Time, state RunStatus) error {
	st, m, err := tcs.s.FindTaskByIDWithMeta(ctx, taskID)
	if err != nil {
		return err
	}

	var (
		schedFor, reqAt time.Time
	)
	// check the log store; a lookup error is treated the same as "not found"
	// and handled by the fallback below.
	r, err := tcs.lr.FindRunByID(ctx, st.Org, runID)
	if err == nil && r != nil {
		schedFor, err = time.Parse(time.RFC3339, r.ScheduledFor)
		if err != nil {
			return err
		}
		if r.RequestedAt != "" {
			reqAt, err = time.Parse(time.RFC3339, r.RequestedAt)
			if err != nil {
				return err
			}
		}
	}

	// in the old system the log store may not have the run until after the first
	// state update, so we will need to pull the currently running.
	if schedFor.IsZero() {
		for _, cr := range m.CurrentlyRunning {
			if influxdb.ID(cr.RunID) == runID {
				schedFor = time.Unix(cr.Now, 0)
				if cr.RequestedAt != 0 {
					reqAt = time.Unix(cr.RequestedAt, 0)
				}
			}
		}
	}

	rlb := RunLogBase{
		Task:            st,
		RunID:           runID,
		RunScheduledFor: schedFor.Unix(),
	}
	// RequestedAt is only meaningful for manually requested runs; leave it
	// zero otherwise.
	if !reqAt.IsZero() {
		rlb.RequestedAt = reqAt.Unix()
	}

	if err := tcs.lw.UpdateRunState(ctx, rlb, when, state); err != nil {
		return err
	}
	return nil
}
// AddRunLog appends a log line to the run by writing through the LogWriter.
// Scheduled/requested times are resolved the same way as in UpdateRunState:
// prefer the log store's copy of the run, falling back to the task meta's
// CurrentlyRunning entries when the log store does not have the run yet.
func (tcs *taskControlAdaptor) AddRunLog(ctx context.Context, taskID, runID influxdb.ID, when time.Time, log string) error {
	st, m, err := tcs.s.FindTaskByIDWithMeta(ctx, taskID)
	if err != nil {
		return err
	}

	var (
		schedFor, reqAt time.Time
	)
	// A lookup error is treated the same as "not found" and handled by the
	// fallback below.
	r, err := tcs.lr.FindRunByID(ctx, st.Org, runID)
	if err == nil && r != nil {
		schedFor, err = time.Parse(time.RFC3339, r.ScheduledFor)
		if err != nil {
			return err
		}
		if r.RequestedAt != "" {
			reqAt, err = time.Parse(time.RFC3339, r.RequestedAt)
			if err != nil {
				return err
			}
		}
	}

	// in the old system the log store may not have the run until after the first
	// state update, so we will need to pull the currently running.
	if schedFor.IsZero() {
		for _, cr := range m.CurrentlyRunning {
			if influxdb.ID(cr.RunID) == runID {
				schedFor = time.Unix(cr.Now, 0)
				if cr.RequestedAt != 0 {
					reqAt = time.Unix(cr.RequestedAt, 0)
				}
			}
		}
	}

	rlb := RunLogBase{
		Task:            st,
		RunID:           runID,
		RunScheduledFor: schedFor.Unix(),
	}
	// RequestedAt is only meaningful for manually requested runs.
	if !reqAt.IsZero() {
		rlb.RequestedAt = reqAt.Unix()
	}

	return tcs.lw.AddRunLog(ctx, rlb, when, log)
}
// ToInfluxTask converts a backend task and meta to an influxdb.Task.
// m may be nil, in which case all meta-derived fields (Status, timestamps,
// AuthorizationID) are left at their zero values.
// TODO(lh): remove this when we no longer need the backend store.
func ToInfluxTask(t *StoreTask, m *StoreTaskMeta) (*influxdb.Task, error) {
	opts, err := options.FromScript(t.Script)
	if err != nil {
		return nil, err
	}

	pt := &influxdb.Task{
		ID:             t.ID,
		OrganizationID: t.Org,
		Name:           t.Name,
		Flux:           t.Script,
		Cron:           opts.Cron,
		// AuthorizationID intentionally not set here: the original code read
		// m.AuthorizationID in this literal, which panicked when m was nil
		// even though the nil-check below shows nil is an expected input.
		// It is populated from m inside the nil-guard instead.
	}
	if !opts.Every.IsZero() {
		pt.Every = opts.Every.String()
	}
	if opts.Offset != nil && !opts.Offset.IsZero() {
		pt.Offset = opts.Offset.String()
	}
	if m != nil {
		pt.Status = string(m.Status)
		pt.LatestCompleted = time.Unix(m.LatestCompleted, 0).UTC().Format(time.RFC3339)
		if m.CreatedAt != 0 {
			pt.CreatedAt = time.Unix(m.CreatedAt, 0).UTC().Format(time.RFC3339)
		}
		if m.UpdatedAt != 0 {
			pt.UpdatedAt = time.Unix(m.UpdatedAt, 0).UTC().Format(time.RFC3339)
		}
		pt.AuthorizationID = influxdb.ID(m.AuthorizationID)
	}
	return pt, nil
}

View File

@ -3,16 +3,13 @@ package mock
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/influxdata/flux"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/task/backend"
scheduler "github.com/influxdata/influxdb/task/backend"
"go.uber.org/zap"
)
@ -22,28 +19,19 @@ type Scheduler struct {
lastTick int64
claims map[string]*Task
meta map[string]backend.StoreTaskMeta
claims map[platform.ID]*platform.Task
createChan chan *Task
releaseChan chan *Task
updateChan chan *Task
createChan chan *platform.Task
releaseChan chan *platform.Task
updateChan chan *platform.Task
claimError error
releaseError error
}
// Task is a mock implementation of a task.
type Task struct {
Script string
StartExecution int64
ConcurrencyLimit uint8
}
func NewScheduler() *Scheduler {
return &Scheduler{
claims: map[string]*Task{},
meta: map[string]backend.StoreTaskMeta{},
claims: map[platform.ID]*platform.Task{},
}
}
@ -60,7 +48,7 @@ func (s *Scheduler) Start(context.Context) {}
func (s *Scheduler) Stop() {}
func (s *Scheduler) ClaimTask(task *backend.StoreTask, meta *backend.StoreTaskMeta) error {
func (s *Scheduler) ClaimTask(_ context.Context, task *platform.Task) error {
if s.claimError != nil {
return s.claimError
}
@ -68,40 +56,33 @@ func (s *Scheduler) ClaimTask(task *backend.StoreTask, meta *backend.StoreTaskMe
s.Lock()
defer s.Unlock()
_, ok := s.claims[task.ID.String()]
_, ok := s.claims[task.ID]
if ok {
return backend.ErrTaskAlreadyClaimed
}
s.meta[task.ID.String()] = *meta
t := &Task{Script: task.Script, StartExecution: meta.LatestCompleted, ConcurrencyLimit: uint8(meta.MaxConcurrency)}
s.claims[task.ID.String()] = t
s.claims[task.ID] = task
if s.createChan != nil {
s.createChan <- t
s.createChan <- task
}
return nil
}
func (s *Scheduler) UpdateTask(task *backend.StoreTask, meta *backend.StoreTaskMeta) error {
func (s *Scheduler) UpdateTask(_ context.Context, task *platform.Task) error {
s.Lock()
defer s.Unlock()
_, ok := s.claims[task.ID.String()]
_, ok := s.claims[task.ID]
if !ok {
return backend.ErrTaskNotClaimed
}
s.meta[task.ID.String()] = *meta
t := &Task{Script: task.Script, StartExecution: meta.LatestCompleted, ConcurrencyLimit: uint8(meta.MaxConcurrency)}
s.claims[task.ID.String()] = t
s.claims[task.ID] = task
if s.updateChan != nil {
s.updateChan <- t
s.updateChan <- task
}
return nil
@ -115,7 +96,7 @@ func (s *Scheduler) ReleaseTask(taskID platform.ID) error {
s.Lock()
defer s.Unlock()
t, ok := s.claims[taskID.String()]
t, ok := s.claims[taskID]
if !ok {
return backend.ErrTaskNotClaimed
}
@ -123,28 +104,27 @@ func (s *Scheduler) ReleaseTask(taskID platform.ID) error {
s.releaseChan <- t
}
delete(s.claims, taskID.String())
delete(s.meta, taskID.String())
delete(s.claims, taskID)
return nil
}
func (s *Scheduler) TaskFor(id platform.ID) *Task {
func (s *Scheduler) TaskFor(id platform.ID) *platform.Task {
s.Lock()
defer s.Unlock()
return s.claims[id.String()]
return s.claims[id]
}
func (s *Scheduler) TaskCreateChan() <-chan *Task {
s.createChan = make(chan *Task, 10)
func (s *Scheduler) TaskCreateChan() <-chan *platform.Task {
s.createChan = make(chan *platform.Task, 10)
return s.createChan
}
func (s *Scheduler) TaskReleaseChan() <-chan *Task {
s.releaseChan = make(chan *Task, 10)
func (s *Scheduler) TaskReleaseChan() <-chan *platform.Task {
s.releaseChan = make(chan *platform.Task, 10)
return s.releaseChan
}
func (s *Scheduler) TaskUpdateChan() <-chan *Task {
s.updateChan = make(chan *Task, 10)
func (s *Scheduler) TaskUpdateChan() <-chan *platform.Task {
s.updateChan = make(chan *platform.Task, 10)
return s.updateChan
}
@ -162,134 +142,6 @@ func (s *Scheduler) CancelRun(_ context.Context, taskID, runID platform.ID) erro
return nil
}
// DesiredState is a mock implementation of DesiredState (used by NewScheduler).
type DesiredState struct {
mu sync.Mutex
// Map of stringified task ID to last ID used for run.
runIDs map[string]uint64
// Map of stringified, concatenated task and platform ID, to runs that have been created.
created map[string]backend.QueuedRun
// Map of stringified task ID to task meta.
meta map[string]backend.StoreTaskMeta
// Map of task ID to total number of runs created for that task.
totalRunsCreated map[platform.ID]int
}
var _ backend.DesiredState = (*DesiredState)(nil)
func NewDesiredState() *DesiredState {
return &DesiredState{
runIDs: make(map[string]uint64),
created: make(map[string]backend.QueuedRun),
meta: make(map[string]backend.StoreTaskMeta),
totalRunsCreated: make(map[platform.ID]int),
}
}
// SetTaskMeta sets the task meta for the given task ID.
// SetTaskMeta must be called before CreateNextRun, for a given task ID.
func (d *DesiredState) SetTaskMeta(taskID platform.ID, meta backend.StoreTaskMeta) {
d.mu.Lock()
defer d.mu.Unlock()
d.meta[taskID.String()] = meta
}
// CreateNextRun creates the next run for the given task.
// Refer to the documentation for SetTaskPeriod to understand how the times are determined.
func (d *DesiredState) CreateNextRun(_ context.Context, taskID platform.ID, now int64) (backend.RunCreation, error) {
d.mu.Lock()
defer d.mu.Unlock()
if !taskID.Valid() {
return backend.RunCreation{}, errors.New("invalid task id")
}
tid := taskID.String()
meta, ok := d.meta[tid]
if !ok {
panic(fmt.Sprintf("meta not set for task with ID %s", tid))
}
makeID := func() (platform.ID, error) {
d.runIDs[tid]++
runID := platform.ID(d.runIDs[tid])
return runID, nil
}
rc, err := meta.CreateNextRun(now, makeID)
if err != nil {
return backend.RunCreation{}, err
}
d.meta[tid] = meta
rc.Created.TaskID = taskID
d.created[tid+rc.Created.RunID.String()] = rc.Created
d.totalRunsCreated[taskID]++
return rc, nil
}
func (d *DesiredState) FinishRun(_ context.Context, taskID, runID platform.ID) error {
d.mu.Lock()
defer d.mu.Unlock()
tid := taskID.String()
rid := runID.String()
m := d.meta[tid]
if !m.FinishRun(runID) {
var knownIDs []string
for _, r := range m.CurrentlyRunning {
knownIDs = append(knownIDs, platform.ID(r.RunID).String())
}
return fmt.Errorf("unknown run ID %s; known run IDs: %s", rid, strings.Join(knownIDs, ", "))
}
d.meta[tid] = m
delete(d.created, tid+rid)
return nil
}
func (d *DesiredState) CreatedFor(taskID platform.ID) []backend.QueuedRun {
d.mu.Lock()
defer d.mu.Unlock()
var qrs []backend.QueuedRun
for _, qr := range d.created {
if qr.TaskID == taskID {
qrs = append(qrs, qr)
}
}
return qrs
}
// TotalRunsCreatedForTask returns the number of runs created for taskID.
func (d *DesiredState) TotalRunsCreatedForTask(taskID platform.ID) int {
d.mu.Lock()
defer d.mu.Unlock()
return d.totalRunsCreated[taskID]
}
// PollForNumberCreated blocks for a small amount of time waiting for exactly the given count of created and unfinished runs for the given task ID.
// If the expected number isn't found in time, it returns an error.
//
// Because the scheduler and executor do a lot of state changes asynchronously, this is useful in test.
func (d *DesiredState) PollForNumberCreated(taskID platform.ID, count int) ([]scheduler.QueuedRun, error) {
const numAttempts = 50
actualCount := 0
var created []scheduler.QueuedRun
for i := 0; i < numAttempts; i++ {
time.Sleep(2 * time.Millisecond) // we sleep even on first so it becomes more likely that we catch when too many are produced.
created = d.CreatedFor(taskID)
actualCount = len(created)
if actualCount == count {
return created, nil
}
}
return created, fmt.Errorf("did not see count of %d created run(s) for task with ID %s in time, instead saw %d", count, taskID.String(), actualCount) // we return created anyways, to make it easier to debug
}
type Executor struct {
mu sync.Mutex
hangingFor time.Duration
@ -385,7 +237,7 @@ func (e *Executor) PollForNumberRunning(taskID platform.ID, count int) ([]*RunPr
return running, nil
}
}
return nil, fmt.Errorf("did not see count of %d running task(s) for ID %s in time; last count was %d", count, taskID.String(), len(running))
return nil, fmt.Errorf("did not see count of %d running task(s) for ID %s in time; last count was %d", count, taskID, len(running))
}
// RunPromise is a mock RunPromise.

View File

@ -0,0 +1,346 @@
package mock
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"time"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/snowflake"
"github.com/influxdata/influxdb/task/backend"
cron "gopkg.in/robfig/cron.v2"
)
var idgen = snowflake.NewDefaultIDGenerator()
// TaskControlService is a mock implementation of TaskControlService (used by NewScheduler).
type TaskControlService struct {
	mu sync.Mutex
	// Map of task ID to map of run ID to the run itself (runs in flight).
	runs map[influxdb.ID]map[influxdb.ID]*influxdb.Run
	// Map of concatenated task-ID and run-ID strings to runs that have been created.
	created map[string]backend.QueuedRun
	// Map of task ID to the registered task (see SetTask).
	tasks      map[influxdb.ID]*influxdb.Task
	manualRuns []*influxdb.Run
	// Map of task ID to total number of runs created for that task.
	totalRunsCreated map[influxdb.ID]int
	finishedRuns     map[influxdb.ID]*influxdb.Run
}

// Compile-time check that the mock satisfies the real interface.
var _ backend.TaskControlService = (*TaskControlService)(nil)

// NewTaskControlService returns a TaskControlService with all internal maps initialized.
func NewTaskControlService() *TaskControlService {
	return &TaskControlService{
		runs:             make(map[influxdb.ID]map[influxdb.ID]*influxdb.Run),
		finishedRuns:     make(map[influxdb.ID]*influxdb.Run),
		tasks:            make(map[influxdb.ID]*influxdb.Task),
		created:          make(map[string]backend.QueuedRun),
		totalRunsCreated: make(map[influxdb.ID]int),
	}
}
// SetTask registers task under its ID.
// SetTask must be called before CreateNextRun, for a given task ID.
func (d *TaskControlService) SetTask(task *influxdb.Task) {
	d.mu.Lock()
	d.tasks[task.ID] = task
	d.mu.Unlock()
}
// SetManualRuns replaces the queue of manual runs that CreateNextRun drains.
func (d *TaskControlService) SetManualRuns(runs []*influxdb.Run) {
	// Take the lock: every reader of manualRuns (ManualRuns, CreateNextRun)
	// holds d.mu, so the previous unguarded write was a data race.
	d.mu.Lock()
	defer d.mu.Unlock()
	d.manualRuns = runs
}
// CreateNextRun creates the next run for the given task.
// Queued manual runs (see SetManualRuns) are drained first, one per call;
// otherwise the next scheduled run is computed from the task's cron/every
// settings via createNextRun.
// Refer to the documentation for SetTaskPeriod to understand how the times are determined.
func (d *TaskControlService) CreateNextRun(ctx context.Context, taskID influxdb.ID, now int64) (backend.RunCreation, error) {
	d.mu.Lock()
	defer d.mu.Unlock()

	if !taskID.Valid() {
		return backend.RunCreation{}, errors.New("invalid task id")
	}
	tid := taskID

	task, ok := d.tasks[tid]
	if !ok {
		panic(fmt.Sprintf("meta not set for task with ID %s", tid))
	}

	if len(d.manualRuns) != 0 {
		// Pop the oldest queued manual run and register it as in flight.
		run := d.manualRuns[0]
		d.manualRuns = d.manualRuns[1:]
		runs, ok := d.runs[tid]
		if !ok {
			runs = make(map[influxdb.ID]*influxdb.Run)
		}
		runs[run.ID] = run
		d.runs[task.ID] = runs
		now, err := time.Parse(time.RFC3339, run.ScheduledFor)
		next, _ := d.nextDueRun(ctx, taskID)
		if err == nil {
			rc := backend.RunCreation{
				Created: backend.QueuedRun{
					TaskID: task.ID,
					RunID:  run.ID,
					Now:    now.Unix(),
				},
				NextDue:  next,
				HasQueue: len(d.manualRuns) != 0,
			}
			d.created[tid.String()+rc.Created.RunID.String()] = rc.Created
			d.totalRunsCreated[taskID]++
			return rc, nil
		}
		// NOTE(review): if run.ScheduledFor fails to parse we fall through to
		// scheduled-run creation even though the manual run was already
		// registered in d.runs above — confirm this is intended.
	}

	rc, err := d.createNextRun(task, now)
	if err != nil {
		return backend.RunCreation{}, err
	}
	rc.Created.TaskID = taskID
	d.created[tid.String()+rc.Created.RunID.String()] = rc.Created
	d.totalRunsCreated[taskID]++
	return rc, nil
}
// createNextRun computes and registers the next scheduled run for task.
// now is a Unix timestamp in seconds; when the next run (schedule + offset)
// is still in the future a backend.RunNotYetDueError is returned.
// Callers must hold t.mu.
func (t *TaskControlService) createNextRun(task *influxdb.Task, now int64) (backend.RunCreation, error) {
	sch, err := cron.Parse(task.EffectiveCron())
	if err != nil {
		return backend.RunCreation{}, err
	}

	// The schedule advances from the latest completed or already-created run.
	latest := int64(0)
	lt, err := time.Parse(time.RFC3339, task.LatestCompleted)
	if err == nil {
		latest = lt.Unix()
	}

	for _, r := range t.runs[task.ID] {
		rt, err := time.Parse(time.RFC3339, r.ScheduledFor)
		if err == nil {
			if rt.Unix() > latest {
				latest = rt.Unix()
			}
		}
	}

	nextScheduled := sch.Next(time.Unix(latest, 0))
	nextScheduledUnix := nextScheduled.Unix()

	// Keep the offset in whole seconds: it is added to Unix-second timestamps
	// below. The previous code used toff.Nanoseconds(), mixing nanoseconds
	// into second-based arithmetic and wildly inflating due times whenever a
	// task had a non-empty Offset.
	offset := int64(0)
	if task.Offset != "" {
		toff, err := time.ParseDuration(task.Offset)
		if err == nil {
			offset = int64(toff.Seconds())
		}
	}
	if dueAt := nextScheduledUnix + offset; dueAt > now {
		return backend.RunCreation{}, backend.RunNotYetDueError{DueAt: dueAt}
	}

	runID := idgen.ID()
	runs, ok := t.runs[task.ID]
	if !ok {
		runs = make(map[influxdb.ID]*influxdb.Run)
	}
	runs[runID] = &influxdb.Run{
		ID:           runID,
		ScheduledFor: nextScheduled.Format(time.RFC3339),
	}
	t.runs[task.ID] = runs

	return backend.RunCreation{
		Created: backend.QueuedRun{
			RunID: runID,
			Now:   nextScheduledUnix,
		},
		NextDue:  sch.Next(nextScheduled).Unix() + offset,
		HasQueue: false,
	}, nil
}
// FinishRun removes runID from the task's running set, records it as
// finished, and advances the task's LatestCompleted when the run was
// scheduled later than the current value.
func (d *TaskControlService) FinishRun(_ context.Context, taskID, runID influxdb.ID) (*influxdb.Run, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	tid := taskID
	rid := runID

	r, ok := d.runs[tid][rid]
	if !ok {
		// Previously an unknown run ID produced a nil-pointer dereference on
		// r.ScheduledFor; return a descriptive error instead.
		return nil, fmt.Errorf("finish run: run %s not found for task %s", rid, tid)
	}
	delete(d.runs[tid], rid)

	t := d.tasks[tid]
	schedFor, err := time.Parse(time.RFC3339, r.ScheduledFor)
	if err != nil {
		return nil, err
	}
	var latest time.Time
	if t.LatestCompleted != "" {
		latest, err = time.Parse(time.RFC3339, t.LatestCompleted)
		if err != nil {
			return nil, err
		}
	}

	if schedFor.After(latest) {
		t.LatestCompleted = r.ScheduledFor
	}
	d.finishedRuns[rid] = r
	delete(d.created, tid.String()+rid.String())
	return r, nil
}
// CurrentlyRunning returns the runs currently registered for taskID.
// The result is always non-nil, even when no runs are in flight.
func (t *TaskControlService) CurrentlyRunning(ctx context.Context, taskID influxdb.ID) ([]*influxdb.Run, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	running := make([]*influxdb.Run, 0, len(t.runs[taskID]))
	for _, r := range t.runs[taskID] {
		running = append(running, r)
	}
	return running, nil
}
// ManualRuns returns the runs queued via SetManualRuns, or an empty
// (non-nil) slice when none have been set.
func (t *TaskControlService) ManualRuns(ctx context.Context, taskID influxdb.ID) ([]*influxdb.Run, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.manualRuns == nil {
		return []*influxdb.Run{}, nil
	}
	return t.manualRuns, nil
}
// NextDueRun returns the Unix timestamp of when the next call to CreateNextRun will be ready.
// The returned timestamp reflects the task's offset, so it does not necessarily exactly match the schedule time.
// It takes the lock and delegates to nextDueRun for the actual computation.
func (d *TaskControlService) NextDueRun(ctx context.Context, taskID influxdb.ID) (int64, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.nextDueRun(ctx, taskID)
}
// nextDueRun computes the next due time (schedule + offset, Unix seconds)
// without taking the lock. Callers must hold d.mu.
func (d *TaskControlService) nextDueRun(ctx context.Context, taskID influxdb.ID) (int64, error) {
	task := d.tasks[taskID]
	sch, err := cron.Parse(task.EffectiveCron())
	if err != nil {
		return 0, err
	}

	// The schedule advances from the latest completed or already-created run.
	latest := int64(0)
	lt, err := time.Parse(time.RFC3339, task.LatestCompleted)
	if err == nil {
		latest = lt.Unix()
	}

	for _, r := range d.runs[task.ID] {
		rt, err := time.Parse(time.RFC3339, r.ScheduledFor)
		if err == nil {
			if rt.Unix() > latest {
				latest = rt.Unix()
			}
		}
	}

	nextScheduledUnix := sch.Next(time.Unix(latest, 0)).Unix()

	// Keep the offset in whole seconds to match the Unix-second timestamp it
	// is added to; the previous code added toff.Nanoseconds(), which inflated
	// the due time by nine orders of magnitude for any non-empty Offset.
	offset := int64(0)
	if task.Offset != "" {
		toff, err := time.ParseDuration(task.Offset)
		if err == nil {
			offset = int64(toff.Seconds())
		}
	}

	return nextScheduledUnix + offset, nil
}
// UpdateRunState sets the run state at the respective time.
// Started states record StartedAt; terminal states record FinishedAt;
// RunScheduled records nothing beyond the status string.
func (d *TaskControlService) UpdateRunState(ctx context.Context, taskID, runID influxdb.ID, when time.Time, state backend.RunStatus) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	run := d.runs[taskID][runID]
	if run == nil {
		// Consistent with AddRunLog: fail loudly with a message instead of a
		// bare nil-pointer dereference with no context.
		panic("cannot update state of a nonexistent run")
	}
	switch state {
	case backend.RunStarted:
		run.StartedAt = when.Format(time.RFC3339Nano)
	case backend.RunSuccess, backend.RunFail, backend.RunCanceled:
		run.FinishedAt = when.Format(time.RFC3339Nano)
	case backend.RunScheduled:
		// nothing to record; the run is already registered
	default:
		panic("invalid status")
	}
	run.Status = state.String()
	return nil
}
// AddRunLog adds a log line to the run.
// It panics when the run is not registered, since the mock treats that as a
// programming error in the test.
func (d *TaskControlService) AddRunLog(ctx context.Context, taskID, runID influxdb.ID, when time.Time, log string) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	run := d.runs[taskID][runID]
	if run == nil {
		// Fixed spelling in the panic message ("non existant" -> "nonexistent").
		panic("cannot add a log to a nonexistent run")
	}
	run.Log = append(run.Log, influxdb.Log{Time: when.Format(time.RFC3339Nano), Message: log})
	return nil
}
// CreatedFor returns the queued runs created (and not yet finished) for taskID.
func (d *TaskControlService) CreatedFor(taskID influxdb.ID) []backend.QueuedRun {
	d.mu.Lock()
	defer d.mu.Unlock()

	var queued []backend.QueuedRun
	for _, qr := range d.created {
		if qr.TaskID != taskID {
			continue
		}
		queued = append(queued, qr)
	}
	return queued
}
// TotalRunsCreatedForTask returns the number of runs created for taskID.
func (d *TaskControlService) TotalRunsCreatedForTask(taskID influxdb.ID) int {
	d.mu.Lock()
	n := d.totalRunsCreated[taskID]
	d.mu.Unlock()
	return n
}
// PollForNumberCreated blocks for a small amount of time waiting for exactly the given count of created and unfinished runs for the given task ID.
// If the expected number isn't found in time, it returns an error.
//
// Because the scheduler and executor do a lot of state changes asynchronously, this is useful in test.
func (d *TaskControlService) PollForNumberCreated(taskID influxdb.ID, count int) ([]backend.QueuedRun, error) {
	const numAttempts = 50
	var created []backend.QueuedRun
	for attempt := 0; attempt < numAttempts; attempt++ {
		// Sleep even on the first attempt so we are more likely to catch the
		// case where too many runs are produced.
		time.Sleep(2 * time.Millisecond)
		created = d.CreatedFor(taskID)
		if len(created) == count {
			return created, nil
		}
	}
	// Return whatever we last saw anyway, to make failures easier to debug.
	return created, fmt.Errorf("did not see count of %d created run(s) for task with ID %s in time, instead saw %d", count, taskID, len(created))
}
// FinishedRun returns the finished run with the given ID, or nil if none exists.
func (d *TaskControlService) FinishedRun(runID influxdb.ID) *influxdb.Run {
	d.mu.Lock()
	run := d.finishedRuns[runID]
	d.mu.Unlock()
	return run
}
// FinishedRuns returns all finished runs, sorted by their scheduled-for time.
//
// Fix: the original read d.finishedRuns without holding d.mu, while every
// sibling accessor locks it — a data race under `go test -race`.
func (d *TaskControlService) FinishedRuns() []*influxdb.Run {
	d.mu.Lock()
	defer d.mu.Unlock()

	rtn := make([]*influxdb.Run, 0, len(d.finishedRuns))
	for _, run := range d.finishedRuns {
		rtn = append(rtn, run)
	}
	// ScheduledFor is an RFC3339 timestamp string, so lexicographic
	// comparison orders chronologically.
	sort.Slice(rtn, func(i, j int) bool { return rtn[i].ScheduledFor < rtn[j].ScheduledFor })
	return rtn
}

View File

@ -8,6 +8,10 @@ import (
"sync"
"time"
"github.com/influxdata/flux/parser"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
@ -40,31 +44,111 @@ type Options struct {
// Every represents a fixed period to repeat execution.
// this can be unmarshaled from json as a string i.e.: "1d" will unmarshal as 1 day
Every time.Duration `json:"every,omitempty"`
Every Duration `json:"every,omitempty"`
// Offset represents a delay before execution.
// this can be unmarshaled from json as a string i.e.: "1d" will unmarshal as 1 day
Offset *time.Duration `json:"offset,omitempty"`
Offset *Duration `json:"offset,omitempty"`
Concurrency *int64 `json:"concurrency,omitempty"`
Retry *int64 `json:"retry,omitempty"`
}
// Duration is a time span that supports the same units as the flux parser's
// time duration, as well as negative length time spans.
type Duration struct {
	// Node is the parsed flux AST representation of the duration
	// (a list of magnitude/unit segments, e.g. "1h10m3s").
	Node ast.DurationLiteral
}
// String renders the duration using the flux AST formatter (e.g. "1h10m3s").
func (a Duration) String() string {
	node := a.Node
	return ast.Format(&node)
}
// Parse parses a string (e.g. "-1m", "1h10m3s") into this Duration.
func (a *Duration) Parse(s string) error {
	node, err := parseSignedDuration(s)
	if err != nil {
		return err
	}
	a.Node = *node
	return nil
}
// MustParseDuration parses s and returns the resulting Duration.
// It panics if parsing fails, so it is intended for tests and package init.
func MustParseDuration(s string) *Duration {
	d := new(Duration)
	if err := d.Parse(s); err != nil {
		panic(err)
	}
	return d
}
// parseSignedDuration is a helper wrapper around parser.ParseSignedDuration.
// We use it because we need to clear the base node, but flux does not.
func parseSignedDuration(text string) (*ast.DurationLiteral, error) {
	q, err := parser.ParseSignedDuration(text)
	if err != nil {
		return nil, err
	}
	// Clear positional information so parsed durations compare and format
	// independently of where in a script they appeared.
	q.BaseNode = ast.BaseNode{}
	// Fix: return an explicit nil rather than `err`, which is provably nil
	// here — returning the variable obscured the success path.
	return q, nil
}
// UnmarshalText implements encoding.TextUnmarshaler, parsing text such as
// "1h10m3s" into the Duration.
func (a *Duration) UnmarshalText(text []byte) error {
	node, err := parseSignedDuration(string(text))
	if err != nil {
		return err
	}
	a.Node = *node
	return nil
}
// MarshalText implements encoding.TextMarshaler, rendering the Duration as text.
func (a Duration) MarshalText() ([]byte, error) {
	return []byte(a.String()), nil
}
// IsZero reports whether every segment of the duration has zero magnitude.
// It does not check whether the segments sum to zero, only that each
// individual segment is zero.
func (a *Duration) IsZero() bool {
	for _, segment := range a.Node.Values {
		if segment.Magnitude != 0 {
			return false
		}
	}
	return true
}
// DurationFrom gives us a time.Duration from a time.
// Currently because of how flux works, this is just an approximation for any
// time unit larger than hours.
func (a *Duration) DurationFrom(t time.Time) (time.Duration, error) {
	return ast.DurationFrom(&a.Node, t)
}
// Add advances t by this duration and returns the resulting time.
func (a *Duration) Add(t time.Time) (time.Time, error) {
	span, err := ast.DurationFrom(&a.Node, t)
	if err != nil {
		return time.Time{}, err
	}
	return t.Add(span), nil
}
// Clear clears out all options in the options struct, it us useful if you wish to reuse it.
func (o *Options) Clear() {
o.Name = ""
o.Cron = ""
o.Every = 0
o.Every = Duration{}
o.Offset = nil
o.Concurrency = nil
o.Retry = nil
}
// IsZero tells us if the options has been zeroed out.
func (o *Options) IsZero() bool {
return o.Name == "" &&
o.Cron == "" &&
o.Every == 0 &&
o.Every.IsZero() &&
o.Offset == nil &&
o.Concurrency == nil &&
o.Retry == nil
@ -80,6 +164,50 @@ const (
optRetry = "retry"
)
// contains reports whether e is present in s.
func contains(s []string, e string) bool {
	for _, candidate := range s {
		if candidate == e {
			return true
		}
	}
	return false
}
// grabTaskOptionAST walks the parsed package looking for the `task` option
// statement and returns the AST expressions for the requested keys
// (e.g. "every", "offset"), keyed by name. Only the first task option
// statement found is considered.
func grabTaskOptionAST(p *ast.Package, keys ...string) map[string]ast.Expression {
	// Preallocate two entries: at most "offset" and "every" are requested.
	res := make(map[string]ast.Expression, 2)
	for i := range p.Files {
		for j := range p.Files[i].Body {
			if p.Files[i].Body[j].Type() != "OptionStatement" {
				continue
			}
			opt := (p.Files[i].Body[j]).(*ast.OptionStatement)
			if opt.Assignment.Type() != "VariableAssignment" {
				continue
			}
			asmt, ok := opt.Assignment.(*ast.VariableAssignment)
			if !ok {
				continue
			}
			if asmt.ID.Key() != "task" {
				continue
			}
			ae, ok := asmt.Init.(*ast.ObjectExpression)
			if !ok {
				continue
			}
			for k := range ae.Properties {
				prop := ae.Properties[k]
				// Fix: guard against a nil property BEFORE dereferencing it.
				// The original evaluated prop.Key.Key() first and only then
				// checked prop != nil, so the nil check could never help.
				if prop == nil {
					continue
				}
				if key := prop.Key.Key(); contains(keys, key) {
					res[key] = prop.Value
				}
			}
			// Stop after the first task option statement.
			return res
		}
	}
	return res
}
// FromScript extracts Options from a Flux script.
func FromScript(script string) (Options, error) {
if optionCache != nil {
@ -93,7 +221,12 @@ func FromScript(script string) (Options, error) {
}
opt := Options{Retry: pointer.Int64(1), Concurrency: pointer.Int64(1)}
_, scope, err := flux.Eval(script)
fluxAST, err := flux.Parse(script)
if err != nil {
return opt, err
}
durTypes := grabTaskOptionAST(fluxAST, optEvery, optOffset)
_, scope, err := flux.EvalAST(fluxAST)
if err != nil {
return opt, err
}
@ -103,6 +236,10 @@ func FromScript(script string) (Options, error) {
if !ok {
return opt, errors.New("missing required option: 'task'")
}
// check to make sure task is an object
if err := checkNature(task.PolyType().Nature(), semantic.Object); err != nil {
return opt, err
}
optObject := task.Object()
if err := validateOptionNames(optObject); err != nil {
return opt, err
@ -138,14 +275,39 @@ func FromScript(script string) (Options, error) {
if err := checkNature(everyVal.PolyType().Nature(), semantic.Duration); err != nil {
return opt, err
}
opt.Every = everyVal.Duration().Duration()
dur, ok := durTypes["every"]
if !ok || dur == nil {
return opt, errors.New("failed to parse `every` in task")
}
durNode, err := parseSignedDuration(dur.Location().Source)
if err != nil {
return opt, err
}
if !ok || durNode == nil {
return opt, errors.New("failed to parse `every` in task")
}
durNode.BaseNode = ast.BaseNode{}
opt.Every.Node = *durNode
}
if offsetVal, ok := optObject.Get(optOffset); ok {
if err := checkNature(offsetVal.PolyType().Nature(), semantic.Duration); err != nil {
return opt, err
}
opt.Offset = pointer.Duration(offsetVal.Duration().Duration())
dur, ok := durTypes["offset"]
if !ok || dur == nil {
return opt, errors.New("failed to parse `offset` in task")
}
durNode, err := parseSignedDuration(dur.Location().Source)
if err != nil {
return opt, err
}
if !ok || durNode == nil {
return opt, errors.New("failed to parse `offset` in task")
}
durNode.BaseNode = ast.BaseNode{}
opt.Offset = &Duration{}
opt.Offset.Node = *durNode
}
if concurrencyVal, ok := optObject.Get(optConcurrency); ok {
@ -177,13 +339,14 @@ func FromScript(script string) (Options, error) {
// Validate returns an error if the options aren't valid.
func (o *Options) Validate() error {
now := time.Now()
var errs []string
if o.Name == "" {
errs = append(errs, "name required")
}
cronPresent := o.Cron != ""
everyPresent := o.Every != 0
everyPresent := !o.Every.IsZero()
if cronPresent == everyPresent {
// They're both present or both missing.
errs = append(errs, "must specify exactly one of either cron or every")
@ -193,17 +356,26 @@ func (o *Options) Validate() error {
errs = append(errs, "cron invalid: "+err.Error())
}
} else if everyPresent {
if o.Every < time.Second {
every, err := o.Every.DurationFrom(now)
if err != nil {
return err
}
if every < time.Second {
errs = append(errs, "every option must be at least 1 second")
} else if o.Every.Truncate(time.Second) != o.Every {
} else if every.Truncate(time.Second) != every {
errs = append(errs, "every option must be expressible as whole seconds")
}
}
if o.Offset != nil && o.Offset.Truncate(time.Second) != *o.Offset {
if o.Offset != nil {
offset, err := o.Offset.DurationFrom(now)
if err != nil {
return err
}
if offset.Truncate(time.Second) != offset {
// For now, allowing negative offset delays. Maybe they're useful for forecasting?
errs = append(errs, "offset option must be expressible as whole seconds")
}
}
if o.Concurrency != nil {
if *o.Concurrency < 1 {
errs = append(errs, "concurrency must be at least 1")
@ -231,11 +403,15 @@ func (o *Options) Validate() error {
// If the every option was specified, it is converted into a cron string using "@every".
// Otherwise, the empty string is returned.
// The value of the offset option is not considered.
// TODO(docmerlin): create an EffectiveCronStringFrom(t time.Time) string,
// that works from a unit of time.
// Do not use this if you haven't checked for validity already.
func (o *Options) EffectiveCronString() string {
if o.Cron != "" {
return o.Cron
}
if o.Every > 0 {
every, _ := o.Every.DurationFrom(time.Now()) // we can ignore errors here because we have alreach checked for validity.
if every > 0 {
return "@every " + o.Every.String()
}
return ""

View File

@ -21,10 +21,10 @@ func scriptGenerator(opt options.Options, body string) string {
if opt.Cron != "" {
taskData = fmt.Sprintf("%s cron: %q,\n", taskData, opt.Cron)
}
if opt.Every != 0 {
if !opt.Every.IsZero() {
taskData = fmt.Sprintf("%s every: %s,\n", taskData, opt.Every.String())
}
if opt.Offset != nil && *opt.Offset != 0 {
if opt.Offset != nil && !(*opt.Offset).IsZero() {
taskData = fmt.Sprintf("%s offset: %s,\n", taskData, opt.Offset.String())
}
if opt.Concurrency != nil && *opt.Concurrency != 0 {
@ -45,20 +45,36 @@ func scriptGenerator(opt options.Options, body string) string {
%s`, taskData, body)
}
// TestNegDurations verifies that a negative duration string ("-1m") parses
// into a Duration that evaluates to a negative time.Duration.
func TestNegDurations(t *testing.T) {
	dur := options.MustParseDuration("-1m")
	d, err := dur.DurationFrom(time.Now())
	if err != nil {
		t.Fatal(err)
	}
	if d != -time.Minute {
		t.Fatalf("expected duration to be -1m but was %s", d)
	}
}
func TestFromScript(t *testing.T) {
for _, c := range []struct {
script string
exp options.Options
shouldErr bool
}{
{script: scriptGenerator(options.Options{Name: "name0", Cron: "* * * * *", Concurrency: pointer.Int64(2), Retry: pointer.Int64(3), Offset: pointer.Duration(-time.Minute)}, ""), exp: options.Options{Name: "name0", Cron: "* * * * *", Concurrency: pointer.Int64(2), Retry: pointer.Int64(3), Offset: pointer.Duration(-time.Minute)}},
{script: scriptGenerator(options.Options{Name: "name1", Every: 5 * time.Second}, ""), exp: options.Options{Name: "name1", Every: 5 * time.Second, Concurrency: pointer.Int64(1), Retry: pointer.Int64(1)}},
{script: scriptGenerator(options.Options{Name: "name0", Cron: "* * * * *", Concurrency: pointer.Int64(2), Retry: pointer.Int64(3), Offset: options.MustParseDuration("-1m")}, ""),
exp: options.Options{Name: "name0",
Cron: "* * * * *",
Concurrency: pointer.Int64(2),
Retry: pointer.Int64(3),
Offset: options.MustParseDuration("-1m")}},
{script: scriptGenerator(options.Options{Name: "name1", Every: *(options.MustParseDuration("5s"))}, ""), exp: options.Options{Name: "name1", Every: *(options.MustParseDuration("5s")), Concurrency: pointer.Int64(1), Retry: pointer.Int64(1)}},
{script: scriptGenerator(options.Options{Name: "name2", Cron: "* * * * *"}, ""), exp: options.Options{Name: "name2", Cron: "* * * * *", Concurrency: pointer.Int64(1), Retry: pointer.Int64(1)}},
{script: scriptGenerator(options.Options{Name: "name3", Every: time.Hour, Cron: "* * * * *"}, ""), shouldErr: true},
{script: scriptGenerator(options.Options{Name: "name4", Concurrency: pointer.Int64(1000), Every: time.Hour}, ""), shouldErr: true},
{script: scriptGenerator(options.Options{Name: "name3", Every: *(options.MustParseDuration("1h")), Cron: "* * * * *"}, ""), shouldErr: true},
{script: scriptGenerator(options.Options{Name: "name4", Concurrency: pointer.Int64(1000), Every: *(options.MustParseDuration("1h"))}, ""), shouldErr: true},
{script: "option task = {\n name: \"name5\",\n concurrency: 0,\n every: 1m0s,\n\n}\n\nfrom(bucket: \"test\")\n |> range(start:-1h)", shouldErr: true},
{script: "option task = {\n name: \"name6\",\n concurrency: 1,\n every: 1,\n\n}\n\nfrom(bucket: \"test\")\n |> range(start:-1h)", shouldErr: true},
{script: scriptGenerator(options.Options{Name: "name7", Retry: pointer.Int64(20), Every: time.Hour}, ""), shouldErr: true},
{script: scriptGenerator(options.Options{Name: "name7", Retry: pointer.Int64(20), Every: *(options.MustParseDuration("1h"))}, ""), shouldErr: true},
{script: "option task = {\n name: \"name8\",\n retry: 0,\n every: 1m0s,\n\n}\n\nfrom(bucket: \"test\")\n |> range(start:-1h)", shouldErr: true},
{script: scriptGenerator(options.Options{Name: "name9"}, ""), shouldErr: true},
{script: scriptGenerator(options.Options{}, ""), shouldErr: true},
@ -125,7 +141,7 @@ func TestValidate(t *testing.T) {
}
*bad = good
bad.Every = time.Minute
bad.Every = *options.MustParseDuration("1m")
if err := bad.Validate(); err == nil {
t.Error("expected error for options with both cron and every")
}
@ -138,13 +154,13 @@ func TestValidate(t *testing.T) {
*bad = good
bad.Cron = ""
bad.Every = -1 * time.Minute
bad.Every = *options.MustParseDuration("-1m")
if err := bad.Validate(); err == nil {
t.Error("expected error for negative every")
}
*bad = good
bad.Offset = pointer.Duration(1500 * time.Millisecond)
bad.Offset = options.MustParseDuration("1500ms")
if err := bad.Validate(); err == nil {
t.Error("expected error for sub-second delay resolution")
}
@ -177,11 +193,11 @@ func TestValidate(t *testing.T) {
func TestEffectiveCronString(t *testing.T) {
for _, c := range []struct {
c string
e time.Duration
e options.Duration
exp string
}{
{c: "10 * * * *", exp: "10 * * * *"},
{e: 10 * time.Second, exp: "@every 10s"},
{e: *(options.MustParseDuration("10s")), exp: "@every 10s"},
{exp: ""},
} {
o := options.Options{Cron: c.c, Every: c.e}
@ -191,3 +207,68 @@ func TestEffectiveCronString(t *testing.T) {
}
}
}
// TestDurationMarshaling exercises the text round-trip of options.Duration:
// UnmarshalText and Parse must agree, MarshalText/String must render the
// original text, and an all-zero-segment duration must report IsZero.
func TestDurationMarshaling(t *testing.T) {
	t.Run("unmarshaling", func(t *testing.T) {
		now := time.Now()
		// Parse the same text two ways and require identical results.
		dur1 := options.Duration{}
		if err := dur1.UnmarshalText([]byte("1h10m3s")); err != nil {
			t.Fatal(err)
		}
		d1, err1 := dur1.DurationFrom(now)
		if err1 != nil {
			t.Fatal(err1)
		}
		dur2 := options.Duration{}
		if err := dur2.Parse("1h10m3s"); err != nil {
			t.Fatal(err)
		}
		d2, err2 := dur2.DurationFrom(now)
		if err2 != nil {
			t.Fatal(err2)
		}
		if d1 != d2 || d1 != time.Hour+10*time.Minute+3*time.Second {
			t.Fatal("Parse and Marshaling do not give us the same result")
		}
	})
	t.Run("marshaling", func(t *testing.T) {
		// Text round-trip: the rendered forms must match the input exactly.
		dur := options.Duration{}
		if err := dur.UnmarshalText([]byte("1h10m3s")); err != nil {
			t.Fatal(err)
		}
		if dur.String() != "1h10m3s" {
			t.Fatalf("duration string should be \"1h10m3s\" but was %s", dur.String())
		}
		text, err := dur.MarshalText()
		if err != nil {
			t.Fatal(err)
		}
		if string(text) != "1h10m3s" {
			t.Fatalf("duration text should be \"1h10m3s\" but was %s", text)
		}
	})
	t.Run("parse zero", func(t *testing.T) {
		// "0h0s" has multiple segments, all zero — IsZero must still hold.
		dur := options.Duration{}
		if err := dur.UnmarshalText([]byte("0h0s")); err != nil {
			t.Fatal(err)
		}
		if !dur.IsZero() {
			t.Fatalf("expected duration \"0s\" to be zero but was %s", dur.String())
		}
	})
}
// TestDurationMath verifies that a parsed positive duration ("10s")
// evaluates to the expected time.Duration relative to the current time.
func TestDurationMath(t *testing.T) {
	dur := options.MustParseDuration("10s")
	d, err := dur.DurationFrom(time.Now())
	if err != nil {
		t.Fatal(err)
	}
	if d != 10*time.Second {
		t.Fatalf("expected duration to be 10s but it was %s", d)
	}
}

View File

@ -176,10 +176,10 @@ func (p pAdapter) CreateTask(ctx context.Context, t platform.TaskCreate) (*platf
AuthorizationID: req.AuthorizationID,
}
if opts.Every != 0 {
if !opts.Every.IsZero() {
task.Every = opts.Every.String()
}
if opts.Offset != nil && *opts.Offset != 0 {
if opts.Offset != nil && !(*opts.Offset).IsZero() {
task.Offset = opts.Offset.String()
}
@ -429,10 +429,10 @@ func (p *pAdapter) toPlatformTask(ctx context.Context, t backend.StoreTask, m *b
Flux: t.Script,
Cron: opts.Cron,
}
if opts.Every != 0 {
if !opts.Every.IsZero() {
pt.Every = opts.Every.String()
}
if opts.Offset != nil && *opts.Offset != 0 {
if opts.Offset != nil && !(*opts.Offset).IsZero() {
pt.Offset = opts.Offset.String()
}
if m != nil {

View File

@ -27,7 +27,7 @@ func inMemFactory(t *testing.T) (*servicetest.System, context.CancelFunc) {
i := inmem.NewService()
return &servicetest.System{
TaskControlService: servicetest.TaskControlAdaptor(st, lrw, lrw),
TaskControlService: backend.TaskControlAdaptor(st, lrw, lrw),
Ctx: ctx,
I: i,
TaskService: servicetest.UsePlatformAdaptor(st, lrw, mock.NewScheduler(), i),
@ -63,7 +63,7 @@ func boltFactory(t *testing.T) (*servicetest.System, context.CancelFunc) {
i := inmem.NewService()
return &servicetest.System{
TaskControlService: servicetest.TaskControlAdaptor(st, lrw, lrw),
TaskControlService: backend.TaskControlAdaptor(st, lrw, lrw),
TaskService: servicetest.UsePlatformAdaptor(st, lrw, mock.NewScheduler(), i),
Ctx: ctx,
I: i,

View File

@ -18,7 +18,6 @@ import (
"github.com/influxdata/influxdb"
icontext "github.com/influxdata/influxdb/context"
"github.com/influxdata/influxdb/inmem"
"github.com/influxdata/influxdb/pkg/pointer"
"github.com/influxdata/influxdb/task"
"github.com/influxdata/influxdb/task/backend"
"github.com/influxdata/influxdb/task/options"
@ -42,99 +41,6 @@ func UsePlatformAdaptor(s backend.Store, lr backend.LogReader, rc task.RunContro
return task.PlatformAdapter(s, lr, rc, i, i, i)
}
// TaskControlAdaptor creates a TaskControlService for the older TaskStore system.
func TaskControlAdaptor(s backend.Store, lw backend.LogWriter, lr backend.LogReader) backend.TaskControlService {
return &taskControlAdaptor{s, lw, lr}
}
// taskControlAdaptor adapts a backend.Store and log readers and writers to implement the task control service.
type taskControlAdaptor struct {
s backend.Store
lw backend.LogWriter
lr backend.LogReader
}
func (tcs *taskControlAdaptor) CreateNextRun(ctx context.Context, taskID influxdb.ID, now int64) (backend.RunCreation, error) {
return tcs.s.CreateNextRun(ctx, taskID, now)
}
func (tcs *taskControlAdaptor) FinishRun(ctx context.Context, taskID, runID influxdb.ID) (*influxdb.Run, error) {
// the tests aren't looking for a returned Run because the old system didn't return one
// Once we completely switch over to the new system we can look at the returned run in the tests.
return nil, tcs.s.FinishRun(ctx, taskID, runID)
}
func (tcs *taskControlAdaptor) NextDueRun(ctx context.Context, taskID influxdb.ID) (int64, error) {
_, m, err := tcs.s.FindTaskByIDWithMeta(ctx, taskID)
if err != nil {
return 0, err
}
return m.NextDueRun()
}
func (tcs *taskControlAdaptor) UpdateRunState(ctx context.Context, taskID, runID influxdb.ID, when time.Time, state backend.RunStatus) error {
st, m, err := tcs.s.FindTaskByIDWithMeta(ctx, taskID)
if err != nil {
return err
}
var (
schedFor, reqAt time.Time
)
// check the log store
r, err := tcs.lr.FindRunByID(ctx, st.Org, runID)
if err == nil {
schedFor, _ = time.Parse(time.RFC3339, r.ScheduledFor)
reqAt, _ = time.Parse(time.RFC3339, r.RequestedAt)
}
// in the old system the log store may not have the run until after the first
// state update, so we will need to pull the currently running.
if schedFor.IsZero() {
for _, cr := range m.CurrentlyRunning {
if influxdb.ID(cr.RunID) == runID {
schedFor = time.Unix(cr.Now, 0)
reqAt = time.Unix(cr.RequestedAt, 0)
}
}
}
rlb := backend.RunLogBase{
Task: st,
RunID: runID,
RunScheduledFor: schedFor.Unix(),
RequestedAt: reqAt.Unix(),
}
if err := tcs.lw.UpdateRunState(ctx, rlb, when, state); err != nil {
return err
}
return nil
}
func (tcs *taskControlAdaptor) AddRunLog(ctx context.Context, taskID, runID influxdb.ID, when time.Time, log string) error {
st, err := tcs.s.FindTaskByID(ctx, taskID)
if err != nil {
return err
}
r, err := tcs.lr.FindRunByID(ctx, st.Org, runID)
if err != nil {
return err
}
schFor, _ := time.Parse(time.RFC3339, r.ScheduledFor)
reqAt, _ := time.Parse(time.RFC3339, r.RequestedAt)
rlb := backend.RunLogBase{
Task: st,
RunID: runID,
RunScheduledFor: schFor.Unix(),
RequestedAt: reqAt.Unix(),
}
return tcs.lw.AddRunLog(ctx, rlb, when, log)
}
// TestTaskService should be called by consumers of the servicetest package.
// This will call fn once to create a single influxdb.TaskService
// used across all subtests in TestTaskService.
@ -176,6 +82,10 @@ func TestTaskService(t *testing.T, fn BackendComponentFactory) {
t.Parallel()
testMetaUpdate(t, sys)
})
t.Run("Task Manual Run", func(t *testing.T) {
t.Parallel()
testManualRun(t, sys)
})
})
}
@ -365,8 +275,8 @@ func testTaskCRUD(t *testing.T, sys *System) {
// Update task: switch to every.
newStatus = string(backend.TaskActive)
newFlux = "import \"http\"\n\noption task = {\n\tname: \"task-changed #98\",\n\tevery: 30000000000ns,\n\toffset: 5s,\n\tconcurrency: 100,\n}\n\nfrom(bucket: \"b\")\n\t|> http.to(url: \"http://example.com\")"
f, err = sys.TaskService.UpdateTask(authorizedCtx, origID, influxdb.TaskUpdate{Options: options.Options{Every: 30 * time.Second}})
newFlux = "import \"http\"\n\noption task = {\n\tname: \"task-changed #98\",\n\tevery: 30s,\n\toffset: 5s,\n\tconcurrency: 100,\n}\n\nfrom(bucket: \"b\")\n\t|> http.to(url: \"http://example.com\")"
f, err = sys.TaskService.UpdateTask(authorizedCtx, origID, influxdb.TaskUpdate{Options: options.Options{Every: *(options.MustParseDuration("30s"))}})
if err != nil {
t.Fatal(err)
}
@ -450,7 +360,7 @@ from(bucket: "b")
expectedFlux := `import "http"
option task = {name: "task-Options-Update", every: 10000000000ns, concurrency: 100}
option task = {name: "task-Options-Update", every: 10s, concurrency: 100}
from(bucket: "b")
|> http.to(url: "http://example.com")`
@ -467,7 +377,7 @@ from(bucket: "b")
if err != nil {
t.Fatal(err)
}
f, err := sys.TaskService.UpdateTask(authorizedCtx, task.ID, influxdb.TaskUpdate{Options: options.Options{Offset: pointer.Duration(0), Every: 10 * time.Second}})
f, err := sys.TaskService.UpdateTask(authorizedCtx, task.ID, influxdb.TaskUpdate{Options: options.Options{Offset: &options.Duration{}, Every: *(options.MustParseDuration("10s"))}})
if err != nil {
t.Fatal(err)
}
@ -1026,7 +936,7 @@ func testTaskConcurrency(t *testing.T, sys *System) {
// Create a run for the last task we found.
// The script should run every minute, so use max now.
tid := tasks[len(tasks)-1].ID
if _, err := sys.TaskControlService.CreateNextRun(sys.Ctx, tid, math.MaxInt64); err != nil {
if _, err := sys.TaskControlService.CreateNextRun(sys.Ctx, tid, math.MaxInt64>>6); err != nil { // we use the >>6 here because math.MaxInt64 is too large which causes problems when converting back and forth from time
// This may have errored due to the task being deleted. Check if the task still exists.
if _, err2 := sys.TaskService.FindTaskByID(sys.Ctx, tid); err2 == backend.ErrTaskNotFound {
@ -1059,6 +969,49 @@ func testTaskConcurrency(t *testing.T, sys *System) {
extraWg.Wait()
}
// testManualRun creates a task, forces a run of it via the TaskService,
// then verifies via the TaskControlService that exactly that one manual run
// is queued with the requested scheduled-for time.
func testManualRun(t *testing.T, s *System) {
	cr := creds(t, s)

	// Create a task.
	tc := influxdb.TaskCreate{
		OrganizationID: cr.OrgID,
		Flux:           fmt.Sprintf(scriptFmt, 0),
		Token:          cr.Token,
	}

	authorizedCtx := icontext.SetAuthorizer(s.Ctx, cr.Authorizer())

	tsk, err := s.TaskService.CreateTask(authorizedCtx, tc)
	if err != nil {
		t.Fatal(err)
	}
	if !tsk.ID.Valid() {
		t.Fatal("no task ID set")
	}

	// Force a run at (approximately) now and check the echoed schedule time.
	scheduledFor := time.Now().UTC()

	run, err := s.TaskService.ForceRun(authorizedCtx, tsk.ID, scheduledFor.Unix())
	if err != nil {
		t.Fatal(err)
	}

	if run.ScheduledFor != scheduledFor.Format(time.RFC3339) {
		t.Fatalf("force run returned a different scheduled for time expected: %s, got %s", scheduledFor.Format(time.RFC3339), run.ScheduledFor)
	}

	// The control service must report exactly the one run we just forced.
	runs, err := s.TaskControlService.ManualRuns(authorizedCtx, tsk.ID)
	if err != nil {
		t.Fatal(err)
	}
	if len(runs) != 1 {
		t.Fatalf("expected 1 manual run: got %d", len(runs))
	}
	if runs[0].ID != run.ID {
		diff := cmp.Diff(runs[0], run)
		t.Fatalf("manual run missmatch: %s", diff)
	}
}
func creds(t *testing.T, s *System) TestCreds {
t.Helper()

View File

@ -3,11 +3,9 @@ package influxdb_test
import (
"encoding/json"
"testing"
"time"
"github.com/google/go-cmp/cmp"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/pkg/pointer"
_ "github.com/influxdata/influxdb/query/builtin"
"github.com/influxdata/influxdb/task/options"
)
@ -18,10 +16,10 @@ func TestOptionsMarshal(t *testing.T) {
if err := json.Unmarshal([]byte(`{"every":"10s", "offset":"1h"}`), tu); err != nil {
t.Fatal(err)
}
if tu.Options.Every != 10*time.Second {
if tu.Options.Every.String() != "10s" {
t.Fatalf("option.every not properly unmarshaled, expected 10s got %s", tu.Options.Every)
}
if *tu.Options.Offset != time.Hour {
if tu.Options.Offset.String() != "1h" {
t.Fatalf("option.every not properly unmarshaled, expected 1h got %s", tu.Options.Offset)
}
@ -38,22 +36,22 @@ func TestOptionsMarshal(t *testing.T) {
func TestOptionsEdit(t *testing.T) {
tu := &platform.TaskUpdate{}
tu.Options.Every = 10 * time.Second
tu.Options.Every = *(options.MustParseDuration("10s"))
if err := tu.UpdateFlux(`option task = {every: 20s, name: "foo"} from(bucket:"x") |> range(start:-1h)`); err != nil {
t.Fatal(err)
}
t.Run("zeroing", func(t *testing.T) {
if tu.Options.Every != 0 {
t.Errorf("expected Every to be zeroed but it wasn't")
if !tu.Options.Every.IsZero() {
t.Errorf("expected Every to be zeroed but it was not")
}
})
t.Run("fmt string", func(t *testing.T) {
t.Skip("This won't work until the flux formatter formats durations in a nicer way")
expected := `option task = {every: 10s, name: "foo"}
from(bucket:"x")
|> range(start:-1h)`
from(bucket: "x")
|> range(start: -1h)`
if *tu.Flux != expected {
t.Errorf("got the wrong task back, expected %s,\n got %s\n", expected, *tu.Flux)
t.Errorf("got the wrong task back, expected %s,\n got %s\n diff: %s", expected, *tu.Flux, cmp.Diff(expected, *tu.Flux))
}
})
t.Run("replacement", func(t *testing.T) {
@ -61,15 +59,14 @@ func TestOptionsEdit(t *testing.T) {
if err != nil {
t.Error(err)
}
if op.Every != 10*time.Second {
if op.Every.String() != "10s" {
t.Logf("expected every to be 10s but was %s", op.Every)
t.Fail()
}
})
t.Run("add new option", func(t *testing.T) {
tu := &platform.TaskUpdate{}
ofst := 30 * time.Second
tu.Options.Offset = &ofst
tu.Options.Offset = options.MustParseDuration("30s")
if err := tu.UpdateFlux(`option task = {every: 20s, name: "foo"} from(bucket:"x") |> range(start:-1h)`); err != nil {
t.Fatal(err)
}
@ -77,7 +74,7 @@ func TestOptionsEdit(t *testing.T) {
if err != nil {
t.Error(err)
}
if op.Offset == nil || *op.Offset != 30*time.Second {
if op.Offset == nil || op.Offset.String() != "30s" {
t.Fatalf("expected every to be 30s but was %s", op.Every)
}
})
@ -91,7 +88,7 @@ func TestOptionsEdit(t *testing.T) {
if err != nil {
t.Error(err)
}
if op.Every != 0 {
if !op.Every.IsZero() {
t.Fatalf("expected every to be 0 but was %s", op.Every)
}
if op.Cron != "* * * * *" {
@ -100,7 +97,7 @@ func TestOptionsEdit(t *testing.T) {
})
t.Run("switching from cron to every", func(t *testing.T) {
tu := &platform.TaskUpdate{}
tu.Options.Every = 10 * time.Second
tu.Options.Every = *(options.MustParseDuration("10s"))
if err := tu.UpdateFlux(`option task = {cron: "* * * * *", name: "foo"} from(bucket:"x") |> range(start:-1h)`); err != nil {
t.Fatal(err)
}
@ -108,7 +105,7 @@ func TestOptionsEdit(t *testing.T) {
if err != nil {
t.Error(err)
}
if op.Every != 10*time.Second {
if op.Every.String() != "10s" {
t.Fatalf("expected every to be 10s but was %s", op.Every)
}
if op.Cron != "" {
@ -117,7 +114,7 @@ func TestOptionsEdit(t *testing.T) {
})
t.Run("delete deletable option", func(t *testing.T) {
tu := &platform.TaskUpdate{}
tu.Options.Offset = pointer.Duration(0)
tu.Options.Offset = &options.Duration{}
expscript := `option task = {cron: "* * * * *", name: "foo"}
from(bucket: "x")
@ -129,7 +126,7 @@ from(bucket: "x")
if err != nil {
t.Error(err)
}
if op.Every != 0 {
if !op.Every.IsZero() {
t.Fatalf("expected every to be 0s but was %s", op.Every)
}
if op.Cron != "* * * * *" {

View File

@ -62,8 +62,8 @@ func AuthorizationService(
fn: FindAuthorizationByToken,
},
{
name: "UpdateAuthorizationStatus",
fn: UpdateAuthorizationStatus,
name: "UpdateAuthorization",
fn: UpdateAuthorization,
},
{
name: "FindAuthorizations",
@ -447,14 +447,18 @@ func FindAuthorizationByID(
}
}
// UpdateAuthorizationStatus testing
func UpdateAuthorizationStatus(
func stringPtr(s string) *string {
return &s
}
// UpdateAuthorization testing
func UpdateAuthorization(
init func(AuthorizationFields, *testing.T) (platform.AuthorizationService, string, func()),
t *testing.T,
) {
type args struct {
id platform.ID
status platform.Status
upd *platform.AuthorizationUpdate
}
type wants struct {
err error
@ -523,7 +527,10 @@ func UpdateAuthorizationStatus(
},
args: args{
id: MustIDBase16(authTwoID),
status: platform.Inactive,
upd: &platform.AuthorizationUpdate{
Status: platform.Inactive.Ptr(),
Description: stringPtr("desc1"),
},
},
wants: wants{
authorization: &platform.Authorization{
@ -533,6 +540,7 @@ func UpdateAuthorizationStatus(
Token: "rand2",
Permissions: createUsersPermission(MustIDBase16(orgOneID)),
Status: platform.Inactive,
Description: "desc1",
},
},
},
@ -586,12 +594,14 @@ func UpdateAuthorizationStatus(
},
args: args{
id: MustIDBase16(authThreeID),
status: platform.Inactive,
upd: &platform.AuthorizationUpdate{
Status: platform.Inactive.Ptr(),
},
},
wants: wants{
err: &platform.Error{
Code: platform.ENotFound,
Op: platform.OpSetAuthorizationStatus,
Op: platform.OpUpdateAuthorization,
Msg: "authorization not found",
},
},
@ -653,12 +663,14 @@ func UpdateAuthorizationStatus(
},
args: args{
id: MustIDBase16(authTwoID),
status: platform.Status("unknown"),
upd: &platform.AuthorizationUpdate{
Status: platform.Status("unknown").Ptr(),
},
},
wants: wants{
err: &platform.Error{
Code: platform.EInvalid,
Op: platform.OpSetAuthorizationStatus,
Op: platform.OpUpdateAuthorization,
Msg: "unknown authorization status",
},
},
@ -670,7 +682,7 @@ func UpdateAuthorizationStatus(
defer done()
ctx := context.Background()
err := s.SetAuthorizationStatus(ctx, tt.args.id, tt.args.status)
err := s.UpdateAuthorization(ctx, tt.args.id, tt.args.upd)
diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t)
if tt.wants.err == nil {

View File

@ -1,694 +0,0 @@
package testing
import (
"bytes"
"context"
"sort"
"testing"
"github.com/google/go-cmp/cmp"
platform "github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/mock"
)
const (
viewOneID = "020f755c3c082000"
viewTwoID = "020f755c3c082001"
viewThreeID = "020f755c3c082002"
)
var viewCmpOptions = cmp.Options{
cmp.Comparer(func(x, y []byte) bool {
return bytes.Equal(x, y)
}),
cmp.Transformer("Sort", func(in []*platform.View) []*platform.View {
out := append([]*platform.View(nil), in...) // Copy input to avoid mutating it
sort.Slice(out, func(i, j int) bool {
return out[i].ID.String() > out[j].ID.String()
})
return out
}),
}
// ViewFields will include the IDGenerator, and views
type ViewFields struct {
IDGenerator platform.IDGenerator
Views []*platform.View
}
// CreateView testing
func CreateView(
init func(ViewFields, *testing.T) (platform.ViewService, string, func()),
t *testing.T,
) {
type args struct {
view *platform.View
}
type wants struct {
err error
views []*platform.View
}
tests := []struct {
name string
fields ViewFields
args args
wants wants
}{
{
name: "basic create view",
fields: ViewFields{
IDGenerator: &mock.IDGenerator{
IDFn: func() platform.ID {
return MustIDBase16(viewTwoID)
},
},
Views: []*platform.View{
{
ViewContents: platform.ViewContents{
ID: MustIDBase16(viewOneID),
Name: "view1",
},
},
},
},
args: args{
view: &platform.View{
ViewContents: platform.ViewContents{
Name: "view2",
},
Properties: platform.TableViewProperties{
Type: "table",
TimeFormat: "rfc3339",
},
},
},
wants: wants{
views: []*platform.View{
{
ViewContents: platform.ViewContents{
ID: MustIDBase16(viewOneID),
Name: "view1",
},
Properties: platform.EmptyViewProperties{},
},
{
ViewContents: platform.ViewContents{
ID: MustIDBase16(viewTwoID),
Name: "view2",
},
Properties: platform.TableViewProperties{
Type: "table",
TimeFormat: "rfc3339",
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s, opPrefix, done := init(tt.fields, t)
defer done()
ctx := context.Background()
err := s.CreateView(ctx, tt.args.view)
diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t)
defer s.DeleteView(ctx, tt.args.view.ID)
views, _, err := s.FindViews(ctx, platform.ViewFilter{})
if err != nil {
t.Fatalf("failed to retrieve views: %v", err)
}
if diff := cmp.Diff(views, tt.wants.views, viewCmpOptions...); diff != "" {
t.Errorf("views are different -got/+want\ndiff %s", diff)
}
})
}
}
// FindViewByID exercises a ViewService implementation's FindViewByID method,
// covering both a successful lookup and the not-found error path.
func FindViewByID(
	init func(ViewFields, *testing.T) (platform.ViewService, string, func()),
	t *testing.T,
) {
	type args struct {
		id platform.ID
	}
	type wants struct {
		err  error
		view *platform.View
	}

	tests := []struct {
		name   string
		fields ViewFields
		args   args
		wants  wants
	}{
		{
			name: "basic find view by id",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{
				id: MustIDBase16(viewTwoID),
			},
			wants: wants{
				view: &platform.View{
					ViewContents: platform.ViewContents{
						ID:   MustIDBase16(viewTwoID),
						Name: "view2",
					},
					Properties: platform.TableViewProperties{
						Type:       "table",
						TimeFormat: "rfc3339",
					},
				},
			},
		},
		{
			// Lookup of an ID not present in the fixture must yield an
			// ENotFound error with the FindViewByID operation code.
			name: "find view by id not found",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{
				id: MustIDBase16(threeID),
			},
			wants: wants{
				err: &platform.Error{
					Code: platform.ENotFound,
					Op:   platform.OpFindViewByID,
					Msg:  "view not found",
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s, opPrefix, done := init(tt.fields, t)
			defer done()
			ctx := context.Background()
			view, err := s.FindViewByID(ctx, tt.args.id)
			diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t)
			if diff := cmp.Diff(view, tt.wants.view, viewCmpOptions...); diff != "" {
				t.Errorf("view is different -got/+want\ndiff %s", diff)
			}
		})
	}
}
// FindViews exercises a ViewService implementation's FindViews method with
// an empty filter, an ID filter that matches, and an ID filter that does not.
func FindViews(
	init func(ViewFields, *testing.T) (platform.ViewService, string, func()),
	t *testing.T,
) {
	type args struct {
		// ID, when valid, is applied as the filter's ID; the zero ID means
		// "no filter" (see the Valid() check in the runner below).
		ID platform.ID
	}
	type wants struct {
		views []*platform.View
		err   error
	}

	tests := []struct {
		name   string
		fields ViewFields
		args   args
		wants  wants
	}{
		{
			name: "find all views",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{},
			wants: wants{
				views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
		},
		{
			name: "find view by id",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{
				ID: MustIDBase16(viewTwoID),
			},
			wants: wants{
				views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
		},
		{
			// A filter on an unknown ID is not an error for FindViews;
			// it simply yields an empty result set.
			name: "find view by id not found",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{
				ID: MustIDBase16(threeID),
			},
			wants: wants{
				views: []*platform.View{},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s, opPrefix, done := init(tt.fields, t)
			defer done()
			ctx := context.Background()

			// Only apply the ID filter when the case supplies a valid ID.
			filter := platform.ViewFilter{}
			if tt.args.ID.Valid() {
				filter.ID = &tt.args.ID
			}

			views, _, err := s.FindViews(ctx, filter)
			diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t)

			if diff := cmp.Diff(views, tt.wants.views, viewCmpOptions...); diff != "" {
				t.Errorf("views are different -got/+want\ndiff %s", diff)
			}
		})
	}
}
// DeleteView exercises a ViewService implementation's DeleteView method,
// asserting both the returned error and the remaining view set afterwards.
func DeleteView(
	init func(ViewFields, *testing.T) (platform.ViewService, string, func()),
	t *testing.T,
) {
	type args struct {
		ID platform.ID
	}
	type wants struct {
		err   error
		views []*platform.View
	}

	tests := []struct {
		name   string
		fields ViewFields
		args   args
		wants  wants
	}{
		{
			name: "delete views using exist id",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{
				ID: MustIDBase16(viewOneID),
			},
			wants: wants{
				views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
		},
		{
			// Deleting an unknown ID must fail with ENotFound and leave the
			// stored views untouched.
			name: "delete views using id that does not exist",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{
				ID: MustIDBase16(viewThreeID),
			},
			wants: wants{
				err: &platform.Error{
					Code: platform.ENotFound,
					Op:   platform.OpDeleteView,
					Msg:  "view not found",
				},
				views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s, opPrefix, done := init(tt.fields, t)
			defer done()
			ctx := context.Background()
			err := s.DeleteView(ctx, tt.args.ID)
			diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t)

			// Re-read all views to confirm the post-delete state.
			filter := platform.ViewFilter{}
			views, _, err := s.FindViews(ctx, filter)
			if err != nil {
				t.Fatalf("failed to retrieve views: %v", err)
			}
			if diff := cmp.Diff(views, tt.wants.views, viewCmpOptions...); diff != "" {
				t.Errorf("views are different -got/+want\ndiff %s", diff)
			}
		})
	}
}
// UpdateView exercises a ViewService implementation's UpdateView method:
// renaming a view, replacing its properties, and updating a missing ID.
func UpdateView(
	init func(ViewFields, *testing.T) (platform.ViewService, string, func()),
	t *testing.T,
) {
	type args struct {
		// name and properties are optional update fields; the zero value
		// ("" / nil) means the field is left out of the ViewUpdate.
		name       string
		properties platform.ViewProperties
		id         platform.ID
	}
	type wants struct {
		err  error
		view *platform.View
	}

	tests := []struct {
		name   string
		fields ViewFields
		args   args
		wants  wants
	}{
		{
			name: "update name",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{
				id:   MustIDBase16(viewOneID),
				name: "changed",
			},
			wants: wants{
				view: &platform.View{
					ViewContents: platform.ViewContents{
						ID:   MustIDBase16(viewOneID),
						Name: "changed",
					},
					Properties: platform.EmptyViewProperties{},
				},
			},
		},
		{
			name: "update properties",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{
				id: MustIDBase16(viewOneID),
				properties: platform.TableViewProperties{
					Type:       "table",
					TimeFormat: "rfc3339",
				},
			},
			wants: wants{
				view: &platform.View{
					ViewContents: platform.ViewContents{
						ID:   MustIDBase16(viewOneID),
						Name: "view1",
					},
					Properties: platform.TableViewProperties{
						Type:       "table",
						TimeFormat: "rfc3339",
					},
				},
			},
		},
		{
			// Updating an ID not present in the fixture must fail with
			// ENotFound and the UpdateView operation code.
			name: "update id not exists",
			fields: ViewFields{
				Views: []*platform.View{
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewOneID),
							Name: "view1",
						},
						Properties: platform.EmptyViewProperties{},
					},
					{
						ViewContents: platform.ViewContents{
							ID:   MustIDBase16(viewTwoID),
							Name: "view2",
						},
						Properties: platform.TableViewProperties{
							Type:       "table",
							TimeFormat: "rfc3339",
						},
					},
				},
			},
			args: args{
				id:   MustIDBase16(threeID),
				name: "changed",
			},
			wants: wants{
				err: &platform.Error{
					Code: platform.ENotFound,
					Op:   platform.OpUpdateView,
					Msg:  "view not found",
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s, opPrefix, done := init(tt.fields, t)
			defer done()
			ctx := context.Background()

			// Build the update from only the fields the case actually set.
			upd := platform.ViewUpdate{}
			if tt.args.name != "" {
				upd.Name = &tt.args.name
			}
			if tt.args.properties != nil {
				upd.Properties = tt.args.properties
			}

			view, err := s.UpdateView(ctx, tt.args.id, upd)
			diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t)

			if diff := cmp.Diff(view, tt.wants.view, viewCmpOptions...); diff != "" {
				t.Errorf("view is different -got/+want\ndiff %s", diff)
			}
		})
	}
}

View File

@ -34,6 +34,7 @@ func NewDocumentIntegrationTest(store kv.Store) func(t *testing.T) {
l1 := &influxdb.Label{Name: "l1"}
l2 := &influxdb.Label{Name: "l2"}
mustCreateLabels(ctx, svc, l1, l2)
lBad := &influxdb.Label{Name: "bad"}
o1 := &influxdb.Organization{Name: "foo"}
o2 := &influxdb.Organization{Name: "bar"}
@ -109,6 +110,22 @@ func NewDocumentIntegrationTest(store kv.Store) func(t *testing.T) {
}
})
t.Run("can't create document with unexisted label", func(t *testing.T) {
d4 := &influxdb.Document{
Meta: influxdb.DocumentMeta{
Name: "i4",
},
Content: map[string]interface{}{
"k4": "v4",
},
}
err = s.CreateDocument(ctx, d4, influxdb.WithLabel(lBad.Name))
ErrorsEqual(t, err, &influxdb.Error{
Code: influxdb.ENotFound,
Msg: "label not found",
})
})
dl1 := new(influxdb.Document)
*dl1 = *d1
dl1.Labels = append([]*influxdb.Label{}, l1)

View File

@ -937,6 +937,17 @@ func (e *Engine) ShouldCompactCache(t time.Time) CacheStatus {
return CacheStatusOkay
}
// lastModified reports the most recent write time observed across the
// engine's file store and its in-memory cache.
func (e *Engine) lastModified() time.Time {
	fsTime := e.FileStore.LastModified()
	cacheTime := e.Cache.LastWriteTime()
	// Pick whichever of the two timestamps is later.
	if fsTime.Before(cacheTime) {
		return cacheTime
	}
	return fsTime
}
func (e *Engine) compact(wg *sync.WaitGroup) {
t := time.NewTicker(time.Second)
defer t.Stop()
@ -956,7 +967,7 @@ func (e *Engine) compact(wg *sync.WaitGroup) {
level1Groups := e.CompactionPlan.PlanLevel(1)
level2Groups := e.CompactionPlan.PlanLevel(2)
level3Groups := e.CompactionPlan.PlanLevel(3)
level4Groups := e.CompactionPlan.Plan(e.FileStore.LastModified())
level4Groups := e.CompactionPlan.Plan(e.lastModified())
e.compactionTracker.SetOptimiseQueue(uint64(len(level4Groups)))
// If no full compactions are need, see if an optimize is needed

View File

@ -21,7 +21,7 @@ describe('Tasks', () => {
cy.getByTestID('dropdown--item New Task').click()
cy.getByInputName('name').type(taskName)
cy.getByInputName('interval').type('1d')
cy.getByInputName('interval').type('24h')
cy.getByInputName('offset').type('20m')
cy.get<Bucket>('@bucket').then(({name}) => {
@ -103,7 +103,7 @@ describe('Tasks', () => {
cy.getByTestID('dropdown--item New Task').click()
cy.getByInputName('name').type('🦄ask')
cy.getByInputName('interval').type('1d')
cy.getByInputName('interval').type('24h')
cy.getByInputName('offset').type('20m')
cy.getByTestID('flux-editor').within(() => {

View File

@ -61,7 +61,7 @@ export const createTask = (
): Cypress.Chainable<Cypress.Response> => {
const flux = `option task = {
name: "${name}",
every: 1d,
every: 24h,
offset: 20m
}
from(bucket: "defbuck")

298
ui/package-lock.json generated
View File

@ -985,9 +985,9 @@
}
},
"@influxdata/influx": {
"version": "0.2.52",
"resolved": "https://registry.npmjs.org/@influxdata/influx/-/influx-0.2.52.tgz",
"integrity": "sha512-EK1JR2c7pHqJVmWF8KcBVqdoM9MUn/tK+GeRUC2WLuI+HK7dAuc8oVvnDb1dXh01VQq3oQQxwSNO40tm9Opgrw==",
"version": "0.2.54",
"resolved": "https://registry.npmjs.org/@influxdata/influx/-/influx-0.2.54.tgz",
"integrity": "sha512-KDVe8ZYeNA/Ii9P9USWW28n4fES9ydQ2rrrQXzOqfqUQu2wkXDNluIKtMdjqJ0zwhqlc0v3hSSYGxPCHOIATEA==",
"requires": {
"axios": "^0.18.0"
}
@ -1002,6 +1002,55 @@
"raf": "^3.1.0"
}
},
"@influxdata/vis": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/@influxdata/vis/-/vis-0.1.1.tgz",
"integrity": "sha512-qicZWRqujBbZo9NRh0kWxifXamL5fvBBW6ilhrrZHxN4O9spkwrMJB1mhxlR7+UxUsWt+YBQffCIlxKeOrxJBQ==",
"requires": {
"chroma-js": "^2.0.2",
"d3-array": "^2.0.3",
"d3-scale": "^2.2.2",
"immer": "^2.0.0",
"react-virtualized-auto-sizer": "^1.0.2"
},
"dependencies": {
"chroma-js": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/chroma-js/-/chroma-js-2.0.3.tgz",
"integrity": "sha512-2kTvZZOFSV1O81/rm99t9vmkh9jQxsHqsRRoZevDVz/VCC3yKMyPuMK8M5yHG+UMg2tV6cRoqtZtgcD92udcBw=="
},
"d3-array": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.0.3.tgz",
"integrity": "sha512-C7g4aCOoJa+/K5hPVqZLG8wjYHsTUROTk7Z1Ep9F4P5l+WVrvV0+6nAZ1wKTRLMhFWpGbozxUpyjIPZYAaLi+g=="
},
"d3-scale": {
"version": "2.2.2",
"resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-2.2.2.tgz",
"integrity": "sha512-LbeEvGgIb8UMcAa0EATLNX0lelKWGYDQiPdHj+gLblGVhGLyNbaCn3EvrJf0A3Y/uOOU5aD6MTh5ZFCdEwGiCw==",
"requires": {
"d3-array": "^1.2.0",
"d3-collection": "1",
"d3-format": "1",
"d3-interpolate": "1",
"d3-time": "1",
"d3-time-format": "2"
},
"dependencies": {
"d3-array": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/d3-array/-/d3-array-1.2.4.tgz",
"integrity": "sha512-KHW6M86R+FUPYGb3R5XiYjXPq7VzwxZ22buHhAEVG5ztoEcZZMLov530mmccaqA1GghZArjQV46fuc8kUqhhHw=="
}
}
},
"immer": {
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/immer/-/immer-2.1.4.tgz",
"integrity": "sha512-6UPbG/DIXFSWp10oJJaCPl5/lp5GhGEscDH0QGYKc5EMT5PLZ9+L8hhyc44zRHksI7CQXJp8r6nlDR3n09X6SA=="
}
}
},
"@mrmlnc/readdir-enhanced": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz",
@ -1127,7 +1176,7 @@
},
"@types/codemirror": {
"version": "0.0.56",
"resolved": "http://registry.npmjs.org/@types/codemirror/-/codemirror-0.0.56.tgz",
"resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-0.0.56.tgz",
"integrity": "sha512-OMtPqg2wFOEcNeVga+m+UXpYJw8ugISPCQOtShdFUho/k91Ms1oWOozoDT1I87Phv6IdwLfMLtIOahh1tO1cJQ==",
"dev": true
},
@ -1306,7 +1355,7 @@
},
"@types/react-dnd-html5-backend": {
"version": "2.1.9",
"resolved": "http://registry.npmjs.org/@types/react-dnd-html5-backend/-/react-dnd-html5-backend-2.1.9.tgz",
"resolved": "https://registry.npmjs.org/@types/react-dnd-html5-backend/-/react-dnd-html5-backend-2.1.9.tgz",
"integrity": "sha512-o42zIpcgXXj04xYDT9o9kXoldqDa81ie5XAKKCo7/fOWEhOCRt9UYu+LzOXC308eKKf8v0HzbJaDTr93j3JrTw==",
"dev": true,
"requires": {
@ -2046,7 +2095,7 @@
},
"array-equal": {
"version": "1.0.0",
"resolved": "http://registry.npmjs.org/array-equal/-/array-equal-1.0.0.tgz",
"resolved": "https://registry.npmjs.org/array-equal/-/array-equal-1.0.0.tgz",
"integrity": "sha1-jCpe8kcv2ep0KwTHenUJO6J1fJM=",
"dev": true
},
@ -2131,7 +2180,7 @@
},
"util": {
"version": "0.10.3",
"resolved": "http://registry.npmjs.org/util/-/util-0.10.3.tgz",
"resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz",
"integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=",
"dev": true,
"requires": {
@ -2299,7 +2348,7 @@
},
"axios": {
"version": "0.18.0",
"resolved": "http://registry.npmjs.org/axios/-/axios-0.18.0.tgz",
"resolved": "https://registry.npmjs.org/axios/-/axios-0.18.0.tgz",
"integrity": "sha1-MtU+SFHv3AoRmTts0AB4nXDAUQI=",
"requires": {
"follow-redirects": "^1.3.0",
@ -2683,7 +2732,7 @@
},
"browserify-aes": {
"version": "1.2.0",
"resolved": "http://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz",
"resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz",
"integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==",
"dev": true,
"requires": {
@ -2720,7 +2769,7 @@
},
"browserify-rsa": {
"version": "4.0.1",
"resolved": "http://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz",
"resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz",
"integrity": "sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=",
"dev": true,
"requires": {
@ -2799,7 +2848,7 @@
},
"buffer": {
"version": "4.9.1",
"resolved": "http://registry.npmjs.org/buffer/-/buffer-4.9.1.tgz",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.1.tgz",
"integrity": "sha1-bRu2AbB6TvztlwlBMgkwJ8lbwpg=",
"dev": true,
"requires": {
@ -3339,6 +3388,11 @@
"integrity": "sha512-FXDYw4TjR8wgPZYui2LeTqWh1BLpfQ8lB6upMtlpDF6WlOOxghmTTxWyngdKTgozqBgKnHbTVwTE+hOHqAykuQ==",
"dev": true
},
"cnbuilder": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/cnbuilder/-/cnbuilder-1.0.8.tgz",
"integrity": "sha512-05l9Bhs0FhEFGJ6vFkqL9O9USCKT3zBfOoTAYXGDKA4nFBX1Qc780bvppG9av2U1sKpa27JT8brJtM6VQquRcQ=="
},
"co": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
@ -3664,7 +3718,7 @@
},
"create-hash": {
"version": "1.2.0",
"resolved": "http://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz",
"resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz",
"integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==",
"dev": true,
"requires": {
@ -3677,7 +3731,7 @@
},
"create-hmac": {
"version": "1.1.7",
"resolved": "http://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz",
"resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz",
"integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==",
"dev": true,
"requires": {
@ -4494,7 +4548,7 @@
},
"diffie-hellman": {
"version": "5.0.3",
"resolved": "http://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz",
"resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz",
"integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==",
"dev": true,
"requires": {
@ -4516,7 +4570,7 @@
},
"dnd-core": {
"version": "2.6.0",
"resolved": "http://registry.npmjs.org/dnd-core/-/dnd-core-2.6.0.tgz",
"resolved": "https://registry.npmjs.org/dnd-core/-/dnd-core-2.6.0.tgz",
"integrity": "sha1-ErrWbVh0LG5ffPKUP7aFlED4CcQ=",
"requires": {
"asap": "^2.0.6",
@ -5766,7 +5820,7 @@
"dependencies": {
"core-js": {
"version": "1.2.7",
"resolved": "http://registry.npmjs.org/core-js/-/core-js-1.2.7.tgz",
"resolved": "https://registry.npmjs.org/core-js/-/core-js-1.2.7.tgz",
"integrity": "sha1-ZSKUwUZR2yj6k70tX/KYOk8IxjY="
}
}
@ -5835,7 +5889,7 @@
},
"finalhandler": {
"version": "1.1.1",
"resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz",
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz",
"integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==",
"dev": true,
"requires": {
@ -6097,21 +6151,20 @@
"dependencies": {
"abbrev": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
"resolved": false,
"integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
"dev": true,
"optional": true
},
"ansi-regex": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
"resolved": false,
"integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
"dev": true,
"optional": true
"dev": true
},
"aproba": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz",
"resolved": false,
"integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==",
"dev": true,
"optional": true
@ -6129,17 +6182,15 @@
},
"balanced-match": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
"resolved": false,
"integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=",
"dev": true,
"optional": true
"dev": true
},
"brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"resolved": false,
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"dev": true,
"optional": true,
"requires": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
@ -6154,35 +6205,32 @@
},
"code-point-at": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz",
"resolved": false,
"integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=",
"dev": true,
"optional": true
"dev": true
},
"concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"resolved": false,
"integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=",
"dev": true,
"optional": true
"dev": true
},
"console-control-strings": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz",
"resolved": false,
"integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=",
"dev": true,
"optional": true
"dev": true
},
"core-util-is": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
"resolved": false,
"integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=",
"dev": true,
"optional": true
},
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"resolved": false,
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dev": true,
"optional": true,
@ -6199,21 +6247,21 @@
},
"delegates": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz",
"resolved": false,
"integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=",
"dev": true,
"optional": true
},
"detect-libc": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz",
"resolved": false,
"integrity": "sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=",
"dev": true,
"optional": true
},
"fs-minipass": {
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-1.2.5.tgz",
"resolved": false,
"integrity": "sha512-JhBl0skXjUPCFH7x6x61gQxrKyXsxB5gcgePLZCwfyCGGsTISMoIeObbrvVeP6Xmyaudw4TT43qV2Gz+iyd2oQ==",
"dev": true,
"optional": true,
@ -6223,14 +6271,14 @@
},
"fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"resolved": false,
"integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=",
"dev": true,
"optional": true
},
"gauge": {
"version": "2.7.4",
"resolved": "https://registry.npmjs.org/gauge/-/gauge-2.7.4.tgz",
"resolved": false,
"integrity": "sha1-LANAXHU4w51+s3sxcCLjJfsBi/c=",
"dev": true,
"optional": true,
@ -6262,7 +6310,7 @@
},
"has-unicode": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz",
"resolved": false,
"integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=",
"dev": true,
"optional": true
@ -6279,7 +6327,7 @@
},
"ignore-walk": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/ignore-walk/-/ignore-walk-3.0.1.tgz",
"resolved": false,
"integrity": "sha512-DTVlMx3IYPe0/JJcYP7Gxg7ttZZu3IInhuEhbchuqneY9wWe5Ojy2mXLBaQFUQmo0AW2r3qG7m1mg86js+gnlQ==",
"dev": true,
"optional": true,
@ -6289,7 +6337,7 @@
},
"inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"resolved": false,
"integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
"dev": true,
"optional": true,
@ -6300,58 +6348,53 @@
},
"inherits": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
"resolved": false,
"integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=",
"dev": true,
"optional": true
"dev": true
},
"ini": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz",
"resolved": false,
"integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==",
"dev": true,
"optional": true
},
"is-fullwidth-code-point": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz",
"resolved": false,
"integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=",
"dev": true,
"optional": true,
"requires": {
"number-is-nan": "^1.0.0"
}
},
"isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"resolved": false,
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
"dev": true,
"optional": true
},
"minimatch": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
"resolved": false,
"integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
"dev": true,
"optional": true,
"requires": {
"brace-expansion": "^1.1.7"
}
},
"minimist": {
"version": "0.0.8",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
"resolved": false,
"integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=",
"dev": true,
"optional": true
"dev": true
},
"minipass": {
"version": "2.2.4",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-2.2.4.tgz",
"integrity": "sha512-hzXIWWet/BzWhYs2b+u7dRHlruXhwdgvlTMDKC6Cb1U7ps6Ac6yQlR39xsbjWJE377YTCtKwIXIpJ5oP+j5y8g==",
"dev": true,
"optional": true,
"requires": {
"safe-buffer": "^5.1.1",
"yallist": "^3.0.0"
@ -6369,17 +6412,16 @@
},
"mkdirp": {
"version": "0.5.1",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz",
"resolved": false,
"integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=",
"dev": true,
"optional": true,
"requires": {
"minimist": "0.0.8"
}
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"resolved": false,
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
"dev": true,
"optional": true
@ -6417,7 +6459,7 @@
},
"nopt": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.1.tgz",
"resolved": false,
"integrity": "sha1-0NRoWv1UFRk8jHUFYC0NF81kR00=",
"dev": true,
"optional": true,
@ -6446,7 +6488,7 @@
},
"npmlog": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz",
"resolved": false,
"integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==",
"dev": true,
"optional": true,
@ -6459,45 +6501,43 @@
},
"number-is-nan": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz",
"resolved": false,
"integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=",
"dev": true,
"optional": true
"dev": true
},
"object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
"resolved": false,
"integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=",
"dev": true,
"optional": true
},
"once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"resolved": false,
"integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
"dev": true,
"optional": true,
"requires": {
"wrappy": "1"
}
},
"os-homedir": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz",
"resolved": false,
"integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=",
"dev": true,
"optional": true
},
"os-tmpdir": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
"resolved": false,
"integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=",
"dev": true,
"optional": true
},
"osenv": {
"version": "0.1.5",
"resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz",
"resolved": false,
"integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==",
"dev": true,
"optional": true,
@ -6508,14 +6548,14 @@
},
"path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"resolved": false,
"integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
"dev": true,
"optional": true
},
"process-nextick-args": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz",
"resolved": false,
"integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==",
"dev": true,
"optional": true
@ -6535,7 +6575,7 @@
"dependencies": {
"minimist": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
"resolved": false,
"integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=",
"dev": true,
"optional": true
@ -6544,7 +6584,7 @@
},
"readable-stream": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
"resolved": false,
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
"dev": true,
"optional": true,
@ -6572,19 +6612,18 @@
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz",
"integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==",
"dev": true,
"optional": true
"dev": true
},
"safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
"resolved": false,
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
"dev": true,
"optional": true
},
"sax": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
"resolved": false,
"integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==",
"dev": true,
"optional": true
@ -6598,24 +6637,23 @@
},
"set-blocking": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz",
"resolved": false,
"integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=",
"dev": true,
"optional": true
},
"signal-exit": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz",
"resolved": false,
"integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=",
"dev": true,
"optional": true
},
"string-width": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz",
"resolved": false,
"integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=",
"dev": true,
"optional": true,
"requires": {
"code-point-at": "^1.0.0",
"is-fullwidth-code-point": "^1.0.0",
@ -6624,7 +6662,7 @@
},
"string_decoder": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"resolved": false,
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"dev": true,
"optional": true,
@ -6634,17 +6672,16 @@
},
"strip-ansi": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
"resolved": false,
"integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
"dev": true,
"optional": true,
"requires": {
"ansi-regex": "^2.0.0"
}
},
"strip-json-comments": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
"resolved": false,
"integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=",
"dev": true,
"optional": true
@ -6667,7 +6704,7 @@
},
"util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"resolved": false,
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
"dev": true,
"optional": true
@ -6684,17 +6721,15 @@
},
"wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"resolved": false,
"integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=",
"dev": true,
"optional": true
"dev": true
},
"yallist": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-3.0.2.tgz",
"integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k=",
"dev": true,
"optional": true
"dev": true
}
}
},
@ -6741,7 +6776,7 @@
},
"get-stream": {
"version": "3.0.0",
"resolved": "http://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz",
"integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=",
"dev": true
},
@ -7583,7 +7618,7 @@
},
"http-errors": {
"version": "1.6.3",
"resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
"integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
"dev": true,
"requires": {
@ -7606,7 +7641,7 @@
},
"http-proxy-middleware": {
"version": "0.18.0",
"resolved": "http://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.18.0.tgz",
"resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.18.0.tgz",
"integrity": "sha512-Fs25KVMPAIIcgjMZkVHJoKg9VcXcC1C8yb9JUgeDvVXY0S/zgVIhMb+qVswDIgtJe2DfckMSY2d6TuTEutlk6Q==",
"dev": true,
"requires": {
@ -9967,7 +10002,7 @@
},
"lodash.isempty": {
"version": "4.4.0",
"resolved": "http://registry.npmjs.org/lodash.isempty/-/lodash.isempty-4.4.0.tgz",
"resolved": "https://registry.npmjs.org/lodash.isempty/-/lodash.isempty-4.4.0.tgz",
"integrity": "sha1-b4bL7di+TsmHvpqvM8loTbGzHn4="
},
"lodash.isequal": {
@ -10212,7 +10247,7 @@
},
"media-typer": {
"version": "0.3.0",
"resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=",
"dev": true
},
@ -10408,7 +10443,7 @@
},
"minimist": {
"version": "1.2.0",
"resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
"integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=",
"dev": true
},
@ -10453,7 +10488,7 @@
},
"mkdirp": {
"version": "0.5.1",
"resolved": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz",
"integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=",
"dev": true,
"requires": {
@ -10462,7 +10497,7 @@
"dependencies": {
"minimist": {
"version": "0.0.8",
"resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
"integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=",
"dev": true
}
@ -11098,7 +11133,7 @@
},
"os-homedir": {
"version": "1.0.2",
"resolved": "http://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz",
"resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz",
"integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=",
"dev": true
},
@ -11115,7 +11150,7 @@
},
"os-tmpdir": {
"version": "1.0.2",
"resolved": "http://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
"resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
"integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=",
"dev": true
},
@ -11616,7 +11651,7 @@
},
"path-is-absolute": {
"version": "1.0.1",
"resolved": "http://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
"dev": true
},
@ -12967,7 +13002,7 @@
},
"react-dnd": {
"version": "2.6.0",
"resolved": "http://registry.npmjs.org/react-dnd/-/react-dnd-2.6.0.tgz",
"resolved": "https://registry.npmjs.org/react-dnd/-/react-dnd-2.6.0.tgz",
"integrity": "sha1-f6JWds+CfViokSk+PBq1naACVFo=",
"requires": {
"disposables": "^1.0.1",
@ -12980,7 +13015,7 @@
},
"react-dnd-html5-backend": {
"version": "2.6.0",
"resolved": "http://registry.npmjs.org/react-dnd-html5-backend/-/react-dnd-html5-backend-2.6.0.tgz",
"resolved": "https://registry.npmjs.org/react-dnd-html5-backend/-/react-dnd-html5-backend-2.6.0.tgz",
"integrity": "sha1-WQzRzKeEQbsnTt1XH+9MCxbdz44=",
"requires": {
"lodash": "^4.2.0"
@ -13095,7 +13130,7 @@
},
"react-resize-detector": {
"version": "2.3.0",
"resolved": "http://registry.npmjs.org/react-resize-detector/-/react-resize-detector-2.3.0.tgz",
"resolved": "https://registry.npmjs.org/react-resize-detector/-/react-resize-detector-2.3.0.tgz",
"integrity": "sha512-oCAddEWWeFWYH5FAcHdBYcZjAw9fMzRUK9sWSx6WvSSOPVRxcHd5zTIGy/mOus+AhN/u6T4TMiWxvq79PywnJQ==",
"requires": {
"lodash.debounce": "^4.0.8",
@ -13106,7 +13141,7 @@
},
"react-router": {
"version": "3.2.1",
"resolved": "http://registry.npmjs.org/react-router/-/react-router-3.2.1.tgz",
"resolved": "https://registry.npmjs.org/react-router/-/react-router-3.2.1.tgz",
"integrity": "sha512-SXkhC0nr3G0ltzVU07IN8jYl0bB6FsrDIqlLC9dK3SITXqyTJyM7yhXlUqs89w3Nqi5OkXsfRUeHX+P874HQrg==",
"requires": {
"create-react-class": "^15.5.1",
@ -13123,6 +13158,26 @@
"resolved": "https://registry.npmjs.org/react-router-redux/-/react-router-redux-4.0.8.tgz",
"integrity": "sha1-InQDWWtRUeGCN32rg1tdRfD4BU4="
},
"react-scrollbars-custom": {
"version": "4.0.0-alpha.8",
"resolved": "https://registry.npmjs.org/react-scrollbars-custom/-/react-scrollbars-custom-4.0.0-alpha.8.tgz",
"integrity": "sha512-sj56pEY/0VV551B61yUzYy7YcR/h5ge51lqUGD9CbBmcJnyVDvZ90zaIIeERPsLxSR6Ddb1ueBAexKIe14zpNw==",
"requires": {
"cnbuilder": "^1.0.8",
"react-draggable": "^3.2.1"
},
"dependencies": {
"react-draggable": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/react-draggable/-/react-draggable-3.2.1.tgz",
"integrity": "sha512-r+3Bs9InID2lyIEbR8UIRVtpn4jgu1ArFEZgIy8vibJjijLSdNLX7rH9U68BBVD4RD9v44RXbaK4EHLyKXzNQw==",
"requires": {
"classnames": "^2.2.5",
"prop-types": "^15.6.0"
}
}
}
},
"react-test-renderer": {
"version": "16.5.2",
"resolved": "https://registry.npmjs.org/react-test-renderer/-/react-test-renderer-16.5.2.tgz",
@ -13167,6 +13222,11 @@
"react-lifecycles-compat": "^3.0.4"
}
},
"react-virtualized-auto-sizer": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/react-virtualized-auto-sizer/-/react-virtualized-auto-sizer-1.0.2.tgz",
"integrity": "sha512-MYXhTY1BZpdJFjUovvYHVBmkq79szK/k7V3MO+36gJkWGkrXKtyr4vCPtpphaTLRAdDNoYEYFZWE8LjN+PIHNg=="
},
"read-pkg": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz",
@ -13235,7 +13295,7 @@
},
"readable-stream": {
"version": "2.3.6",
"resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
"requires": {
"core-util-is": "~1.0.0",
@ -13506,7 +13566,7 @@
"dependencies": {
"hoist-non-react-statics": {
"version": "1.2.0",
"resolved": "http://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-1.2.0.tgz",
"resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-1.2.0.tgz",
"integrity": "sha1-qkSM8JhtVcxAdzsXF0t90GbLfPs="
},
"prop-types": {
@ -13521,7 +13581,7 @@
},
"redux-thunk": {
"version": "1.0.3",
"resolved": "http://registry.npmjs.org/redux-thunk/-/redux-thunk-1.0.3.tgz",
"resolved": "https://registry.npmjs.org/redux-thunk/-/redux-thunk-1.0.3.tgz",
"integrity": "sha1-d4qgCZ7qBZUDGrazkWX2Zw2NJr0="
},
"regenerate": {
@ -13684,7 +13744,7 @@
"dependencies": {
"jsesc": {
"version": "0.5.0",
"resolved": "http://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz",
"resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz",
"integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=",
"dev": true
}
@ -13980,7 +14040,7 @@
},
"safe-regex": {
"version": "1.1.0",
"resolved": "http://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
"resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
"integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
"dev": true,
"requires": {
@ -14220,7 +14280,7 @@
},
"sha.js": {
"version": "2.4.11",
"resolved": "http://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz",
"resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz",
"integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==",
"dev": true,
"requires": {
@ -14699,7 +14759,7 @@
},
"string_decoder": {
"version": "1.1.1",
"resolved": "http://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"requires": {
"safe-buffer": "~5.1.0"
@ -14730,7 +14790,7 @@
},
"strip-eof": {
"version": "1.0.0",
"resolved": "http://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz",
"resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz",
"integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=",
"dev": true
},
@ -16168,7 +16228,7 @@
},
"wrap-ansi": {
"version": "2.1.0",
"resolved": "http://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz",
"integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=",
"dev": true,
"requires": {

View File

@ -136,9 +136,10 @@
"webpack": "^4.29.0"
},
"dependencies": {
"@influxdata/influx": "0.2.52",
"@influxdata/clockface": "0.0.8",
"@influxdata/influx": "0.2.54",
"@influxdata/react-custom-scrollbars": "4.3.8",
"@influxdata/vis": "^0.1.1",
"axios": "^0.18.0",
"babel-polyfill": "^6.26.0",
"bignumber.js": "^4.0.2",
@ -177,6 +178,7 @@
"react-resize-detector": "^2.3.0",
"react-router": "^3.0.2",
"react-router-redux": "^4.0.8",
"react-scrollbars-custom": "^4.0.0-alpha.8",
"react-tooltip": "^3.2.1",
"react-virtualized": "^9.18.5",
"redux": "^4.0.0",

View File

@ -94,7 +94,7 @@ export class Signin extends PureComponent<Props, State> {
this.props.notify(sessionTimedOut())
}
this.props.router.push(`/signin${returnTo}`)
this.props.router.replace(`/signin${returnTo}`)
}
}
}

View File

@ -6,17 +6,17 @@ import ProtoboardIcon from 'src/clockface/components/card_select/ProtoboardIcon'
interface Props {
id: string
name?: string
label: string
image?: StatelessComponent
checked?: boolean
disabled?: boolean
onClick: () => void
name?: string
image?: StatelessComponent
checked: boolean
disabled: boolean
}
@ErrorHandling
class CardSelectCard extends PureComponent<Props> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
checked: false,
disabled: false,
}

View File

@ -2,11 +2,11 @@
import React, {PureComponent} from 'react'
interface Props {
displayText?: string
displayText: string
}
class ProtoboardIcon extends PureComponent<Props> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
displayText: '',
}

View File

@ -22,24 +22,19 @@ import {validateHexCode} from 'src/configuration/utils/labels'
// Styles
import 'src/clockface/components/color_picker/ColorPicker.scss'
interface PassedProps {
interface Props {
color: string
onChange: (color: string, status?: ComponentStatus) => void
testID: string
maintainInputFocus: boolean
}
interface DefaultProps {
maintainInputFocus?: boolean
testID?: string
}
type Props = PassedProps & DefaultProps
interface State {
errorMessage: string
}
export default class ColorPicker extends Component<Props, State> {
public static defaultProps: DefaultProps = {
public static defaultProps = {
maintainInputFocus: false,
testID: 'color-picker',
}

View File

@ -24,16 +24,16 @@ import {ErrorHandling} from 'src/shared/decorators/errors'
interface Props {
confirmText: string
onConfirm: (returnValue?: any) => void
size: ComponentSize
shape: ButtonShape
testID: string
status: ComponentStatus
returnValue?: any
text?: string
size?: ComponentSize
shape?: ButtonShape
icon?: IconFont
status?: ComponentStatus
titleText?: string
tabIndex?: number
className?: string
testID?: string
}
interface State {
@ -42,7 +42,7 @@ interface State {
@ErrorHandling
class ConfirmationButton extends Component<Props, State> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
size: ComponentSize.Small,
shape: ButtonShape.Default,
status: ComponentStatus.Default,

View File

@ -16,7 +16,7 @@ import {ErrorHandling} from 'src/shared/decorators/errors'
interface Props {
children: JSX.Element | JSX.Element[]
align?: Alignment
align: Alignment
className?: string
}
@ -26,7 +26,7 @@ interface State {
@ErrorHandling
class Context extends PureComponent<Props, State> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
align: Alignment.Right,
}

View File

@ -17,28 +17,23 @@ import {
import {ErrorHandling} from 'src/shared/decorators/errors'
interface PassedProps {
interface Props {
children: JSX.Element | JSX.Element[]
icon: IconFont
onBoostZIndex?: (boostZIndex: boolean) => void
text: string
color: ComponentColor
shape: ButtonShape
testID: string
}
interface DefaultProps {
text?: string
color?: ComponentColor
shape?: ButtonShape
testID?: string
}
type Props = PassedProps & DefaultProps
interface State {
isExpanded: boolean
}
@ErrorHandling
class ContextMenu extends Component<Props, State> {
public static defaultProps: DefaultProps = {
public static defaultProps = {
color: ComponentColor.Primary,
shape: ButtonShape.Square,
text: '',

View File

@ -2,24 +2,19 @@
import React, {Component} from 'react'
import classnames from 'classnames'
interface PassedProps {
interface Props {
label: string
action: (value?: any) => void
description: string
testID: string
value?: any
onCollapseMenu?: () => void
disabled?: boolean
onCollapseMenu?: () => void
}
interface DefaultProps {
description?: string
testID?: string
}
type Props = PassedProps & DefaultProps
class ContextMenuItem extends Component<Props> {
public static defaultProps: DefaultProps = {
description: null,
public static defaultProps = {
description: '',
testID: 'context-menu-item',
}

View File

@ -6,13 +6,13 @@ import {ErrorHandling} from 'src/shared/decorators/errors'
interface Props {
children: JSX.Element
minSizePixels?: number
minSizePixels: number
sizePercent?: number
}
@ErrorHandling
class DraggableResizerPanel extends Component<Props> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
minSizePixels: 0,
}

View File

@ -46,7 +46,6 @@
line-height: 12px;
font-weight: 600;
color: fade-out($g20-white, 0.18);
white-space: nowrap;
position: relative;
text-align: left;
@ -64,13 +63,21 @@
cursor: pointer;
}
.dropdown--action & {
padding-left: 11px;
}
}
.dropdown-item--children {
.dropdown-wrap & {
word-break: break-all;
white-space: pre-wrap;
}
.dropdown--action & {
padding-left: 11px;
.dropdown-truncate & {
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
}
@ -150,8 +157,7 @@
$dividerA,
$dividerB,
$dividerText,
$scrollA,
$scrollB
$checkbox
) {
@include gradient-h($backgroundA, $backgroundB);
@ -167,13 +173,7 @@
background-color: $dividerA;
}
.dropdown-item--checkbox:after {
background-color: $scrollA;
}
.fancy-scroll--thumb-h {
@include gradient-h($scrollA, $scrollB);
}
.fancy-scroll--thumb-v {
@include gradient-v($scrollA, $scrollB);
background-color: $checkbox;
}
}
@ -186,8 +186,7 @@
$c-amethyst,
$c-ocean,
$c-potassium,
$c-neutrino,
$c-hydrogen
$c-neutrino
);
}
@ -200,8 +199,7 @@
$c-sapphire,
$c-ocean,
$c-laser,
$c-neutrino,
$c-hydrogen
$c-neutrino
);
}
@ -214,8 +212,7 @@
$c-ocean,
$c-viridian,
$c-krypton,
$c-neutrino,
$c-krypton
$c-neutrino
);
}
@ -228,12 +225,6 @@
$g0-obsidian,
$g2-kevlar,
$g11-sidewalk,
$c-pool,
$c-comet
$c-pool
);
}
/* TODO: Make fancyscroll more customizable */
.dropdown--menu-container .fancy-scroll--track-h {
display: none;
}

View File

@ -1,5 +1,5 @@
// Libraries
import React, {Component, CSSProperties, MouseEvent} from 'react'
import React, {Component, MouseEvent} from 'react'
import classnames from 'classnames'
// Components
@ -7,7 +7,7 @@ import {ClickOutside} from 'src/shared/components/ClickOutside'
import DropdownDivider from 'src/clockface/components/dropdowns/DropdownDivider'
import DropdownItem from 'src/clockface/components/dropdowns/DropdownItem'
import DropdownButton from 'src/clockface/components/dropdowns/DropdownButton'
import FancyScrollbar from 'src/shared/components/fancy_scrollbar/FancyScrollbar'
import DapperScrollbars from 'src/shared/components/dapperScrollbars/DapperScrollbars'
import WaitingText from 'src/shared/components/WaitingText'
// Types
@ -26,41 +26,54 @@ export enum DropdownMode {
Radio = 'radio',
}
export interface Props {
interface ThumbColors {
start: string
stop: string
}
interface PassedProps {
children: JSX.Element[]
onChange: (value: any) => void
selectedID?: string
widthPixels?: number
menuWidthPixels?: number
menuHeader?: JSX.Element
icon?: IconFont
customClass?: string
}
export interface DefaultProps {
buttonColor?: ComponentColor
buttonSize?: ComponentSize
menuColor?: DropdownMenuColors
status?: ComponentStatus
widthPixels?: number
icon?: IconFont
wrapText?: boolean
customClass?: string
maxMenuHeight?: number
menuColor?: DropdownMenuColors
mode?: DropdownMode
titleText?: string
menuHeader?: JSX.Element
testID: string
buttonTestID: string
wrapMenuText?: boolean
testID?: string
buttonTestID?: string
}
export type Props = PassedProps & DefaultProps
interface State {
expanded: boolean
}
@ErrorHandling
class Dropdown extends Component<Props, State> {
public static defaultProps: Partial<Props> = {
public static defaultProps: DefaultProps = {
buttonColor: ComponentColor.Default,
buttonSize: ComponentSize.Small,
status: ComponentStatus.Default,
wrapText: false,
maxMenuHeight: 250,
menuColor: DropdownMenuColors.Sapphire,
mode: DropdownMode.Radio,
titleText: '',
wrapMenuText: false,
testID: 'dropdown',
buttonTestID: 'dropdown-button',
}
public static Button = DropdownButton
@ -103,16 +116,17 @@ class Dropdown extends Component<Props, State> {
buttonColor,
buttonSize,
status,
wrapText,
customClass,
mode,
wrapMenuText,
} = this.props
return classnames(
`dropdown dropdown-${buttonSize} dropdown-${buttonColor}`,
{
disabled: status === ComponentStatus.Disabled,
'dropdown-wrap': wrapText,
'dropdown-wrap': wrapMenuText,
'dropdown-truncate': !wrapMenuText,
[customClass]: customClass,
[`dropdown--${mode}`]: mode,
}
@ -171,6 +185,8 @@ class Dropdown extends Component<Props, State> {
const {
selectedID,
maxMenuHeight,
widthPixels,
menuWidthPixels,
menuHeader,
menuColor,
children,
@ -183,15 +199,32 @@ class Dropdown extends Component<Props, State> {
return null
}
let width = '100%'
if (widthPixels) {
width = `${widthPixels}px`
}
if (menuWidthPixels) {
width = `${menuWidthPixels}px`
}
const {start, stop} = this.thumbColorsFromTheme
return (
<div
className={`dropdown--menu-container dropdown--${menuColor}`}
style={this.menuStyle}
style={{width}}
>
<FancyScrollbar
<DapperScrollbars
style={{
maxWidth: '100%',
maxHeight: `${maxMenuHeight}px`,
}}
autoSize={true}
autoHide={false}
autoHeight={true}
maxHeight={maxMenuHeight}
thumbStartColor={start}
thumbStopColor={stop}
>
<div
className="dropdown--menu"
@ -219,28 +252,32 @@ class Dropdown extends Component<Props, State> {
}
})}
</div>
</FancyScrollbar>
</DapperScrollbars>
</div>
)
}
private get menuStyle(): CSSProperties {
const {wrapText, widthPixels} = this.props
private get thumbColorsFromTheme(): ThumbColors {
const {menuColor} = this.props
let containerWidth = '100%'
if (widthPixels) {
containerWidth = `${widthPixels}px`
}
if (wrapText && widthPixels) {
switch (menuColor) {
case DropdownMenuColors.Amethyst:
case DropdownMenuColors.Sapphire:
return {
width: containerWidth,
start: '#BEF0FF',
stop: '#6BDFFF',
}
}
case DropdownMenuColors.Malachite:
return {
minWidth: containerWidth,
start: '#BEF0FF',
stop: '#A5F3B4',
}
default:
case DropdownMenuColors.Onyx:
return {
start: '#22ADF6',
stop: '#9394FF',
}
}
}

View File

@ -17,18 +17,18 @@ import {ErrorHandling} from 'src/shared/decorators/errors'
interface Props {
children: DropdownChild
onClick: (e: MouseEvent<HTMLElement>) => void
status?: ComponentStatus
active?: boolean
color?: ComponentColor
size?: ComponentSize
status: ComponentStatus
color: ComponentColor
size: ComponentSize
active: boolean
icon?: IconFont
title?: string
testID: string
testID?: string
}
@ErrorHandling
class DropdownButton extends Component<Props> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
color: ComponentColor.Default,
size: ComponentSize.Small,
status: ComponentStatus.Default,

View File

@ -8,14 +8,14 @@ import {DropdownChild} from 'src/clockface/types'
import {ErrorHandling} from 'src/shared/decorators/errors'
interface Props {
children?: DropdownChild
id: string
text?: string
text: string
children?: DropdownChild
}
@ErrorHandling
class DropdownDivider extends Component<Props> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
text: '',
}

View File

@ -11,15 +11,15 @@ interface Props {
id: string
children: DropdownChild
value: any
selected?: boolean
checkbox?: boolean
selected: boolean
checkbox: boolean
onClick?: (value: any) => void
testID?: string
}
@ErrorHandling
class DropdownItem extends Component<Props> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
checkbox: false,
selected: false,
}

View File

@ -24,19 +24,19 @@ import {ErrorHandling} from 'src/shared/decorators/errors'
interface Props {
children: JSX.Element[]
onChange: (selectedIDs: string[], value: any) => void
onCollapse?: () => void
selectedIDs: string[]
buttonColor?: ComponentColor
buttonSize?: ComponentSize
menuColor?: DropdownMenuColors
buttonColor: ComponentColor
buttonSize: ComponentSize
menuColor: DropdownMenuColors
wrapText: boolean
maxMenuHeight: number
emptyText: string
separatorText: string
customClass?: string
onCollapse?: () => void
status?: ComponentStatus
widthPixels?: number
icon?: IconFont
wrapText?: boolean
customClass?: string
maxMenuHeight?: number
emptyText?: string
separatorText?: string
}
interface State {
@ -45,7 +45,7 @@ interface State {
@ErrorHandling
class MultiSelectDropdown extends Component<Props, State> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
buttonColor: ComponentColor.Default,
buttonSize: ComponentSize.Small,
status: ComponentStatus.Default,

View File

@ -57,6 +57,7 @@ exports[`MultiSelectDropdown with menu expanded matches snapshot 1`] = `
<FancyScrollbar
autoHeight={true}
autoHide={false}
className=""
hideTracksWhenNotNeeded={true}
maxHeight={250}
setScrollTop={[Function]}

View File

@ -15,21 +15,16 @@ import 'src/clockface/components/empty_state/EmptyState.scss'
// Decorators
import {ErrorHandling} from 'src/shared/decorators/errors'
interface PassedProps {
interface Props {
children: JSX.Element | JSX.Element[]
size: ComponentSize
testID: string
customClass?: string
}
interface DefaultProps {
size?: ComponentSize
testID?: string
}
type Props = PassedProps & DefaultProps
@ErrorHandling
class EmptyState extends Component<Props> {
public static defaultProps: DefaultProps = {
public static defaultProps = {
size: ComponentSize.Small,
testID: 'empty-state',
}

View File

@ -11,17 +11,13 @@ import FormFooter from 'src/clockface/components/form_layout/FormFooter'
import {ErrorHandling} from 'src/shared/decorators/errors'
interface PassedProps {
interface Props {
children: JSX.Element[] | JSX.Element
style?: React.CSSProperties
className?: string
onSubmit?: (e: React.FormEvent) => void
testID: string
}
interface DefaultProps {
testID?: string
}
type Props = PassedProps & DefaultProps
interface BoxProps {
children: JSX.Element | JSX.Element[]
@ -36,7 +32,7 @@ class Form extends Component<Props> {
public static Divider = FormDivider
public static Footer = FormFooter
public static defaultProps: DefaultProps = {
public static defaultProps = {
testID: 'form-container',
}

View File

@ -9,7 +9,7 @@ import {ErrorHandling} from 'src/shared/decorators/errors'
interface Props {
children: JSX.Element | JSX.Element[]
colsXS?: Columns
colsXS: Columns
colsSM?: Columns
colsMD?: Columns
colsLG?: Columns
@ -21,7 +21,7 @@ interface Props {
@ErrorHandling
class FormFooter extends Component<Props> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
colsXS: Columns.Twelve,
}

View File

@ -7,7 +7,7 @@ import {Columns} from 'src/clockface/types'
interface Props {
children: JSX.Element[] | JSX.Element
widthXS?: Columns
widthXS: Columns
widthSM?: Columns
widthMD?: Columns
widthLG?: Columns
@ -18,7 +18,7 @@ interface Props {
}
class GridColumn extends Component<Props> {
public static defaultProps: Partial<Props> = {
public static defaultProps = {
widthXS: Columns.Twelve,
}

Some files were not shown because too many files have changed in this diff Show More