package pkger

import (
	"context"
	"errors"
	"fmt"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/influxdb/v2"
	ierrors "github.com/influxdata/influxdb/v2/kit/errors"
	"github.com/influxdata/influxdb/v2/snowflake"
	"github.com/influxdata/influxdb/v2/task/options"
	"go.uber.org/zap"
)

// APIVersion marks the current APIVersion for influx packages.
const APIVersion = "influxdata.com/v2alpha1"

type (
	// Stack is an identifier for the stateful application of a package(s). The stack
	// maps resources created from the pkg(s) to existing resources on the
	// platform. The stack is updated only after the side effects of applying a pkg
	// have taken place; if a pkg is applied and nothing changes, the stack is not
	// updated.
	Stack struct {
		ID          influxdb.ID     `json:"id"`
		OrgID       influxdb.ID     `json:"orgID"`
		Name        string          `json:"name"`
		Description string          `json:"description"`
		URLs        []string        `json:"urls"`
		Resources   []StackResource `json:"resources"`

		influxdb.CRUDLog
	}

	// StackResource is a record for an individual resource side effect generated from
	// applying a pkg.
	StackResource struct {
		APIVersion   string                     `json:"apiVersion"`
		ID           influxdb.ID                `json:"resourceID"`
		Kind         Kind                       `json:"kind"`
		PkgName      string                     `json:"pkgName"`
		Associations []StackResourceAssociation `json:"associations"`
	}

	// StackResourceAssociation associates a stack resource with another stack resource.
	StackResourceAssociation struct {
		Kind    Kind   `json:"kind"`
		PkgName string `json:"pkgName"`
	}
)

const ResourceTypeStack influxdb.ResourceType = "stack"

// SVC is the packages service interface.
type SVC interface {
	InitStack(ctx context.Context, userID influxdb.ID, stack Stack) (Stack, error)
	CreatePkg(ctx context.Context, setters ...CreatePkgSetFn) (*Pkg, error)
	DryRun(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (Summary, Diff, error)
	Apply(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (Summary, Diff, error)
}

// SVCMiddleware is a service middleware func.
type SVCMiddleware func(SVC) SVC

type serviceOpt struct {
	logger *zap.Logger

	applyReqLimit int
	idGen         influxdb.IDGenerator
	timeGen       influxdb.TimeGenerator
	store         Store

	bucketSVC   influxdb.BucketService
	checkSVC    influxdb.CheckService
	dashSVC     influxdb.DashboardService
	labelSVC    influxdb.LabelService
	endpointSVC influxdb.NotificationEndpointService
	orgSVC      influxdb.OrganizationService
	ruleSVC     influxdb.NotificationRuleStore
	secretSVC   influxdb.SecretService
	taskSVC     influxdb.TaskService
	teleSVC     influxdb.TelegrafConfigStore
	varSVC      influxdb.VariableService
}

// ServiceSetterFn is a means of setting dependencies on the Service type.
type ServiceSetterFn func(opt *serviceOpt)

// WithLogger sets the logger for the service.
func WithLogger(log *zap.Logger) ServiceSetterFn {
	return func(o *serviceOpt) {
		o.logger = log
	}
}

// WithIDGenerator sets the id generator for the service.
func WithIDGenerator(idGen influxdb.IDGenerator) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.idGen = idGen
	}
}

// WithTimeGenerator sets the time generator for the service.
func WithTimeGenerator(timeGen influxdb.TimeGenerator) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.timeGen = timeGen
	}
}

// WithStore sets the store for the service.
func WithStore(store Store) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.store = store
	}
}

// WithBucketSVC sets the bucket service.
func WithBucketSVC(bktSVC influxdb.BucketService) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.bucketSVC = bktSVC
	}
}

// WithCheckSVC sets the check service.
func WithCheckSVC(checkSVC influxdb.CheckService) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.checkSVC = checkSVC
	}
}

// WithDashboardSVC sets the dashboard service.
func WithDashboardSVC(dashSVC influxdb.DashboardService) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.dashSVC = dashSVC
	}
}

// WithNotificationEndpointSVC sets the notification endpoint service.
func WithNotificationEndpointSVC(endpointSVC influxdb.NotificationEndpointService) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.endpointSVC = endpointSVC
	}
}

// WithNotificationRuleSVC sets the notification rule service.
func WithNotificationRuleSVC(ruleSVC influxdb.NotificationRuleStore) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.ruleSVC = ruleSVC
	}
}

// WithOrganizationService sets the organization service for the service.
func WithOrganizationService(orgSVC influxdb.OrganizationService) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.orgSVC = orgSVC
	}
}

// WithLabelSVC sets the label service.
func WithLabelSVC(labelSVC influxdb.LabelService) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.labelSVC = labelSVC
	}
}

// WithSecretSVC sets the secret service.
func WithSecretSVC(secretSVC influxdb.SecretService) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.secretSVC = secretSVC
	}
}

// WithTaskSVC sets the task service.
func WithTaskSVC(taskSVC influxdb.TaskService) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.taskSVC = taskSVC
	}
}

// WithTelegrafSVC sets the telegraf service.
func WithTelegrafSVC(telegrafSVC influxdb.TelegrafConfigStore) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.teleSVC = telegrafSVC
	}
}

// WithVariableSVC sets the variable service.
func WithVariableSVC(varSVC influxdb.VariableService) ServiceSetterFn {
	return func(opt *serviceOpt) {
		opt.varSVC = varSVC
	}
}

// Store is the storage behavior the Service depends on.
type Store interface {
	CreateStack(ctx context.Context, stack Stack) error
	ReadStackByID(ctx context.Context, id influxdb.ID) (Stack, error)
	UpdateStack(ctx context.Context, stack Stack) error
	DeleteStack(ctx context.Context, id influxdb.ID) error
}

// Service provides the pkger business logic including all the dependencies to make
// this resource sausage.
type Service struct {
	log *zap.Logger

	// internal dependencies
	applyReqLimit int
	idGen         influxdb.IDGenerator
	store         Store
	timeGen       influxdb.TimeGenerator

	// external service dependencies
	bucketSVC   influxdb.BucketService
	checkSVC    influxdb.CheckService
	dashSVC     influxdb.DashboardService
	labelSVC    influxdb.LabelService
	endpointSVC influxdb.NotificationEndpointService
	orgSVC      influxdb.OrganizationService
	ruleSVC     influxdb.NotificationRuleStore
	secretSVC   influxdb.SecretService
	taskSVC     influxdb.TaskService
	teleSVC     influxdb.TelegrafConfigStore
	varSVC      influxdb.VariableService
}

var _ SVC = (*Service)(nil)

// NewService is a constructor for a pkger Service.
func NewService(opts ...ServiceSetterFn) *Service {
	opt := &serviceOpt{
		logger:        zap.NewNop(),
		applyReqLimit: 5,
		idGen:         snowflake.NewDefaultIDGenerator(),
		timeGen:       influxdb.RealTimeGenerator{},
	}
	for _, o := range opts {
		o(opt)
	}

	return &Service{
		log: opt.logger,

		applyReqLimit: opt.applyReqLimit,
		idGen:         opt.idGen,
		store:         opt.store,
		timeGen:       opt.timeGen,

		bucketSVC:   opt.bucketSVC,
		checkSVC:    opt.checkSVC,
		labelSVC:    opt.labelSVC,
		dashSVC:     opt.dashSVC,
		endpointSVC: opt.endpointSVC,
		orgSVC:      opt.orgSVC,
		ruleSVC:     opt.ruleSVC,
		secretSVC:   opt.secretSVC,
		taskSVC:     opt.taskSVC,
		teleSVC:     opt.teleSVC,
		varSVC:      opt.varSVC,
	}
}

// InitStack will create a new stack for the given user within the given org. The stack
// can be created with URLs that point to the location of packages that are included as
// part of the stack when it is applied.
func (s *Service) InitStack(ctx context.Context, userID influxdb.ID, stack Stack) (Stack, error) {
	if err := validURLs(stack.URLs); err != nil {
		return Stack{}, err
	}

	if _, err := s.orgSVC.FindOrganizationByID(ctx, stack.OrgID); err != nil {
		if influxdb.ErrorCode(err) == influxdb.ENotFound {
			msg := fmt.Sprintf("organization dependency does not exist for id[%q]", stack.OrgID.String())
			return Stack{}, toInfluxError(influxdb.EConflict, msg)
		}
		return Stack{}, internalErr(err)
	}

	stack.ID = s.idGen.ID()
	now := s.timeGen.Now()
	stack.CRUDLog = influxdb.CRUDLog{
		CreatedAt: now,
		UpdatedAt: now,
	}

	if err := s.store.CreateStack(ctx, stack); err != nil {
		return Stack{}, internalErr(err)
	}

	return stack, nil
}

type (
	// CreatePkgSetFn is a functional input for setting the pkg fields.
	CreatePkgSetFn func(opt *CreateOpt) error

	// CreateOpt are the options for creating a new package.
	CreateOpt struct {
		OrgIDs    []CreateByOrgIDOpt
		Resources []ResourceToClone
	}

	// CreateByOrgIDOpt identifies an org to export resources for and provides
	// multiple filtering options.
	CreateByOrgIDOpt struct {
		OrgID         influxdb.ID
		LabelNames    []string
		ResourceKinds []Kind
	}
)

// CreateWithExistingResources allows the create method to clone existing resources.
func CreateWithExistingResources(resources ...ResourceToClone) CreatePkgSetFn {
	return func(opt *CreateOpt) error {
		for _, r := range resources {
			if err := r.OK(); err != nil {
				return err
			}
		}
		opt.Resources = append(opt.Resources, resources...)
		return nil
	}
}

// CreateWithAllOrgResources allows the create method to clone all existing resources
// for the given organization.
func CreateWithAllOrgResources(orgIDOpt CreateByOrgIDOpt) CreatePkgSetFn {
	return func(opt *CreateOpt) error {
		if orgIDOpt.OrgID == 0 {
			return errors.New("orgID provided must not be zero")
		}
		for _, k := range orgIDOpt.ResourceKinds {
			if err := k.OK(); err != nil {
				return err
			}
		}
		opt.OrgIDs = append(opt.OrgIDs, orgIDOpt)
		return nil
	}
}
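
// A sketch of combining the two setters on a CreatePkg call: export every
// bucket and label in an org, plus one specific dashboard by ID (the IDs are
// placeholders):
//
//	pkg, err := svc.CreatePkg(ctx,
//		CreateWithAllOrgResources(CreateByOrgIDOpt{
//			OrgID:         orgID,
//			ResourceKinds: []Kind{KindBucket, KindLabel},
//		}),
//		CreateWithExistingResources(ResourceToClone{
//			Kind: KindDashboard,
//			ID:   dashboardID,
//		}),
//	)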

// CreatePkg will produce a pkg from the parameters provided.
func (s *Service) CreatePkg(ctx context.Context, setters ...CreatePkgSetFn) (*Pkg, error) {
	opt := new(CreateOpt)
	for _, setter := range setters {
		if err := setter(opt); err != nil {
			return nil, err
		}
	}

	exporter := newResourceExporter(s)

	for _, orgIDOpt := range opt.OrgIDs {
		resourcesToClone, err := s.cloneOrgResources(ctx, orgIDOpt.OrgID, orgIDOpt.ResourceKinds)
		if err != nil {
			return nil, internalErr(err)
		}

		if err := exporter.Export(ctx, resourcesToClone, orgIDOpt.LabelNames...); err != nil {
			return nil, internalErr(err)
		}
	}

	if err := exporter.Export(ctx, opt.Resources); err != nil {
		return nil, internalErr(err)
	}

	pkg := &Pkg{Objects: exporter.Objects()}
	if err := pkg.Validate(ValidWithoutResources()); err != nil {
		return nil, failedValidationErr(err)
	}

	return pkg, nil
}

func (s *Service) cloneOrgResources(ctx context.Context, orgID influxdb.ID, resourceKinds []Kind) ([]ResourceToClone, error) {
	var resources []ResourceToClone
	for _, resGen := range s.filterOrgResourceKinds(resourceKinds) {
		existingResources, err := resGen.cloneFn(ctx, orgID)
		if err != nil {
			return nil, ierrors.Wrap(err, "finding "+string(resGen.resType))
		}
		resources = append(resources, existingResources...)
	}

	return resources, nil
}

func (s *Service) cloneOrgBuckets(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
	buckets, _, err := s.bucketSVC.FindBuckets(ctx, influxdb.BucketFilter{
		OrganizationID: &orgID,
	})
	if err != nil {
		return nil, err
	}

	resources := make([]ResourceToClone, 0, len(buckets))
	for _, b := range buckets {
		if b.Type == influxdb.BucketTypeSystem {
			continue
		}
		resources = append(resources, ResourceToClone{
			Kind: KindBucket,
			ID:   b.ID,
		})
	}
	return resources, nil
}

func (s *Service) cloneOrgChecks(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
	checks, _, err := s.checkSVC.FindChecks(ctx, influxdb.CheckFilter{
		OrgID: &orgID,
	})
	if err != nil {
		return nil, err
	}

	resources := make([]ResourceToClone, 0, len(checks))
	for _, c := range checks {
		resources = append(resources, ResourceToClone{
			Kind: KindCheck,
			ID:   c.GetID(),
		})
	}
	return resources, nil
}

func (s *Service) cloneOrgDashboards(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
	dashs, _, err := s.dashSVC.FindDashboards(ctx, influxdb.DashboardFilter{
		OrganizationID: &orgID,
	}, influxdb.FindOptions{Limit: 100})
	if err != nil {
		return nil, err
	}

	resources := make([]ResourceToClone, 0, len(dashs))
	for _, d := range dashs {
		resources = append(resources, ResourceToClone{
			Kind: KindDashboard,
			ID:   d.ID,
		})
	}
	return resources, nil
}

func (s *Service) cloneOrgLabels(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
	labels, err := s.labelSVC.FindLabels(ctx, influxdb.LabelFilter{
		OrgID: &orgID,
	}, influxdb.FindOptions{Limit: 10000})
	if err != nil {
		return nil, ierrors.Wrap(err, "finding labels")
	}

	resources := make([]ResourceToClone, 0, len(labels))
	for _, l := range labels {
		resources = append(resources, ResourceToClone{
			Kind: KindLabel,
			ID:   l.ID,
		})
	}
	return resources, nil
}

func (s *Service) cloneOrgNotificationEndpoints(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
	endpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{
		OrgID: &orgID,
	})
	if err != nil {
		return nil, err
	}

	resources := make([]ResourceToClone, 0, len(endpoints))
	for _, e := range endpoints {
		resources = append(resources, ResourceToClone{
			Kind: KindNotificationEndpoint,
			ID:   e.GetID(),
		})
	}
	return resources, nil
}

func (s *Service) cloneOrgNotificationRules(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
	rules, _, err := s.ruleSVC.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{
		OrgID: &orgID,
	})
	if err != nil {
		return nil, err
	}

	resources := make([]ResourceToClone, 0, len(rules))
	for _, r := range rules {
		resources = append(resources, ResourceToClone{
			Kind: KindNotificationRule,
			ID:   r.GetID(),
		})
	}
	return resources, nil
}

func (s *Service) cloneOrgTasks(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
	tasks, _, err := s.taskSVC.FindTasks(ctx, influxdb.TaskFilter{OrganizationID: &orgID})
	if err != nil {
		return nil, err
	}

	if len(tasks) == 0 {
		return nil, nil
	}

	checks, _, err := s.checkSVC.FindChecks(ctx, influxdb.CheckFilter{
		OrgID: &orgID,
	})
	if err != nil {
		return nil, err
	}

	rules, _, err := s.ruleSVC.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{
		OrgID: &orgID,
	})
	if err != nil {
		return nil, err
	}

	// index the system tasks, then prune out any task that is owned by a check
	// or a notification rule; those tasks are exported via their owning resource.
	mTasks := make(map[influxdb.ID]*influxdb.Task)
	for i := range tasks {
		t := tasks[i]
		if t.Type != influxdb.TaskSystemType {
			continue
		}
		mTasks[t.ID] = t
	}
	for _, c := range checks {
		delete(mTasks, c.GetTaskID())
	}
	for _, r := range rules {
		delete(mTasks, r.GetTaskID())
	}

	resources := make([]ResourceToClone, 0, len(mTasks))
	for _, t := range mTasks {
		resources = append(resources, ResourceToClone{
			Kind: KindTask,
			ID:   t.ID,
		})
	}
	return resources, nil
}

func (s *Service) cloneOrgTelegrafs(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
	teles, _, err := s.teleSVC.FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{OrgID: &orgID})
	if err != nil {
		return nil, err
	}

	resources := make([]ResourceToClone, 0, len(teles))
	for _, t := range teles {
		resources = append(resources, ResourceToClone{
			Kind: KindTelegraf,
			ID:   t.ID,
		})
	}
	return resources, nil
}

func (s *Service) cloneOrgVariables(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
	vars, err := s.varSVC.FindVariables(ctx, influxdb.VariableFilter{
		OrganizationID: &orgID,
	}, influxdb.FindOptions{Limit: 10000})
	if err != nil {
		return nil, err
	}

	resources := make([]ResourceToClone, 0, len(vars))
	for _, v := range vars {
		resources = append(resources, ResourceToClone{
			Kind: KindVariable,
			ID:   v.ID,
		})
	}

	return resources, nil
}

type cloneResFn func(context.Context, influxdb.ID) ([]ResourceToClone, error)

func (s *Service) filterOrgResourceKinds(resourceKindFilters []Kind) []struct {
	resType influxdb.ResourceType
	cloneFn cloneResFn
} {
	mKinds := map[Kind]cloneResFn{
		KindBucket:               s.cloneOrgBuckets,
		KindCheck:                s.cloneOrgChecks,
		KindDashboard:            s.cloneOrgDashboards,
		KindLabel:                s.cloneOrgLabels,
		KindNotificationEndpoint: s.cloneOrgNotificationEndpoints,
		KindNotificationRule:     s.cloneOrgNotificationRules,
		KindTask:                 s.cloneOrgTasks,
		KindTelegraf:             s.cloneOrgTelegrafs,
		KindVariable:             s.cloneOrgVariables,
	}

	newResGen := func(resType influxdb.ResourceType, cloneFn cloneResFn) struct {
		resType influxdb.ResourceType
		cloneFn cloneResFn
	} {
		return struct {
			resType influxdb.ResourceType
			cloneFn cloneResFn
		}{
			resType: resType,
			cloneFn: cloneFn,
		}
	}

	var resourceTypeGens []struct {
		resType influxdb.ResourceType
		cloneFn cloneResFn
	}
	if len(resourceKindFilters) == 0 {
		for k, cloneFn := range mKinds {
			resourceTypeGens = append(resourceTypeGens, newResGen(k.ResourceType(), cloneFn))
		}
		return resourceTypeGens
	}

	seenKinds := make(map[Kind]bool)
	for _, k := range resourceKindFilters {
		cloneFn, ok := mKinds[k]
		if !ok || seenKinds[k] {
			continue
		}
		seenKinds[k] = true
		resourceTypeGens = append(resourceTypeGens, newResGen(k.ResourceType(), cloneFn))
	}

	return resourceTypeGens
}

// DryRun provides a dry run of the pkg application. The pkg will be marked verified
// for later calls to Apply. This func will be run on an Apply if it has not been run
// already.
func (s *Service) DryRun(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (Summary, Diff, error) {
	state, err := s.dryRun(ctx, orgID, pkg, opts...)
	if err != nil {
		return Summary{}, Diff{}, err
	}
	return newSummaryFromStatePkg(state, pkg), state.diff(), nil
}

func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (*stateCoordinator, error) {
	// so here's the deal: when we have issues with the parsing validation, we
	// continue to do the diff anyhow. Any resource that does not have a name
	// will be skipped, and won't bleed into the dry run here. We can then return
	// an error (parseErr) alongside a valid diff/summary.
	var parseErr error
	if !pkg.isParsed {
		err := pkg.Validate()
		if err != nil && !IsParseErr(err) {
			return nil, internalErr(err)
		}
		parseErr = err
	}

	opt := applyOptFromOptFns(opts...)

	if len(opt.EnvRefs) > 0 {
		err := pkg.applyEnvRefs(opt.EnvRefs)
		if err != nil && !IsParseErr(err) {
			return nil, internalErr(err)
		}
		parseErr = err
	}

	state := newStateCoordinator(pkg)

	if opt.StackID > 0 {
		if err := s.addStackState(ctx, opt.StackID, state); err != nil {
			return nil, internalErr(err)
		}
	}

	if err := s.dryRunSecrets(ctx, orgID, pkg); err != nil {
		return nil, err
	}

	s.dryRunBuckets(ctx, orgID, state.mBuckets)
	s.dryRunChecks(ctx, orgID, state.mChecks)
	s.dryRunDashboards(ctx, orgID, state.mDashboards)
	s.dryRunLabels(ctx, orgID, state.mLabels)
	s.dryRunTasks(ctx, orgID, state.mTasks)
	s.dryRunTelegrafConfigs(ctx, orgID, state.mTelegrafs)
	s.dryRunVariables(ctx, orgID, state.mVariables)

	err := s.dryRunNotificationEndpoints(ctx, orgID, state.mEndpoints)
	if err != nil {
		return nil, ierrors.Wrap(err, "failed to dry run notification endpoints")
	}

	err = s.dryRunNotificationRules(ctx, orgID, state.mRules, state.mEndpoints)
	if err != nil {
		return nil, err
	}

	stateLabelMappings, err := s.dryRunLabelMappings(ctx, state)
	if err != nil {
		return nil, err
	}
	state.labelMappings = stateLabelMappings

	return state, parseErr
}

func (s *Service) dryRunBuckets(ctx context.Context, orgID influxdb.ID, bkts map[string]*stateBucket) {
	for _, stateBkt := range bkts {
		stateBkt.orgID = orgID
		var existing *influxdb.Bucket
		if stateBkt.ID() != 0 {
			existing, _ = s.bucketSVC.FindBucketByID(ctx, stateBkt.ID())
		} else {
			existing, _ = s.bucketSVC.FindBucketByName(ctx, orgID, stateBkt.parserBkt.Name())
		}
		if IsNew(stateBkt.stateStatus) && existing != nil {
			stateBkt.stateStatus = StateStatusExists
		}
		stateBkt.existing = existing
	}
}

func (s *Service) dryRunChecks(ctx context.Context, orgID influxdb.ID, checks map[string]*stateCheck) {
	for _, c := range checks {
		c.orgID = orgID

		var existing influxdb.Check
		if c.ID() != 0 {
			existing, _ = s.checkSVC.FindCheckByID(ctx, c.ID())
		} else {
			name := c.parserCheck.Name()
			existing, _ = s.checkSVC.FindCheck(ctx, influxdb.CheckFilter{
				Name:  &name,
				OrgID: &orgID,
			})
		}
		if IsNew(c.stateStatus) && existing != nil {
			c.stateStatus = StateStatusExists
		}
		c.existing = existing
	}
}

func (s *Service) dryRunDashboards(ctx context.Context, orgID influxdb.ID, dashs map[string]*stateDashboard) {
	for _, stateDash := range dashs {
		stateDash.orgID = orgID
		var existing *influxdb.Dashboard
		if stateDash.ID() != 0 {
			existing, _ = s.dashSVC.FindDashboardByID(ctx, stateDash.ID())
		}
		if IsNew(stateDash.stateStatus) && existing != nil {
			stateDash.stateStatus = StateStatusExists
		}
		stateDash.existing = existing
	}
}

func (s *Service) dryRunLabels(ctx context.Context, orgID influxdb.ID, labels map[string]*stateLabel) {
	for _, pkgLabel := range labels {
		pkgLabel.orgID = orgID
		existingLabel, _ := s.findLabel(ctx, orgID, pkgLabel)
		if IsNew(pkgLabel.stateStatus) && existingLabel != nil {
			pkgLabel.stateStatus = StateStatusExists
		}
		pkgLabel.existing = existingLabel
	}
}

func (s *Service) dryRunNotificationEndpoints(ctx context.Context, orgID influxdb.ID, endpoints map[string]*stateEndpoint) error {
	existingEndpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{
		OrgID: &orgID,
	}) // grab em all
	if err != nil {
		return internalErr(err)
	}

	mExistingByName := make(map[string]influxdb.NotificationEndpoint)
	mExistingByID := make(map[influxdb.ID]influxdb.NotificationEndpoint)
	for i := range existingEndpoints {
		e := existingEndpoints[i]
		mExistingByName[e.GetName()] = e
		mExistingByID[e.GetID()] = e
	}

	findEndpoint := func(e *stateEndpoint) influxdb.NotificationEndpoint {
		if iExisting, ok := mExistingByID[e.ID()]; ok {
			return iExisting
		}
		if iExisting, ok := mExistingByName[e.parserEndpoint.Name()]; ok {
			return iExisting
		}
		return nil
	}

	for _, newEndpoint := range endpoints {
		existing := findEndpoint(newEndpoint)
		if IsNew(newEndpoint.stateStatus) && existing != nil {
			newEndpoint.stateStatus = StateStatusExists
		}
		newEndpoint.existing = existing
	}

	return nil
}

func (s *Service) dryRunNotificationRules(ctx context.Context, orgID influxdb.ID, rules map[string]*stateRule, endpoints map[string]*stateEndpoint) error {
	iRules, _, err := s.ruleSVC.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{
		OrgID: &orgID,
	}, influxdb.FindOptions{Limit: 100})
	if err != nil {
		return internalErr(err)
	}

	mExistingRulesByID := make(map[influxdb.ID]influxdb.NotificationRule)
	for _, r := range iRules {
		mExistingRulesByID[r.GetID()] = r
	}

	for _, r := range rules {
		e, ok := endpoints[r.parserRule.associatedEndpoint.PkgName()]
		if !ok {
			err := fmt.Errorf("failed to find notification endpoint %q dependency for notification rule %q", r.parserRule.endpointName, r.parserRule.Name())
			return &influxdb.Error{
				Code: influxdb.EUnprocessableEntity,
				Err:  err,
			}
		}
		r.associatedEndpoint = e
		r.existing = mExistingRulesByID[r.ID()]
	}

	return nil
}

func (s *Service) dryRunSecrets(ctx context.Context, orgID influxdb.ID, pkg *Pkg) error {
	pkgSecrets := pkg.mSecrets
	if len(pkgSecrets) == 0 {
		return nil
	}

	existingSecrets, err := s.secretSVC.GetSecretKeys(ctx, orgID)
	if err != nil {
		return &influxdb.Error{Code: influxdb.EInternal, Err: err}
	}

	for _, secret := range existingSecrets {
		pkgSecrets[secret] = true // marked true since it exists in the platform
	}

	return nil
}

func (s *Service) dryRunTasks(ctx context.Context, orgID influxdb.ID, tasks map[string]*stateTask) {
	for _, stateTask := range tasks {
		stateTask.orgID = orgID
		var existing *influxdb.Task
		if stateTask.ID() != 0 {
			existing, _ = s.taskSVC.FindTaskByID(ctx, stateTask.ID())
		}
		if IsNew(stateTask.stateStatus) && existing != nil {
			stateTask.stateStatus = StateStatusExists
		}
		stateTask.existing = existing
	}
}

func (s *Service) dryRunTelegrafConfigs(ctx context.Context, orgID influxdb.ID, teleConfigs map[string]*stateTelegraf) {
	for _, stateTele := range teleConfigs {
		stateTele.orgID = orgID
		var existing *influxdb.TelegrafConfig
		if stateTele.ID() != 0 {
			existing, _ = s.teleSVC.FindTelegrafConfigByID(ctx, stateTele.ID())
		}
		if IsNew(stateTele.stateStatus) && existing != nil {
			stateTele.stateStatus = StateStatusExists
		}
		stateTele.existing = existing
	}
}

func (s *Service) dryRunVariables(ctx context.Context, orgID influxdb.ID, vars map[string]*stateVariable) {
	existingVars, _ := s.getAllPlatformVariables(ctx, orgID)

	mIDs := make(map[influxdb.ID]*influxdb.Variable)
	mNames := make(map[string]*influxdb.Variable)
	for _, v := range existingVars {
		mIDs[v.ID] = v
		mNames[v.Name] = v
	}

	for _, v := range vars {
		var existing *influxdb.Variable
		if v.ID() != 0 {
			existing = mIDs[v.ID()]
		} else {
			existing = mNames[v.parserVar.Name()]
		}
		if IsNew(v.stateStatus) && existing != nil {
			v.stateStatus = StateStatusExists
		}
		v.existing = existing
	}
}

func (s *Service) dryRunLabelMappings(ctx context.Context, state *stateCoordinator) ([]stateLabelMapping, error) {
	stateLabelsByResName := make(map[string]*stateLabel)
	for _, l := range state.mLabels {
		if IsRemoval(l.stateStatus) {
			continue
		}
		stateLabelsByResName[l.parserLabel.Name()] = l
	}

	var mappings []stateLabelMapping
	for _, b := range state.mBuckets {
		if IsRemoval(b.stateStatus) {
			continue
		}
		mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, b)
		if err != nil {
			return nil, err
		}
		mappings = append(mappings, mm...)
	}

	for _, c := range state.mChecks {
		if IsRemoval(c.stateStatus) {
			continue
		}
		mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, c)
		if err != nil {
			return nil, err
		}
		mappings = append(mappings, mm...)
	}

	for _, d := range state.mDashboards {
		if IsRemoval(d.stateStatus) {
			continue
		}
		mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, d)
		if err != nil {
			return nil, err
		}
		mappings = append(mappings, mm...)
	}

	for _, e := range state.mEndpoints {
		if IsRemoval(e.stateStatus) {
			continue
		}
		mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, e)
		if err != nil {
			return nil, err
		}
		mappings = append(mappings, mm...)
	}

	for _, r := range state.mRules {
		if IsRemoval(r.stateStatus) {
			continue
		}
		mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, r)
		if err != nil {
			return nil, err
		}
		mappings = append(mappings, mm...)
	}

	for _, t := range state.mTasks {
		if IsRemoval(t.stateStatus) {
			continue
		}
		mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, t)
		if err != nil {
			return nil, err
		}
		mappings = append(mappings, mm...)
	}

	for _, t := range state.mTelegrafs {
		if IsRemoval(t.stateStatus) {
			continue
		}
		mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, t)
		if err != nil {
			return nil, err
		}
		mappings = append(mappings, mm...)
	}

	for _, v := range state.mVariables {
		if IsRemoval(v.stateStatus) {
			continue
		}
		mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, v)
		if err != nil {
			return nil, err
		}
		mappings = append(mappings, mm...)
	}

	return mappings, nil
}

func (s *Service) dryRunResourceLabelMapping(ctx context.Context, state *stateCoordinator, stateLabelsByResName map[string]*stateLabel, associatedResource interface {
	labels() []*label
	stateIdentity() stateIdentity
}) ([]stateLabelMapping, error) {

	ident := associatedResource.stateIdentity()
	pkgResourceLabels := associatedResource.labels()

	var mappings []stateLabelMapping
	if !ident.exists() {
		for _, l := range pkgResourceLabels {
			mappings = append(mappings, stateLabelMapping{
				status:   StateStatusNew,
				resource: associatedResource,
				label:    state.getLabelByPkgName(l.PkgName()),
			})
		}
		return mappings, nil
	}

	existingLabels, err := s.labelSVC.FindResourceLabels(ctx, influxdb.LabelMappingFilter{
		ResourceID:   ident.id,
		ResourceType: ident.resourceType,
	})
	if err != nil {
		return nil, err
	}

	pkgLabels := labelSlcToMap(pkgResourceLabels)
	for _, l := range existingLabels {
		// if the label is found in state then we track the mapping and mark it
		// existing, otherwise we continue on
		delete(pkgLabels, l.Name)
		if sLabel, ok := stateLabelsByResName[l.Name]; ok {
			mappings = append(mappings, stateLabelMapping{
				status:   StateStatusExists,
				resource: associatedResource,
				label:    sLabel,
			})
		}
	}

	// now we add labels that do not exist
	for _, l := range pkgLabels {
		mappings = append(mappings, stateLabelMapping{
			status:   StateStatusNew,
			resource: associatedResource,
			label:    state.getLabelByPkgName(l.PkgName()),
		})
	}

	return mappings, nil
}

func (s *Service) addStackState(ctx context.Context, stackID influxdb.ID, state *stateCoordinator) error {
	stack, err := s.store.ReadStackByID(ctx, stackID)
	if err != nil {
		return ierrors.Wrap(internalErr(err), "reading stack")
	}

	for _, r := range stack.Resources {
		updateFn := state.setObjectID
		if !state.Contains(r.Kind, r.PkgName) {
			updateFn = state.addObjectForRemoval
		}
		updateFn(r.Kind, r.PkgName, r.ID)
	}

	return nil
}

// ApplyOpt is an option for applying a package.
type ApplyOpt struct {
	EnvRefs        map[string]string
	MissingSecrets map[string]string
	StackID        influxdb.ID
}

// ApplyOptFn updates the ApplyOpt per the functional option.
type ApplyOptFn func(opt *ApplyOpt)

// ApplyWithEnvRefs provides env refs to saturate the missing reference fields in the pkg.
func ApplyWithEnvRefs(envRefs map[string]string) ApplyOptFn {
	return func(o *ApplyOpt) {
		o.EnvRefs = envRefs
	}
}

// ApplyWithSecrets provides secrets to the platform that the pkg will need.
func ApplyWithSecrets(secrets map[string]string) ApplyOptFn {
	return func(o *ApplyOpt) {
		o.MissingSecrets = secrets
	}
}

// ApplyWithStackID associates the application of a pkg with a stack.
func ApplyWithStackID(stackID influxdb.ID) ApplyOptFn {
	return func(o *ApplyOpt) {
		o.StackID = stackID
	}
}

func applyOptFromOptFns(opts ...ApplyOptFn) ApplyOpt {
	var opt ApplyOpt
	for _, o := range opts {
		o(&opt)
	}
	return opt
}
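
// A sketch of threading these options through an Apply call; the map keys and
// values are placeholders for whatever the pkg declares:
//
//	sum, diff, err := svc.Apply(ctx, orgID, userID, pkg,
//		ApplyWithEnvRefs(map[string]string{"bucket-name": "rollup"}),
//		ApplyWithSecrets(map[string]string{"slack-token": token}),
//		ApplyWithStackID(stackID),
//	)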

// Apply will apply all the resources identified in the provided pkg. The pkg is applied
// in its entirety; if a failure happens midway, the entire pkg is rolled back to the
// state from before the pkg was applied.
func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (sum Summary, diff Diff, e error) {
	if !pkg.isParsed {
		if err := pkg.Validate(); err != nil {
			return Summary{}, Diff{}, failedValidationErr(err)
		}
	}

	opt := applyOptFromOptFns(opts...)

	if err := pkg.applyEnvRefs(opt.EnvRefs); err != nil {
		return Summary{}, Diff{}, failedValidationErr(err)
	}

	state, err := s.dryRun(ctx, orgID, pkg, opts...)
	if err != nil {
		return Summary{}, Diff{}, err
	}

	defer func(stackID influxdb.ID) {
		if stackID == 0 {
			return
		}

		updateStackFn := s.updateStackAfterSuccess
		if e != nil {
			updateStackFn = s.updateStackAfterRollback
		}
		if err := updateStackFn(ctx, stackID, state); err != nil {
			s.log.Error("failed to update stack", zap.Error(err))
		}
	}(opt.StackID)

	coordinator := &rollbackCoordinator{sem: make(chan struct{}, s.applyReqLimit)}
	defer coordinator.rollback(s.log, &e, orgID)

	err = s.applyState(ctx, coordinator, orgID, userID, state, opt.MissingSecrets)
	if err != nil {
		return Summary{}, Diff{}, err
	}

	pkg.applySecrets(opt.MissingSecrets)

	return newSummaryFromStatePkg(state, pkg), state.diff(), err
}
|
|
|
|
|
2020-04-17 02:27:58 +00:00
|
|
|
func (s *Service) applyState(ctx context.Context, coordinator *rollbackCoordinator, orgID, userID influxdb.ID, state *stateCoordinator, missingSecrets map[string]string) (e error) {
	// Each grouping here runs in its entirety, then returns an error that is
	// indicative of running all the appliers provided. For instance, one label
	// may fail and one bucket may fail; the errors aggregate so the caller is
	// informed of both the failed label and the failed bucket. The groupings
	// allow for steps to occur before exiting. The first step is adding the
	// dependencies: resources that are associated with other resources. Then
	// the primary resources; here we collect all the errors associated with
	// them. If those are all good, then we run the secondary (dependent)
	// resources, which rely on the primary resources having been created.
	appliers := [][]applier{
		{
			// adds secrets that are referenced in the pkg; this allows the
			// user to provide data that does not rest in the pkg.
			s.applySecrets(missingSecrets),
		},
		{
			// deps for primary resources
			s.applyLabels(ctx, state.labels()),
		},
		{
			// primary resources, can have relationships to labels
			s.applyVariables(ctx, state.variables()),
			s.applyBuckets(ctx, state.buckets()),
			s.applyChecks(ctx, state.checks()),
			s.applyDashboards(ctx, state.dashboards()),
			s.applyNotificationEndpoints(ctx, userID, state.endpoints()),
			s.applyTasks(ctx, state.tasks()),
			s.applyTelegrafs(ctx, userID, state.telegrafConfigs()),
		},
	}

	for _, group := range appliers {
		if err := coordinator.runTilEnd(ctx, orgID, userID, group...); err != nil {
			return internalErr(err)
		}
	}

	// this has to be run after the above primary resources, because it relies
	// on notification endpoints already being applied.
	app, err := s.applyNotificationRulesGenerator(state.rules(), state.mEndpoints)
	if err != nil {
		return err
	}
	if err := coordinator.runTilEnd(ctx, orgID, userID, app); err != nil {
		return err
	}

	// secondary resources
	// this last grouping relies on the above 2 steps having completed successfully
	secondary := []applier{s.applyLabelMappings(state.labelMappings)}
	if err := coordinator.runTilEnd(ctx, orgID, userID, secondary...); err != nil {
		return internalErr(err)
	}

	return nil
}

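// Minimal sketch of the applier contract used by the groupings above (field
// shapes inferred from the constructors that follow; the concrete type
// definitions live elsewhere in this package):
//
//	noop := applier{
//		creater: creater{
//			entries: 1,
//			fn: func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
//				return nil // nil signals entry i applied cleanly
//			},
//		},
//		rollbacker: rollbacker{
//			resource: "noop",
//			fn:       func(_ influxdb.ID) error { return nil },
//		},
//	}
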
func (s *Service) applyBuckets(ctx context.Context, buckets []*stateBucket) applier {
	const resource = "bucket"

	mutex := new(doMutex)
	rollbackBuckets := make([]*stateBucket, 0, len(buckets))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var b *stateBucket
		mutex.Do(func() {
			buckets[i].orgID = orgID
			b = buckets[i]
		})
		if !b.shouldApply() {
			return nil
		}

		influxBucket, err := s.applyBucket(ctx, b)
		if err != nil {
			return &applyErrBody{
				name: b.parserBkt.PkgName(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			buckets[i].id = influxBucket.ID
			rollbackBuckets = append(rollbackBuckets, buckets[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(buckets),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn:       func(_ influxdb.ID) error { return s.rollbackBuckets(ctx, rollbackBuckets) },
		},
	}
}

func (s *Service) rollbackBuckets(ctx context.Context, buckets []*stateBucket) error {
	rollbackFn := func(b *stateBucket) error {
		var err error
		switch b.stateStatus {
		case StateStatusRemove:
			err = ierrors.Wrap(s.bucketSVC.CreateBucket(ctx, b.existing), "rolling back removed bucket")
		case StateStatusExists:
			rp := b.parserBkt.RetentionRules.RP()
			_, err = s.bucketSVC.UpdateBucket(ctx, b.ID(), influxdb.BucketUpdate{
				Description:     &b.parserBkt.Description,
				RetentionPeriod: &rp,
			})
			err = ierrors.Wrap(err, "rolling back existing bucket to previous state")
		default:
			err = ierrors.Wrap(s.bucketSVC.DeleteBucket(ctx, b.ID()), "rolling back new bucket")
		}
		return err
	}

	var errs []string
	for _, b := range buckets {
		if err := rollbackFn(b); err != nil {
			errs = append(errs, fmt.Sprintf("error for bucket[%q]: %s", b.ID(), err))
		}
	}

	if len(errs) > 0 {
		// TODO: fixup error
		return errors.New(strings.Join(errs, ", "))
	}

	return nil
}

func (s *Service) applyBucket(ctx context.Context, b *stateBucket) (influxdb.Bucket, error) {
	switch b.stateStatus {
	case StateStatusRemove:
		if err := s.bucketSVC.DeleteBucket(ctx, b.ID()); err != nil {
			return influxdb.Bucket{}, fmt.Errorf("failed to delete bucket[%q]: %w", b.ID(), err)
		}
		return *b.existing, nil
	case StateStatusExists:
		rp := b.parserBkt.RetentionRules.RP()
		newName := b.parserBkt.Name()
		influxBucket, err := s.bucketSVC.UpdateBucket(ctx, b.ID(), influxdb.BucketUpdate{
			Description:     &b.parserBkt.Description,
			Name:            &newName,
			RetentionPeriod: &rp,
		})
		if err != nil {
			return influxdb.Bucket{}, fmt.Errorf("failed to update bucket[%q]: %w", b.ID(), err)
		}
		return *influxBucket, nil
	default:
		rp := b.parserBkt.RetentionRules.RP()
		influxBucket := influxdb.Bucket{
			OrgID:           b.orgID,
			Description:     b.parserBkt.Description,
			Name:            b.parserBkt.Name(),
			RetentionPeriod: rp,
		}
		err := s.bucketSVC.CreateBucket(ctx, &influxBucket)
		if err != nil {
			return influxdb.Bucket{}, fmt.Errorf("failed to create bucket[%q]: %w", b.ID(), err)
		}
		return influxBucket, nil
	}
}

func (s *Service) applyChecks(ctx context.Context, checks []*stateCheck) applier {
	const resource = "check"

	mutex := new(doMutex)
	rollbackChecks := make([]*stateCheck, 0, len(checks))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var c *stateCheck
		mutex.Do(func() {
			checks[i].orgID = orgID
			c = checks[i]
		})

		influxCheck, err := s.applyCheck(ctx, c, userID)
		if err != nil {
			return &applyErrBody{
				name: c.parserCheck.Name(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			checks[i].id = influxCheck.GetID()
			rollbackChecks = append(rollbackChecks, checks[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(checks),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn:       func(_ influxdb.ID) error { return s.rollbackChecks(ctx, rollbackChecks) },
		},
	}
}

func (s *Service) rollbackChecks(ctx context.Context, checks []*stateCheck) error {
	rollbackFn := func(c *stateCheck) error {
		var err error
		switch c.stateStatus {
		case StateStatusRemove:
			err = s.checkSVC.CreateCheck(
				ctx,
				influxdb.CheckCreate{
					Check:  c.existing,
					Status: c.parserCheck.Status(),
				},
				c.existing.GetOwnerID(),
			)
			c.id = c.existing.GetID()
		case StateStatusNew:
			err = s.checkSVC.DeleteCheck(ctx, c.ID())
		default:
			_, err = s.checkSVC.UpdateCheck(ctx, c.ID(), influxdb.CheckCreate{
				Check:  c.summarize().Check,
				Status: influxdb.Status(c.parserCheck.status),
			})
		}
		return err
	}

	var errs []string
	for _, c := range checks {
		if err := rollbackFn(c); err != nil {
			errs = append(errs, fmt.Sprintf("error for check[%q]: %s", c.ID(), err))
		}
	}

	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "; "))
	}

	return nil
}

func (s *Service) applyCheck(ctx context.Context, c *stateCheck, userID influxdb.ID) (influxdb.Check, error) {
	switch c.stateStatus {
	case StateStatusRemove:
		if err := s.checkSVC.DeleteCheck(ctx, c.ID()); err != nil {
			return nil, fmt.Errorf("failed to delete check[%q]: %w", c.ID(), err)
		}
		return c.existing, nil
	case StateStatusExists:
		influxCheck, err := s.checkSVC.UpdateCheck(ctx, c.ID(), influxdb.CheckCreate{
			Check:  c.summarize().Check,
			Status: c.parserCheck.Status(),
		})
		if err != nil {
			return nil, err
		}
		return influxCheck, nil
	default:
		checkStub := influxdb.CheckCreate{
			Check:  c.summarize().Check,
			Status: c.parserCheck.Status(),
		}
		err := s.checkSVC.CreateCheck(ctx, checkStub, userID)
		if err != nil {
			return nil, err
		}
		return checkStub.Check, nil
	}
}

func (s *Service) applyDashboards(ctx context.Context, dashboards []*stateDashboard) applier {
	const resource = "dashboard"

	mutex := new(doMutex)
	rollbackDashboards := make([]*stateDashboard, 0, len(dashboards))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var d *stateDashboard
		mutex.Do(func() {
			dashboards[i].orgID = orgID
			d = dashboards[i]
		})

		influxDashboard, err := s.applyDashboard(ctx, d)
		if err != nil {
			return &applyErrBody{
				name: d.parserDash.Name(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			dashboards[i].id = influxDashboard.ID
			rollbackDashboards = append(rollbackDashboards, dashboards[i])
		})
		return nil
	}

	return applier{
		creater: creater{
			entries: len(dashboards),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn: func(_ influxdb.ID) error {
				return s.rollbackDashboards(ctx, rollbackDashboards)
			},
		},
	}
}

func (s *Service) applyDashboard(ctx context.Context, d *stateDashboard) (influxdb.Dashboard, error) {
	switch d.stateStatus {
	case StateStatusRemove:
		if err := s.dashSVC.DeleteDashboard(ctx, d.ID()); err != nil {
			return influxdb.Dashboard{}, fmt.Errorf("failed to delete dashboard[%q]: %w", d.ID(), err)
		}
		return *d.existing, nil
	case StateStatusExists:
		name := d.parserDash.Name()
		cells := convertChartsToCells(d.parserDash.Charts)
		dash, err := s.dashSVC.UpdateDashboard(ctx, d.ID(), influxdb.DashboardUpdate{
			Name:        &name,
			Description: &d.parserDash.Description,
			Cells:       &cells,
		})
		if err != nil {
			return influxdb.Dashboard{}, ierrors.Wrap(err, "failed to update dashboard")
		}
		return *dash, nil
	default:
		cells := convertChartsToCells(d.parserDash.Charts)
		influxDashboard := influxdb.Dashboard{
			OrganizationID: d.orgID,
			Description:    d.parserDash.Description,
			Name:           d.parserDash.Name(),
			Cells:          cells,
		}
		err := s.dashSVC.CreateDashboard(ctx, &influxDashboard)
		if err != nil {
			return influxdb.Dashboard{}, ierrors.Wrap(err, "failed to create dashboard")
		}
		return influxDashboard, nil
	}
}

func (s *Service) rollbackDashboards(ctx context.Context, dashs []*stateDashboard) error {
	rollbackFn := func(d *stateDashboard) error {
		var err error
		switch d.stateStatus {
		case StateStatusRemove:
			err = ierrors.Wrap(s.dashSVC.CreateDashboard(ctx, d.existing), "rolling back removed dashboard")
		case StateStatusExists:
			_, err := s.dashSVC.UpdateDashboard(ctx, d.ID(), influxdb.DashboardUpdate{
				Name:        &d.existing.Name,
				Description: &d.existing.Description,
				Cells:       &d.existing.Cells,
			})
			return ierrors.Wrap(err, "rolling back updated dashboard")
		default:
			err = ierrors.Wrap(s.dashSVC.DeleteDashboard(ctx, d.ID()), "rolling back new dashboard")
		}
		return err
	}

	var errs []string
	for _, d := range dashs {
		if err := rollbackFn(d); err != nil {
			errs = append(errs, fmt.Sprintf("error for dashboard[%q]: %s", d.ID(), err))
		}
	}

	if len(errs) > 0 {
		// TODO: fixup error
		return errors.New(strings.Join(errs, ", "))
	}

	return nil
}

func convertChartsToCells(ch []chart) []*influxdb.Cell {
	icells := make([]*influxdb.Cell, 0, len(ch))
	for _, c := range ch {
		icell := &influxdb.Cell{
			CellProperty: influxdb.CellProperty{
				X: int32(c.XPos),
				Y: int32(c.YPos),
				H: int32(c.Height),
				W: int32(c.Width),
			},
			View: &influxdb.View{
				ViewContents: influxdb.ViewContents{Name: c.Name},
				Properties:   c.properties(),
			},
		}
		icells = append(icells, icell)
	}
	return icells
}

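// Worked example with illustrative values: a chart parsed with XPos: 0,
// YPos: 4, Width: 3, Height: 2 yields a cell whose CellProperty is
//
//	influxdb.CellProperty{X: 0, Y: 4, W: 3, H: 2}
//
// with the chart's name and view properties carried on the cell's View.
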
func (s *Service) applyLabels(ctx context.Context, labels []*stateLabel) applier {
	const resource = "label"

	mutex := new(doMutex)
	rollBackLabels := make([]*stateLabel, 0, len(labels))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var l *stateLabel
		mutex.Do(func() {
			labels[i].orgID = orgID
			l = labels[i]
		})
		if !l.shouldApply() {
			return nil
		}

		influxLabel, err := s.applyLabel(ctx, l)
		if err != nil {
			return &applyErrBody{
				name: l.parserLabel.PkgName(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			labels[i].id = influxLabel.ID
			rollBackLabels = append(rollBackLabels, labels[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(labels),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn:       func(_ influxdb.ID) error { return s.rollbackLabels(ctx, rollBackLabels) },
		},
	}
}

func (s *Service) rollbackLabels(ctx context.Context, labels []*stateLabel) error {
	rollbackFn := func(l *stateLabel) error {
		var err error
		switch l.stateStatus {
		case StateStatusRemove:
			err = s.labelSVC.CreateLabel(ctx, l.existing)
		case StateStatusExists:
			_, err = s.labelSVC.UpdateLabel(ctx, l.ID(), influxdb.LabelUpdate{
				Name:       l.parserLabel.Name(),
				Properties: l.existing.Properties,
			})
		default:
			err = s.labelSVC.DeleteLabel(ctx, l.ID())
		}
		return err
	}

	var errs []string
	for _, l := range labels {
		if err := rollbackFn(l); err != nil {
			errs = append(errs, fmt.Sprintf("error for label[%q]: %s", l.ID(), err))
		}
	}

	if len(errs) > 0 {
		return errors.New(strings.Join(errs, ", "))
	}

	return nil
}

func (s *Service) applyLabel(ctx context.Context, l *stateLabel) (influxdb.Label, error) {
	var (
		influxLabel *influxdb.Label
		err         error
	)
	switch l.stateStatus {
	case StateStatusRemove:
		influxLabel, err = l.existing, s.labelSVC.DeleteLabel(ctx, l.ID())
	case StateStatusExists:
		influxLabel, err = s.labelSVC.UpdateLabel(ctx, l.ID(), influxdb.LabelUpdate{
			Name:       l.parserLabel.Name(),
			Properties: l.properties(),
		})
		err = ierrors.Wrap(err, "updating")
	default:
		createLabel := l.toInfluxLabel()
		influxLabel = &createLabel
		err = ierrors.Wrap(s.labelSVC.CreateLabel(ctx, &createLabel), "creating")
	}
	if err != nil || influxLabel == nil {
		return influxdb.Label{}, err
	}

	return *influxLabel, nil
}

func (s *Service) applyNotificationEndpoints(ctx context.Context, userID influxdb.ID, endpoints []*stateEndpoint) applier {
	const resource = "notification_endpoints"

	mutex := new(doMutex)
	rollbackEndpoints := make([]*stateEndpoint, 0, len(endpoints))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var endpoint *stateEndpoint
		mutex.Do(func() {
			endpoints[i].orgID = orgID
			endpoint = endpoints[i]
		})

		influxEndpoint, err := s.applyNotificationEndpoint(ctx, endpoint, userID)
		if err != nil {
			return &applyErrBody{
				name: endpoint.parserEndpoint.Name(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			endpoints[i].id = influxEndpoint.GetID()
			for _, secret := range influxEndpoint.SecretFields() {
				switch {
				case strings.HasSuffix(secret.Key, "-routing-key"):
					if endpoints[i].parserEndpoint.routingKey == nil {
						endpoints[i].parserEndpoint.routingKey = new(references)
					}
					endpoints[i].parserEndpoint.routingKey.Secret = secret.Key
				case strings.HasSuffix(secret.Key, "-token"):
					if endpoints[i].parserEndpoint.token == nil {
						endpoints[i].parserEndpoint.token = new(references)
					}
					endpoints[i].parserEndpoint.token.Secret = secret.Key
				case strings.HasSuffix(secret.Key, "-username"):
					if endpoints[i].parserEndpoint.username == nil {
						endpoints[i].parserEndpoint.username = new(references)
					}
					endpoints[i].parserEndpoint.username.Secret = secret.Key
				case strings.HasSuffix(secret.Key, "-password"):
					if endpoints[i].parserEndpoint.password == nil {
						endpoints[i].parserEndpoint.password = new(references)
					}
					endpoints[i].parserEndpoint.password.Secret = secret.Key
				}
			}
			rollbackEndpoints = append(rollbackEndpoints, endpoints[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(endpoints),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn: func(_ influxdb.ID) error {
				return s.rollbackNotificationEndpoints(ctx, userID, rollbackEndpoints)
			},
		},
	}
}

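// The suffix switch above leans on a naming convention: the platform stores an
// endpoint's secret fields under keys ending in "-routing-key", "-token",
// "-username", or "-password" (the prefix is whatever SecretFields returns;
// only the suffix is inspected), and the matching parser reference is pointed
// back at the stored secret key.
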
func (s *Service) applyNotificationEndpoint(ctx context.Context, e *stateEndpoint, userID influxdb.ID) (influxdb.NotificationEndpoint, error) {
	switch e.stateStatus {
	case StateStatusRemove:
		_, _, err := s.endpointSVC.DeleteNotificationEndpoint(ctx, e.ID())
		if err != nil {
			return nil, err
		}
		return e.existing, nil
	case StateStatusExists:
		// stub out userID since we're always using the http client, which will
		// fill it in for us with the token. It feels a bit broken that it's
		// required at all.
		// TODO: look into this userID requirement
		return s.endpointSVC.UpdateNotificationEndpoint(
			ctx,
			e.ID(),
			e.summarize().NotificationEndpoint,
			userID,
		)
	default:
		actual := e.summarize().NotificationEndpoint
		err := s.endpointSVC.CreateNotificationEndpoint(ctx, actual, userID)
		if err != nil {
			return nil, err
		}

		return actual, nil
	}
}

func (s *Service) rollbackNotificationEndpoints(ctx context.Context, userID influxdb.ID, endpoints []*stateEndpoint) error {
	rollbackFn := func(e *stateEndpoint) error {
		var err error
		switch e.stateStatus {
		case StateStatusRemove:
			err = s.endpointSVC.CreateNotificationEndpoint(ctx, e.existing, userID)
			err = ierrors.Wrap(err, "failed to rollback removed endpoint")
		case StateStatusExists:
			_, err = s.endpointSVC.UpdateNotificationEndpoint(ctx, e.ID(), e.existing, userID)
			err = ierrors.Wrap(err, "failed to rollback updated endpoint")
		default:
			_, _, err = s.endpointSVC.DeleteNotificationEndpoint(ctx, e.ID())
			err = ierrors.Wrap(err, "failed to rollback created endpoint")
		}
		return err
	}

	var errs []string
	for _, e := range endpoints {
		if err := rollbackFn(e); err != nil {
			errs = append(errs, fmt.Sprintf("error for notification endpoint[%q]: %s", e.ID(), err))
		}
	}

	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "; "))
	}

	return nil
}

func (s *Service) applyNotificationRulesGenerator(rules []*stateRule, stateEndpoints map[string]*stateEndpoint) (applier, error) {
	var errs applyErrs
	for _, r := range rules {
		v, ok := stateEndpoints[r.parserRule.associatedEndpoint.PkgName()]
		if !ok {
			errs = append(errs, &applyErrBody{
				name: r.parserRule.Name(),
				msg:  fmt.Sprintf("notification rule endpoint dependency does not exist; endpointName=%q", r.parserRule.associatedEndpoint.PkgName()),
			})
			continue
		}
		r.associatedEndpoint = v
	}

	err := errs.toError("notification_rules", "failed to find dependency")
	if err != nil {
		return applier{}, err
	}

	return s.applyNotificationRules(rules), nil
}

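// The generator above resolves each rule's endpoint dependency by pkg name
// before any rule is created, so a missing endpoint fails fast with a
// dependency error rather than surfacing as a create failure mid-apply.
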
func (s *Service) applyNotificationRules(rules []*stateRule) applier {
	const resource = "notification_rules"

	mutex := new(doMutex)
	rollbackRules := make([]*stateRule, 0, len(rules))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var rule *stateRule
		mutex.Do(func() {
			rules[i].orgID = orgID
			rule = rules[i]
		})

		influxRule, err := s.applyNotificationRule(ctx, rule, userID)
		if err != nil {
			return &applyErrBody{
				name: rule.parserRule.PkgName(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			rules[i].id = influxRule.GetID()
			rollbackRules = append(rollbackRules, rules[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(rules),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn: func(_ influxdb.ID) error {
				return s.rollbackNotificationRules(rollbackRules)
			},
		},
	}
}

func (s *Service) applyNotificationRule(ctx context.Context, r *stateRule, userID influxdb.ID) (influxdb.NotificationRule, error) {
	influxRule := influxdb.NotificationRuleCreate{
		NotificationRule: r.toInfluxRule(),
		Status:           r.parserRule.Status(),
	}
	err := s.ruleSVC.CreateNotificationRule(ctx, influxRule, userID)
	if err != nil {
		return nil, err
	}

	return influxRule, nil
}

func (s *Service) rollbackNotificationRules(rules []*stateRule) error {
	var errs []string
	for _, e := range rules {
		err := s.ruleSVC.DeleteNotificationRule(context.Background(), e.ID())
		if err != nil {
			errs = append(errs, e.ID().String())
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf(`notification_rule_ids=[%s] err="unable to delete"`, strings.Join(errs, ", "))
	}
	return nil
}

func (s *Service) applySecrets(secrets map[string]string) applier {
	const resource = "secrets"

	if len(secrets) == 0 {
		return applier{
			rollbacker: rollbacker{fn: func(orgID influxdb.ID) error { return nil }},
		}
	}

	mutex := new(doMutex)
	rollbackSecrets := make([]string, 0)

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		err := s.secretSVC.PutSecrets(ctx, orgID, secrets)
		if err != nil {
			return &applyErrBody{name: "secrets", msg: err.Error()}
		}

		mutex.Do(func() {
			for key := range secrets {
				rollbackSecrets = append(rollbackSecrets, key)
			}
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: 1,
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn: func(orgID influxdb.ID) error {
				return s.secretSVC.DeleteSecret(context.Background(), orgID)
			},
		},
	}
}

func (s *Service) applyTasks(ctx context.Context, tasks []*stateTask) applier {
	const resource = "tasks"

	mutex := new(doMutex)
	rollbackTasks := make([]*stateTask, 0, len(tasks))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var t *stateTask
		mutex.Do(func() {
			tasks[i].orgID = orgID
			t = tasks[i]
		})

		newTask, err := s.applyTask(ctx, userID, t)
		if err != nil {
			return &applyErrBody{
				name: t.parserTask.Name(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			tasks[i].id = newTask.ID
			rollbackTasks = append(rollbackTasks, tasks[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(tasks),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn: func(_ influxdb.ID) error {
				return s.rollbackTasks(ctx, rollbackTasks)
			},
		},
	}
}

func (s *Service) applyTask(ctx context.Context, userID influxdb.ID, t *stateTask) (influxdb.Task, error) {
	switch t.stateStatus {
	case StateStatusRemove:
		if err := s.taskSVC.DeleteTask(ctx, t.ID()); err != nil {
			return influxdb.Task{}, ierrors.Wrap(err, "failed to delete task")
		}
		return *t.existing, nil
	case StateStatusExists:
		newFlux := t.parserTask.flux()
		newStatus := string(t.parserTask.Status())
		opt := options.Options{
			Name: t.parserTask.Name(),
			Cron: t.parserTask.cron,
		}
		if every := t.parserTask.every; every > 0 {
			opt.Every.Parse(every.String())
		}
		if offset := t.parserTask.offset; offset > 0 {
			opt.Offset.Parse(offset.String())
		}

		updatedTask, err := s.taskSVC.UpdateTask(ctx, t.ID(), influxdb.TaskUpdate{
			Flux:        &newFlux,
			Status:      &newStatus,
			Description: &t.parserTask.description,
			Options:     opt,
		})
		if err != nil {
			return influxdb.Task{}, ierrors.Wrap(err, "failed to update task")
		}
		return *updatedTask, nil
	default:
		newTask, err := s.taskSVC.CreateTask(ctx, influxdb.TaskCreate{
			Type:           influxdb.TaskSystemType,
			Flux:           t.parserTask.flux(),
			OwnerID:        userID,
			Description:    t.parserTask.description,
			Status:         string(t.parserTask.Status()),
			OrganizationID: t.orgID,
		})
		if err != nil {
			return influxdb.Task{}, ierrors.Wrap(err, "failed to create task")
		}
		return *newTask, nil
	}
}

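// Hedged aside on the Every/Offset handling above: options.Duration.Parse
// mutates its receiver, and its error return is discarded here, presumably
// safe because the value round-trips through String(). A standalone caller
// would want to keep the error:
//
//	var every options.Duration
//	if err := every.Parse("1h"); err != nil {
//		// handle the malformed duration
//	}
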
func (s *Service) rollbackTasks(ctx context.Context, tasks []*stateTask) error {
	rollbackFn := func(t *stateTask) error {
		var err error
		switch t.stateStatus {
		case StateStatusRemove:
			newTask, err := s.taskSVC.CreateTask(ctx, influxdb.TaskCreate{
				Type:           t.existing.Type,
				Flux:           t.existing.Flux,
				OwnerID:        t.existing.OwnerID,
				Description:    t.existing.Description,
				Status:         t.existing.Status,
				OrganizationID: t.orgID,
				Metadata:       t.existing.Metadata,
			})
			if err != nil {
				return ierrors.Wrap(err, "failed to rollback removed task")
			}
			t.existing = newTask
		case StateStatusExists:
			opt := options.Options{
				Name: t.existing.Name,
				Cron: t.existing.Cron,
			}
			if every := t.existing.Every; every != "" {
				opt.Every.Parse(every)
			}
			if offset := t.existing.Offset; offset > 0 {
				opt.Offset.Parse(offset.String())
			}

			_, err = s.taskSVC.UpdateTask(ctx, t.ID(), influxdb.TaskUpdate{
				Flux:        &t.existing.Flux,
				Status:      &t.existing.Status,
				Description: &t.existing.Description,
				Metadata:    t.existing.Metadata,
				Options:     opt,
			})
			err = ierrors.Wrap(err, "failed to rollback updated task")
		default:
			err = s.taskSVC.DeleteTask(ctx, t.ID())
			err = ierrors.Wrap(err, "failed to rollback created task")
		}
		return err
	}

	var errs []string
	for _, d := range tasks {
		if err := rollbackFn(d); err != nil {
			errs = append(errs, fmt.Sprintf("error for task[%q]: %s", d.ID(), err))
		}
	}

	if len(errs) > 0 {
		// TODO: fixup error
		return errors.New(strings.Join(errs, ", "))
	}

	return nil
}

func (s *Service) applyTelegrafs(ctx context.Context, userID influxdb.ID, teles []*stateTelegraf) applier {
	const resource = "telegrafs"

	mutex := new(doMutex)
	rollbackTelegrafs := make([]*stateTelegraf, 0, len(teles))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var t *stateTelegraf
		mutex.Do(func() {
			teles[i].orgID = orgID
			t = teles[i]
		})

		existing, err := s.applyTelegrafConfig(ctx, userID, t)
		if err != nil {
			return &applyErrBody{
				name: t.parserTelegraf.Name(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			teles[i].id = existing.ID
			rollbackTelegrafs = append(rollbackTelegrafs, teles[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(teles),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn: func(_ influxdb.ID) error {
				return s.rollbackTelegrafConfigs(ctx, userID, rollbackTelegrafs)
			},
		},
	}
}

func (s *Service) applyTelegrafConfig(ctx context.Context, userID influxdb.ID, t *stateTelegraf) (influxdb.TelegrafConfig, error) {
	switch t.stateStatus {
	case StateStatusRemove:
		if err := s.teleSVC.DeleteTelegrafConfig(ctx, t.ID()); err != nil {
			return influxdb.TelegrafConfig{}, ierrors.Wrap(err, "failed to delete config")
		}
		return *t.existing, nil
	case StateStatusExists:
		cfg := t.summarize().TelegrafConfig
		updatedConfig, err := s.teleSVC.UpdateTelegrafConfig(ctx, t.ID(), &cfg, userID)
		if err != nil {
			return influxdb.TelegrafConfig{}, ierrors.Wrap(err, "failed to update config")
		}
		return *updatedConfig, nil
	default:
		cfg := t.summarize().TelegrafConfig
		err := s.teleSVC.CreateTelegrafConfig(ctx, &cfg, userID)
		if err != nil {
			return influxdb.TelegrafConfig{}, ierrors.Wrap(err, "failed to create telegraf config")
		}
		return cfg, nil
	}
}

func (s *Service) rollbackTelegrafConfigs(ctx context.Context, userID influxdb.ID, cfgs []*stateTelegraf) error {
	rollbackFn := func(t *stateTelegraf) error {
		var err error
		switch t.stateStatus {
		case StateStatusRemove:
			err = ierrors.Wrap(s.teleSVC.CreateTelegrafConfig(ctx, t.existing, userID), "rolling back removed telegraf config")
		case StateStatusExists:
			_, err = s.teleSVC.UpdateTelegrafConfig(ctx, t.ID(), t.existing, userID)
			err = ierrors.Wrap(err, "rolling back updated telegraf config")
		default:
			err = ierrors.Wrap(s.teleSVC.DeleteTelegrafConfig(ctx, t.ID()), "rolling back created telegraf config")
		}
		return err
	}

	var errs []string
	for _, v := range cfgs {
		if err := rollbackFn(v); err != nil {
			errs = append(errs, fmt.Sprintf("error for telegraf config[%q]: %s", v.ID(), err))
		}
	}

	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "; "))
	}

	return nil
}

func (s *Service) applyVariables(ctx context.Context, vars []*stateVariable) applier {
|
2019-11-07 00:45:00 +00:00
|
|
|
const resource = "variable"
|
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
mutex := new(doMutex)
|
2020-04-14 23:18:34 +00:00
|
|
|
rollBackVars := make([]*stateVariable, 0, len(vars))
|
2019-11-07 00:45:00 +00:00
|
|
|
|
2019-12-12 19:09:32 +00:00
|
|
|
createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
|
2020-04-14 23:18:34 +00:00
|
|
|
var v *stateVariable
|
2019-12-07 00:23:09 +00:00
|
|
|
mutex.Do(func() {
|
2020-04-14 23:18:34 +00:00
|
|
|
vars[i].orgID = orgID
|
|
|
|
v = vars[i]
|
2019-12-07 00:23:09 +00:00
|
|
|
})
|
|
|
|
if !v.shouldApply() {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
influxVar, err := s.applyVariable(ctx, v)
|
|
|
|
if err != nil {
|
|
|
|
return &applyErrBody{
|
2020-04-14 23:18:34 +00:00
|
|
|
name: v.parserVar.Name(),
|
2019-12-07 00:23:09 +00:00
|
|
|
msg: err.Error(),
|
2019-11-07 00:45:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
mutex.Do(func() {
|
|
|
|
vars[i].id = influxVar.ID
|
|
|
|
rollBackVars = append(rollBackVars, vars[i])
|
|
|
|
})
|
|
|
|
return nil
|
2019-11-07 00:45:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return applier{
|
2019-12-07 00:23:09 +00:00
|
|
|
creater: creater{
|
|
|
|
entries: len(vars),
|
|
|
|
fn: createFn,
|
|
|
|
},
|
2019-11-07 00:45:00 +00:00
|
|
|
rollbacker: rollbacker{
|
|
|
|
resource: resource,
|
2020-04-03 00:44:27 +00:00
|
|
|
fn: func(_ influxdb.ID) error { return s.rollbackVariables(ctx, rollBackVars) },
|
2019-11-07 00:45:00 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}

func (s *Service) rollbackVariables(ctx context.Context, variables []*stateVariable) error {
	rollbackFn := func(v *stateVariable) error {
		var err error
		switch v.stateStatus {
		case StateStatusRemove:
			err = ierrors.Wrap(s.varSVC.CreateVariable(ctx, v.existing), "rolling back removed variable")
		case StateStatusExists:
			_, err = s.varSVC.UpdateVariable(ctx, v.ID(), &influxdb.VariableUpdate{
				Name:        v.parserVar.Name(),
				Description: v.parserVar.Description,
				Arguments:   v.parserVar.influxVarArgs(),
			})
			err = ierrors.Wrap(err, "rolling back updated variable")
		default:
			err = ierrors.Wrap(s.varSVC.DeleteVariable(ctx, v.ID()), "rolling back created variable")
		}
		return err
	}

	var errs []string
	for _, v := range variables {
		if err := rollbackFn(v); err != nil {
			errs = append(errs, fmt.Sprintf("error for variable[%q]: %s", v.ID(), err))
		}
	}

	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "; "))
	}

	return nil
}

func (s *Service) applyVariable(ctx context.Context, v *stateVariable) (influxdb.Variable, error) {
	switch v.stateStatus {
	case StateStatusRemove:
		if err := s.varSVC.DeleteVariable(ctx, v.id); err != nil {
			return influxdb.Variable{}, err
		}
		return *v.existing, nil
	case StateStatusExists:
		updatedVar, err := s.varSVC.UpdateVariable(ctx, v.ID(), &influxdb.VariableUpdate{
			Name:        v.parserVar.Name(),
			Description: v.parserVar.Description,
			Arguments:   v.parserVar.influxVarArgs(),
		})
		if err != nil {
			return influxdb.Variable{}, err
		}
		return *updatedVar, nil
	default:
		influxVar := influxdb.Variable{
			OrganizationID: v.orgID,
			Name:           v.parserVar.Name(),
			Description:    v.parserVar.Description,
			Arguments:      v.parserVar.influxVarArgs(),
		}
		err := s.varSVC.CreateVariable(ctx, &influxVar)
		if err != nil {
			return influxdb.Variable{}, err
		}

		return influxVar, nil
	}
}

func (s *Service) applyLabelMappings(labelMappings []stateLabelMapping) applier {
	const resource = "label_mapping"

	mutex := new(doMutex)
	rollbackMappings := make([]stateLabelMapping, 0, len(labelMappings))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var mapping stateLabelMapping
		mutex.Do(func() {
			mapping = labelMappings[i]
		})

		ident := mapping.resource.stateIdentity()
		if IsExisting(mapping.status) || mapping.label.ID() == 0 || ident.id == 0 {
			// This guard does two things: it skips writing a mapping that
			// already exists, and it keeps existing mappings out of the
			// rollback list below, so the delete call can never remove a
			// mapping that predates this apply.
			return nil
		}

		m := influxdb.LabelMapping{
			LabelID:      mapping.label.ID(),
			ResourceID:   ident.id,
			ResourceType: ident.resourceType,
		}
		err := s.labelSVC.CreateLabelMapping(ctx, &m)
		if err != nil {
			return &applyErrBody{
				name: fmt.Sprintf("%s:%s:%s", ident.resourceType, ident.id, mapping.label.ID()),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			rollbackMappings = append(rollbackMappings, mapping)
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(labelMappings),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn:       func(_ influxdb.ID) error { return s.rollbackLabelMappings(rollbackMappings) },
		},
	}
}

func (s *Service) rollbackLabelMappings(mappings []stateLabelMapping) error {
	var errs []string
	for _, stateMapping := range mappings {
		influxMapping := stateLabelMappingToInfluxLabelMapping(stateMapping)
		err := s.labelSVC.DeleteLabelMapping(context.Background(), &influxMapping)
		if err != nil {
			errs = append(errs, fmt.Sprintf("%s:%s", stateMapping.label.ID(), stateMapping.resource.stateIdentity().id))
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf(`label_resource_id_pairs=[%s] err="unable to delete label mapping"`, strings.Join(errs, ", "))
	}

	return nil
}

func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.ID, state *stateCoordinator) error {
	stack, err := s.store.ReadStackByID(ctx, stackID)
	if err != nil {
		return err
	}

	var stackResources []StackResource
	for _, b := range state.mBuckets {
		if IsRemoval(b.stateStatus) {
			continue
		}
		stackResources = append(stackResources, StackResource{
			APIVersion: APIVersion,
			ID:         b.ID(),
			Kind:       KindBucket,
			PkgName:    b.parserBkt.PkgName(),
		})
	}
	for _, c := range state.mChecks {
		if IsRemoval(c.stateStatus) {
			continue
		}
		stackResources = append(stackResources, StackResource{
			APIVersion: APIVersion,
			ID:         c.ID(),
			Kind:       KindCheck,
			PkgName:    c.parserCheck.PkgName(),
		})
	}
	for _, d := range state.mDashboards {
		if IsRemoval(d.stateStatus) {
			continue
		}
		stackResources = append(stackResources, StackResource{
			APIVersion: APIVersion,
			ID:         d.ID(),
			Kind:       KindDashboard,
			PkgName:    d.parserDash.PkgName(),
		})
	}
	for _, n := range state.mEndpoints {
		if IsRemoval(n.stateStatus) {
			continue
		}
		stackResources = append(stackResources, StackResource{
			APIVersion: APIVersion,
			ID:         n.ID(),
			Kind:       KindNotificationEndpoint,
			PkgName:    n.parserEndpoint.PkgName(),
		})
	}
	for _, l := range state.mLabels {
		if IsRemoval(l.stateStatus) {
			continue
		}
		stackResources = append(stackResources, StackResource{
			APIVersion: APIVersion,
			ID:         l.ID(),
			Kind:       KindLabel,
			PkgName:    l.parserLabel.PkgName(),
		})
	}
	for _, t := range state.mTasks {
		if IsRemoval(t.stateStatus) {
			continue
		}
		stackResources = append(stackResources, StackResource{
			APIVersion: APIVersion,
			ID:         t.ID(),
			Kind:       KindTask,
			PkgName:    t.parserTask.PkgName(),
		})
	}
	for _, t := range state.mTelegrafs {
		if IsRemoval(t.stateStatus) {
			continue
		}
		stackResources = append(stackResources, StackResource{
			APIVersion: APIVersion,
			ID:         t.ID(),
			Kind:       KindTelegraf,
			PkgName:    t.parserTelegraf.PkgName(),
		})
	}
	for _, v := range state.mVariables {
		if IsRemoval(v.stateStatus) {
			continue
		}
		stackResources = append(stackResources, StackResource{
			APIVersion: APIVersion,
			ID:         v.ID(),
			Kind:       KindVariable,
			PkgName:    v.parserVar.PkgName(),
		})
	}
	stack.Resources = stackResources

	stack.UpdatedAt = time.Now()
	return s.store.UpdateStack(ctx, stack)
}

func (s *Service) updateStackAfterRollback(ctx context.Context, stackID influxdb.ID, state *stateCoordinator) error {
	stack, err := s.store.ReadStackByID(ctx, stackID)
	if err != nil {
		return err
	}

	type key struct {
		k       Kind
		pkgName string
	}
	newKey := func(k Kind, pkgName string) key {
		return key{k: k, pkgName: pkgName}
	}

	existingResources := make(map[key]*StackResource)
	for i := range stack.Resources {
		res := stack.Resources[i]
		existingResources[newKey(res.Kind, res.PkgName)] = &stack.Resources[i]
	}

	hasChanges := false
	{
		// These blocks cover the case where a removal was rolled back by
		// creating a new resource; the stack record must then point at the
		// new ID. When a resource was not slated for removal, rollback
		// restores its previous state and this is a no-op.
		for _, b := range state.mBuckets {
			res, ok := existingResources[newKey(KindBucket, b.parserBkt.PkgName())]
			if ok && res.ID != b.ID() {
				hasChanges = true
				res.ID = b.existing.ID
			}
		}
		for _, c := range state.mChecks {
			res, ok := existingResources[newKey(KindCheck, c.parserCheck.PkgName())]
			if ok && res.ID != c.ID() {
				hasChanges = true
				res.ID = c.existing.GetID()
			}
		}
		for _, d := range state.mDashboards {
			res, ok := existingResources[newKey(KindDashboard, d.parserDash.PkgName())]
			if ok && res.ID != d.ID() {
				hasChanges = true
				res.ID = d.existing.ID
			}
		}
		for _, e := range state.mEndpoints {
			res, ok := existingResources[newKey(KindNotificationEndpoint, e.parserEndpoint.PkgName())]
			if ok && res.ID != e.ID() {
				hasChanges = true
				res.ID = e.existing.GetID()
			}
		}
		for _, l := range state.mLabels {
			res, ok := existingResources[newKey(KindLabel, l.parserLabel.PkgName())]
			if ok && res.ID != l.ID() {
				hasChanges = true
				res.ID = l.existing.ID
			}
		}
		for _, t := range state.mTasks {
			res, ok := existingResources[newKey(KindTask, t.parserTask.PkgName())]
			if ok && res.ID != t.ID() {
				hasChanges = true
				res.ID = t.existing.ID
			}
		}
		for _, t := range state.mTelegrafs {
			res, ok := existingResources[newKey(KindTelegraf, t.parserTelegraf.PkgName())]
			if ok && res.ID != t.ID() {
				hasChanges = true
				res.ID = t.existing.ID
			}
		}
		for _, v := range state.mVariables {
			res, ok := existingResources[newKey(KindVariable, v.parserVar.PkgName())]
			if ok && res.ID != v.ID() {
				hasChanges = true
				res.ID = v.existing.ID
			}
		}
	}
	if !hasChanges {
		return nil
	}

	stack.UpdatedAt = time.Now()
	return s.store.UpdateStack(ctx, stack)
}

func (s *Service) findLabel(ctx context.Context, orgID influxdb.ID, l *stateLabel) (*influxdb.Label, error) {
	if l.ID() != 0 {
		return s.labelSVC.FindLabelByID(ctx, l.ID())
	}

	existingLabels, err := s.labelSVC.FindLabels(ctx, influxdb.LabelFilter{
		Name:  l.parserLabel.Name(),
		OrgID: &orgID,
	}, influxdb.FindOptions{Limit: 1})
	if err != nil {
		return nil, err
	}
	if len(existingLabels) == 0 {
		return nil, errors.New("no labels found for name: " + l.parserLabel.Name())
	}
	return existingLabels[0], nil
}

func (s *Service) getAllPlatformVariables(ctx context.Context, orgID influxdb.ID) ([]*influxdb.Variable, error) {
	const limit = 100

	var (
		existingVars []*influxdb.Variable
		offset       int
	)
	for {
		vars, err := s.varSVC.FindVariables(ctx, influxdb.VariableFilter{
			OrganizationID: &orgID,
			// TODO: it would be ideal to extend find variables to allow a
			// name matcher, since names are unique for vars within an org.
			// In the meantime, page through with a large limit, which
			// should be more than enough for the time being.
		}, influxdb.FindOptions{Limit: limit, Offset: offset})
		if err != nil {
			return nil, err
		}
		existingVars = append(existingVars, vars...)

		if len(vars) < limit {
			break
		}
		offset += len(vars)
	}
	return existingVars, nil
}

func newSummaryFromStatePkg(state *stateCoordinator, pkg *Pkg) Summary {
	stateSum := state.summary()
	stateSum.MissingEnvs = pkg.missingEnvRefs()
	stateSum.MissingSecrets = pkg.missingSecrets()
	return stateSum
}

func getLabelIDMap(ctx context.Context, labelSVC influxdb.LabelService, labelNames []string) (map[influxdb.ID]bool, error) {
	mLabelIDs := make(map[influxdb.ID]bool)
	for _, labelName := range labelNames {
		iLabels, err := labelSVC.FindLabels(ctx, influxdb.LabelFilter{
			Name: labelName,
		})
		if err != nil {
			return nil, err
		}
		if len(iLabels) == 1 {
			mLabelIDs[iLabels[0].ID] = true
		}
	}
	return mLabelIDs, nil
}

type doMutex struct {
	sync.Mutex
}

func (m *doMutex) Do(fn func()) {
	m.Lock()
	defer m.Unlock()
	fn()
}
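
// A minimal usage sketch (illustrative only): the create functions above run
// concurrently, so any read or write of shared slices goes through Do.
//
//	mutex := new(doMutex)
//	mutex.Do(func() {
//		rollbacks = append(rollbacks, item) // hypothetical shared slice
//	})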

type (
	applier struct {
		creater    creater
		rollbacker rollbacker
	}

	rollbacker struct {
		resource string
		fn       func(orgID influxdb.ID) error
	}

	creater struct {
		entries int
		fn      func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody
	}
)
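
// Each apply* method above returns one of these pairs: a creater that knows
// how many entries to fan out and how to create entry i, and a rollbacker
// that can undo the whole batch. A sketch of the shape, with hypothetical
// values:
//
//	app := applier{
//		creater:    creater{entries: len(items), fn: createFn},
//		rollbacker: rollbacker{resource: "bucket", fn: rollbackFn},
//	}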

type rollbackCoordinator struct {
	rollbacks []rollbacker

	sem chan struct{}
}
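
// The sem channel bounds how many create goroutines run at once; it is
// presumably constructed as a buffered channel, e.g. make(chan struct{}, n),
// where n is the desired concurrency.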

func (r *rollbackCoordinator) runTilEnd(ctx context.Context, orgID, userID influxdb.ID, appliers ...applier) error {
	errStr := newErrStream(ctx)

	wg := new(sync.WaitGroup)
	for i := range appliers {
		// do not capture the shared loop variable directly: the creates run
		// concurrently, and the loop variable is reused between iterations.
		app := appliers[i]
		r.rollbacks = append(r.rollbacks, app.rollbacker)
		for idx := range make([]struct{}, app.creater.entries) {
			r.sem <- struct{}{}
			wg.Add(1)

			go func(i int, resource string) {
				defer func() {
					wg.Done()
					<-r.sem
				}()

				ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
				defer cancel()

				if err := app.creater.fn(ctx, i, orgID, userID); err != nil {
					errStr.add(errMsg{resource: resource, err: *err})
				}
			}(idx, app.rollbacker.resource)
		}
	}
	wg.Wait()

	return errStr.close()
}

func (r *rollbackCoordinator) rollback(l *zap.Logger, err *error, orgID influxdb.ID) {
	if *err == nil {
		return
	}

	for _, r := range r.rollbacks {
		if err := r.fn(orgID); err != nil {
			l.Error("failed to delete "+r.resource, zap.Error(err))
		}
	}
}
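
// A sketch of the intended call pattern, with hypothetical names: run every
// applier to completion, then roll everything back only if something failed.
// The *error parameter suggests use with a deferred call on a named return.
//
//	coordinator := &rollbackCoordinator{sem: make(chan struct{}, 1)}
//	defer coordinator.rollback(logger, &err, orgID)
//	err = coordinator.runTilEnd(ctx, orgID, userID, appliers...)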

type errMsg struct {
	resource string
	err      applyErrBody
}

type errStream struct {
	msgStream chan errMsg
	err       chan error
	done      <-chan struct{}
}

func newErrStream(ctx context.Context) *errStream {
	e := &errStream{
		msgStream: make(chan errMsg),
		err:       make(chan error),
		done:      ctx.Done(),
	}
	e.do()
	return e
}

func (e *errStream) do() {
	go func() {
		mErrs := func() map[string]applyErrs {
			mErrs := make(map[string]applyErrs)
			for {
				select {
				case <-e.done:
					return nil
				case msg, ok := <-e.msgStream:
					if !ok {
						return mErrs
					}
					mErrs[msg.resource] = append(mErrs[msg.resource], &msg.err)
				}
			}
		}()

		if len(mErrs) == 0 {
			e.err <- nil
			return
		}

		var errs []string
		for resource, err := range mErrs {
			errs = append(errs, err.toError(resource, "failed to apply resource").Error())
		}
		e.err <- errors.New(strings.Join(errs, "\n"))
	}()
}

func (e *errStream) close() error {
	close(e.msgStream)
	return <-e.err
}

func (e *errStream) add(msg errMsg) {
	select {
	case <-e.done:
	case e.msgStream <- msg:
	}
}
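
// Lifecycle sketch (illustrative only): producers add messages while work is
// in flight; close stops collection and returns the aggregated error, or nil
// when nothing was reported.
//
//	stream := newErrStream(ctx)
//	stream.add(errMsg{resource: "bucket", err: applyErrBody{name: "b1", msg: "boom"}})
//	err := stream.close()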

// TODO: clean up apply errors to inform the user in an actionable way
type applyErrBody struct {
	name string
	msg  string
}

type applyErrs []*applyErrBody

func (a applyErrs) toError(resType, msg string) error {
	if len(a) == 0 {
		return nil
	}
	errMsg := fmt.Sprintf(`resource_type=%q err=%q`, resType, msg)
	for _, e := range a {
		errMsg += fmt.Sprintf("\n\tpkg_name=%q err_msg=%q", e.name, e.msg)
	}
	return errors.New(errMsg)
}
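
// Given the format strings above, two failed buckets would render roughly as
// (values hypothetical):
//
//	resource_type="bucket" err="failed to apply resource"
//		pkg_name="b1" err_msg="name taken"
//		pkg_name="b2" err_msg="invalid retention"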

func validURLs(urls []string) error {
	for _, u := range urls {
		if _, err := url.Parse(u); err != nil {
			msg := fmt.Sprintf("url invalid for entry %q", u)
			return toInfluxError(influxdb.EInvalid, msg)
		}
	}
	return nil
}
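
// Note that url.Parse is lenient: it rejects little beyond malformed
// percent-escapes and control characters, so most free-form strings pass
// this check. For instance, url.Parse("not-a-url") succeeds, while
// url.Parse("%zz") fails with an invalid-escape error.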

func labelSlcToMap(labels []*label) map[string]*label {
	m := make(map[string]*label)
	for i := range labels {
		m[labels[i].Name()] = labels[i]
	}
	return m
}

func failedValidationErr(err error) error {
	if err == nil {
		return nil
	}
	return &influxdb.Error{Code: influxdb.EUnprocessableEntity, Err: err}
}

func internalErr(err error) error {
	if err == nil {
		return nil
	}
	return toInfluxError(influxdb.EInternal, err.Error())
}

func toInfluxError(code string, msg string) *influxdb.Error {
	return &influxdb.Error{
		Code: code,
		Msg:  msg,
	}
}