2019-10-23 17:09:04 +00:00
|
|
|
package pkger
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2019-10-26 02:11:47 +00:00
|
|
|
"errors"
|
2019-10-23 17:09:04 +00:00
|
|
|
"fmt"
|
2019-11-08 19:33:41 +00:00
|
|
|
"math/rand"
|
2019-10-28 22:23:40 +00:00
|
|
|
"sort"
|
2019-10-23 17:09:04 +00:00
|
|
|
"strings"
|
2019-12-07 00:23:09 +00:00
|
|
|
"sync"
|
2019-10-23 17:09:04 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/influxdata/influxdb"
|
2019-11-21 00:38:12 +00:00
|
|
|
ierrors "github.com/influxdata/influxdb/kit/errors"
|
2019-10-23 17:09:04 +00:00
|
|
|
"go.uber.org/zap"
|
|
|
|
)
|
|
|
|
|
// APIVersion marks the current APIVersion for influx packages.
const APIVersion = "0.1.0"
|
|
|
|
|
// SVC is the packages service interface.
type SVC interface {
	// CreatePkg produces a pkg from the provided setters.
	CreatePkg(ctx context.Context, setters ...CreatePkgSetFn) (*Pkg, error)
	// DryRun reports what applying the pkg to the org would change, without applying it.
	DryRun(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg) (Summary, Diff, error)
	// Apply applies the pkg's resources to the org.
	Apply(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (Summary, error)
}
|
|
|
|
|
// serviceOpt collects the configurable dependencies for a Service. It is
// populated by ServiceSetterFn options passed to NewService.
type serviceOpt struct {
	logger *zap.Logger

	// applyReqLimit limits request fan-out when applying a pkg
	// (presumably concurrent requests — confirm against Apply).
	applyReqLimit int

	bucketSVC   influxdb.BucketService
	checkSVC    influxdb.CheckService
	dashSVC     influxdb.DashboardService
	labelSVC    influxdb.LabelService
	endpointSVC influxdb.NotificationEndpointService
	ruleSVC     influxdb.NotificationRuleStore
	secretSVC   influxdb.SecretService
	taskSVC     influxdb.TaskService
	teleSVC     influxdb.TelegrafConfigStore
	varSVC      influxdb.VariableService
}
|
|
|
|
|
// ServiceSetterFn is a means of setting dependencies on the Service type.
type ServiceSetterFn func(opt *serviceOpt)
|
|
|
|
|
2019-12-06 00:53:00 +00:00
|
|
|
// WithLogger sets the logger for the service.
|
|
|
|
func WithLogger(log *zap.Logger) ServiceSetterFn {
|
|
|
|
return func(o *serviceOpt) {
|
|
|
|
o.logger = log
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-07 00:45:00 +00:00
|
|
|
// WithBucketSVC sets the bucket service.
|
|
|
|
func WithBucketSVC(bktSVC influxdb.BucketService) ServiceSetterFn {
|
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.bucketSVC = bktSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-18 07:05:28 +00:00
|
|
|
// WithCheckSVC sets the check service.
|
|
|
|
func WithCheckSVC(checkSVC influxdb.CheckService) ServiceSetterFn {
|
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.checkSVC = checkSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-07 00:45:00 +00:00
|
|
|
// WithDashboardSVC sets the dashboard service.
|
|
|
|
func WithDashboardSVC(dashSVC influxdb.DashboardService) ServiceSetterFn {
|
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.dashSVC = dashSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-20 17:10:10 +00:00
|
|
|
// WithNotificationEndpointSVC sets the endpoint notification service.
|
|
|
|
func WithNotificationEndpointSVC(endpointSVC influxdb.NotificationEndpointService) ServiceSetterFn {
|
2019-12-10 21:35:23 +00:00
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.endpointSVC = endpointSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-20 17:10:10 +00:00
|
|
|
// WithNotificationRuleSVC sets the endpoint rule service.
|
|
|
|
func WithNotificationRuleSVC(ruleSVC influxdb.NotificationRuleStore) ServiceSetterFn {
|
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.ruleSVC = ruleSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-07 00:45:00 +00:00
|
|
|
// WithLabelSVC sets the label service.
|
|
|
|
func WithLabelSVC(labelSVC influxdb.LabelService) ServiceSetterFn {
|
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.labelSVC = labelSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-16 17:39:55 +00:00
|
|
|
// WithSecretSVC sets the secret service.
|
|
|
|
func WithSecretSVC(secretSVC influxdb.SecretService) ServiceSetterFn {
|
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.secretSVC = secretSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-27 19:22:05 +00:00
|
|
|
// WithTaskSVC sets the task service.
|
2019-12-23 19:51:00 +00:00
|
|
|
func WithTaskSVC(taskSVC influxdb.TaskService) ServiceSetterFn {
|
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.taskSVC = taskSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-04 01:00:15 +00:00
|
|
|
// WithTelegrafSVC sets the telegraf service.
|
|
|
|
func WithTelegrafSVC(telegrafSVC influxdb.TelegrafConfigStore) ServiceSetterFn {
|
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.teleSVC = telegrafSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-07 00:45:00 +00:00
|
|
|
// WithVariableSVC sets the variable service.
|
|
|
|
func WithVariableSVC(varSVC influxdb.VariableService) ServiceSetterFn {
|
|
|
|
return func(opt *serviceOpt) {
|
|
|
|
opt.varSVC = varSVC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
// Service provides the pkger business logic including all the dependencies to make
// this resource sausage.
type Service struct {
	log *zap.Logger

	bucketSVC   influxdb.BucketService
	checkSVC    influxdb.CheckService
	dashSVC     influxdb.DashboardService
	labelSVC    influxdb.LabelService
	endpointSVC influxdb.NotificationEndpointService
	ruleSVC     influxdb.NotificationRuleStore
	secretSVC   influxdb.SecretService
	taskSVC     influxdb.TaskService
	teleSVC     influxdb.TelegrafConfigStore
	varSVC      influxdb.VariableService

	// applyReqLimit limits request fan-out when applying a pkg
	// (set via serviceOpt; defaults to 5 in NewService).
	applyReqLimit int
}
|
|
|
|
|
// Compile-time check that *Service satisfies the SVC interface.
var _ SVC = (*Service)(nil)
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
// NewService is a constructor for a pkger Service.
|
2019-12-06 00:53:00 +00:00
|
|
|
func NewService(opts ...ServiceSetterFn) *Service {
|
|
|
|
opt := &serviceOpt{
|
2019-12-07 00:23:09 +00:00
|
|
|
logger: zap.NewNop(),
|
|
|
|
applyReqLimit: 5,
|
2019-12-06 00:53:00 +00:00
|
|
|
}
|
2019-11-07 00:45:00 +00:00
|
|
|
for _, o := range opts {
|
|
|
|
o(opt)
|
2019-10-23 17:09:04 +00:00
|
|
|
}
|
|
|
|
|
2019-11-07 00:45:00 +00:00
|
|
|
return &Service{
|
2019-12-07 00:23:09 +00:00
|
|
|
log: opt.logger,
|
|
|
|
bucketSVC: opt.bucketSVC,
|
2019-12-18 07:05:28 +00:00
|
|
|
checkSVC: opt.checkSVC,
|
2019-12-07 00:23:09 +00:00
|
|
|
labelSVC: opt.labelSVC,
|
|
|
|
dashSVC: opt.dashSVC,
|
2019-12-10 21:35:23 +00:00
|
|
|
endpointSVC: opt.endpointSVC,
|
2019-12-20 17:10:10 +00:00
|
|
|
ruleSVC: opt.ruleSVC,
|
2019-12-16 17:39:55 +00:00
|
|
|
secretSVC: opt.secretSVC,
|
2019-12-23 19:51:00 +00:00
|
|
|
taskSVC: opt.taskSVC,
|
2019-12-07 00:23:09 +00:00
|
|
|
teleSVC: opt.teleSVC,
|
|
|
|
varSVC: opt.varSVC,
|
|
|
|
applyReqLimit: opt.applyReqLimit,
|
2019-10-23 17:09:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
// CreatePkgSetFn is a functional input for setting the pkg fields.
type CreatePkgSetFn func(opt *CreateOpt) error
|
2019-11-08 19:33:41 +00:00
|
|
|
|
// CreateOpt are the options for creating a new package.
type CreateOpt struct {
	// Metadata is applied to the produced pkg.
	Metadata Metadata
	// OrgIDs is the set of orgs whose resources should all be cloned.
	OrgIDs map[influxdb.ID]bool
	// Resources are the individual resources to clone into the pkg.
	Resources []ResourceToClone
}
|
2019-11-04 23:15:53 +00:00
|
|
|
|
2019-11-09 02:12:48 +00:00
|
|
|
// CreateWithMetadata sets the metadata on the pkg in a CreatePkg call.
|
|
|
|
func CreateWithMetadata(meta Metadata) CreatePkgSetFn {
|
2019-11-21 00:38:12 +00:00
|
|
|
return func(opt *CreateOpt) error {
|
|
|
|
opt.Metadata = meta
|
2019-11-08 19:33:41 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-09 02:12:48 +00:00
|
|
|
// CreateWithExistingResources allows the create method to clone existing resources.
|
|
|
|
func CreateWithExistingResources(resources ...ResourceToClone) CreatePkgSetFn {
|
2019-11-21 00:38:12 +00:00
|
|
|
return func(opt *CreateOpt) error {
|
2019-11-08 19:33:41 +00:00
|
|
|
for _, r := range resources {
|
|
|
|
if err := r.OK(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-11-21 00:38:12 +00:00
|
|
|
r.Kind = NewKind(string(r.Kind))
|
2019-11-08 19:33:41 +00:00
|
|
|
}
|
2019-11-21 00:38:12 +00:00
|
|
|
opt.Resources = append(opt.Resources, resources...)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateWithAllOrgResources allows the create method to clone all existing resources
|
|
|
|
// for the given organization.
|
|
|
|
func CreateWithAllOrgResources(orgID influxdb.ID) CreatePkgSetFn {
|
|
|
|
return func(opt *CreateOpt) error {
|
|
|
|
if orgID == 0 {
|
|
|
|
return errors.New("orgID provided must not be zero")
|
|
|
|
}
|
|
|
|
if opt.OrgIDs == nil {
|
|
|
|
opt.OrgIDs = make(map[influxdb.ID]bool)
|
|
|
|
}
|
|
|
|
opt.OrgIDs[orgID] = true
|
2019-11-04 23:15:53 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
// CreatePkg will produce a pkg from the parameters provided. Options can seed
// metadata, explicit resources to clone, and/or whole orgs whose resources are
// all cloned. The resulting resources are deduplicated and sorted by kind
// priority, then by name within a kind.
func (s *Service) CreatePkg(ctx context.Context, setters ...CreatePkgSetFn) (*Pkg, error) {
	opt := new(CreateOpt)
	for _, setter := range setters {
		if err := setter(opt); err != nil {
			return nil, err
		}
	}

	pkg := &Pkg{
		APIVersion: APIVersion,
		Kind:       KindPackage,
		Metadata:   opt.Metadata,
		Spec: struct {
			Resources []Resource `yaml:"resources" json:"resources"`
		}{
			Resources: make([]Resource, 0, len(opt.Resources)),
		},
	}
	if pkg.Metadata.Name == "" {
		// pseudo randomness, this is not an attempt at making charts unique
		// that is a problem for the consumer.
		pkg.Metadata.Name = fmt.Sprintf("new_%7d", rand.Int())
	}
	if pkg.Metadata.Version == "" {
		pkg.Metadata.Version = "v1"
	}

	cloneAssFn := s.resourceCloneAssociationsGen()
	// Expand each requested org into its concrete resources to clone.
	for orgID := range opt.OrgIDs {
		resourcesToClone, err := s.cloneOrgResources(ctx, orgID)
		if err != nil {
			return nil, internalErr(err)
		}
		opt.Resources = append(opt.Resources, resourcesToClone...)
	}

	// Deduplicate the clone requests, then materialize each into one or
	// more pkg resources (clones can carry sidecar/label resources).
	for _, r := range uniqResourcesToClone(opt.Resources) {
		newResources, err := s.resourceCloneToResource(ctx, r, cloneAssFn)
		if err != nil {
			return nil, internalErr(err)
		}
		pkg.Spec.Resources = append(pkg.Spec.Resources, newResources...)
	}

	pkg.Spec.Resources = uniqResources(pkg.Spec.Resources)

	if err := pkg.Validate(ValidWithoutResources()); err != nil {
		return nil, failedValidationErr(err)
	}

	// Lower number sorts first; dependencies (labels, buckets, endpoints)
	// come before the resources that reference them.
	var kindPriorities = map[Kind]int{
		KindLabel:                         1,
		KindBucket:                        2,
		KindCheckDeadman:                  3,
		KindCheckThreshold:                4,
		KindNotificationEndpointHTTP:      5,
		KindNotificationEndpointPagerDuty: 6,
		KindNotificationEndpointSlack:     7,
		KindNotificationRule:              8,
		KindVariable:                      9,
		KindTelegraf:                      10,
		KindDashboard:                     11,
	}

	sort.Slice(pkg.Spec.Resources, func(i, j int) bool {
		iName, jName := pkg.Spec.Resources[i].Name(), pkg.Spec.Resources[j].Name()
		iKind, _ := pkg.Spec.Resources[i].kind()
		jKind, _ := pkg.Spec.Resources[j].kind()

		if iKind.is(jKind) {
			return iName < jName
		}
		return kindPriorities[iKind] < kindPriorities[jKind]
	})

	return pkg, nil
}
|
|
|
|
|
2019-11-21 00:38:12 +00:00
|
|
|
func (s *Service) cloneOrgResources(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
|
|
|
resourceTypeGens := []struct {
|
|
|
|
resType influxdb.ResourceType
|
|
|
|
cloneFn func(context.Context, influxdb.ID) ([]ResourceToClone, error)
|
|
|
|
}{
|
|
|
|
{
|
2019-12-06 00:53:00 +00:00
|
|
|
resType: KindBucket.ResourceType(),
|
2019-11-21 00:38:12 +00:00
|
|
|
cloneFn: s.cloneOrgBuckets,
|
|
|
|
},
|
2019-12-19 01:03:19 +00:00
|
|
|
{
|
|
|
|
resType: KindCheck.ResourceType(),
|
|
|
|
cloneFn: s.cloneOrgChecks,
|
|
|
|
},
|
2019-11-21 00:38:12 +00:00
|
|
|
{
|
2019-12-06 00:53:00 +00:00
|
|
|
resType: KindDashboard.ResourceType(),
|
2019-11-21 00:38:12 +00:00
|
|
|
cloneFn: s.cloneOrgDashboards,
|
|
|
|
},
|
|
|
|
{
|
2019-12-06 00:53:00 +00:00
|
|
|
resType: KindLabel.ResourceType(),
|
2019-11-21 00:38:12 +00:00
|
|
|
cloneFn: s.cloneOrgLabels,
|
|
|
|
},
|
2019-12-16 20:10:45 +00:00
|
|
|
{
|
|
|
|
resType: KindNotificationEndpoint.ResourceType(),
|
|
|
|
cloneFn: s.cloneOrgNotificationEndpoints,
|
|
|
|
},
|
2019-12-20 20:51:27 +00:00
|
|
|
{
|
|
|
|
resType: KindNotificationRule.ResourceType(),
|
|
|
|
cloneFn: s.cloneOrgNotificationRules,
|
|
|
|
},
|
2019-12-23 22:31:56 +00:00
|
|
|
{
|
|
|
|
resType: KindTask.ResourceType(),
|
|
|
|
cloneFn: s.cloneOrgTasks,
|
|
|
|
},
|
2019-12-05 00:17:35 +00:00
|
|
|
{
|
2019-12-06 00:53:00 +00:00
|
|
|
resType: KindTelegraf.ResourceType(),
|
2019-12-16 20:10:45 +00:00
|
|
|
cloneFn: s.cloneOrgTelegrafs,
|
2019-12-05 00:17:35 +00:00
|
|
|
},
|
2019-11-21 00:38:12 +00:00
|
|
|
{
|
2019-12-06 00:53:00 +00:00
|
|
|
resType: KindVariable.ResourceType(),
|
2019-11-21 00:38:12 +00:00
|
|
|
cloneFn: s.cloneOrgVariables,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var resources []ResourceToClone
|
|
|
|
for _, resGen := range resourceTypeGens {
|
|
|
|
existingResources, err := resGen.cloneFn(ctx, orgID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, ierrors.Wrap(err, "finding "+string(resGen.resType))
|
|
|
|
}
|
|
|
|
resources = append(resources, existingResources...)
|
|
|
|
}
|
|
|
|
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Service) cloneOrgBuckets(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
|
|
|
buckets, _, err := s.bucketSVC.FindBuckets(ctx, influxdb.BucketFilter{
|
|
|
|
OrganizationID: &orgID,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
resources := make([]ResourceToClone, 0, len(buckets))
|
|
|
|
for _, b := range buckets {
|
|
|
|
if b.Type == influxdb.BucketTypeSystem {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
resources = append(resources, ResourceToClone{
|
|
|
|
Kind: KindBucket,
|
|
|
|
ID: b.ID,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
2019-12-19 01:03:19 +00:00
|
|
|
func (s *Service) cloneOrgChecks(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
|
|
|
checks, _, err := s.checkSVC.FindChecks(ctx, influxdb.CheckFilter{
|
|
|
|
OrgID: &orgID,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
resources := make([]ResourceToClone, 0, len(checks))
|
|
|
|
for _, c := range checks {
|
|
|
|
resources = append(resources, ResourceToClone{
|
|
|
|
Kind: KindCheck,
|
|
|
|
ID: c.GetID(),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
2019-11-21 00:38:12 +00:00
|
|
|
func (s *Service) cloneOrgDashboards(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
|
|
|
dashs, _, err := s.dashSVC.FindDashboards(ctx, influxdb.DashboardFilter{
|
|
|
|
OrganizationID: &orgID,
|
|
|
|
}, influxdb.FindOptions{Limit: 100})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
resources := make([]ResourceToClone, 0, len(dashs))
|
|
|
|
for _, d := range dashs {
|
|
|
|
resources = append(resources, ResourceToClone{
|
|
|
|
Kind: KindDashboard,
|
|
|
|
ID: d.ID,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Service) cloneOrgLabels(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
|
|
|
labels, err := s.labelSVC.FindLabels(ctx, influxdb.LabelFilter{
|
|
|
|
OrgID: &orgID,
|
|
|
|
}, influxdb.FindOptions{Limit: 10000})
|
|
|
|
if err != nil {
|
|
|
|
return nil, ierrors.Wrap(err, "finding labels")
|
|
|
|
}
|
|
|
|
|
|
|
|
resources := make([]ResourceToClone, 0, len(labels))
|
|
|
|
for _, l := range labels {
|
|
|
|
resources = append(resources, ResourceToClone{
|
|
|
|
Kind: KindLabel,
|
|
|
|
ID: l.ID,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
2019-12-16 20:10:45 +00:00
|
|
|
func (s *Service) cloneOrgNotificationEndpoints(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
|
|
|
endpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{
|
|
|
|
OrgID: &orgID,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
resources := make([]ResourceToClone, 0, len(endpoints))
|
|
|
|
for _, e := range endpoints {
|
|
|
|
resources = append(resources, ResourceToClone{
|
|
|
|
Kind: KindNotificationEndpoint,
|
|
|
|
ID: e.GetID(),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
2019-12-20 20:51:27 +00:00
|
|
|
func (s *Service) cloneOrgNotificationRules(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
|
|
|
rules, _, err := s.ruleSVC.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{
|
|
|
|
OrgID: &orgID,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
resources := make([]ResourceToClone, 0, len(rules))
|
|
|
|
for _, r := range rules {
|
|
|
|
resources = append(resources, ResourceToClone{
|
|
|
|
Kind: KindNotificationRule,
|
|
|
|
ID: r.GetID(),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
2019-12-23 22:31:56 +00:00
|
|
|
func (s *Service) cloneOrgTasks(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
2019-12-27 22:00:49 +00:00
|
|
|
teles, _, err := s.taskSVC.FindTasks(ctx, influxdb.TaskFilter{OrganizationID: &orgID})
|
2019-12-27 17:21:05 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-12-27 22:00:49 +00:00
|
|
|
if len(teles) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
checks, _, err := s.checkSVC.FindChecks(ctx, influxdb.CheckFilter{
|
|
|
|
OrgID: &orgID,
|
|
|
|
})
|
2019-12-23 22:31:56 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-12-27 17:21:05 +00:00
|
|
|
mTeles := make(map[influxdb.ID]*influxdb.Task)
|
|
|
|
for i := range teles {
|
|
|
|
t := teles[i]
|
2019-12-27 22:00:49 +00:00
|
|
|
if t.Type == influxdb.TaskSystemType {
|
|
|
|
continue
|
|
|
|
}
|
2019-12-27 17:21:05 +00:00
|
|
|
mTeles[t.ID] = t
|
|
|
|
}
|
|
|
|
for _, c := range checks {
|
|
|
|
delete(mTeles, c.GetTaskID())
|
|
|
|
}
|
|
|
|
|
|
|
|
resources := make([]ResourceToClone, 0, len(mTeles))
|
|
|
|
for _, t := range mTeles {
|
2019-12-23 22:31:56 +00:00
|
|
|
resources = append(resources, ResourceToClone{
|
|
|
|
Kind: KindTask,
|
|
|
|
ID: t.ID,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
2019-12-16 20:10:45 +00:00
|
|
|
func (s *Service) cloneOrgTelegrafs(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
2019-12-05 00:17:35 +00:00
|
|
|
teles, _, err := s.teleSVC.FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{OrgID: &orgID})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-12-16 20:10:45 +00:00
|
|
|
|
2019-12-05 00:17:35 +00:00
|
|
|
resources := make([]ResourceToClone, 0, len(teles))
|
|
|
|
for _, t := range teles {
|
|
|
|
resources = append(resources, ResourceToClone{
|
|
|
|
Kind: KindTelegraf,
|
|
|
|
ID: t.ID,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
2019-11-21 00:38:12 +00:00
|
|
|
func (s *Service) cloneOrgVariables(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
|
|
|
|
vars, err := s.varSVC.FindVariables(ctx, influxdb.VariableFilter{
|
|
|
|
OrganizationID: &orgID,
|
|
|
|
}, influxdb.FindOptions{Limit: 10000})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
resources := make([]ResourceToClone, 0, len(vars))
|
|
|
|
for _, v := range vars {
|
|
|
|
resources = append(resources, ResourceToClone{
|
|
|
|
Kind: KindVariable,
|
|
|
|
ID: v.ID,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
// resourceCloneToResource converts a single ResourceToClone into one or more
// pkg Resources: the cloned resource itself, plus any sidecar resource it
// depends on (a notification rule drags its endpoint along), plus any label
// resources newly discovered through the associations fn.
func (s *Service) resourceCloneToResource(ctx context.Context, r ResourceToClone, cFn cloneAssociationsFn) (newResources []Resource, e error) {
	// Named error return so the deferred wrap covers every error path.
	defer func() {
		if e != nil {
			e = ierrors.Wrap(e, "cloning resource")
		}
	}()

	var (
		newResource      Resource
		sidecarResources []Resource
	)
	switch {
	case r.Kind.is(KindBucket):
		bkt, err := s.bucketSVC.FindBucketByID(ctx, r.ID)
		if err != nil {
			return nil, err
		}
		newResource = bucketToResource(*bkt, r.Name)
	case r.Kind.is(KindCheck),
		r.Kind.is(KindCheckDeadman),
		r.Kind.is(KindCheckThreshold):
		ch, err := s.checkSVC.FindCheckByID(ctx, r.ID)
		if err != nil {
			return nil, err
		}
		newResource = checkToResource(ch, r.Name)
	case r.Kind.is(KindDashboard):
		dash, err := s.findDashboardByIDFull(ctx, r.ID)
		if err != nil {
			return nil, err
		}
		newResource = dashboardToResource(*dash, r.Name)
	case r.Kind.is(KindLabel):
		l, err := s.labelSVC.FindLabelByID(ctx, r.ID)
		if err != nil {
			return nil, err
		}
		newResource = labelToResource(*l, r.Name)
	case r.Kind.is(KindNotificationEndpoint),
		r.Kind.is(KindNotificationEndpointHTTP),
		r.Kind.is(KindNotificationEndpointPagerDuty),
		r.Kind.is(KindNotificationEndpointSlack):
		// NOTE(review): this e shadows the named error return; harmless
		// because every path returns explicitly, but easy to misread.
		e, err := s.endpointSVC.FindNotificationEndpointByID(ctx, r.ID)
		if err != nil {
			return nil, err
		}
		newResource = endpointToResource(e, r.Name)
	case r.Kind.is(KindNotificationRule):
		// Rules reference an endpoint; export it alongside as a sidecar.
		ruleRes, endpointRes, err := s.exportNotificationRule(ctx, r)
		if err != nil {
			return nil, err
		}
		newResource, sidecarResources = ruleRes, append(sidecarResources, endpointRes)
	case r.Kind.is(KindTask):
		t, err := s.taskSVC.FindTaskByID(ctx, r.ID)
		if err != nil {
			return nil, err
		}
		newResource = taskToResource(*t, r.Name)
	case r.Kind.is(KindTelegraf):
		t, err := s.teleSVC.FindTelegrafConfigByID(ctx, r.ID)
		if err != nil {
			return nil, err
		}
		newResource = telegrafToResource(*t, r.Name)
	case r.Kind.is(KindVariable):
		v, err := s.varSVC.FindVariableByID(ctx, r.ID)
		if err != nil {
			return nil, err
		}
		newResource = variableToResource(*v, r.Name)
	default:
		return nil, errors.New("unsupported kind provided: " + string(r.Kind))
	}

	// Resolve label associations; attach them to the resource and surface
	// any labels not yet emitted as standalone resources.
	ass, err := cFn(ctx, r)
	if err != nil {
		return nil, err
	}
	if len(ass.associations) > 0 {
		newResource[fieldAssociations] = ass.associations
	}

	// Order: new label resources first, then sidecars, then the resource.
	return append(ass.newLableResources, append(sidecarResources, newResource)...), nil
}
|
|
|
|
|
|
|
|
func (s *Service) exportNotificationRule(ctx context.Context, r ResourceToClone) (Resource, Resource, error) {
|
|
|
|
rule, err := s.ruleSVC.FindNotificationRuleByID(ctx, r.ID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
ruleEndpoint, err := s.endpointSVC.FindNotificationEndpointByID(ctx, rule.GetEndpointID())
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return ruleToResource(rule, ruleEndpoint.GetName(), r.Name), endpointToResource(ruleEndpoint, ""), nil
|
2019-11-09 02:12:48 +00:00
|
|
|
}
|
|
|
|
|
type (
	// associations carries the label linkage for a cloned resource:
	// the association entries to embed on the resource, and any label
	// resources not yet emitted elsewhere in the pkg.
	associations struct {
		associations []Resource
		// newLableResources (sic — existing field name) holds labels
		// that must be added to the pkg as standalone resources.
		newLableResources []Resource
	}

	// cloneAssociationsFn resolves the associations for a resource to clone.
	cloneAssociationsFn func(context.Context, ResourceToClone) (associations, error)
)
|
|
|
|
// resourceCloneAssociationsGen returns a cloneAssociationsFn that looks up a
// resource's label mappings. The closure memoizes labels across calls so a
// label shared by many resources is emitted as a new pkg resource only once.
func (s *Service) resourceCloneAssociationsGen() cloneAssociationsFn {
	type key struct {
		id   influxdb.ID
		name string
	}
	// memoize the labels so we don't create duplicates
	m := make(map[key]bool)
	return func(ctx context.Context, r ResourceToClone) (associations, error) {
		// Labels can't be labeled, and unknown kinds have no mappings.
		if r.Kind.is(KindUnknown, KindLabel) {
			return associations{}, nil
		}

		labels, err := s.labelSVC.FindResourceLabels(ctx, influxdb.LabelMappingFilter{
			ResourceID:   r.ID,
			ResourceType: r.Kind.ResourceType(),
		})
		if err != nil {
			return associations{}, ierrors.Wrap(err, "finding resource labels")
		}

		var ass associations
		for _, l := range labels {
			// Always record the association on the resource itself...
			ass.associations = append(ass.associations, Resource{
				fieldKind: KindLabel.String(),
				fieldName: l.Name,
			})
			// ...but emit the label as a new resource only the first time
			// this (id, name) pair is seen.
			k := key{id: l.ID, name: l.Name}
			if m[k] {
				continue
			}
			m[k] = true
			ass.newLableResources = append(ass.newLableResources, labelToResource(*l, ""))
		}
		return ass, nil
	}
}
|
|
|
|
|
// DryRun provides a dry run of the pkg application. The pkg will be marked verified
// for later calls to Apply. This func will be run on an Apply if it has not been run
// already.
func (s *Service) DryRun(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg) (Summary, Diff, error) {
	// so here's the deal, when we have issues with the parsing validation, we
	// continue to do the diff anyhow. any resource that does not have a name
	// will be skipped, and won't bleed into the dry run here. We can now return
	// a error (parseErr) and valid diff/summary.
	var parseErr error
	if !pkg.isParsed {
		err := pkg.Validate()
		if err != nil && !IsParseErr(err) {
			return Summary{}, Diff{}, internalErr(err)
		}
		parseErr = err
	}

	// Secrets are checked first; a failure here aborts the whole dry run.
	if err := s.dryRunSecrets(ctx, orgID, pkg); err != nil {
		return Summary{}, Diff{}, err
	}

	// These per-type dry runs do not return errors themselves.
	diff := Diff{
		Buckets:    s.dryRunBuckets(ctx, orgID, pkg),
		Checks:     s.dryRunChecks(ctx, orgID, pkg),
		Dashboards: s.dryRunDashboards(pkg),
		Labels:     s.dryRunLabels(ctx, orgID, pkg),
		Tasks:      s.dryRunTasks(pkg),
		Telegrafs:  s.dryRunTelegraf(pkg),
		Variables:  s.dryRunVariables(ctx, orgID, pkg),
	}

	diffEndpoints, err := s.dryRunNotificationEndpoints(ctx, orgID, pkg)
	if err != nil {
		return Summary{}, Diff{}, err
	}
	diff.NotificationEndpoints = diffEndpoints

	diffRules, err := s.dryRunNotificationRules(ctx, orgID, pkg)
	if err != nil {
		return Summary{}, Diff{}, err
	}
	diff.NotificationRules = diffRules

	diffLabelMappings, err := s.dryRunLabelMappings(ctx, pkg)
	if err != nil {
		return Summary{}, Diff{}, err
	}
	diff.LabelMappings = diffLabelMappings

	// verify the pkg is verified by a dry run. when calling Service.Apply this
	// is required to have been run. if it is not true, then apply runs
	// the Dry run.
	pkg.isVerified = true
	return pkg.Summary(), diff, parseErr
}
|
|
|
|
|
2019-12-21 23:57:41 +00:00
|
|
|
func (s *Service) dryRunBuckets(ctx context.Context, orgID influxdb.ID, pkg *Pkg) []DiffBucket {
|
2019-10-28 22:23:40 +00:00
|
|
|
mExistingBkts := make(map[string]DiffBucket)
|
|
|
|
bkts := pkg.buckets()
|
|
|
|
for i := range bkts {
|
|
|
|
b := bkts[i]
|
2019-12-03 02:05:10 +00:00
|
|
|
existingBkt, err := s.bucketSVC.FindBucketByName(ctx, orgID, b.Name())
|
2019-10-28 22:23:40 +00:00
|
|
|
switch err {
|
|
|
|
// TODO: case for err not found here and another case handle where
|
|
|
|
// err isn't a not found (some other error)
|
|
|
|
case nil:
|
|
|
|
b.existing = existingBkt
|
2019-12-03 02:05:10 +00:00
|
|
|
mExistingBkts[b.Name()] = newDiffBucket(b, existingBkt)
|
2019-10-28 22:23:40 +00:00
|
|
|
default:
|
2019-12-03 02:05:10 +00:00
|
|
|
mExistingBkts[b.Name()] = newDiffBucket(b, nil)
|
2019-10-23 17:09:04 +00:00
|
|
|
}
|
2019-10-28 22:23:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var diffs []DiffBucket
|
|
|
|
for _, diff := range mExistingBkts {
|
|
|
|
diffs = append(diffs, diff)
|
|
|
|
}
|
|
|
|
sort.Slice(diffs, func(i, j int) bool {
|
|
|
|
return diffs[i].Name < diffs[j].Name
|
|
|
|
})
|
|
|
|
|
2019-12-21 23:57:41 +00:00
|
|
|
return diffs
|
2019-10-28 22:23:40 +00:00
|
|
|
}
|
|
|
|
|
2019-12-21 23:57:41 +00:00
|
|
|
func (s *Service) dryRunChecks(ctx context.Context, orgID influxdb.ID, pkg *Pkg) []DiffCheck {
|
2019-12-18 07:05:28 +00:00
|
|
|
mExistingChecks := make(map[string]DiffCheck)
|
|
|
|
checks := pkg.checks()
|
|
|
|
for i := range checks {
|
|
|
|
c := checks[i]
|
|
|
|
name := c.Name()
|
|
|
|
existingCheck, err := s.checkSVC.FindCheck(ctx, influxdb.CheckFilter{
|
|
|
|
Name: &name,
|
|
|
|
OrgID: &orgID,
|
|
|
|
})
|
|
|
|
switch err {
|
|
|
|
case nil:
|
|
|
|
c.existing = existingCheck
|
|
|
|
mExistingChecks[c.Name()] = newDiffCheck(c, existingCheck)
|
|
|
|
default:
|
|
|
|
mExistingChecks[c.Name()] = newDiffCheck(c, nil)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
var diffs []DiffCheck
|
|
|
|
for _, diff := range mExistingChecks {
|
|
|
|
diffs = append(diffs, diff)
|
|
|
|
}
|
|
|
|
sort.Slice(diffs, func(i, j int) bool {
|
|
|
|
return diffs[i].Name < diffs[j].Name
|
|
|
|
})
|
|
|
|
|
2019-12-21 23:57:41 +00:00
|
|
|
return diffs
|
2019-12-18 07:05:28 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 22:51:11 +00:00
|
|
|
func (s *Service) dryRunDashboards(pkg *Pkg) []DiffDashboard {
|
2019-10-30 21:13:42 +00:00
|
|
|
var diffs []DiffDashboard
|
2019-11-01 18:11:42 +00:00
|
|
|
for _, d := range pkg.dashboards() {
|
|
|
|
diffs = append(diffs, newDiffDashboard(d))
|
2019-10-30 21:13:42 +00:00
|
|
|
}
|
2019-12-04 01:00:15 +00:00
|
|
|
return diffs
|
2019-10-30 21:13:42 +00:00
|
|
|
}
|
|
|
|
|
2019-12-21 23:57:41 +00:00
|
|
|
func (s *Service) dryRunLabels(ctx context.Context, orgID influxdb.ID, pkg *Pkg) []DiffLabel {
|
2019-10-28 22:23:40 +00:00
|
|
|
mExistingLabels := make(map[string]DiffLabel)
|
|
|
|
labels := pkg.labels()
|
|
|
|
for i := range labels {
|
2019-11-07 00:45:00 +00:00
|
|
|
pkgLabel := labels[i]
|
2019-10-28 22:23:40 +00:00
|
|
|
existingLabels, err := s.labelSVC.FindLabels(ctx, influxdb.LabelFilter{
|
2019-12-03 02:05:10 +00:00
|
|
|
Name: pkgLabel.Name(),
|
2019-10-28 22:23:40 +00:00
|
|
|
OrgID: &orgID,
|
|
|
|
}, influxdb.FindOptions{Limit: 1})
|
|
|
|
switch {
|
|
|
|
// TODO: case for err not found here and another case handle where
|
|
|
|
// err isn't a not found (some other error)
|
|
|
|
case err == nil && len(existingLabels) > 0:
|
|
|
|
existingLabel := existingLabels[0]
|
2019-11-07 00:45:00 +00:00
|
|
|
pkgLabel.existing = existingLabel
|
2019-12-03 02:05:10 +00:00
|
|
|
mExistingLabels[pkgLabel.Name()] = newDiffLabel(pkgLabel, existingLabel)
|
2019-10-28 22:23:40 +00:00
|
|
|
default:
|
2019-12-03 02:05:10 +00:00
|
|
|
mExistingLabels[pkgLabel.Name()] = newDiffLabel(pkgLabel, nil)
|
2019-10-23 17:09:04 +00:00
|
|
|
}
|
2019-10-28 22:23:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
diffs := make([]DiffLabel, 0, len(mExistingLabels))
|
|
|
|
for _, diff := range mExistingLabels {
|
|
|
|
diffs = append(diffs, diff)
|
|
|
|
}
|
|
|
|
sort.Slice(diffs, func(i, j int) bool {
|
|
|
|
return diffs[i].Name < diffs[j].Name
|
|
|
|
})
|
|
|
|
|
2019-12-21 23:57:41 +00:00
|
|
|
return diffs
|
2019-10-28 22:23:40 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 21:35:23 +00:00
|
|
|
func (s *Service) dryRunNotificationEndpoints(ctx context.Context, orgID influxdb.ID, pkg *Pkg) ([]DiffNotificationEndpoint, error) {
|
|
|
|
existingEndpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{
|
|
|
|
OrgID: &orgID,
|
|
|
|
}) // grab em all
|
|
|
|
if err != nil {
|
2019-12-21 23:57:41 +00:00
|
|
|
return nil, internalErr(err)
|
2019-12-10 21:35:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
mExisting := make(map[string]influxdb.NotificationEndpoint)
|
|
|
|
for i := range existingEndpoints {
|
|
|
|
e := existingEndpoints[i]
|
|
|
|
mExisting[e.GetName()] = e
|
|
|
|
}
|
|
|
|
|
|
|
|
mExistingToNew := make(map[string]DiffNotificationEndpoint)
|
|
|
|
endpoints := pkg.notificationEndpoints()
|
|
|
|
for i := range endpoints {
|
|
|
|
newEndpoint := endpoints[i]
|
|
|
|
|
|
|
|
var existing influxdb.NotificationEndpoint
|
|
|
|
if iExisting, ok := mExisting[newEndpoint.Name()]; ok {
|
|
|
|
newEndpoint.existing = iExisting
|
|
|
|
existing = iExisting
|
|
|
|
}
|
|
|
|
mExistingToNew[newEndpoint.Name()] = newDiffNotificationEndpoint(newEndpoint, existing)
|
|
|
|
}
|
|
|
|
|
|
|
|
var diffs []DiffNotificationEndpoint
|
|
|
|
for _, diff := range mExistingToNew {
|
|
|
|
diffs = append(diffs, diff)
|
|
|
|
}
|
|
|
|
sort.Slice(diffs, func(i, j int) bool {
|
|
|
|
return diffs[i].Name < diffs[j].Name
|
|
|
|
})
|
|
|
|
|
|
|
|
return diffs, nil
|
|
|
|
}
|
|
|
|
|
2019-12-19 22:02:34 +00:00
|
|
|
func (s *Service) dryRunNotificationRules(ctx context.Context, orgID influxdb.ID, pkg *Pkg) ([]DiffNotificationRule, error) {
|
|
|
|
iEndpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{
|
|
|
|
OrgID: &orgID,
|
|
|
|
})
|
|
|
|
if err != nil {
|
2019-12-21 23:57:41 +00:00
|
|
|
return nil, internalErr(err)
|
2019-12-19 22:02:34 +00:00
|
|
|
}
|
|
|
|
mExisting := make(map[string]influxdb.NotificationEndpoint)
|
|
|
|
for _, e := range iEndpoints {
|
|
|
|
mExisting[e.GetName()] = e
|
|
|
|
}
|
|
|
|
|
|
|
|
var diffs []DiffNotificationRule
|
|
|
|
for _, r := range pkg.notificationRules() {
|
|
|
|
e, ok := mExisting[r.endpointName]
|
|
|
|
if !ok {
|
|
|
|
pkgerEndpoint, ok := pkg.mNotificationEndpoints[r.endpointName]
|
|
|
|
if !ok {
|
2019-12-21 23:57:41 +00:00
|
|
|
err := fmt.Errorf("failed to find endpoint by name: %q", r.endpointName)
|
|
|
|
return nil, &influxdb.Error{Code: influxdb.EUnprocessableEntity, Err: err}
|
2019-12-19 22:02:34 +00:00
|
|
|
}
|
|
|
|
e = pkgerEndpoint.summarize().NotificationEndpoint
|
|
|
|
}
|
|
|
|
diffs = append(diffs, newDiffNotificationRule(r, e))
|
|
|
|
|
|
|
|
}
|
|
|
|
return diffs, nil
|
|
|
|
}
|
|
|
|
|
2019-12-16 17:39:55 +00:00
|
|
|
func (s *Service) dryRunSecrets(ctx context.Context, orgID influxdb.ID, pkg *Pkg) error {
|
2019-12-27 19:22:05 +00:00
|
|
|
pkgSecrets := pkg.mSecrets
|
2019-12-21 23:57:41 +00:00
|
|
|
if len(pkgSecrets) == 0 {
|
2019-12-16 17:39:55 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
existingSecrets, err := s.secretSVC.GetSecretKeys(ctx, orgID)
|
|
|
|
if err != nil {
|
2019-12-21 23:57:41 +00:00
|
|
|
return &influxdb.Error{Code: influxdb.EInternal, Err: err}
|
2019-12-16 17:39:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, secret := range existingSecrets {
|
2019-12-27 19:22:05 +00:00
|
|
|
pkgSecrets[secret] = true // marked true since it exists in the platform
|
2019-12-16 17:39:55 +00:00
|
|
|
}
|
|
|
|
|
2019-12-27 19:22:05 +00:00
|
|
|
return nil
|
2019-12-16 17:39:55 +00:00
|
|
|
}
|
|
|
|
|
2019-12-23 18:55:55 +00:00
|
|
|
func (s *Service) dryRunTasks(pkg *Pkg) []DiffTask {
|
|
|
|
var diffs []DiffTask
|
|
|
|
for _, t := range pkg.tasks() {
|
|
|
|
diffs = append(diffs, newDiffTask(t))
|
|
|
|
}
|
|
|
|
return diffs
|
|
|
|
}
|
|
|
|
|
2019-12-10 22:51:11 +00:00
|
|
|
func (s *Service) dryRunTelegraf(pkg *Pkg) []DiffTelegraf {
|
2019-12-04 01:00:15 +00:00
|
|
|
var diffs []DiffTelegraf
|
|
|
|
for _, t := range pkg.telegrafs() {
|
|
|
|
diffs = append(diffs, newDiffTelegraf(t))
|
|
|
|
}
|
|
|
|
return diffs
|
|
|
|
}
|
|
|
|
|
2019-12-21 23:57:41 +00:00
|
|
|
func (s *Service) dryRunVariables(ctx context.Context, orgID influxdb.ID, pkg *Pkg) []DiffVariable {
|
2019-11-07 00:45:00 +00:00
|
|
|
mExistingLabels := make(map[string]DiffVariable)
|
|
|
|
variables := pkg.variables()
|
|
|
|
|
|
|
|
VarLoop:
|
|
|
|
for i := range variables {
|
|
|
|
pkgVar := variables[i]
|
|
|
|
existingLabels, err := s.varSVC.FindVariables(ctx, influxdb.VariableFilter{
|
|
|
|
OrganizationID: &orgID,
|
|
|
|
// TODO: would be ideal to extend find variables to allow for a name matcher
|
|
|
|
// since names are unique for vars within an org, meanwhile, make large limit
|
|
|
|
// returned vars, should be more than enough for the time being.
|
2019-12-10 03:11:53 +00:00
|
|
|
}, influxdb.FindOptions{Limit: 100})
|
2019-11-07 00:45:00 +00:00
|
|
|
switch {
|
|
|
|
case err == nil && len(existingLabels) > 0:
|
|
|
|
for i := range existingLabels {
|
|
|
|
existingVar := existingLabels[i]
|
2019-12-03 02:05:10 +00:00
|
|
|
if existingVar.Name != pkgVar.Name() {
|
2019-11-07 00:45:00 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
pkgVar.existing = existingVar
|
2019-12-03 02:05:10 +00:00
|
|
|
mExistingLabels[pkgVar.Name()] = newDiffVariable(pkgVar, existingVar)
|
2019-11-07 00:45:00 +00:00
|
|
|
continue VarLoop
|
|
|
|
}
|
|
|
|
// fallthrough here for when the variable is not found, it'll fall to the
|
|
|
|
// default case and add it as new.
|
|
|
|
fallthrough
|
|
|
|
default:
|
2019-12-03 02:05:10 +00:00
|
|
|
mExistingLabels[pkgVar.Name()] = newDiffVariable(pkgVar, nil)
|
2019-11-07 00:45:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
diffs := make([]DiffVariable, 0, len(mExistingLabels))
|
|
|
|
for _, diff := range mExistingLabels {
|
|
|
|
diffs = append(diffs, diff)
|
|
|
|
}
|
|
|
|
sort.Slice(diffs, func(i, j int) bool {
|
|
|
|
return diffs[i].Name < diffs[j].Name
|
|
|
|
})
|
|
|
|
|
2019-12-21 23:57:41 +00:00
|
|
|
return diffs
|
2019-11-07 00:45:00 +00:00
|
|
|
}
|
|
|
|
|
2019-10-30 21:13:42 +00:00
|
|
|
type (
	// labelMappingDiffFn is invoked once per (label, resource) association
	// discovered during a dry run; isNew reports whether the mapping does not
	// yet exist in the platform.
	labelMappingDiffFn func(labelID influxdb.ID, labelName string, isNew bool)

	// labelMappers provides indexed access to a homogeneous collection of
	// label-associable resources (buckets, checks, dashboards, ...).
	labelMappers interface {
		Association(i int) labelAssociater
		Len() int
	}

	// labelAssociater is the minimal view of a pkg resource needed to diff
	// and apply its label mappings.
	labelAssociater interface {
		ID() influxdb.ID
		Name() string
		Labels() []*label
		ResourceType() influxdb.ResourceType
		// Exists reports whether the resource already exists in the platform.
		Exists() bool
	}
)
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
func (s *Service) dryRunLabelMappings(ctx context.Context, pkg *Pkg) ([]DiffLabelMapping, error) {
|
2019-12-10 21:35:23 +00:00
|
|
|
mappers := []labelMappers{
|
|
|
|
mapperBuckets(pkg.buckets()),
|
2019-12-18 20:23:06 +00:00
|
|
|
mapperChecks(pkg.checks()),
|
2019-12-10 21:35:23 +00:00
|
|
|
mapperDashboards(pkg.mDashboards),
|
|
|
|
mapperNotificationEndpoints(pkg.notificationEndpoints()),
|
2019-12-20 17:10:10 +00:00
|
|
|
mapperNotificationRules(pkg.mNotificationRules),
|
2019-12-23 19:51:00 +00:00
|
|
|
mapperTasks(pkg.mTasks),
|
2019-12-10 21:35:23 +00:00
|
|
|
mapperTelegrafs(pkg.mTelegrafs),
|
|
|
|
mapperVariables(pkg.variables()),
|
2019-12-04 01:00:15 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 21:35:23 +00:00
|
|
|
var diffs []DiffLabelMapping
|
|
|
|
for _, mapper := range mappers {
|
|
|
|
for i := 0; i < mapper.Len(); i++ {
|
|
|
|
la := mapper.Association(i)
|
|
|
|
err := s.dryRunResourceLabelMapping(ctx, la, func(labelID influxdb.ID, labelName string, isNew bool) {
|
|
|
|
pkg.mLabels[labelName].setMapping(la, !isNew)
|
|
|
|
diffs = append(diffs, DiffLabelMapping{
|
|
|
|
IsNew: isNew,
|
|
|
|
ResType: la.ResourceType(),
|
|
|
|
ResID: SafeID(la.ID()),
|
|
|
|
ResName: la.Name(),
|
|
|
|
LabelID: SafeID(labelID),
|
|
|
|
LabelName: labelName,
|
|
|
|
})
|
2019-11-07 00:45:00 +00:00
|
|
|
})
|
2019-12-10 21:35:23 +00:00
|
|
|
if err != nil {
|
2019-12-21 23:57:41 +00:00
|
|
|
return nil, internalErr(err)
|
2019-12-10 21:35:23 +00:00
|
|
|
}
|
2019-11-07 00:45:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
// sort by res type ASC, then res name ASC, then label name ASC
|
|
|
|
sort.Slice(diffs, func(i, j int) bool {
|
|
|
|
n, m := diffs[i], diffs[j]
|
|
|
|
if n.ResType < m.ResType {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if n.ResType > m.ResType {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if n.ResName < m.ResName {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if n.ResName > m.ResName {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return n.LabelName < m.LabelName
|
|
|
|
})
|
|
|
|
|
|
|
|
return diffs, nil
|
|
|
|
}
|
|
|
|
|
2019-12-10 21:35:23 +00:00
|
|
|
// dryRunResourceLabelMapping invokes mappingFn for each label mapping that
// applying the pkg would create for resource la. If la does not yet exist in
// the platform, every pkg label on it is reported as new. Otherwise the
// platform's current mappings are fetched: each is reported as existing, and
// any pkg label not among them is reported as new.
func (s *Service) dryRunResourceLabelMapping(ctx context.Context, la labelAssociater, mappingFn labelMappingDiffFn) error {
	if !la.Exists() {
		// resource is brand new: every associated label mapping is new too
		for _, l := range la.Labels() {
			mappingFn(l.ID(), l.Name(), true)
		}
		return nil
	}

	// loop through and hit api for all labels associated with a bkt
	// lookup labels in pkg, add it to the label mapping, if exists in
	// the results from API, mark it exists
	existingLabels, err := s.labelSVC.FindResourceLabels(ctx, influxdb.LabelMappingFilter{
		ResourceID:   la.ID(),
		ResourceType: la.ResourceType(),
	})
	if err != nil {
		// TODO: inspect err, if its a not found error, do nothing, if any other error
		//  handle it better
		return err
	}

	pkgLabels := labelSlcToMap(la.Labels())
	for _, l := range existingLabels {
		// should ignore any labels that are not specified in pkg
		// NOTE(review): mappingFn is currently invoked for every existing
		// mapping, including labels absent from the pkg — callers must
		// tolerate label names that are not pkg labels; verify against
		// dryRunLabelMappings.
		mappingFn(l.ID, l.Name, false)
		delete(pkgLabels, l.Name)
	}

	// now we add labels that were not apart of the existing labels
	for _, l := range pkgLabels {
		mappingFn(l.ID(), l.Name(), true)
	}
	return nil
}
|
|
|
|
|
2019-12-27 19:22:05 +00:00
|
|
|
// ApplyOpt is an option for applying a package.
type ApplyOpt struct {
	// MissingSecrets maps a secret key referenced by the pkg to the value
	// the caller supplies for it at apply time.
	MissingSecrets map[string]string
}
|
|
|
|
|
|
|
|
// ApplyOptFn updates the ApplyOpt per the functional option.
// A non-nil error aborts Apply before any resources are touched.
type ApplyOptFn func(opt *ApplyOpt) error
|
|
|
|
|
|
|
|
// ApplyWithSecrets provides secrets to the platform that the pkg will need.
|
|
|
|
func ApplyWithSecrets(secrets map[string]string) ApplyOptFn {
|
|
|
|
return func(o *ApplyOpt) error {
|
|
|
|
o.MissingSecrets = secrets
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
// Apply will apply all the resources identified in the provided pkg. The entire pkg will be applied
// in its entirety. If a failure happens midway then the entire pkg will be rolled back to the state
// from before the pkg were applied. The rollback is driven by the deferred
// coordinator.rollback call, which inspects the named error result e.
func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (sum Summary, e error) {
	// a pkg that was handed to us unparsed must validate before anything runs
	if !pkg.isParsed {
		if err := pkg.Validate(); err != nil {
			return Summary{}, failedValidationErr(err)
		}
	}

	// fold all functional options into a single ApplyOpt
	var opt ApplyOpt
	for _, o := range opts {
		if err := o(&opt); err != nil {
			return Summary{}, internalErr(err)
		}
	}

	// an unverified pkg gets a full dry run first so existing resources are
	// discovered and cached on the pkg before application
	if !pkg.isVerified {
		if _, _, err := s.DryRun(ctx, orgID, userID, pkg); err != nil {
			return Summary{}, err
		}
	}

	coordinator := &rollbackCoordinator{sem: make(chan struct{}, s.applyReqLimit)}
	// rolls everything back if e is non-nil when Apply returns
	defer coordinator.rollback(s.log, &e, orgID)

	// each grouping here runs for its entirety, then returns an error that
	// is indicative of running all appliers provided. For instance, the labels
	// may have 1 variable fail and one of the buckets fails. The errors aggregate so
	// the caller will be informed of both the failed label variable the failed bucket.
	// the groupings here allow for steps to occur before exiting. The first step is
	// adding the dependencies, resources that are associated by other resources. Then the
	// primary resources. Here we get all the errors associated with them.
	// If those are all good, then we run the secondary(dependent) resources which
	// rely on the primary resources having been created.
	appliers := [][]applier{
		{
			// adds secrets that are referenced in the pkg, this allows user to
			// provide data that does not rest in the pkg.
			s.applySecrets(opt.MissingSecrets),
		},
		{
			// deps for primary resources
			s.applyLabels(pkg.labels()),
		},
		{
			// primary resources, can have relationships to labels
			s.applyVariables(pkg.variables()),
			s.applyBuckets(pkg.buckets()),
			s.applyChecks(pkg.checks()),
			s.applyDashboards(pkg.dashboards()),
			s.applyNotificationEndpoints(pkg.notificationEndpoints()),
			s.applyTasks(pkg.tasks()),
			s.applyTelegrafs(pkg.telegrafs()),
		},
	}

	for _, group := range appliers {
		if err := coordinator.runTilEnd(ctx, orgID, userID, group...); err != nil {
			return Summary{}, internalErr(err)
		}
	}

	// this has to be run after the above primary resources, because it relies on
	// notification endpoints already being applied.
	app, err := s.applyNotificationRulesGenerator(ctx, orgID, pkg.notificationRules())
	if err != nil {
		return Summary{}, err
	}
	if err := coordinator.runTilEnd(ctx, orgID, userID, app); err != nil {
		return Summary{}, err
	}

	// secondary resources
	// this last grouping relies on the above 2 steps having completely successfully
	secondary := []applier{s.applyLabelMappings(pkg.labelMappings())}
	if err := coordinator.runTilEnd(ctx, orgID, userID, secondary...); err != nil {
		return Summary{}, internalErr(err)
	}

	// record the caller-supplied secret values on the pkg so the summary
	// reflects them
	pkg.applySecrets(opt.MissingSecrets)

	return pkg.Summary(), nil
}
|
|
|
|
|
2019-10-26 02:11:47 +00:00
|
|
|
// applyBuckets returns an applier that creates/updates the pkg's buckets and
// can roll them back. The create fn may run concurrently for different
// indexes, so all reads/writes of the shared slices go through the mutex.
func (s *Service) applyBuckets(buckets []*bucket) applier {
	const resource = "bucket"

	mutex := new(doMutex)
	// buckets successfully applied, in application order, for rollback
	rollbackBuckets := make([]*bucket, 0, len(buckets))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		// copy the bucket under the mutex so the svc call below happens
		// outside the critical section
		var b bucket
		mutex.Do(func() {
			buckets[i].OrgID = orgID
			b = *buckets[i]
		})
		if !b.shouldApply() {
			return nil
		}

		influxBucket, err := s.applyBucket(ctx, b)
		if err != nil {
			return &applyErrBody{
				name: b.Name(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			buckets[i].id = influxBucket.ID
			rollbackBuckets = append(rollbackBuckets, buckets[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(buckets),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn:       func(_ influxdb.ID) error { return s.rollbackBuckets(rollbackBuckets) },
		},
	}
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
func (s *Service) rollbackBuckets(buckets []*bucket) error {
|
2019-10-23 17:09:04 +00:00
|
|
|
var errs []string
|
|
|
|
for _, b := range buckets {
|
2019-10-28 22:23:40 +00:00
|
|
|
if b.existing == nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
err := s.bucketSVC.DeleteBucket(context.Background(), b.ID())
|
2019-10-28 22:23:40 +00:00
|
|
|
if err != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
errs = append(errs, b.ID().String())
|
2019-10-28 22:23:40 +00:00
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-11-22 18:41:08 +00:00
|
|
|
rp := b.RetentionRules.RP()
|
2019-10-30 17:55:13 +00:00
|
|
|
_, err := s.bucketSVC.UpdateBucket(context.Background(), b.ID(), influxdb.BucketUpdate{
|
2019-10-28 22:23:40 +00:00
|
|
|
Description: &b.Description,
|
2019-11-22 18:41:08 +00:00
|
|
|
RetentionPeriod: &rp,
|
2019-10-28 22:23:40 +00:00
|
|
|
})
|
2019-10-23 17:09:04 +00:00
|
|
|
if err != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
errs = append(errs, b.ID().String())
|
2019-10-23 17:09:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
// TODO: fixup error
|
|
|
|
return fmt.Errorf(`bucket_ids=[%s] err="unable to delete bucket"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2019-10-24 23:59:01 +00:00
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
func (s *Service) applyBucket(ctx context.Context, b bucket) (influxdb.Bucket, error) {
|
2019-11-22 18:41:08 +00:00
|
|
|
rp := b.RetentionRules.RP()
|
2019-10-28 22:23:40 +00:00
|
|
|
if b.existing != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
influxBucket, err := s.bucketSVC.UpdateBucket(ctx, b.ID(), influxdb.BucketUpdate{
|
2019-10-28 22:23:40 +00:00
|
|
|
Description: &b.Description,
|
2019-11-22 18:41:08 +00:00
|
|
|
RetentionPeriod: &rp,
|
2019-10-28 22:23:40 +00:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Bucket{}, err
|
|
|
|
}
|
|
|
|
return *influxBucket, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
influxBucket := influxdb.Bucket{
|
|
|
|
OrgID: b.OrgID,
|
|
|
|
Description: b.Description,
|
2019-12-03 02:05:10 +00:00
|
|
|
Name: b.Name(),
|
2019-11-22 18:41:08 +00:00
|
|
|
RetentionPeriod: rp,
|
2019-10-28 22:23:40 +00:00
|
|
|
}
|
|
|
|
err := s.bucketSVC.CreateBucket(ctx, &influxBucket)
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Bucket{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return influxBucket, nil
|
|
|
|
}
|
|
|
|
|
2019-12-18 20:23:06 +00:00
|
|
|
func (s *Service) applyChecks(checks []*check) applier {
|
|
|
|
const resource = "check"
|
|
|
|
|
|
|
|
mutex := new(doMutex)
|
|
|
|
rollbackChecks := make([]*check, 0, len(checks))
|
|
|
|
|
|
|
|
createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
|
|
|
|
var c check
|
|
|
|
mutex.Do(func() {
|
|
|
|
checks[i].orgID = orgID
|
|
|
|
c = *checks[i]
|
|
|
|
})
|
|
|
|
|
|
|
|
influxBucket, err := s.applyCheck(ctx, c, userID)
|
|
|
|
if err != nil {
|
|
|
|
return &applyErrBody{
|
|
|
|
name: c.Name(),
|
|
|
|
msg: err.Error(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex.Do(func() {
|
|
|
|
checks[i].id = influxBucket.GetID()
|
|
|
|
rollbackChecks = append(rollbackChecks, checks[i])
|
|
|
|
})
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return applier{
|
|
|
|
creater: creater{
|
|
|
|
entries: len(checks),
|
|
|
|
fn: createFn,
|
|
|
|
},
|
|
|
|
rollbacker: rollbacker{
|
|
|
|
resource: resource,
|
2019-12-27 19:22:05 +00:00
|
|
|
fn: func(_ influxdb.ID) error { return s.rollbackChecks(rollbackChecks) },
|
2019-12-18 20:23:06 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Service) rollbackChecks(checks []*check) error {
|
|
|
|
var errs []string
|
|
|
|
for _, c := range checks {
|
|
|
|
if c.existing == nil {
|
|
|
|
err := s.checkSVC.DeleteCheck(context.Background(), c.ID())
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, c.ID().String())
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err := s.checkSVC.UpdateCheck(context.Background(), c.ID(), influxdb.CheckCreate{
|
|
|
|
Check: c.summarize().Check,
|
|
|
|
Status: influxdb.Status(c.status),
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, c.ID().String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
// TODO: fixup error
|
|
|
|
return fmt.Errorf(`check_ids=[%s] err="unable to delete"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Service) applyCheck(ctx context.Context, c check, userID influxdb.ID) (influxdb.Check, error) {
|
|
|
|
if c.existing != nil {
|
|
|
|
influxCheck, err := s.checkSVC.UpdateCheck(ctx, c.ID(), influxdb.CheckCreate{
|
|
|
|
Check: c.summarize().Check,
|
|
|
|
Status: c.Status(),
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return influxCheck, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
checkStub := influxdb.CheckCreate{
|
|
|
|
Check: c.summarize().Check,
|
|
|
|
Status: c.Status(),
|
|
|
|
}
|
|
|
|
err := s.checkSVC.CreateCheck(ctx, checkStub, userID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return checkStub.Check, nil
|
|
|
|
}
|
|
|
|
|
2019-10-30 21:13:42 +00:00
|
|
|
func (s *Service) applyDashboards(dashboards []*dashboard) applier {
|
|
|
|
const resource = "dashboard"
|
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
mutex := new(doMutex)
|
2019-10-30 21:13:42 +00:00
|
|
|
rollbackDashboards := make([]*dashboard, 0, len(dashboards))
|
2019-12-07 00:23:09 +00:00
|
|
|
|
2019-12-12 19:09:32 +00:00
|
|
|
createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
|
2019-12-07 00:23:09 +00:00
|
|
|
var d dashboard
|
|
|
|
mutex.Do(func() {
|
|
|
|
dashboards[i].OrgID = orgID
|
|
|
|
d = *dashboards[i]
|
|
|
|
})
|
|
|
|
|
|
|
|
influxBucket, err := s.applyDashboard(ctx, d)
|
|
|
|
if err != nil {
|
|
|
|
return &applyErrBody{
|
|
|
|
name: d.Name(),
|
|
|
|
msg: err.Error(),
|
2019-10-30 21:13:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
mutex.Do(func() {
|
|
|
|
dashboards[i].id = influxBucket.ID
|
|
|
|
rollbackDashboards = append(rollbackDashboards, dashboards[i])
|
|
|
|
})
|
|
|
|
return nil
|
2019-10-30 21:13:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return applier{
|
2019-12-07 00:23:09 +00:00
|
|
|
creater: creater{
|
|
|
|
entries: len(dashboards),
|
|
|
|
fn: createFn,
|
|
|
|
},
|
2019-10-30 21:13:42 +00:00
|
|
|
rollbacker: rollbacker{
|
|
|
|
resource: resource,
|
2019-12-27 19:22:05 +00:00
|
|
|
fn: func(_ influxdb.ID) error {
|
2019-12-06 00:53:00 +00:00
|
|
|
return s.deleteByIDs("dashboard", len(rollbackDashboards), s.dashSVC.DeleteDashboard, func(i int) influxdb.ID {
|
|
|
|
return rollbackDashboards[i].ID()
|
|
|
|
})
|
|
|
|
},
|
2019-10-30 21:13:42 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
func (s *Service) applyDashboard(ctx context.Context, d dashboard) (influxdb.Dashboard, error) {
|
2019-12-06 17:13:06 +00:00
|
|
|
cells := convertChartsToCells(d.Charts)
|
2019-10-30 21:13:42 +00:00
|
|
|
influxDashboard := influxdb.Dashboard{
|
|
|
|
OrganizationID: d.OrgID,
|
|
|
|
Description: d.Description,
|
2019-12-03 02:05:10 +00:00
|
|
|
Name: d.Name(),
|
2019-11-01 18:11:42 +00:00
|
|
|
Cells: cells,
|
2019-10-30 21:13:42 +00:00
|
|
|
}
|
|
|
|
err := s.dashSVC.CreateDashboard(ctx, &influxDashboard)
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Dashboard{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return influxDashboard, nil
|
|
|
|
}
|
|
|
|
|
2019-12-06 17:13:06 +00:00
|
|
|
func convertChartsToCells(ch []chart) []*influxdb.Cell {
|
2019-11-01 18:11:42 +00:00
|
|
|
icells := make([]*influxdb.Cell, 0, len(ch))
|
2019-12-06 17:13:06 +00:00
|
|
|
for _, c := range ch {
|
2019-11-01 18:11:42 +00:00
|
|
|
icell := &influxdb.Cell{
|
|
|
|
CellProperty: influxdb.CellProperty{
|
2019-11-08 19:33:41 +00:00
|
|
|
X: int32(c.XPos),
|
|
|
|
Y: int32(c.YPos),
|
2019-11-01 18:11:42 +00:00
|
|
|
H: int32(c.Height),
|
|
|
|
W: int32(c.Width),
|
|
|
|
},
|
2019-12-06 17:13:06 +00:00
|
|
|
View: &influxdb.View{
|
|
|
|
ViewContents: influxdb.ViewContents{Name: c.Name},
|
|
|
|
Properties: c.properties(),
|
|
|
|
},
|
2019-11-01 18:11:42 +00:00
|
|
|
}
|
|
|
|
icells = append(icells, icell)
|
|
|
|
}
|
2019-12-06 17:13:06 +00:00
|
|
|
return icells
|
2019-11-01 18:11:42 +00:00
|
|
|
}
|
|
|
|
|
2019-10-24 23:59:01 +00:00
|
|
|
// applyLabels returns an applier that creates/updates the pkg's labels and
// can roll them back. The create fn may run concurrently for different
// indexes, so all reads/writes of the shared slices go through the mutex.
func (s *Service) applyLabels(labels []*label) applier {
	const resource = "label"

	mutex := new(doMutex)
	// labels successfully applied, in application order, for rollback
	rollBackLabels := make([]*label, 0, len(labels))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		// copy the label under the mutex so the svc call below happens
		// outside the critical section
		var l label
		mutex.Do(func() {
			labels[i].OrgID = orgID
			l = *labels[i]
		})
		if !l.shouldApply() {
			return nil
		}

		influxLabel, err := s.applyLabel(ctx, l)
		if err != nil {
			return &applyErrBody{
				name: l.Name(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			labels[i].id = influxLabel.ID
			rollBackLabels = append(rollBackLabels, labels[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(labels),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn:       func(_ influxdb.ID) error { return s.rollbackLabels(rollBackLabels) },
		},
	}
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
func (s *Service) rollbackLabels(labels []*label) error {
|
2019-10-24 23:59:01 +00:00
|
|
|
var errs []string
|
|
|
|
for _, l := range labels {
|
2019-11-07 00:45:00 +00:00
|
|
|
if l.existing == nil {
|
|
|
|
err := s.labelSVC.DeleteLabel(context.Background(), l.ID())
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, l.ID().String())
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err := s.labelSVC.UpdateLabel(context.Background(), l.ID(), influxdb.LabelUpdate{
|
|
|
|
Properties: l.existing.Properties,
|
|
|
|
})
|
2019-10-24 23:59:01 +00:00
|
|
|
if err != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
errs = append(errs, l.ID().String())
|
2019-10-24 23:59:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return fmt.Errorf(`label_ids=[%s] err="unable to delete label"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2019-10-26 02:11:47 +00:00
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
func (s *Service) applyLabel(ctx context.Context, l label) (influxdb.Label, error) {
|
2019-10-28 22:23:40 +00:00
|
|
|
if l.existing != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
updatedlabel, err := s.labelSVC.UpdateLabel(ctx, l.ID(), influxdb.LabelUpdate{
|
2019-10-28 22:23:40 +00:00
|
|
|
Properties: l.properties(),
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Label{}, err
|
|
|
|
}
|
|
|
|
return *updatedlabel, nil
|
|
|
|
}
|
|
|
|
|
2019-12-10 22:51:11 +00:00
|
|
|
influxLabel := l.toInfluxLabel()
|
2019-10-28 22:23:40 +00:00
|
|
|
err := s.labelSVC.CreateLabel(ctx, &influxLabel)
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Label{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return influxLabel, nil
|
|
|
|
}
|
|
|
|
|
2019-12-10 22:51:11 +00:00
|
|
|
// applyNotificationEndpoints returns an applier that creates/updates the
// pkg's notification endpoints and can roll them back. After a successful
// apply, the platform-generated secret keys are copied back onto the pkg
// endpoint by matching on key suffix. The create fn may run concurrently for
// different indexes, so shared state is accessed only under the mutex.
func (s *Service) applyNotificationEndpoints(endpoints []*notificationEndpoint) applier {
	const resource = "notification_endpoints"

	mutex := new(doMutex)
	// endpoints successfully applied, in application order, for rollback
	rollbackEndpoints := make([]*notificationEndpoint, 0, len(endpoints))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		// copy the endpoint under the mutex so the svc call below happens
		// outside the critical section
		var endpoint notificationEndpoint
		mutex.Do(func() {
			endpoints[i].OrgID = orgID
			endpoint = *endpoints[i]
		})

		influxEndpoint, err := s.applyNotificationEndpoint(ctx, endpoint, userID)
		if err != nil {
			return &applyErrBody{
				name: endpoint.Name(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			endpoints[i].id = influxEndpoint.GetID()
			// propagate the generated secret keys back to the pkg endpoint;
			// keys are matched by their well-known suffixes
			for _, secret := range influxEndpoint.SecretFields() {
				switch {
				case strings.HasSuffix(secret.Key, "-routing-key"):
					endpoints[i].routingKey.Secret = secret.Key
				case strings.HasSuffix(secret.Key, "-token"):
					endpoints[i].token.Secret = secret.Key
				case strings.HasSuffix(secret.Key, "-username"):
					endpoints[i].username.Secret = secret.Key
				case strings.HasSuffix(secret.Key, "-password"):
					endpoints[i].password.Secret = secret.Key
				}
			}
			rollbackEndpoints = append(rollbackEndpoints, endpoints[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(endpoints),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn: func(_ influxdb.ID) error {
				return s.rollbackNotificationEndpoints(rollbackEndpoints)
			},
		},
	}
}
|
|
|
|
|
2019-12-12 19:09:32 +00:00
|
|
|
func (s *Service) applyNotificationEndpoint(ctx context.Context, e notificationEndpoint, userID influxdb.ID) (influxdb.NotificationEndpoint, error) {
|
2019-12-10 22:51:11 +00:00
|
|
|
if e.existing != nil {
|
|
|
|
// stub out userID since we're always using hte http client which will fill it in for us with the token
|
|
|
|
// feels a bit broken that is required.
|
|
|
|
// TODO: look into this userID requirement
|
2019-12-16 17:39:55 +00:00
|
|
|
updatedEndpoint, err := s.endpointSVC.UpdateNotificationEndpoint(ctx, e.ID(), e.existing, userID)
|
2019-12-10 22:51:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return updatedEndpoint, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
actual := e.summarize().NotificationEndpoint
|
2019-12-12 19:09:32 +00:00
|
|
|
err := s.endpointSVC.CreateNotificationEndpoint(ctx, actual, userID)
|
2019-12-10 22:51:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return actual, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Service) rollbackNotificationEndpoints(endpoints []*notificationEndpoint) error {
|
|
|
|
var errs []string
|
|
|
|
for _, e := range endpoints {
|
|
|
|
if e.existing == nil {
|
|
|
|
_, _, err := s.endpointSVC.DeleteNotificationEndpoint(context.Background(), e.ID())
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, e.ID().String())
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err := s.endpointSVC.UpdateNotificationEndpoint(context.Background(), e.ID(), e.existing, 0)
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, e.ID().String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return fmt.Errorf(`notication_endpoint_ids=[%s] err="unable to delete"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-20 17:10:10 +00:00
|
|
|
func (s *Service) applyNotificationRulesGenerator(ctx context.Context, orgID influxdb.ID, rules []*notificationRule) (applier, error) {
|
|
|
|
endpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{
|
|
|
|
OrgID: &orgID,
|
|
|
|
})
|
|
|
|
if err != nil {
|
2019-12-21 23:57:41 +00:00
|
|
|
return applier{}, internalErr(err)
|
2019-12-20 17:10:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
type mVal struct {
|
|
|
|
id influxdb.ID
|
|
|
|
eType string
|
|
|
|
}
|
|
|
|
mEndpoints := make(map[string]mVal)
|
|
|
|
for _, e := range endpoints {
|
|
|
|
mEndpoints[e.GetName()] = mVal{
|
|
|
|
id: e.GetID(),
|
|
|
|
eType: e.Type(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
var errs applyErrs
|
|
|
|
for _, r := range rules {
|
|
|
|
v, ok := mEndpoints[r.endpointName]
|
|
|
|
if !ok {
|
|
|
|
errs = append(errs, &applyErrBody{
|
|
|
|
name: r.Name(),
|
|
|
|
msg: fmt.Sprintf("endpoint dependency does not exist; endpointName=%q", r.endpointName),
|
|
|
|
})
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
r.endpointID = v.id
|
|
|
|
r.endpointType = v.eType
|
|
|
|
}
|
|
|
|
|
|
|
|
err = errs.toError("notification_rules", "failed to find dependency")
|
|
|
|
if err != nil {
|
|
|
|
return applier{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return s.applyNotificationRules(rules), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Service) applyNotificationRules(rules []*notificationRule) applier {
|
|
|
|
const resource = "notification_rules"
|
|
|
|
|
|
|
|
mutex := new(doMutex)
|
|
|
|
rollbackEndpoints := make([]*notificationRule, 0, len(rules))
|
|
|
|
|
|
|
|
createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
|
|
|
|
var rule notificationRule
|
|
|
|
mutex.Do(func() {
|
|
|
|
rules[i].orgID = orgID
|
|
|
|
rule = *rules[i]
|
|
|
|
})
|
|
|
|
|
|
|
|
influxRule, err := s.applyNotificationRule(ctx, rule, userID)
|
|
|
|
if err != nil {
|
|
|
|
return &applyErrBody{
|
|
|
|
name: rule.Name(),
|
|
|
|
msg: err.Error(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex.Do(func() {
|
|
|
|
rules[i].id = influxRule.GetID()
|
|
|
|
rollbackEndpoints = append(rollbackEndpoints, rules[i])
|
|
|
|
})
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return applier{
|
|
|
|
creater: creater{
|
|
|
|
entries: len(rules),
|
|
|
|
fn: createFn,
|
|
|
|
},
|
|
|
|
rollbacker: rollbacker{
|
|
|
|
resource: resource,
|
2019-12-27 19:22:05 +00:00
|
|
|
fn: func(_ influxdb.ID) error {
|
2019-12-20 17:10:10 +00:00
|
|
|
return s.rollbackNotificationRules(rollbackEndpoints)
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Service) applyNotificationRule(ctx context.Context, e notificationRule, userID influxdb.ID) (influxdb.NotificationRule, error) {
|
|
|
|
actual := influxdb.NotificationRuleCreate{
|
|
|
|
NotificationRule: e.toInfluxRule(),
|
|
|
|
Status: e.Status(),
|
|
|
|
}
|
|
|
|
err := s.ruleSVC.CreateNotificationRule(ctx, actual, userID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return actual, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Service) rollbackNotificationRules(rules []*notificationRule) error {
|
|
|
|
var errs []string
|
|
|
|
for _, e := range rules {
|
|
|
|
err := s.ruleSVC.DeleteNotificationRule(context.Background(), e.ID())
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, e.ID().String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return fmt.Errorf(`notication_rule_ids=[%s] err="unable to delete"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-27 19:22:05 +00:00
|
|
|
func (s *Service) applySecrets(secrets map[string]string) applier {
|
|
|
|
const resource = "secrets"
|
|
|
|
|
|
|
|
if len(secrets) == 0 {
|
|
|
|
return applier{
|
|
|
|
rollbacker: rollbacker{fn: func(orgID influxdb.ID) error { return nil }},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex := new(doMutex)
|
|
|
|
rollbackSecrets := make([]string, 0)
|
|
|
|
|
|
|
|
createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
|
|
|
|
err := s.secretSVC.PutSecrets(ctx, orgID, secrets)
|
|
|
|
if err != nil {
|
|
|
|
return &applyErrBody{name: "secrets", msg: err.Error()}
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex.Do(func() {
|
|
|
|
for key := range secrets {
|
|
|
|
rollbackSecrets = append(rollbackSecrets, key)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return applier{
|
|
|
|
creater: creater{
|
|
|
|
entries: 1,
|
|
|
|
fn: createFn,
|
|
|
|
},
|
|
|
|
rollbacker: rollbacker{
|
|
|
|
resource: resource,
|
|
|
|
fn: func(orgID influxdb.ID) error {
|
|
|
|
return s.secretSVC.DeleteSecret(context.Background(), orgID)
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-23 19:51:00 +00:00
|
|
|
// applyTasks returns an applier that creates the given tasks and records each
// created task so the batch can be rolled back if a later apply step fails.
func (s *Service) applyTasks(tasks []*task) applier {
	const resource = "tasks"

	// mutex guards tasks and rollbackTasks; createFn is invoked concurrently
	// (one goroutine per entry) by the rollback coordinator.
	mutex := new(doMutex)
	rollbackTasks := make([]task, 0, len(tasks))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		// copy the i'th task out under lock so the service call below can
		// run without holding the mutex.
		var t task
		mutex.Do(func() {
			tasks[i].orgID = orgID
			t = *tasks[i]
		})

		newTask, err := s.taskSVC.CreateTask(ctx, influxdb.TaskCreate{
			Type:           influxdb.TaskSystemType,
			Flux:           t.flux(),
			OwnerID:        userID,
			Description:    t.description,
			Status:         string(t.Status()),
			OrganizationID: t.orgID,
		})
		if err != nil {
			return &applyErrBody{name: t.Name(), msg: err.Error()}
		}

		mutex.Do(func() {
			tasks[i].id = newTask.ID
			rollbackTasks = append(rollbackTasks, *tasks[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(tasks),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn: func(_ influxdb.ID) error {
				// rollback is a straight delete of every created task.
				return s.deleteByIDs("task", len(rollbackTasks), s.taskSVC.DeleteTask, func(i int) influxdb.ID {
					return rollbackTasks[i].ID()
				})
			},
		},
	}
}
|
|
|
|
|
2019-12-04 01:00:15 +00:00
|
|
|
// applyTelegrafs returns an applier that creates the given telegraf configs
// and records each created config so the batch can be rolled back if a later
// apply step fails.
func (s *Service) applyTelegrafs(teles []*telegraf) applier {
	const resource = "telegrafs"

	// mutex guards teles and rollbackTelegrafs; createFn is invoked
	// concurrently (one goroutine per entry) by the rollback coordinator.
	mutex := new(doMutex)
	rollbackTelegrafs := make([]*telegraf, 0, len(teles))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		// copy the i'th config out under lock so the service call below can
		// run without holding the mutex.
		var cfg influxdb.TelegrafConfig
		mutex.Do(func() {
			teles[i].config.OrgID = orgID
			cfg = teles[i].config
		})

		err := s.teleSVC.CreateTelegrafConfig(ctx, &cfg, userID)
		if err != nil {
			return &applyErrBody{
				name: cfg.Name,
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			// store the created config (now carrying its new ID) back on
			// the pkg telegraf and remember it for rollback.
			teles[i].config = cfg
			rollbackTelegrafs = append(rollbackTelegrafs, teles[i])
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(teles),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn: func(_ influxdb.ID) error {
				return s.deleteByIDs("telegraf", len(rollbackTelegrafs), s.teleSVC.DeleteTelegrafConfig, func(i int) influxdb.ID {
					return rollbackTelegrafs[i].ID()
				})
			},
		},
	}
}
|
|
|
|
|
2019-11-07 00:45:00 +00:00
|
|
|
// applyVariables returns an applier that creates or updates the given
// variables, skipping any that don't require application, and records the
// applied ones so the batch can be rolled back on a later failure.
func (s *Service) applyVariables(vars []*variable) applier {
	const resource = "variable"

	// mutex guards vars and rollBackVars; createFn is invoked concurrently
	// (one goroutine per entry) by the rollback coordinator.
	mutex := new(doMutex)
	rollBackVars := make([]*variable, 0, len(vars))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		// copy the i'th variable out under lock so the service call below
		// can run without holding the mutex.
		var v variable
		mutex.Do(func() {
			vars[i].OrgID = orgID
			v = *vars[i]
		})
		// variables that don't need applying are skipped entirely.
		if !v.shouldApply() {
			return nil
		}
		influxVar, err := s.applyVariable(ctx, v)
		if err != nil {
			return &applyErrBody{
				name: v.Name(),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			vars[i].id = influxVar.ID
			rollBackVars = append(rollBackVars, vars[i])
		})
		return nil
	}

	return applier{
		creater: creater{
			entries: len(vars),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn:       func(_ influxdb.ID) error { return s.rollbackVariables(rollBackVars) },
		},
	}
}
|
|
|
|
|
|
|
|
func (s *Service) rollbackVariables(variables []*variable) error {
|
|
|
|
var errs []string
|
|
|
|
for _, v := range variables {
|
|
|
|
if v.existing == nil {
|
|
|
|
err := s.varSVC.DeleteVariable(context.Background(), v.ID())
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, v.ID().String())
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err := s.varSVC.UpdateVariable(context.Background(), v.ID(), &influxdb.VariableUpdate{
|
|
|
|
Description: v.existing.Description,
|
|
|
|
Arguments: v.existing.Arguments,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, v.ID().String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return fmt.Errorf(`variable_ids=[%s] err="unable to delete variable"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
func (s *Service) applyVariable(ctx context.Context, v variable) (influxdb.Variable, error) {
|
2019-11-07 00:45:00 +00:00
|
|
|
if v.existing != nil {
|
|
|
|
updatedVar, err := s.varSVC.UpdateVariable(ctx, v.ID(), &influxdb.VariableUpdate{
|
|
|
|
Description: v.Description,
|
|
|
|
Arguments: v.influxVarArgs(),
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Variable{}, err
|
|
|
|
}
|
|
|
|
return *updatedVar, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
influxVar := influxdb.Variable{
|
|
|
|
OrganizationID: v.OrgID,
|
2019-12-03 02:05:10 +00:00
|
|
|
Name: v.Name(),
|
2019-11-07 00:45:00 +00:00
|
|
|
Description: v.Description,
|
|
|
|
Arguments: v.influxVarArgs(),
|
|
|
|
}
|
|
|
|
err := s.varSVC.CreateVariable(ctx, &influxVar)
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Variable{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return influxVar, nil
|
|
|
|
}
|
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
// applyLabelMappings returns an applier that creates the given label
// mappings, skipping mappings that already exist or reference unresolved
// (zero) IDs, and records the created mappings for rollback.
func (s *Service) applyLabelMappings(labelMappings []SummaryLabelMapping) applier {
	const resource = "label_mapping"

	// mutex guards labelMappings and rollbackMappings; createFn is invoked
	// concurrently (one goroutine per entry) by the rollback coordinator.
	mutex := new(doMutex)
	rollbackMappings := make([]influxdb.LabelMapping, 0, len(labelMappings))

	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
		var mapping SummaryLabelMapping
		mutex.Do(func() {
			mapping = labelMappings[i]
		})
		if mapping.exists || mapping.LabelID == 0 || mapping.ResourceID == 0 {
			// this block here does 2 things, it does not write a
			// mapping when one exists. it also avoids having to worry
			// about deleting an existing mapping since it will not be
			// passed to the delete function below b/c it is never added
			// to the list of mappings that is referenced in the delete
			// call.
			return nil
		}

		m := influxdb.LabelMapping{
			LabelID:      influxdb.ID(mapping.LabelID),
			ResourceID:   influxdb.ID(mapping.ResourceID),
			ResourceType: mapping.ResourceType,
		}
		err := s.labelSVC.CreateLabelMapping(ctx, &m)
		if err != nil {
			return &applyErrBody{
				name: fmt.Sprintf("%s:%s:%s", mapping.ResourceType, mapping.ResourceID, mapping.LabelID),
				msg:  err.Error(),
			}
		}

		mutex.Do(func() {
			// only successfully created mappings are rollback candidates.
			rollbackMappings = append(rollbackMappings, m)
		})

		return nil
	}

	return applier{
		creater: creater{
			entries: len(labelMappings),
			fn:      createFn,
		},
		rollbacker: rollbacker{
			resource: resource,
			fn:       func(_ influxdb.ID) error { return s.rollbackLabelMappings(rollbackMappings) },
		},
	}
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
func (s *Service) rollbackLabelMappings(mappings []influxdb.LabelMapping) error {
|
2019-10-26 02:11:47 +00:00
|
|
|
var errs []string
|
|
|
|
for i := range mappings {
|
|
|
|
l := mappings[i]
|
|
|
|
err := s.labelSVC.DeleteLabelMapping(context.Background(), &l)
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, fmt.Sprintf("%s:%s", l.LabelID.String(), l.ResourceID.String()))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return fmt.Errorf(`label_resource_id_pairs=[%s] err="unable to delete label"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2019-10-28 22:23:40 +00:00
|
|
|
|
2019-12-04 01:00:15 +00:00
|
|
|
func (s *Service) deleteByIDs(resource string, numIDs int, deleteFn func(context.Context, influxdb.ID) error, iterFn func(int) influxdb.ID) error {
|
|
|
|
var errs []string
|
|
|
|
for i := range make([]struct{}, numIDs) {
|
|
|
|
id := iterFn(i)
|
|
|
|
err := deleteFn(context.Background(), id)
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, id.String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return fmt.Errorf(`%s_ids=[%s] err="unable to delete"`, resource, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-12 19:09:32 +00:00
|
|
|
// findDashboardByIDFull fetches a dashboard and then hydrates each of its
// cells with that cell's view via a separate lookup, since the plain find
// call leaves views unset.
func (s *Service) findDashboardByIDFull(ctx context.Context, id influxdb.ID) (*influxdb.Dashboard, error) {
	dash, err := s.dashSVC.FindDashboardByID(ctx, id)
	if err != nil {
		return nil, err
	}
	for _, cell := range dash.Cells {
		v, err := s.dashSVC.GetDashboardCellView(ctx, id, cell.ID)
		if err != nil {
			return nil, err
		}
		// NOTE(review): assumes dash.Cells holds pointers so this assignment
		// mutates the dashboard's cell rather than a loop copy — confirm
		// against the Cells element type.
		cell.View = v
	}
	return dash, nil
}
|
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
// doMutex is a small convenience wrapper around sync.Mutex that runs a
// function while holding the lock.
type doMutex struct {
	sync.Mutex
}

// Do executes fn while holding the mutex; the deferred unlock releases the
// lock even if fn panics.
func (m *doMutex) Do(fn func()) {
	m.Lock()
	defer m.Unlock()
	fn()
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
type (
	// applier pairs the create behavior for one resource type with the
	// rollback behavior that undoes it when a later apply step fails.
	applier struct {
		creater    creater
		rollbacker rollbacker
	}

	// rollbacker names a resource type and holds the function that undoes
	// whatever was applied for it.
	rollbacker struct {
		resource string
		fn       func(orgID influxdb.ID) error
	}

	// creater records how many entries there are to create and the function
	// that creates the i'th entry, returning a non-nil error body on failure.
	creater struct {
		entries int
		fn      func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody
	}
)
|
|
|
|
|
|
|
|
// rollbackCoordinator runs appliers concurrently and remembers their
// rollbackers so that everything applied so far can be undone when a later
// step fails.
type rollbackCoordinator struct {
	rollbacks []rollbacker

	// sem bounds the number of concurrently running create calls.
	sem chan struct{}
}
|
|
|
|
|
2019-12-12 19:09:32 +00:00
|
|
|
// runTilEnd executes every entry of every applier concurrently, bounded by
// the coordinator's semaphore, and registers each applier's rollbacker up
// front so partial progress can still be rolled back. It always runs all
// entries to completion and returns the aggregated creation errors (nil when
// everything succeeded).
func (r *rollbackCoordinator) runTilEnd(ctx context.Context, orgID, userID influxdb.ID, appliers ...applier) error {
	errStr := newErrStream(ctx)

	wg := new(sync.WaitGroup)
	for i := range appliers {
		// cannot reuse the shared variable from for loop since we're using concurrency b/c
		// that temp var gets recycled between iterations
		app := appliers[i]
		// register the rollbacker before creating anything so even partial
		// progress for this resource type can be undone.
		r.rollbacks = append(r.rollbacks, app.rollbacker)
		for idx := range make([]struct{}, app.creater.entries) {
			// acquire a semaphore slot to bound concurrency.
			r.sem <- struct{}{}
			wg.Add(1)

			go func(i int, resource string) {
				defer func() {
					wg.Done()
					<-r.sem
				}()

				// each create call gets its own 30s deadline.
				ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
				defer cancel()

				if err := app.creater.fn(ctx, i, orgID, userID); err != nil {
					errStr.add(errMsg{resource: resource, err: *err})
				}
			}(idx, app.rollbacker.resource)
		}
	}
	wg.Wait()

	// closing the stream flushes the collector and yields the combined error.
	return errStr.close()
}
|
|
|
|
|
2019-12-27 19:22:05 +00:00
|
|
|
func (r *rollbackCoordinator) rollback(l *zap.Logger, err *error, orgID influxdb.ID) {
|
2019-10-28 22:23:40 +00:00
|
|
|
if *err == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, r := range r.rollbacks {
|
2019-12-27 19:22:05 +00:00
|
|
|
if err := r.fn(orgID); err != nil {
|
2019-10-28 22:23:40 +00:00
|
|
|
l.Error("failed to delete "+r.resource, zap.Error(err))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-07 00:23:09 +00:00
|
|
|
// errMsg couples an apply failure with the resource type it belongs to.
type errMsg struct {
	resource string
	err      applyErrBody
}

// errStream collects errMsg values from concurrent workers and aggregates
// them, grouped by resource, into a single error once the stream is closed.
type errStream struct {
	msgStream chan errMsg
	err       chan error
	done      <-chan struct{}
}
|
|
|
|
|
|
|
|
// newErrStream builds an errStream tied to ctx's cancellation and starts its
// background collector goroutine.
func newErrStream(ctx context.Context) *errStream {
	e := &errStream{
		msgStream: make(chan errMsg),
		err:       make(chan error),
		done:      ctx.Done(),
	}
	e.do()
	return e
}
|
|
|
|
|
|
|
|
// do starts the collector goroutine: it drains msgStream, grouping failures
// by resource, until the stream is closed (normal shutdown) or the context is
// done (cancellation, in which case collected errors are discarded). The
// combined result is delivered exactly once on e.err.
func (e *errStream) do() {
	go func() {
		mErrs := func() map[string]applyErrs {
			mErrs := make(map[string]applyErrs)
			for {
				select {
				case <-e.done:
					// context cancelled: abandon collection.
					return nil
				case msg, ok := <-e.msgStream:
					if !ok {
						// stream closed: hand back what was gathered.
						return mErrs
					}
					mErrs[msg.resource] = append(mErrs[msg.resource], &msg.err)
				}
			}
		}()

		if len(mErrs) == 0 {
			e.err <- nil
			return
		}

		var errs []string
		for resource, err := range mErrs {
			errs = append(errs, err.toError(resource, "failed to create").Error())
		}
		e.err <- errors.New(strings.Join(errs, "\n"))
	}()
}
|
|
|
|
|
|
|
|
// close shuts down the message stream and blocks for the aggregated error
// from the collector goroutine (nil when no failures were reported).
func (e *errStream) close() error {
	close(e.msgStream)
	return <-e.err
}
|
|
|
|
|
|
|
|
// add delivers a failure to the collector, giving up instead of blocking
// forever when the stream's context has been cancelled.
func (e *errStream) add(msg errMsg) {
	select {
	case <-e.done:
	case e.msgStream <- msg:
	}
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
// TODO: clean up apply errors to inform the user in an actionable way

// applyErrBody records the name of a resource that failed to apply along with
// the failure message.
type applyErrBody struct {
	name string
	msg  string
}

// applyErrs is a collection of apply failures for one resource type.
type applyErrs []*applyErrBody

// toError flattens the collected failures into a single error, one indented
// line per failure. It returns nil when the collection is empty.
func (a applyErrs) toError(resType, msg string) error {
	if len(a) == 0 {
		return nil
	}

	var b strings.Builder
	fmt.Fprintf(&b, `resource_type=%q err=%q`, resType, msg)
	for _, body := range a {
		fmt.Fprintf(&b, "\n\tname=%q err_msg=%q", body.name, body.msg)
	}
	return errors.New(b.String())
}
|
2019-11-21 00:38:12 +00:00
|
|
|
|
|
|
|
func labelSlcToMap(labels []*label) map[string]*label {
|
|
|
|
m := make(map[string]*label)
|
|
|
|
for i := range labels {
|
2019-12-03 02:05:10 +00:00
|
|
|
m[labels[i].Name()] = labels[i]
|
2019-11-21 00:38:12 +00:00
|
|
|
}
|
|
|
|
return m
|
|
|
|
}
|
2019-12-21 23:57:41 +00:00
|
|
|
|
|
|
|
func failedValidationErr(err error) error {
|
|
|
|
if err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &influxdb.Error{Code: influxdb.EUnprocessableEntity, Err: err}
|
|
|
|
}
|
|
|
|
|
|
|
|
func internalErr(err error) error {
|
|
|
|
if err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &influxdb.Error{Code: influxdb.EInternal, Err: err}
|
|
|
|
}
|