2019-10-23 17:09:04 +00:00
|
|
|
package pkger
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2019-10-26 02:11:47 +00:00
|
|
|
"errors"
|
2019-10-23 17:09:04 +00:00
|
|
|
"fmt"
|
2019-10-28 22:23:40 +00:00
|
|
|
"sort"
|
2019-10-23 17:09:04 +00:00
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/influxdata/influxdb"
|
|
|
|
"go.uber.org/zap"
|
|
|
|
)
|
|
|
|
|
2019-11-04 23:15:53 +00:00
|
|
|
// APIVersion marks the current APIVersion for influx packages. It is
// stamped onto every pkg produced by CreatePkg.
const APIVersion = "0.1.0"
|
|
|
|
|
2019-11-05 01:40:42 +00:00
|
|
|
// SVC is the packages service interface.
type SVC interface {
	// CreatePkg produces a new pkg, applying the provided setters to it in order.
	CreatePkg(ctx context.Context, setters ...CreatePkgSetFn) (*Pkg, error)

	// DryRun reports the Summary and Diff of applying pkg to the given org
	// without creating or updating any resources.
	DryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg) (Summary, Diff, error)

	// Apply applies all of the pkg's resources to the given org, rolling back
	// on failure.
	Apply(ctx context.Context, orgID influxdb.ID, pkg *Pkg) (Summary, error)
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
// Service provides the pkger business logic including all the dependencies to make
// this resource sausage.
type Service struct {
	// logger defaults to a no-op logger when the constructor receives nil.
	logger *zap.Logger

	// platform services used to look up, create, update, and delete each
	// resource type the pkg can contain.
	labelSVC  influxdb.LabelService
	bucketSVC influxdb.BucketService
	dashSVC   influxdb.DashboardService
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
// NewService is a constructor for a pkger Service.
|
2019-10-30 21:13:42 +00:00
|
|
|
func NewService(l *zap.Logger, bucketSVC influxdb.BucketService, labelSVC influxdb.LabelService, dashSVC influxdb.DashboardService) *Service {
|
2019-10-23 17:09:04 +00:00
|
|
|
svc := Service{
|
|
|
|
logger: zap.NewNop(),
|
|
|
|
bucketSVC: bucketSVC,
|
2019-10-24 23:59:01 +00:00
|
|
|
labelSVC: labelSVC,
|
2019-10-30 21:13:42 +00:00
|
|
|
dashSVC: dashSVC,
|
2019-10-23 17:09:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if l != nil {
|
|
|
|
svc.logger = l
|
|
|
|
}
|
|
|
|
return &svc
|
|
|
|
}
|
|
|
|
|
2019-11-04 23:15:53 +00:00
|
|
|
// CreatePkgSetFn is a functional input for setting the pkg fields. Setters
// are applied in order by CreatePkg; a non-nil error aborts pkg creation.
type CreatePkgSetFn func(ctx context.Context, pkg *Pkg) error
|
|
|
|
|
|
|
|
// WithMetadata sets the metadata on the pkg in a CreatePkg call.
|
|
|
|
func WithMetadata(meta Metadata) CreatePkgSetFn {
|
|
|
|
return func(ctx context.Context, pkg *Pkg) error {
|
|
|
|
pkg.Metadata = meta
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreatePkg will produce a pkg from the parameters provided.
|
|
|
|
func (s *Service) CreatePkg(ctx context.Context, setters ...CreatePkgSetFn) (*Pkg, error) {
|
|
|
|
pkg := &Pkg{
|
|
|
|
APIVersion: APIVersion,
|
|
|
|
Kind: kindPackage.String(),
|
|
|
|
Spec: struct {
|
|
|
|
Resources []Resource `yaml:"resources" json:"resources"`
|
|
|
|
}{
|
|
|
|
Resources: []Resource{},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, setter := range setters {
|
|
|
|
err := setter(ctx, pkg)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return pkg, nil
|
|
|
|
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
// DryRun provides a dry run of the pkg application. The pkg will be marked verified
// for later calls to Apply. This func will be run on an Apply if it has not been run
// already.
func (s *Service) DryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg) (Summary, Diff, error) {
	// a pkg that did not come through the parser must be validated before use
	if !pkg.isParsed {
		if err := pkg.Validate(); err != nil {
			return Summary{}, Diff{}, err
		}
	}

	diffBuckets, err := s.dryRunBuckets(ctx, orgID, pkg)
	if err != nil {
		return Summary{}, Diff{}, err
	}

	diffDashes, err := s.dryRunDashboards(ctx, orgID, pkg)
	if err != nil {
		return Summary{}, Diff{}, err
	}

	diffLabels, err := s.dryRunLabels(ctx, orgID, pkg)
	if err != nil {
		return Summary{}, Diff{}, err
	}

	// mappings run after the resource dry runs above, which annotate the pkg
	// resources with their existing platform state (e.g. b.existing)
	diffLabelMappings, err := s.dryRunLabelMappings(ctx, pkg)
	if err != nil {
		return Summary{}, Diff{}, err
	}

	// verify the pkg is verified by a dry run. when calling Service.Apply this
	// is required to have been run. if it is not true, then apply runs
	// the Dry run.
	pkg.isVerified = true

	diff := Diff{
		Buckets:       diffBuckets,
		Dashboards:    diffDashes,
		Labels:        diffLabels,
		LabelMappings: diffLabelMappings,
	}
	return pkg.Summary(), diff, nil
}
|
|
|
|
|
|
|
|
func (s *Service) dryRunBuckets(ctx context.Context, orgID influxdb.ID, pkg *Pkg) ([]DiffBucket, error) {
|
|
|
|
mExistingBkts := make(map[string]DiffBucket)
|
|
|
|
bkts := pkg.buckets()
|
|
|
|
for i := range bkts {
|
|
|
|
b := bkts[i]
|
|
|
|
existingBkt, err := s.bucketSVC.FindBucketByName(ctx, orgID, b.Name)
|
|
|
|
switch err {
|
|
|
|
// TODO: case for err not found here and another case handle where
|
|
|
|
// err isn't a not found (some other error)
|
|
|
|
case nil:
|
|
|
|
b.existing = existingBkt
|
|
|
|
mExistingBkts[b.Name] = newDiffBucket(b, *existingBkt)
|
|
|
|
default:
|
|
|
|
mExistingBkts[b.Name] = newDiffBucket(b, influxdb.Bucket{})
|
2019-10-23 17:09:04 +00:00
|
|
|
}
|
2019-10-28 22:23:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var diffs []DiffBucket
|
|
|
|
for _, diff := range mExistingBkts {
|
|
|
|
diffs = append(diffs, diff)
|
|
|
|
}
|
|
|
|
sort.Slice(diffs, func(i, j int) bool {
|
|
|
|
return diffs[i].Name < diffs[j].Name
|
|
|
|
})
|
|
|
|
|
|
|
|
return diffs, nil
|
|
|
|
}
|
|
|
|
|
2019-10-30 21:13:42 +00:00
|
|
|
func (s *Service) dryRunDashboards(ctx context.Context, orgID influxdb.ID, pkg *Pkg) ([]DiffDashboard, error) {
|
|
|
|
var diffs []DiffDashboard
|
2019-11-01 18:11:42 +00:00
|
|
|
for _, d := range pkg.dashboards() {
|
|
|
|
diffs = append(diffs, newDiffDashboard(d))
|
2019-10-30 21:13:42 +00:00
|
|
|
}
|
2019-11-01 18:11:42 +00:00
|
|
|
|
2019-10-30 21:13:42 +00:00
|
|
|
sort.Slice(diffs, func(i, j int) bool {
|
|
|
|
return diffs[i].Name < diffs[j].Name
|
|
|
|
})
|
|
|
|
|
|
|
|
return diffs, nil
|
|
|
|
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
// dryRunLabels compares every label in the pkg against the platform (looked
// up by name within the org), marking matching pkg labels with their existing
// counterpart, and returns the resulting diffs sorted by label name.
func (s *Service) dryRunLabels(ctx context.Context, orgID influxdb.ID, pkg *Pkg) ([]DiffLabel, error) {
	mExistingLabels := make(map[string]DiffLabel)
	labels := pkg.labels()
	for i := range labels {
		l := labels[i]
		existingLabels, err := s.labelSVC.FindLabels(ctx, influxdb.LabelFilter{
			Name:  l.Name,
			OrgID: &orgID,
		}, influxdb.FindOptions{Limit: 1})
		switch {
		// TODO: case for err not found here and another case handle where
		// err isn't a not found (some other error)
		case err == nil && len(existingLabels) > 0:
			existingLabel := existingLabels[0]
			l.existing = existingLabel
			mExistingLabels[l.Name] = newDiffLabel(l, *existingLabel)
		default:
			// lookup failed or found nothing: treat the label as brand new
			mExistingLabels[l.Name] = newDiffLabel(l, influxdb.Label{})
		}
	}

	diffs := make([]DiffLabel, 0, len(mExistingLabels))
	for _, diff := range mExistingLabels {
		diffs = append(diffs, diff)
	}
	sort.Slice(diffs, func(i, j int) bool {
		return diffs[i].Name < diffs[j].Name
	})

	return diffs, nil
}
|
|
|
|
|
2019-10-30 21:13:42 +00:00
|
|
|
type (
	// labelMappingDiffFn is the callback invoked for every label mapping
	// discovered during a dry run; isNew is true when the mapping does not
	// yet exist on the platform.
	labelMappingDiffFn func(labelID influxdb.ID, labelName string, isNew bool)

	// labelAssociater is any pkg resource (bucket, dashboard, ...) that
	// labels can be mapped to.
	labelAssociater interface {
		ID() influxdb.ID
		ResourceType() influxdb.ResourceType
		Exists() bool
	}
)
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
// dryRunLabelMappings diffs the label<->resource mappings for every bucket
// and dashboard in the pkg. As a side effect each discovered mapping is
// recorded on the pkg's labels (setBucketMapping/setDashboardMapping).
// Results are sorted by resource type, resource name, then label name.
func (s *Service) dryRunLabelMappings(ctx context.Context, pkg *Pkg) ([]DiffLabelMapping, error) {
	var diffs []DiffLabelMapping
	for _, b := range pkg.buckets() {
		err := s.dryRunResourceLabelMapping(ctx, b, b.labels, func(labelID influxdb.ID, labelName string, isNew bool) {
			// record the mapping on the pkg label; !isNew marks it as existing
			pkg.mLabels[labelName].setBucketMapping(b, !isNew)
			diffs = append(diffs, DiffLabelMapping{
				IsNew:     isNew,
				ResType:   b.ResourceType(),
				ResID:     SafeID(b.ID()),
				ResName:   b.Name,
				LabelID:   SafeID(labelID),
				LabelName: labelName,
			})
		})
		if err != nil {
			return nil, err
		}
	}

	for _, d := range pkg.dashboards() {
		err := s.dryRunResourceLabelMapping(ctx, d, d.labels, func(labelID influxdb.ID, labelName string, isNew bool) {
			// NOTE(review): unlike the bucket branch, no exists flag is
			// passed here — confirm setDashboardMapping intentionally takes
			// no such argument.
			pkg.mLabels[labelName].setDashboardMapping(d)
			diffs = append(diffs, DiffLabelMapping{
				IsNew:     isNew,
				ResType:   d.ResourceType(),
				ResID:     SafeID(d.ID()),
				ResName:   d.Name,
				LabelID:   SafeID(labelID),
				LabelName: labelName,
			})
		})
		if err != nil {
			return nil, err
		}
	}

	// sort by res type ASC, then res name ASC, then label name ASC
	sort.Slice(diffs, func(i, j int) bool {
		n, m := diffs[i], diffs[j]
		if n.ResType < m.ResType {
			return true
		}
		if n.ResType > m.ResType {
			return false
		}
		if n.ResName < m.ResName {
			return true
		}
		if n.ResName > m.ResName {
			return false
		}
		return n.LabelName < m.LabelName
	})

	return diffs, nil
}
|
|
|
|
|
2019-10-30 21:13:42 +00:00
|
|
|
// dryRunResourceLabelMapping invokes mappingFn once per label mapping for the
// given resource. For a resource that does not yet exist, every pkg label is
// reported as a new mapping. For an existing resource, the platform's current
// mappings are fetched; those are reported as existing (isNew=false) and any
// remaining pkg labels are reported as new.
func (s *Service) dryRunResourceLabelMapping(ctx context.Context, la labelAssociater, labels []*label, mappingFn labelMappingDiffFn) error {
	if !la.Exists() {
		for _, l := range labels {
			mappingFn(l.ID(), l.Name, true)
		}
		return nil
	}
	// loop through and hit api for all labels associated with a bkt
	// lookup labels in pkg, add it to the label mapping, if exists in
	// the results from API, mark it exists
	existingLabels, err := s.labelSVC.FindResourceLabels(ctx, influxdb.LabelMappingFilter{
		ResourceID:   la.ID(),
		ResourceType: la.ResourceType(),
	})
	if err != nil {
		// TODO: inspect err, if its a not found error, do nothing, if any other error
		// handle it better
		return err
	}

	pkgLabels := labelSlcToMap(labels)
	for _, l := range existingLabels {
		// should ignore any labels that are not specified in pkg
		// NOTE(review): mappingFn is currently invoked for every existing
		// label, including ones absent from the pkg — callers index
		// pkg.mLabels[labelName], so confirm that cannot nil-deref.
		mappingFn(l.ID, l.Name, false)
		delete(pkgLabels, l.Name)
	}

	// now we add labels that were not apart of the existing labels
	for _, l := range pkgLabels {
		mappingFn(l.ID(), l.Name, true)
	}
	return nil
}
|
|
|
|
|
|
|
|
func labelSlcToMap(labels []*label) map[string]*label {
|
|
|
|
m := make(map[string]*label)
|
|
|
|
for i := range labels {
|
|
|
|
m[labels[i].Name] = labels[i]
|
|
|
|
}
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
|
|
|
// Apply will apply all the resources identified in the provided pkg. The entire pkg will be applied
// in its entirety. If a failure happens midway then the entire pkg will be rolled back to the state
// from before the pkg were applied.
//
// The error result is named (e) so the deferred rollback can inspect the
// final error and undo prior work when Apply fails.
func (s *Service) Apply(ctx context.Context, orgID influxdb.ID, pkg *Pkg) (sum Summary, e error) {
	// a pkg that did not come through the parser must be validated before use
	if !pkg.isParsed {
		if err := pkg.Validate(); err != nil {
			return Summary{}, err
		}
	}

	// DryRun marks the pkg verified; run it now if the caller has not
	if !pkg.isVerified {
		_, _, err := s.DryRun(ctx, orgID, pkg)
		if err != nil {
			return Summary{}, err
		}
	}

	coordinator := new(rollbackCoordinator)
	defer coordinator.rollback(s.logger, &e)

	runners := [][]applier{
		// each grouping here runs for its entirety, then returns an error that
		// is indicative of running all appliers provided. For instance, the labels
		// may have 1 label fail and one of the buckets fails. The errors aggregate so
		// the caller will be informed of both the failed label and the failed bucket.
		// the groupings here allow for steps to occur before exiting. The first step is
		// adding the primary resources. Here we get all the errors associated with them.
		// If those are all good, then we run the secondary(dependent) resources which
		// rely on the primary resources having been created.
		{
			// primary resources
			s.applyLabels(pkg.labels()),
			s.applyBuckets(pkg.buckets()),
			s.applyDashboards(pkg.dashboards()),
		},
		{
			// secondary (dependent) resources
			s.applyLabelMappings(pkg),
		},
	}

	for _, appliers := range runners {
		err := coordinator.runTilEnd(ctx, orgID, appliers...)
		if err != nil {
			return Summary{}, err
		}
	}

	return pkg.Summary(), nil
}
|
|
|
|
|
2019-10-26 02:11:47 +00:00
|
|
|
// applyBuckets generates an applier that creates/updates every pkg bucket and
// can roll back whatever it changed. Per-bucket errors are aggregated so one
// failure does not stop the remaining buckets from being attempted.
func (s *Service) applyBuckets(buckets []*bucket) applier {
	const resource = "bucket"

	// captured by both createFn and the rollback closure below
	rollbackBuckets := make([]*bucket, 0, len(buckets))
	createFn := func(ctx context.Context, orgID influxdb.ID) error {
		ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
		defer cancel()

		var errs applyErrs
		for i, b := range buckets {
			buckets[i].OrgID = orgID
			// skip buckets that require no changes
			if !b.shouldApply() {
				continue
			}
			influxBucket, err := s.applyBucket(ctx, b)
			if err != nil {
				errs = append(errs, applyErrBody{
					name: b.Name,
					msg:  err.Error(),
				})
				continue
			}
			// record the platform-assigned ID and remember the bucket for rollback
			buckets[i].id = influxBucket.ID
			rollbackBuckets = append(rollbackBuckets, buckets[i])
		}

		return errs.toError(resource, "failed to create bucket")
	}

	return applier{
		creater: createFn,
		rollbacker: rollbacker{
			resource: resource,
			fn:       func() error { return s.rollbackBuckets(rollbackBuckets) },
		},
	}
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
func (s *Service) rollbackBuckets(buckets []*bucket) error {
|
2019-10-23 17:09:04 +00:00
|
|
|
var errs []string
|
|
|
|
for _, b := range buckets {
|
2019-10-28 22:23:40 +00:00
|
|
|
if b.existing == nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
err := s.bucketSVC.DeleteBucket(context.Background(), b.ID())
|
2019-10-28 22:23:40 +00:00
|
|
|
if err != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
errs = append(errs, b.ID().String())
|
2019-10-28 22:23:40 +00:00
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-10-30 17:55:13 +00:00
|
|
|
_, err := s.bucketSVC.UpdateBucket(context.Background(), b.ID(), influxdb.BucketUpdate{
|
2019-10-28 22:23:40 +00:00
|
|
|
Description: &b.Description,
|
|
|
|
RetentionPeriod: &b.RetentionPeriod,
|
|
|
|
})
|
2019-10-23 17:09:04 +00:00
|
|
|
if err != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
errs = append(errs, b.ID().String())
|
2019-10-23 17:09:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
// TODO: fixup error
|
|
|
|
return fmt.Errorf(`bucket_ids=[%s] err="unable to delete bucket"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2019-10-24 23:59:01 +00:00
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
func (s *Service) applyBucket(ctx context.Context, b *bucket) (influxdb.Bucket, error) {
|
|
|
|
if b.existing != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
influxBucket, err := s.bucketSVC.UpdateBucket(ctx, b.ID(), influxdb.BucketUpdate{
|
2019-10-28 22:23:40 +00:00
|
|
|
Description: &b.Description,
|
|
|
|
RetentionPeriod: &b.RetentionPeriod,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Bucket{}, err
|
|
|
|
}
|
|
|
|
return *influxBucket, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
influxBucket := influxdb.Bucket{
|
|
|
|
OrgID: b.OrgID,
|
|
|
|
Description: b.Description,
|
|
|
|
Name: b.Name,
|
|
|
|
RetentionPeriod: b.RetentionPeriod,
|
|
|
|
}
|
|
|
|
err := s.bucketSVC.CreateBucket(ctx, &influxBucket)
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Bucket{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return influxBucket, nil
|
|
|
|
}
|
|
|
|
|
2019-10-30 21:13:42 +00:00
|
|
|
func (s *Service) applyDashboards(dashboards []*dashboard) applier {
|
|
|
|
const resource = "dashboard"
|
|
|
|
|
|
|
|
rollbackDashboards := make([]*dashboard, 0, len(dashboards))
|
|
|
|
createFn := func(ctx context.Context, orgID influxdb.ID) error {
|
|
|
|
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
var errs applyErrs
|
|
|
|
for i := range dashboards {
|
|
|
|
d := dashboards[i]
|
|
|
|
d.OrgID = orgID
|
|
|
|
influxBucket, err := s.applyDashboard(ctx, d)
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, applyErrBody{
|
|
|
|
name: d.Name,
|
|
|
|
msg: err.Error(),
|
|
|
|
})
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
d.id = influxBucket.ID
|
|
|
|
rollbackDashboards = append(rollbackDashboards, d)
|
|
|
|
}
|
|
|
|
|
|
|
|
return errs.toError(resource, "failed to create bucket")
|
|
|
|
}
|
|
|
|
|
|
|
|
return applier{
|
|
|
|
creater: createFn,
|
|
|
|
rollbacker: rollbacker{
|
|
|
|
resource: resource,
|
|
|
|
fn: func() error { return s.rollbackDashboards(rollbackDashboards) },
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Service) rollbackDashboards(dashboards []*dashboard) error {
|
|
|
|
var errs []string
|
2019-11-01 18:11:42 +00:00
|
|
|
for _, d := range dashboards {
|
|
|
|
err := s.dashSVC.DeleteDashboard(context.Background(), d.ID())
|
2019-10-30 21:13:42 +00:00
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, d.ID().String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
// TODO: fixup error
|
|
|
|
return fmt.Errorf(`dashboard_ids=[%s] err="unable to delete dashboard"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// applyDashboard creates the dashboard with one cell per chart, then updates
// each created cell's view with the corresponding chart's name and
// properties. The created platform dashboard is returned.
func (s *Service) applyDashboard(ctx context.Context, d *dashboard) (influxdb.Dashboard, error) {
	cells, cellChartMap := convertChartsToCells(d.Charts)
	influxDashboard := influxdb.Dashboard{
		OrganizationID: d.OrgID,
		Description:    d.Description,
		Name:           d.Name,
		Cells:          cells,
	}
	err := s.dashSVC.CreateDashboard(ctx, &influxDashboard)
	if err != nil {
		return influxdb.Dashboard{}, err
	}

	// cellChartMap associates each created cell with the index of the chart
	// that produced it
	for cell, i := range cellChartMap {
		ch := d.Charts[i]

		_, err := s.dashSVC.UpdateDashboardCellView(ctx, influxDashboard.ID, cell.ID, influxdb.ViewUpdate{
			ViewContentsUpdate: influxdb.ViewContentsUpdate{
				Name: &ch.Name,
			},
			Properties: ch.properties(),
		})
		if err != nil {
			return influxdb.Dashboard{}, err
		}
	}

	return influxDashboard, nil
}
|
|
|
|
|
2019-11-01 18:11:42 +00:00
|
|
|
func convertChartsToCells(ch []chart) ([]*influxdb.Cell, map[*influxdb.Cell]int) {
|
|
|
|
cellChartMap := make(map[*influxdb.Cell]int)
|
|
|
|
icells := make([]*influxdb.Cell, 0, len(ch))
|
|
|
|
for i, c := range ch {
|
|
|
|
icell := &influxdb.Cell{
|
|
|
|
CellProperty: influxdb.CellProperty{
|
|
|
|
H: int32(c.Height),
|
|
|
|
W: int32(c.Width),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
cellChartMap[icell] = i
|
|
|
|
icells = append(icells, icell)
|
|
|
|
}
|
|
|
|
return icells, cellChartMap
|
|
|
|
}
|
|
|
|
|
2019-10-24 23:59:01 +00:00
|
|
|
// applyLabels generates an applier that creates/updates every pkg label and
// can roll back whatever it changed. Per-label errors are aggregated so one
// failure does not stop the remaining labels from being attempted.
func (s *Service) applyLabels(labels []*label) applier {
	const resource = "label"

	// captured by both createFn and the rollback closure below
	rollBackLabels := make([]*label, 0, len(labels))
	createFn := func(ctx context.Context, orgID influxdb.ID) error {
		ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
		defer cancel()

		var errs applyErrs
		for i, l := range labels {
			labels[i].OrgID = orgID
			// skip labels that require no changes
			if !l.shouldApply() {
				continue
			}
			influxLabel, err := s.applyLabel(ctx, l)
			if err != nil {
				errs = append(errs, applyErrBody{
					name: l.Name,
					msg:  err.Error(),
				})
				continue
			}
			// record the platform-assigned ID and remember the label for rollback
			labels[i].id = influxLabel.ID
			rollBackLabels = append(rollBackLabels, labels[i])
		}

		return errs.toError(resource, "failed to create label")
	}

	return applier{
		creater: createFn,
		rollbacker: rollbacker{
			resource: resource,
			fn:       func() error { return s.rollbackLabels(rollBackLabels) },
		},
	}
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
func (s *Service) rollbackLabels(labels []*label) error {
|
2019-10-24 23:59:01 +00:00
|
|
|
var errs []string
|
|
|
|
for _, l := range labels {
|
2019-10-30 17:55:13 +00:00
|
|
|
err := s.labelSVC.DeleteLabel(context.Background(), l.ID())
|
2019-10-24 23:59:01 +00:00
|
|
|
if err != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
errs = append(errs, l.ID().String())
|
2019-10-24 23:59:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return fmt.Errorf(`label_ids=[%s] err="unable to delete label"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2019-10-26 02:11:47 +00:00
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
func (s *Service) applyLabel(ctx context.Context, l *label) (influxdb.Label, error) {
|
|
|
|
if l.existing != nil {
|
2019-10-30 17:55:13 +00:00
|
|
|
updatedlabel, err := s.labelSVC.UpdateLabel(ctx, l.ID(), influxdb.LabelUpdate{
|
2019-10-28 22:23:40 +00:00
|
|
|
Properties: l.properties(),
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Label{}, err
|
|
|
|
}
|
|
|
|
return *updatedlabel, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
influxLabel := influxdb.Label{
|
|
|
|
OrgID: l.OrgID,
|
|
|
|
Name: l.Name,
|
|
|
|
Properties: l.properties(),
|
|
|
|
}
|
|
|
|
err := s.labelSVC.CreateLabel(ctx, &influxLabel)
|
|
|
|
if err != nil {
|
|
|
|
return influxdb.Label{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return influxLabel, nil
|
|
|
|
}
|
|
|
|
|
2019-10-26 02:11:47 +00:00
|
|
|
// applyLabelMappings generates an applier that creates every label mapping in
// the pkg that does not already exist on the platform, and can roll back only
// the mappings it created.
func (s *Service) applyLabelMappings(pkg *Pkg) applier {
	// captured by both createFn and the rollback closure below
	var mappings []influxdb.LabelMapping
	createFn := func(ctx context.Context, orgID influxdb.ID) error {
		ctx, cancel := context.WithTimeout(ctx, time.Minute)
		defer cancel()

		labelMappings := pkg.labelMappings()
		for i := range labelMappings {
			mapping := labelMappings[i]
			if mapping.exists {
				// this block here does 2 things, it does not write a
				// mapping when one exists. it also avoids having to worry
				// about deleting an existing mapping since it will not be
				// passed to the delete function below b/c it is never added
				// to the list of mappings that is referenced in the delete
				// call.
				continue
			}
			err := s.labelSVC.CreateLabelMapping(ctx, &mapping.LabelMapping)
			if err != nil {
				return err
			}
			mappings = append(mappings, mapping.LabelMapping)
		}

		return nil
	}

	return applier{
		creater: createFn,
		rollbacker: rollbacker{
			resource: "label_mapping",
			fn:       func() error { return s.rollbackLabelMappings(mappings) },
		},
	}
}
|
|
|
|
|
2019-10-28 22:23:40 +00:00
|
|
|
func (s *Service) rollbackLabelMappings(mappings []influxdb.LabelMapping) error {
|
2019-10-26 02:11:47 +00:00
|
|
|
var errs []string
|
|
|
|
for i := range mappings {
|
|
|
|
l := mappings[i]
|
|
|
|
err := s.labelSVC.DeleteLabelMapping(context.Background(), &l)
|
|
|
|
if err != nil {
|
|
|
|
errs = append(errs, fmt.Sprintf("%s:%s", l.LabelID.String(), l.ResourceID.String()))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return fmt.Errorf(`label_resource_id_pairs=[%s] err="unable to delete label"`, strings.Join(errs, ", "))
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2019-10-28 22:23:40 +00:00
|
|
|
|
|
|
|
type (
	// applier pairs a create step with the rollback that undoes it.
	applier struct {
		creater    creater
		rollbacker rollbacker
	}

	// rollbacker names the resource being rolled back and holds the func
	// that performs the rollback.
	rollbacker struct {
		resource string
		fn       func() error
	}

	// creater applies a group of resources to the given org.
	creater func(ctx context.Context, orgID influxdb.ID) error
)
|
|
|
|
|
|
|
|
// rollbackCoordinator accumulates the rollbacks of every applier that has
// been run, so a failed apply can be undone in full.
type rollbackCoordinator struct {
	rollbacks []rollbacker
}
|
|
|
|
|
|
|
|
// runTilEnd runs every applier's create step. Each rollback is registered
// before its create runs, so even a partially-failed create can be undone.
// Every applier runs even when an earlier one fails; create errors are
// aggregated into the single returned error.
func (r *rollbackCoordinator) runTilEnd(ctx context.Context, orgID influxdb.ID, appliers ...applier) error {
	var errs []string
	for _, app := range appliers {
		// register the rollback first so partial creates are still undone
		r.rollbacks = append(r.rollbacks, app.rollbacker)
		if err := app.creater(ctx, orgID); err != nil {
			errs = append(errs, fmt.Sprintf("failed %s create: %s", app.rollbacker.resource, err.Error()))
		}
	}

	if len(errs) > 0 {
		// TODO: fix error up to be more actionable
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
|
|
|
|
|
|
|
|
func (r *rollbackCoordinator) rollback(l *zap.Logger, err *error) {
|
|
|
|
if *err == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, r := range r.rollbacks {
|
|
|
|
if err := r.fn(); err != nil {
|
|
|
|
l.Error("failed to delete "+r.resource, zap.Error(err))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: clean up apply errors to inform the user in an actionable way

// applyErrBody is a single resource failure: the resource's name and the
// error message it produced.
type applyErrBody struct {
	name string
	msg  string
}

// applyErrs aggregates per-resource failures for one resource type.
type applyErrs []applyErrBody

// toError folds the collected failures into a single error whose message
// lists every failed resource, or returns nil when nothing failed.
func (a applyErrs) toError(resType, msg string) error {
	if len(a) == 0 {
		return nil
	}

	var sb strings.Builder
	fmt.Fprintf(&sb, `resource_type=%q err=%q`, resType, msg)
	for _, body := range a {
		fmt.Fprintf(&sb, "\n\tname=%q err_msg=%q", body.name, body.msg)
	}
	return errors.New(sb.String())
}
|