feat(pkger): add ability to export resources by name from cli ()

* feat(pkger): add ability to export dashboards by name

* chore: cleanup types

* chore: actually restrict exported dashboards to specified name

* feat: export buckets by name

* feat: export checks by name

* feat: export labels by name

* feat: export notification endpoints by name

* feat: export notification rules by name

* feat: export tasks by name

* feat: export telegraf configs by name

* feat: export variables by name

* chore: remove name from service clone org resources

these functions are not reachable from the CLI

* chore: update old tests and add new tests

* chore: revert notificationEndpoints to be name unique

* chore: address feedback

* chore: define template export by name in swagger

* chore: proper swagger syntax

* chore: remove crufty comments

* chore: fix typo in cli flag

* chore: update changelog
pull/19625/head
Greg 2020-09-23 11:01:09 -06:00 committed by GitHub
commit 85d75e3d4e
11 changed files with 1776 additions and 211 deletions

View File

@@ -34,6 +34,7 @@ need to update any InfluxDB CLI config profiles with the new port number.
 1. [19433](https://github.com/influxdata/influxdb/pull/19433): Add option to dump raw query results in CLI
 1. [19506](https://github.com/influxdata/influxdb/pull/19506): Add TSM 1.x storage options as flags
 1. [19508](https://github.com/influxdata/influxdb/pull/19508): Add subset of InfluxQL coordinator options as flags
+1. [19457](https://github.com/influxdata/influxdb/pull/19457): Add ability to export resources by name via the CLI
 
 ### Bug Fixes

View File

@@ -77,16 +77,25 @@ type cmdTemplateBuilder struct {
     }
     exportOpts struct {
         resourceType string
         buckets string
         checks string
         dashboards string
         endpoints string
         labels string
         rules string
         tasks string
         telegrafs string
         variables string
+        bucketNames string
+        checkNames string
+        dashboardNames string
+        endpointNames string
+        labelNames string
+        ruleNames string
+        taskNames string
+        telegrafNames string
+        variableNames string
     }
 
     updateStackOpts struct {
@@ -351,6 +360,15 @@ func (b *cmdTemplateBuilder) cmdExport() *cobra.Command {
     cmd.Flags().StringVar(&b.exportOpts.tasks, "tasks", "", "List of task ids comma separated")
     cmd.Flags().StringVar(&b.exportOpts.telegrafs, "telegraf-configs", "", "List of telegraf config ids comma separated")
     cmd.Flags().StringVar(&b.exportOpts.variables, "variables", "", "List of variable ids comma separated")
+    cmd.Flags().StringVar(&b.exportOpts.bucketNames, "bucket-names", "", "List of bucket names comma separated")
+    cmd.Flags().StringVar(&b.exportOpts.checkNames, "check-names", "", "List of check names comma separated")
+    cmd.Flags().StringVar(&b.exportOpts.dashboardNames, "dashboard-names", "", "List of dashboard names comma separated")
+    cmd.Flags().StringVar(&b.exportOpts.endpointNames, "endpoint-names", "", "List of notification endpoint names comma separated")
+    cmd.Flags().StringVar(&b.exportOpts.labelNames, "label-names", "", "List of label names comma separated")
+    cmd.Flags().StringVar(&b.exportOpts.ruleNames, "rule-names", "", "List of notification rule names comma separated")
+    cmd.Flags().StringVar(&b.exportOpts.taskNames, "task-names", "", "List of task names comma separated")
+    cmd.Flags().StringVar(&b.exportOpts.telegrafNames, "telegraf-config-names", "", "List of telegraf config names comma separated")
+    cmd.Flags().StringVar(&b.exportOpts.variableNames, "variable-names", "", "List of variable names comma separated")
 
     return cmd
 }
@@ -364,21 +382,22 @@ func (b *cmdTemplateBuilder) exportRunEFn(cmd *cobra.Command, args []string) err
     resTypes := []struct {
         kind pkger.Kind
         idStrs []string
+        names []string
     }{
-        {kind: pkger.KindBucket, idStrs: strings.Split(b.exportOpts.buckets, ",")},
-        {kind: pkger.KindCheck, idStrs: strings.Split(b.exportOpts.checks, ",")},
-        {kind: pkger.KindDashboard, idStrs: strings.Split(b.exportOpts.dashboards, ",")},
-        {kind: pkger.KindLabel, idStrs: strings.Split(b.exportOpts.labels, ",")},
-        {kind: pkger.KindNotificationEndpoint, idStrs: strings.Split(b.exportOpts.endpoints, ",")},
-        {kind: pkger.KindNotificationRule, idStrs: strings.Split(b.exportOpts.rules, ",")},
-        {kind: pkger.KindTask, idStrs: strings.Split(b.exportOpts.tasks, ",")},
-        {kind: pkger.KindTelegraf, idStrs: strings.Split(b.exportOpts.telegrafs, ",")},
-        {kind: pkger.KindVariable, idStrs: strings.Split(b.exportOpts.variables, ",")},
+        {kind: pkger.KindBucket, idStrs: strings.Split(b.exportOpts.buckets, ","), names: strings.Split(b.exportOpts.bucketNames, ",")},
+        {kind: pkger.KindCheck, idStrs: strings.Split(b.exportOpts.checks, ","), names: strings.Split(b.exportOpts.checkNames, ",")},
+        {kind: pkger.KindDashboard, idStrs: strings.Split(b.exportOpts.dashboards, ","), names: strings.Split(b.exportOpts.dashboardNames, ",")},
+        {kind: pkger.KindLabel, idStrs: strings.Split(b.exportOpts.labels, ","), names: strings.Split(b.exportOpts.labelNames, ",")},
+        {kind: pkger.KindNotificationEndpoint, idStrs: strings.Split(b.exportOpts.endpoints, ","), names: strings.Split(b.exportOpts.endpointNames, ",")},
+        {kind: pkger.KindNotificationRule, idStrs: strings.Split(b.exportOpts.rules, ","), names: strings.Split(b.exportOpts.ruleNames, ",")},
+        {kind: pkger.KindTask, idStrs: strings.Split(b.exportOpts.tasks, ","), names: strings.Split(b.exportOpts.taskNames, ",")},
+        {kind: pkger.KindTelegraf, idStrs: strings.Split(b.exportOpts.telegrafs, ","), names: strings.Split(b.exportOpts.telegrafNames, ",")},
+        {kind: pkger.KindVariable, idStrs: strings.Split(b.exportOpts.variables, ","), names: strings.Split(b.exportOpts.variableNames, ",")},
     }
 
     var opts []pkger.ExportOptFn
     for _, rt := range resTypes {
-        newOpt, err := newResourcesToClone(rt.kind, rt.idStrs)
+        newOpt, err := newResourcesToClone(rt.kind, rt.idStrs, rt.names)
         if err != nil {
             return ierror.Wrap(err, rt.kind.String())
         }
@@ -410,7 +429,7 @@ func (b *cmdTemplateBuilder) exportRunEFn(cmd *cobra.Command, args []string) err
         }
     }
 
-    resTypeOpt, err := newResourcesToClone(resKind, args)
+    resTypeOpt, err := newResourcesToClone(resKind, args, []string{})
     if err != nil {
         return err
     }
@@ -1133,7 +1152,7 @@ func (b *cmdTemplateBuilder) convertEncoding() pkger.Encoding {
     }
 }
 
-func newResourcesToClone(kind pkger.Kind, idStrs []string) (pkger.ExportOptFn, error) {
+func newResourcesToClone(kind pkger.Kind, idStrs, names []string) (pkger.ExportOptFn, error) {
     ids, err := toInfluxIDs(idStrs)
     if err != nil {
         return nil, err
@@ -1146,6 +1165,15 @@ func newResourcesToClone(kind pkger.Kind, idStrs []string) (pkger.ExportOptFn, e
             ID: id,
         })
     }
 
+    for _, name := range names {
+        if len(name) == 0 {
+            continue
+        }
+        resources = append(resources, pkger.ResourceToClone{
+            Kind: kind,
+            Name: name,
+        })
+    }
     return pkger.ExportWithExistingResources(resources...), nil
 }
@@ -1156,7 +1184,7 @@ func toInfluxIDs(args []string) ([]influxdb.ID, error) {
     )
     for _, arg := range args {
         normedArg := strings.TrimSpace(strings.ToLower(arg))
-        if normedArg == "" {
+        if len(normedArg) == 0 {
             continue
         }
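For orientation, the new `*-names` flags end up as name-only `pkger.ResourceToClone` values handed to `pkger.ExportWithExistingResources`, exactly as `newResourcesToClone` does above. A minimal standalone sketch of that flow, assuming the `github.com/influxdata/influxdb/v2/pkger` import path; the helper name `buildByNameOpt` is illustrative, not part of the diff:

```go
package main

import (
	"strings"

	"github.com/influxdata/influxdb/v2/pkger"
)

// buildByNameOpt mirrors what newResourcesToClone does with a *-names flag:
// split the comma-separated value, skip empty entries, and turn each
// remaining name into a ResourceToClone carrying only Kind and Name.
func buildByNameOpt(kind pkger.Kind, csv string) pkger.ExportOptFn {
	var resources []pkger.ResourceToClone
	for _, name := range strings.Split(csv, ",") {
		if len(name) == 0 {
			continue
		}
		resources = append(resources, pkger.ResourceToClone{Kind: kind, Name: name})
	}
	return pkger.ExportWithExistingResources(resources...)
}

func main() {
	// Roughly what --bucket-names=rucket_1,rucket_2 contributes to the export options.
	_ = buildByNameOpt(pkger.KindBucket, "rucket_1,rucket_2")
}
```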

View File

@@ -4602,7 +4602,9 @@ paths:
         content:
           application/json:
             schema:
-              $ref: "#/components/schemas/TemplateExport"
+              oneOf:
+                - $ref: "#/components/schemas/TemplateExportByID"
+                - $ref: "#/components/schemas/TemplateExportByName"
       responses:
         "200":
           description: InfluxDB template created
@@ -7475,7 +7477,7 @@ components:
         - Task
         - Telegraf
         - Variable
-    TemplateExport:
+    TemplateExportByID:
       type: object
       properties:
         stackID:
@@ -7507,7 +7509,39 @@ components:
                 $ref: "#/components/schemas/TemplateKind"
              name:
                 type: string
+                description: "if defined with id, name is used for resource exported by id. if defined independently, resources strictly matching name are exported"
            required: [id, kind]
+    TemplateExportByName:
+      type: object
+      properties:
+        stackID:
+          type: string
+        orgIDs:
+          type: array
+          items:
+            type: object
+            properties:
+              orgID:
+                type: string
+              resourceFilters:
+                type: object
+                properties:
+                  byLabel:
+                    type: array
+                    items:
+                      type: string
+                  byResourceKind:
+                    type: array
+                    items:
+                      $ref: "#/components/schemas/TemplateKind"
+        resources:
+          type: object
+          properties:
+            kind:
+              $ref: "#/components/schemas/TemplateKind"
+            name:
+              type: string
+          required: [name, kind]
     Template:
       type: array
       items:
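Going by the `TemplateExportByName` schema added above, a by-name export request selects resources by kind and name instead of by id. A hypothetical client-side sketch of such a request body follows; field names mirror the schema, the kind strings are examples, and whether `resources` accepts a single object or a list is not fully visible in this excerpt, so the sketch assumes a list:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// exportResource pairs a template kind with a resource name, per the
// kind/name properties of TemplateExportByName.
type exportResource struct {
	Kind string `json:"kind"`
	Name string `json:"name"`
}

// templateExportByName is a client-side stand-in for the schema above.
type templateExportByName struct {
	StackID   string           `json:"stackID,omitempty"`
	Resources []exportResource `json:"resources"`
}

func main() {
	body := templateExportByName{
		Resources: []exportResource{
			{Kind: "Bucket", Name: "rucket_1"},
			{Kind: "Dashboard", Name: "dash-1"},
		},
	}
	out, _ := json.MarshalIndent(body, "", "  ")
	fmt.Println(string(out))
}
```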

View File

@@ -135,7 +135,9 @@ func filterDashboardsFn(filter influxdb.DashboardFilter) func(d *influxdb.Dashbo
         }
     }
 
-    return func(d *influxdb.Dashboard) bool { return true }
+    return func(d *influxdb.Dashboard) bool {
+        return ((filter.OrganizationID == nil) || (*filter.OrganizationID == d.OrganizationID))
+    }
 }
 
 // FindDashboards retrives all dashboards that match an arbitrary dashboard filter.
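The fixed predicate now honors an organization filter instead of matching every dashboard unconditionally. A tiny sketch of the behavior it encodes, assuming the `github.com/influxdata/influxdb/v2` import path for the influxdb types referenced above:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2"
)

// matchesOrg restates the returned predicate: with no OrganizationID set on
// the filter every dashboard matches; otherwise only dashboards belonging to
// that organization do.
func matchesOrg(filter influxdb.DashboardFilter, d *influxdb.Dashboard) bool {
	return filter.OrganizationID == nil || *filter.OrganizationID == d.OrganizationID
}

func main() {
	orgID := influxdb.ID(1)
	d := influxdb.Dashboard{OrganizationID: orgID}

	fmt.Println(matchesOrg(influxdb.DashboardFilter{}, &d))                       // true: no org filter set
	fmt.Println(matchesOrg(influxdb.DashboardFilter{OrganizationID: &orgID}, &d)) // true: same org
}
```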

View File

@@ -27,7 +27,7 @@ type NameGenerator func() string
 // ResourceToClone is a resource that will be cloned.
 type ResourceToClone struct {
     Kind Kind `json:"kind"`
-    ID influxdb.ID `json:"id"`
+    ID influxdb.ID `json:"id,omitempty"`
     Name string `json:"name"`
     // note(jsteenb2): For time being we'll allow this internally, but not externally. A lot of
     // issues to account for when exposing this to the outside world. Not something I'm keen
@@ -40,8 +40,8 @@ func (r ResourceToClone) OK() error {
     if err := r.Kind.OK(); err != nil {
         return err
     }
-    if r.ID == influxdb.ID(0) {
-        return errors.New("must provide an ID")
+    if r.ID == influxdb.ID(0) && len(r.Name) == 0 {
+        return errors.New("must provide an ID or name")
     }
     return nil
 }
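With the relaxed check, a clone request is valid when it carries either an ID or a name; only a resource with neither is rejected. A short illustrative use, assuming the exported pkger API shown in this diff and the `github.com/influxdata/influxdb/v2/pkger` import path:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/pkger"
)

func main() {
	// Name-only requests now pass validation, enabling export-by-name.
	byName := pkger.ResourceToClone{Kind: pkger.KindBucket, Name: "rucket_1"}
	fmt.Println(byName.OK()) // <nil>

	// A resource with neither an ID nor a name is still rejected.
	empty := pkger.ResourceToClone{Kind: pkger.KindBucket}
	fmt.Println(empty.OK()) // must provide an ID or name
}
```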
@@ -171,13 +171,10 @@ func (ex *resourceExporter) StackResources() []StackResource {
     return resources
 }
 
-func (ex *resourceExporter) uniqByNameResID() influxdb.ID {
-    // we only need an id when we have resources that are not unique by name via the
-    // metastore. resoureces that are unique by name will be provided a default stamp
-    // making looksup unique since each resource will be unique by name.
-    const uniqByNameResID = 0
-    return uniqByNameResID
-}
+// we only need an id when we have resources that are not unique by name via the
+// metastore. resoureces that are unique by name will be provided a default stamp
+// making looksup unique since each resource will be unique by name.
+const uniqByNameResID = influxdb.ID(0)
 
 type cloneAssociationsFn func(context.Context, ResourceToClone) (associations []ObjectAssociation, skipResource bool, err error)
@@ -220,77 +217,255 @@ func (ex *resourceExporter) resourceCloneToKind(ctx context.Context, r ResourceT
         ex.mStackResources[key] = stackResource
     }
 
-    uniqByNameResID := ex.uniqByNameResID()
-
     switch {
     case r.Kind.is(KindBucket):
-        bkt, err := ex.bucketSVC.FindBucketByID(ctx, r.ID)
+        filter := influxdb.BucketFilter{}
+        if r.ID != influxdb.ID(0) {
+            filter.ID = &r.ID
+        }
+        if len(r.Name) > 0 {
+            filter.Name = &r.Name
+        }
+
+        bkts, n, err := ex.bucketSVC.FindBuckets(ctx, filter)
         if err != nil {
             return err
         }
-        mapResource(bkt.OrgID, uniqByNameResID, KindBucket, BucketToObject(r.Name, *bkt))
-    case r.Kind.is(KindCheck),
-        r.Kind.is(KindCheckDeadman),
-        r.Kind.is(KindCheckThreshold):
-        ch, err := ex.checkSVC.FindCheckByID(ctx, r.ID)
+        if n < 1 {
+            return errors.New("no buckets found")
+        }
+
+        for _, bkt := range bkts {
+            mapResource(bkt.OrgID, bkt.ID, KindBucket, BucketToObject(r.Name, *bkt))
+        }
+    case r.Kind.is(KindCheck), r.Kind.is(KindCheckDeadman), r.Kind.is(KindCheckThreshold):
+        filter := influxdb.CheckFilter{}
+        if r.ID != influxdb.ID(0) {
+            filter.ID = &r.ID
+        }
+        if len(r.Name) > 0 {
+            filter.Name = &r.Name
+        }
+
+        chs, n, err := ex.checkSVC.FindChecks(ctx, filter)
         if err != nil {
             return err
         }
-        mapResource(ch.GetOrgID(), uniqByNameResID, KindCheck, CheckToObject(r.Name, ch))
+        if n < 1 {
+            return errors.New("no checks found")
+        }
+
+        for _, ch := range chs {
+            mapResource(ch.GetOrgID(), ch.GetID(), KindCheck, CheckToObject(r.Name, ch))
+        }
     case r.Kind.is(KindDashboard):
-        dash, err := findDashboardByIDFull(ctx, ex.dashSVC, r.ID)
+        var (
+            hasID bool
+            filter = influxdb.DashboardFilter{}
+        )
+        if r.ID != influxdb.ID(0) {
+            hasID = true
+            filter.IDs = []*influxdb.ID{&r.ID}
+        }
+
+        dashes, _, err := ex.dashSVC.FindDashboards(ctx, filter, influxdb.DefaultDashboardFindOptions)
         if err != nil {
             return err
         }
-        mapResource(dash.OrganizationID, dash.ID, KindDashboard, DashboardToObject(r.Name, *dash))
+
+        var mapped bool
+        for _, dash := range dashes {
+            if (!hasID && len(r.Name) > 0 && dash.Name != r.Name) || (hasID && dash.ID != r.ID) {
+                continue
+            }
+            for _, cell := range dash.Cells {
+                v, err := ex.dashSVC.GetDashboardCellView(ctx, dash.ID, cell.ID)
+                if err != nil {
+                    continue
+                }
+                cell.View = v
+            }
+            mapResource(dash.OrganizationID, dash.ID, KindDashboard, DashboardToObject(r.Name, *dash))
+            mapped = true
+        }
+        if !mapped {
+            return errors.New("no dashboards found")
+        }
     case r.Kind.is(KindLabel):
-        l, err := ex.labelSVC.FindLabelByID(ctx, r.ID)
-        if err != nil {
-            return err
-        }
-        mapResource(l.OrgID, uniqByNameResID, KindLabel, LabelToObject(r.Name, *l))
+        switch {
+        case r.ID != influxdb.ID(0):
+            l, err := ex.labelSVC.FindLabelByID(ctx, r.ID)
+            if err != nil {
+                return err
+            }
+            mapResource(l.OrgID, uniqByNameResID, KindLabel, LabelToObject(r.Name, *l))
+        case len(r.Name) > 0:
+            labels, err := ex.labelSVC.FindLabels(ctx, influxdb.LabelFilter{Name: r.Name})
+            if err != nil {
+                return err
+            }
+            for _, l := range labels {
+                mapResource(l.OrgID, uniqByNameResID, KindLabel, LabelToObject(r.Name, *l))
+            }
+        }
     case r.Kind.is(KindNotificationEndpoint),
         r.Kind.is(KindNotificationEndpointHTTP),
         r.Kind.is(KindNotificationEndpointPagerDuty),
         r.Kind.is(KindNotificationEndpointSlack):
-        e, err := ex.endpointSVC.FindNotificationEndpointByID(ctx, r.ID)
-        if err != nil {
-            return err
-        }
-        mapResource(e.GetOrgID(), uniqByNameResID, KindNotificationEndpoint, NotificationEndpointToObject(r.Name, e))
+        var endpoints []influxdb.NotificationEndpoint
+
+        switch {
+        case r.ID != influxdb.ID(0):
+            notifEndpoint, err := ex.endpointSVC.FindNotificationEndpointByID(ctx, r.ID)
+            if err != nil {
+                return err
+            }
+            endpoints = append(endpoints, notifEndpoint)
+        case len(r.Name) != 0:
+            allEndpoints, _, err := ex.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{})
+            if err != nil {
+                return err
+            }
+            for _, notifEndpoint := range allEndpoints {
+                if notifEndpoint.GetName() != r.Name || notifEndpoint == nil {
+                    continue
+                }
+                endpoints = append(endpoints, notifEndpoint)
+            }
+        }
+        if len(endpoints) == 0 {
+            return errors.New("no notification endpoints found")
+        }
+
+        for _, e := range endpoints {
+            mapResource(e.GetOrgID(), uniqByNameResID, KindNotificationEndpoint, NotificationEndpointToObject(r.Name, e))
+        }
     case r.Kind.is(KindNotificationRule):
-        rule, ruleEndpoint, err := ex.getEndpointRule(ctx, r.ID)
-        if err != nil {
-            return err
-        }
-
-        endpointKey := newExportKey(ruleEndpoint.GetOrgID(), uniqByNameResID, KindNotificationEndpoint, ruleEndpoint.GetName())
-        object, ok := ex.mObjects[endpointKey]
-        if !ok {
-            mapResource(ruleEndpoint.GetOrgID(), uniqByNameResID, KindNotificationEndpoint, NotificationEndpointToObject("", ruleEndpoint))
-            object = ex.mObjects[endpointKey]
-        }
-        endpointObjectName := object.Name()
-
-        mapResource(rule.GetOrgID(), rule.GetID(), KindNotificationRule, NotificationRuleToObject(r.Name, endpointObjectName, rule))
+        var rules []influxdb.NotificationRule
+
+        switch {
+        case r.ID != influxdb.ID(0):
+            r, err := ex.ruleSVC.FindNotificationRuleByID(ctx, r.ID)
+            if err != nil {
+                return err
+            }
+            rules = append(rules, r)
+        case len(r.Name) != 0:
+            allRules, _, err := ex.ruleSVC.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{})
+            if err != nil {
+                return err
+            }
+            for _, rule := range allRules {
+                if rule.GetName() != r.Name {
+                    continue
+                }
+                rules = append(rules, rule)
+            }
+        }
+        if len(rules) == 0 {
+            return errors.New("no notification rules found")
+        }
+
+        for _, rule := range rules {
+            ruleEndpoint, err := ex.endpointSVC.FindNotificationEndpointByID(ctx, rule.GetEndpointID())
+            if err != nil {
+                return err
+            }
+
+            endpointKey := newExportKey(ruleEndpoint.GetOrgID(), uniqByNameResID, KindNotificationEndpoint, ruleEndpoint.GetName())
+            object, ok := ex.mObjects[endpointKey]
+            if !ok {
+                mapResource(ruleEndpoint.GetOrgID(), uniqByNameResID, KindNotificationEndpoint, NotificationEndpointToObject("", ruleEndpoint))
+                object = ex.mObjects[endpointKey]
+            }
+            endpointObjectName := object.Name()
+
+            mapResource(rule.GetOrgID(), rule.GetID(), KindNotificationRule, NotificationRuleToObject(r.Name, endpointObjectName, rule))
+        }
     case r.Kind.is(KindTask):
-        t, err := ex.taskSVC.FindTaskByID(ctx, r.ID)
-        if err != nil {
-            return err
-        }
-        mapResource(t.OrganizationID, t.ID, KindTask, TaskToObject(r.Name, *t))
+        switch {
+        case r.ID != influxdb.ID(0):
+            t, err := ex.taskSVC.FindTaskByID(ctx, r.ID)
+            if err != nil {
+                return err
+            }
+            mapResource(t.OrganizationID, t.ID, KindTask, TaskToObject(r.Name, *t))
+        case len(r.Name) > 0:
+            tasks, n, err := ex.taskSVC.FindTasks(ctx, influxdb.TaskFilter{Name: &r.Name})
+            if err != nil {
+                return err
+            }
+            if n < 1 {
+                return errors.New("no tasks found")
+            }
+            for _, t := range tasks {
+                mapResource(t.OrganizationID, t.ID, KindTask, TaskToObject(r.Name, *t))
+            }
+        }
     case r.Kind.is(KindTelegraf):
-        t, err := ex.teleSVC.FindTelegrafConfigByID(ctx, r.ID)
-        if err != nil {
-            return err
-        }
-        mapResource(t.OrgID, t.ID, KindTelegraf, TelegrafToObject(r.Name, *t))
+        switch {
+        case r.ID != influxdb.ID(0):
+            t, err := ex.teleSVC.FindTelegrafConfigByID(ctx, r.ID)
+            if err != nil {
+                return err
+            }
+            mapResource(t.OrgID, t.ID, KindTelegraf, TelegrafToObject(r.Name, *t))
+        case len(r.Name) > 0:
+            telegrafs, _, err := ex.teleSVC.FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{})
+            if err != nil {
+                return err
+            }
+
+            var mapped bool
+            for _, t := range telegrafs {
+                if t.Name != r.Name {
+                    continue
+                }
+                mapResource(t.OrgID, t.ID, KindTelegraf, TelegrafToObject(r.Name, *t))
+                mapped = true
+            }
+            if !mapped {
+                return errors.New("no telegraf configs found")
+            }
+        }
     case r.Kind.is(KindVariable):
-        v, err := ex.varSVC.FindVariableByID(ctx, r.ID)
-        if err != nil {
-            return err
-        }
-        mapResource(v.OrganizationID, uniqByNameResID, KindVariable, VariableToObject(r.Name, *v))
+        switch {
+        case r.ID != influxdb.ID(0):
+            v, err := ex.varSVC.FindVariableByID(ctx, r.ID)
+            if err != nil {
+                return err
+            }
+            mapResource(v.OrganizationID, uniqByNameResID, KindVariable, VariableToObject(r.Name, *v))
+        case len(r.Name) > 0:
+            variables, err := ex.varSVC.FindVariables(ctx, influxdb.VariableFilter{})
+            if err != nil {
+                return err
+            }
+
+            var mapped bool
+            for _, v := range variables {
+                if v.Name != r.Name {
+                    continue
+                }
+                mapResource(v.OrganizationID, uniqByNameResID, KindVariable, VariableToObject(r.Name, *v))
+                mapped = true
+            }
+            if !mapped {
+                return errors.New("no variables found")
+            }
+        }
     default:
         return errors.New("unsupported kind provided: " + string(r.Kind))
     }
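Each case above follows the same shape: an explicit ID wins, otherwise the resource is looked up (or listed and filtered) by name, and finding nothing is reported as an error rather than a silent no-op. A condensed standalone sketch of that shape for buckets, assuming the `influxdb.BucketService` interface used in the diff and the `github.com/influxdata/influxdb/v2` import path; the helper name `findBuckets` is illustrative:

```go
package export

import (
	"context"
	"errors"

	"github.com/influxdata/influxdb/v2"
)

// findBuckets mirrors the KindBucket case: the ID takes precedence when set,
// the name becomes a filter otherwise, and an empty result is an error so the
// export fails loudly instead of producing an empty template.
func findBuckets(ctx context.Context, svc influxdb.BucketService, id influxdb.ID, name string) ([]*influxdb.Bucket, error) {
	filter := influxdb.BucketFilter{}
	if id != influxdb.ID(0) {
		filter.ID = &id
	}
	if len(name) > 0 {
		filter.Name = &name
	}

	bkts, _, err := svc.FindBuckets(ctx, filter)
	if err != nil {
		return nil, err
	}
	if len(bkts) == 0 {
		return nil, errors.New("no buckets found")
	}
	return bkts, nil
}
```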
@@ -319,6 +494,10 @@ func (ex *resourceExporter) resourceCloneAssociationsGen(ctx context.Context, la
             return nil, shouldSkip, nil
         }
 
+        if len(r.Name) > 0 && r.ID == influxdb.ID(0) {
+            return nil, false, nil
+        }
+
         labels, err := ex.labelSVC.FindResourceLabels(ctx, influxdb.LabelMappingFilter{
             ResourceID: r.ID,
             ResourceType: r.Kind.ResourceType(),
@@ -355,7 +534,7 @@ func (ex *resourceExporter) resourceCloneAssociationsGen(ctx context.Context, la
             }
             labelObject.Metadata[fieldName] = metaName
 
-            k := newExportKey(l.OrgID, ex.uniqByNameResID(), KindLabel, l.Name)
+            k := newExportKey(l.OrgID, uniqByNameResID, KindLabel, l.Name)
             existing, ok := ex.mObjects[k]
             if ok {
                 associations = append(associations, ObjectAssociation{
@@ -379,20 +558,6 @@ func (ex *resourceExporter) resourceCloneAssociationsGen(ctx context.Context, la
     return cloneFn, nil
 }
 
-func (ex *resourceExporter) getEndpointRule(ctx context.Context, id influxdb.ID) (influxdb.NotificationRule, influxdb.NotificationEndpoint, error) {
-    rule, err := ex.ruleSVC.FindNotificationRuleByID(ctx, id)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    ruleEndpoint, err := ex.endpointSVC.FindNotificationEndpointByID(ctx, rule.GetEndpointID())
-    if err != nil {
-        return nil, nil, err
-    }
-
-    return rule, ruleEndpoint, nil
-}
-
 func (ex *resourceExporter) uniqName() string {
     return uniqMetaName(ex.nameGen, idGenerator, ex.mPkgNames)
 }
@@ -409,21 +574,6 @@ func uniqMetaName(nameGen NameGenerator, idGen influxdb.IDGenerator, existingNam
     return name
 }
 
-func findDashboardByIDFull(ctx context.Context, dashSVC influxdb.DashboardService, id influxdb.ID) (*influxdb.Dashboard, error) {
-    dash, err := dashSVC.FindDashboardByID(ctx, id)
-    if err != nil {
-        return nil, err
-    }
-
-    for _, cell := range dash.Cells {
-        v, err := dashSVC.GetDashboardCellView(ctx, id, cell.ID)
-        if err != nil {
-            return nil, err
-        }
-        cell.View = v
-    }
-
-    return dash, nil
-}
-
 func uniqResourcesToClone(resources []ResourceToClone) []ResourceToClone {
     type key struct {
         kind Kind

View File

@@ -860,7 +860,8 @@ func (p *Template) graphLabels() *parseErr {
 func (p *Template) graphChecks() *parseErr {
     p.mChecks = make(map[string]*check)
-    tracker := p.trackNames(true)
+    // todo: what is the business goal wrt having unique names? (currently duplicates are allowed)
+    tracker := p.trackNames(false)
     checkKinds := []struct {
         kind Kind

View File

@@ -2123,7 +2123,6 @@ func (v *variable) summarize() SummaryVariable {
             envRefs = append(envRefs, convertRefToRefSummary(field, sel))
         }
     }
-
     return SummaryVariable{
         SummaryIdentifier: SummaryIdentifier{
             Kind: KindVariable,

View File

@@ -904,40 +904,42 @@ spec:
 `,
         },
     },
-    {
-        kind: KindCheckDeadman,
-        resErr: testTemplateResourceError{
-            name: "duplicate meta name and spec name",
-            validationErrs: 1,
-            valFields: []string{fieldSpec, fieldAssociations},
-            templateStr: `
-apiVersion: influxdata.com/v2alpha1
-kind: CheckDeadman
-metadata:
-  name: check-1
-spec:
-  every: 5m
-  level: cRiT
-  query: >
-    from(bucket: "rucket_1") |> yield(name: "mean")
-  statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }"
-  timeSince: 90s
----
-apiVersion: influxdata.com/v2alpha1
-kind: CheckDeadman
-metadata:
-  name: valid-name
-spec:
-  name: check-1
-  every: 5m
-  level: cRiT
-  query: >
-    from(bucket: "rucket_1") |> yield(name: "mean")
-  statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }"
-  timeSince: 90s
-`,
-        },
-    },
+    /* checks are not name unique
+    {
+        kind: KindCheckDeadman,
+        resErr: testTemplateResourceError{
+            name: "duplicate meta name and spec name",
+            validationErrs: 1,
+            valFields: []string{fieldSpec, fieldAssociations},
+            templateStr: `
+apiVersion: influxdata.com/v2alpha1
+kind: CheckDeadman
+metadata:
+  name: check-1
+spec:
+  every: 5m
+  level: cRiT
+  query: >
+    from(bucket: "rucket_1") |> yield(name: "mean")
+  statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }"
+  timeSince: 90s
+---
+apiVersion: influxdata.com/v2alpha1
+kind: CheckDeadman
+metadata:
+  name: valid-name
+spec:
+  name: check-1
+  every: 5m
+  level: cRiT
+  query: >
+    from(bucket: "rucket_1") |> yield(name: "mean")
+  statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }"
+  timeSince: 90s
+`,
+        },
+    },
+    */
 }
 
 for _, tt := range tests {
for _, tt := range tests { for _, tt := range tests {

View File

@@ -72,6 +72,7 @@ type (
     StackResource struct {
         APIVersion string
         ID influxdb.ID
+        Name string
         Kind Kind
         MetaName string
         Associations []StackResourceAssociation
@@ -631,6 +632,7 @@ func (s *Service) Export(ctx context.Context, setters ...ExportOptFn) (*Template
             Kind: r.Kind,
             ID: r.ID,
             MetaName: r.MetaName,
+            Name: r.Name,
         }))
     }
@@ -694,6 +696,7 @@ func (s *Service) cloneOrgBuckets(ctx context.Context, orgID influxdb.ID) ([]Res
         resources = append(resources, ResourceToClone{
             Kind: KindBucket,
             ID: b.ID,
+            Name: b.Name,
         })
     }
     return resources, nil
@@ -712,6 +715,7 @@ func (s *Service) cloneOrgChecks(ctx context.Context, orgID influxdb.ID) ([]Reso
         resources = append(resources, ResourceToClone{
             Kind: KindCheck,
             ID: c.GetID(),
+            Name: c.GetName(),
         })
     }
     return resources, nil
@@ -736,9 +740,11 @@ func (s *Service) cloneOrgDashboards(ctx context.Context, orgID influxdb.ID) ([]
 }
 
 func (s *Service) cloneOrgLabels(ctx context.Context, orgID influxdb.ID) ([]ResourceToClone, error) {
-    labels, err := s.labelSVC.FindLabels(ctx, influxdb.LabelFilter{
+    filter := influxdb.LabelFilter{
         OrgID: &orgID,
-    }, influxdb.FindOptions{Limit: 10000})
+    }
+
+    labels, err := s.labelSVC.FindLabels(ctx, filter, influxdb.FindOptions{Limit: 100})
     if err != nil {
         return nil, ierrors.Wrap(err, "finding labels")
     }
@@ -748,6 +754,7 @@ func (s *Service) cloneOrgLabels(ctx context.Context, orgID influxdb.ID) ([]Reso
         resources = append(resources, ResourceToClone{
             Kind: KindLabel,
             ID: l.ID,
+            Name: l.Name,
         })
     }
     return resources, nil
@@ -766,6 +773,7 @@ func (s *Service) cloneOrgNotificationEndpoints(ctx context.Context, orgID influ
         resources = append(resources, ResourceToClone{
             Kind: KindNotificationEndpoint,
             ID: e.GetID(),
+            Name: e.GetName(),
         })
     }
     return resources, nil
@@ -784,6 +792,7 @@ func (s *Service) cloneOrgNotificationRules(ctx context.Context, orgID influxdb.
         resources = append(resources, ResourceToClone{
             Kind: KindNotificationRule,
             ID: r.GetID(),
+            Name: r.GetName(),
         })
     }
     return resources, nil
@@ -869,12 +878,15 @@ func (s *Service) cloneOrgVariables(ctx context.Context, orgID influxdb.ID) ([]R
     return resources, nil
 }
 
-type cloneResFn func(context.Context, influxdb.ID) ([]ResourceToClone, error)
+type (
+    cloneResFn func(context.Context, influxdb.ID) ([]ResourceToClone, error)
+    resClone struct {
+        resType influxdb.ResourceType
+        cloneFn cloneResFn
+    }
+)
 
-func (s *Service) filterOrgResourceKinds(resourceKindFilters []Kind) []struct {
-    resType influxdb.ResourceType
-    cloneFn cloneResFn
-} {
+func (s *Service) filterOrgResourceKinds(resourceKindFilters []Kind) []resClone {
     mKinds := map[Kind]cloneResFn{
         KindBucket: s.cloneOrgBuckets,
         KindCheck: s.cloneOrgChecks,
@@ -887,23 +899,14 @@ func (s *Service) filterOrgResourceKinds(resourceKindFilters []Kind) []struct {
         KindVariable: s.cloneOrgVariables,
     }
 
-    newResGen := func(resType influxdb.ResourceType, cloneFn cloneResFn) struct {
-        resType influxdb.ResourceType
-        cloneFn cloneResFn
-    } {
-        return struct {
-            resType influxdb.ResourceType
-            cloneFn cloneResFn
-        }{
+    newResGen := func(resType influxdb.ResourceType, cloneFn cloneResFn) resClone {
+        return resClone{
             resType: resType,
             cloneFn: cloneFn,
         }
     }
 
-    var resourceTypeGens []struct {
-        resType influxdb.ResourceType
-        cloneFn cloneResFn
-    }
+    var resourceTypeGens []resClone
     if len(resourceKindFilters) == 0 {
         for k, cloneFn := range mKinds {
             resourceTypeGens = append(resourceTypeGens, newResGen(k.ResourceType(), cloneFn))
View File

@@ -148,7 +148,7 @@ func (s *loggingMW) Export(ctx context.Context, opts ...ExportOptFn) (template *
             s.logger.Error("failed to export template", zap.Error(err), dur)
             return
         }
-        s.logger.Info("failed to export template", append(s.summaryLogFields(template.Summary()), dur)...)
+        s.logger.Info("exported template", append(s.summaryLogFields(template.Summary()), dur)...)
     }(time.Now())
     return s.next.Export(ctx, opts...)
 }

File diff suppressed because it is too large.