parent 5776350a53
commit b9e6744807
@@ -37,7 +37,7 @@ func exec(dbPath, out string) error {
 return err
 }

-pkg := &pkger.Pkg{
+pkg := &pkger.Template{
 Objects: make([]pkger.Object, 0),
 }

@@ -231,7 +231,7 @@ func (b *cmdTemplateBuilder) applyRunEFn(cmd *cobra.Command, args []string) erro
 }

 opts := []pkger.ApplyOptFn{
-pkger.ApplyWithPkg(template),
+pkger.ApplyWithTemplate(template),
 pkger.ApplyWithEnvRefs(providedEnvRefs),
 pkger.ApplyWithStackID(stackID),
 }
@@ -1059,7 +1059,7 @@ func (b *cmdTemplateBuilder) exportTemplate(w io.Writer, templateSVC pkger.SVC,
 return b.writeTemplate(w, outPath, template)
 }

-func (b *cmdTemplateBuilder) writeTemplate(w io.Writer, outPath string, template *pkger.Pkg) error {
+func (b *cmdTemplateBuilder) writeTemplate(w io.Writer, outPath string, template *pkger.Template) error {
 buf, err := createTemplateBuf(template, outPath)
 if err != nil {
 return err
@@ -1073,7 +1073,7 @@ func (b *cmdTemplateBuilder) writeTemplate(w io.Writer, outPath string, template
 return ioutil.WriteFile(outPath, buf.Bytes(), os.ModePerm)
 }

-func (b *cmdTemplateBuilder) readRawTemplatesFromFiles(filePaths []string, recurse bool) ([]*pkger.Pkg, error) {
+func (b *cmdTemplateBuilder) readRawTemplatesFromFiles(filePaths []string, recurse bool) ([]*pkger.Template, error) {
 mFiles := make(map[string]struct{})
 for _, f := range filePaths {
 files, err := readFilesFromPath(f, recurse)
@@ -1085,7 +1085,7 @@ func (b *cmdTemplateBuilder) readRawTemplatesFromFiles(filePaths []string, recur
 }
 }

-var rawTemplates []*pkger.Pkg
+var rawTemplates []*pkger.Template
 for f := range mFiles {
 template, err := pkger.Parse(b.convertFileEncoding(f), pkger.FromFile(f), pkger.ValidSkipParseError())
 if err != nil {
@@ -1097,13 +1097,13 @@ func (b *cmdTemplateBuilder) readRawTemplatesFromFiles(filePaths []string, recur
 return rawTemplates, nil
 }

-func (b *cmdTemplateBuilder) readRawTemplatesFromURLs(urls []string) ([]*pkger.Pkg, error) {
+func (b *cmdTemplateBuilder) readRawTemplatesFromURLs(urls []string) ([]*pkger.Template, error) {
 mURLs := make(map[string]struct{})
 for _, f := range urls {
 mURLs[f] = struct{}{}
 }

-var rawTemplates []*pkger.Pkg
+var rawTemplates []*pkger.Template
 for u := range mURLs {
 template, err := pkger.Parse(b.convertURLEncoding(u), pkger.FromHTTPRequest(u), pkger.ValidSkipParseError())
 if err != nil {
@@ -1114,7 +1114,7 @@ func (b *cmdTemplateBuilder) readRawTemplatesFromURLs(urls []string) ([]*pkger.P
 return rawTemplates, nil
 }

-func (b *cmdTemplateBuilder) readTemplate() (*pkger.Pkg, bool, error) {
+func (b *cmdTemplateBuilder) readTemplate() (*pkger.Template, bool, error) {
 var remotes, files []string
 for _, rawURL := range append(b.files, b.urls...) {
 u, err := url.Parse(rawURL)
@@ -1280,7 +1280,7 @@ func toInfluxIDs(args []string) ([]influxdb.ID, error) {
 return ids, nil
 }

-func createTemplateBuf(template *pkger.Pkg, outPath string) (*bytes.Buffer, error) {
+func createTemplateBuf(template *pkger.Template, outPath string) (*bytes.Buffer, error) {
 var encoding pkger.Encoding
 switch ext := filepath.Ext(outPath); ext {
 case ".json":
@@ -1601,7 +1601,7 @@ func (b *cmdTemplateBuilder) printTemplateDiff(diff pkger.Diff) error {
 Title("Label Associations").
 SetHeaders(
 "Resource Type",
-"Resource Package Name", "Resource Name", "Resource ID",
+"Resource Meta Name", "Resource Name", "Resource ID",
 "Label Package Name", "Label Name", "Label ID",
 )
@@ -1718,7 +1718,7 @@ func (b *cmdTemplateBuilder) printTemplateSummary(stackID influxdb.ID, sum pkger
 v.Description,
 v.Every,
 v.Offset,
-v.EndpointPkgName,
+v.EndpointMetaName,
 v.EndpointID.String(),
 v.EndpointType,
 }

@@ -35,7 +35,7 @@ func Test_Template_Commands(t *testing.T) {
 }

 t.Run("export all", func(t *testing.T) {
-defaultAssertFn := func(t *testing.T, pkg *pkger.Pkg) {
+defaultAssertFn := func(t *testing.T, pkg *pkger.Template) {
 t.Helper()
 sum := pkg.Summary()
@@ -47,7 +47,7 @@ func Test_Template_Commands(t *testing.T) {

 tests := []struct {
 templateFileArgs
-assertFn func(t *testing.T, pkg *pkger.Pkg)
+assertFn func(t *testing.T, pkg *pkger.Template)
 }{
 {
 templateFileArgs: templateFileArgs{
@@ -91,7 +91,7 @@ func Test_Template_Commands(t *testing.T) {
 "--filter=labelName=foo",
 },
 },
-assertFn: func(t *testing.T, pkg *pkger.Pkg) {
+assertFn: func(t *testing.T, pkg *pkger.Template) {
 defaultAssertFn(t, pkg)

 sum := pkg.Summary()
@@ -111,7 +111,7 @@ func Test_Template_Commands(t *testing.T) {
 "--filter=labelName=bar",
 },
 },
-assertFn: func(t *testing.T, pkg *pkger.Pkg) {
+assertFn: func(t *testing.T, pkg *pkger.Template) {
 defaultAssertFn(t, pkg)

 sum := pkg.Summary()
@@ -131,7 +131,7 @@ func Test_Template_Commands(t *testing.T) {
 "--filter=resourceKind=Dashboard",
 },
 },
-assertFn: func(t *testing.T, pkg *pkger.Pkg) {
+assertFn: func(t *testing.T, pkg *pkger.Template) {
 sum := pkg.Summary()

 require.Len(t, sum.Dashboards, 1)
@@ -149,7 +149,7 @@ func Test_Template_Commands(t *testing.T) {
 "--filter=resourceKind=Bucket",
 },
 },
-assertFn: func(t *testing.T, pkg *pkger.Pkg) {
+assertFn: func(t *testing.T, pkg *pkger.Template) {
 sum := pkg.Summary()

 require.Len(t, sum.Buckets, 1)
@@ -170,7 +170,7 @@ func Test_Template_Commands(t *testing.T) {
 "--filter=resourceKind=Bucket",
 },
 },
-assertFn: func(t *testing.T, pkg *pkger.Pkg) {
+assertFn: func(t *testing.T, pkg *pkger.Template) {
 sum := pkg.Summary()

 require.Len(t, sum.Labels, 1)
@@ -185,7 +185,7 @@ func Test_Template_Commands(t *testing.T) {

 cmdFn := func(f *globalFlags, opt genericCLIOpts) *cobra.Command {
 pkgSVC := &fakePkgSVC{
-exportFn: func(_ context.Context, opts ...pkger.ExportOptFn) (*pkger.Pkg, error) {
+exportFn: func(_ context.Context, opts ...pkger.ExportOptFn) (*pkger.Template, error) {
 opt := pkger.ExportOpt{}
 for _, o := range opts {
 if err := o(&opt); err != nil {
@@ -198,7 +198,7 @@ func Test_Template_Commands(t *testing.T) {
 return nil, errors.New("did not provide expected orgID")
 }

-var pkg pkger.Pkg
+var pkg pkger.Template
 for _, labelName := range orgIDOpt.LabelNames {
 pkg.Objects = append(pkg.Objects, pkger.Object{
 APIVersion: pkger.APIVersion,
@@ -324,7 +324,7 @@ func Test_Template_Commands(t *testing.T) {

 cmdFn := func(f *globalFlags, opt genericCLIOpts) *cobra.Command {
 pkgSVC := &fakePkgSVC{
-exportFn: func(_ context.Context, opts ...pkger.ExportOptFn) (*pkger.Pkg, error) {
+exportFn: func(_ context.Context, opts ...pkger.ExportOptFn) (*pkger.Template, error) {
 var opt pkger.ExportOpt
 for _, o := range opts {
 if err := o(&opt); err != nil {
@@ -332,7 +332,7 @@ func Test_Template_Commands(t *testing.T) {
 }
 }

-var pkg pkger.Pkg
+var pkg pkger.Template
 for _, rc := range opt.Resources {
 if rc.Kind == pkger.KindNotificationEndpoint {
 rc.Kind = pkger.KindNotificationEndpointHTTP
@@ -365,7 +365,7 @@ func Test_Template_Commands(t *testing.T) {
 "--variables="+idsStr(tt.varIDs...),
 )

-testPkgWrites(t, cmdFn, tt.templateFileArgs, func(t *testing.T, pkg *pkger.Pkg) {
+testPkgWrites(t, cmdFn, tt.templateFileArgs, func(t *testing.T, pkg *pkger.Template) {
 sum := pkg.Summary()

 kindToName := func(k pkger.Kind, id influxdb.ID) string {
@@ -419,7 +419,7 @@ func Test_Template_Commands(t *testing.T) {
 t.Run("by stack", func(t *testing.T) {
 cmdFn := func(f *globalFlags, opt genericCLIOpts) *cobra.Command {
 pkgSVC := &fakePkgSVC{
-exportFn: func(_ context.Context, opts ...pkger.ExportOptFn) (*pkger.Pkg, error) {
+exportFn: func(_ context.Context, opts ...pkger.ExportOptFn) (*pkger.Template, error) {
 var opt pkger.ExportOpt
 for _, o := range opts {
 if err := o(&opt); err != nil {
@@ -430,7 +430,7 @@ func Test_Template_Commands(t *testing.T) {
 if opt.StackID != 1 {
 return nil, errors.New("wrong stack ID, got: " + opt.StackID.String())
 }
-return &pkger.Pkg{
+return &pkger.Template{
 Objects: []pkger.Object{
 pkger.LabelToObject("", influxdb.Label{
 Name: "label-1",
@@ -451,7 +451,7 @@ func Test_Template_Commands(t *testing.T) {
 args: []string{"export", "--stack-id=" + influxdb.ID(1).String()},
 }

-testPkgWrites(t, cmdFn, tmplFileArgs, func(t *testing.T, pkg *pkger.Pkg) {
+testPkgWrites(t, cmdFn, tmplFileArgs, func(t *testing.T, pkg *pkger.Template) {
 sum := pkg.Summary()

 require.Len(t, sum.Labels, 1)
@@ -694,7 +694,7 @@ type templateFileArgs struct {
 envVars map[string]string
 }

-func testPkgWrites(t *testing.T, newCmdFn func(*globalFlags, genericCLIOpts) *cobra.Command, args templateFileArgs, assertFn func(t *testing.T, pkg *pkger.Pkg)) {
+func testPkgWrites(t *testing.T, newCmdFn func(*globalFlags, genericCLIOpts) *cobra.Command, args templateFileArgs, assertFn func(t *testing.T, pkg *pkger.Template)) {
 t.Helper()

 defer addEnvVars(t, args.envVars)()
@@ -713,7 +713,7 @@ func testPkgWrites(t *testing.T, newCmdFn func(*globalFlags, genericCLIOpts) *co
 t.Run(path.Join(args.name, "buffer"), testPkgWritesToBuffer(wrappedCmdFn, args, assertFn))
 }

-func testPkgWritesFile(newCmdFn func(w io.Writer) *cobra.Command, args templateFileArgs, assertFn func(t *testing.T, pkg *pkger.Pkg)) func(t *testing.T) {
+func testPkgWritesFile(newCmdFn func(w io.Writer) *cobra.Command, args templateFileArgs, assertFn func(t *testing.T, pkg *pkger.Template)) func(t *testing.T) {
 return func(t *testing.T) {
 t.Helper()
@@ -734,7 +734,7 @@ func testPkgWritesFile(newCmdFn func(w io.Writer) *cobra.Command, args templateF
 }
 }

-func testPkgWritesToBuffer(newCmdFn func(w io.Writer) *cobra.Command, args templateFileArgs, assertFn func(t *testing.T, pkg *pkger.Pkg)) func(t *testing.T) {
+func testPkgWritesToBuffer(newCmdFn func(w io.Writer) *cobra.Command, args templateFileArgs, assertFn func(t *testing.T, pkg *pkger.Template)) func(t *testing.T) {
 return func(t *testing.T) {
 t.Helper()
@@ -753,7 +753,7 @@ func testPkgWritesToBuffer(newCmdFn func(w io.Writer) *cobra.Command, args templ

 type fakePkgSVC struct {
 initStackFn func(ctx context.Context, userID influxdb.ID, stack pkger.Stack) (pkger.Stack, error)
-exportFn func(ctx context.Context, setters ...pkger.ExportOptFn) (*pkger.Pkg, error)
+exportFn func(ctx context.Context, setters ...pkger.ExportOptFn) (*pkger.Template, error)
 dryRunFn func(ctx context.Context, orgID, userID influxdb.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error)
 applyFn func(ctx context.Context, orgID, userID influxdb.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error)
 }
@@ -775,7 +775,7 @@ func (f *fakePkgSVC) DeleteStack(ctx context.Context, identifiers struct{ OrgID,
 panic("not implemented")
 }

-func (f *fakePkgSVC) ExportStack(ctx context.Context, orgID, stackID influxdb.ID) (*pkger.Pkg, error) {
+func (f *fakePkgSVC) ExportStack(ctx context.Context, orgID, stackID influxdb.ID) (*pkger.Template, error) {
 panic("not implemented")
 }
@@ -787,7 +787,7 @@ func (f *fakePkgSVC) UpdateStack(ctx context.Context, upd pkger.StackUpdate) (pk
 panic("not implemented")
 }

-func (f *fakePkgSVC) Export(ctx context.Context, setters ...pkger.ExportOptFn) (*pkger.Pkg, error) {
+func (f *fakePkgSVC) Export(ctx context.Context, setters ...pkger.ExportOptFn) (*pkger.Template, error) {
 if f.exportFn != nil {
 return f.exportFn(ctx, setters...)
 }

@@ -75,8 +75,8 @@ func TestLauncher_Pkger(t *testing.T) {
 }
 }

-newTemplate := func(objects ...pkger.Object) *pkger.Pkg {
-return &pkger.Pkg{Objects: objects}
+newTemplate := func(objects ...pkger.Object) *pkger.Template {
+return &pkger.Template{Objects: objects}
 }

 newBucketObject := func(pkgName, name, desc string) pkger.Object {
@@ -280,7 +280,7 @@ func TestLauncher_Pkger(t *testing.T) {
 )

 impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(allResourcesPkg),
+pkger.ApplyWithTemplate(allResourcesPkg),
 pkger.ApplyWithStackID(newStack.ID),
 )
 require.NoError(t, err)
@@ -519,11 +519,11 @@ func TestLauncher_Pkger(t *testing.T) {
 })

 t.Run("apply a pkg with a stack and associations", func(t *testing.T) {
-testLabelMappingFn := func(t *testing.T, stackID influxdb.ID, pkg *pkger.Pkg, assertAssociatedLabelsFn func(pkger.Summary, []*influxdb.Label, influxdb.ResourceType)) pkger.Summary {
+testLabelMappingFn := func(t *testing.T, stackID influxdb.ID, pkg *pkger.Template, assertAssociatedLabelsFn func(pkger.Summary, []*influxdb.Label, influxdb.ResourceType)) pkger.Summary {
 t.Helper()

 impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(pkg),
+pkger.ApplyWithTemplate(pkg),
 pkger.ApplyWithStackID(stackID),
 )
 require.NoError(t, err)
@@ -667,7 +667,7 @@ func TestLauncher_Pkger(t *testing.T) {

 pkg := newTemplate(append(newObjectsFn(), labelObj)...)
 _, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(pkg),
+pkger.ApplyWithTemplate(pkg),
 pkger.ApplyWithStackID(stack.ID),
 )
 require.Error(t, err)
@@ -746,7 +746,7 @@ func TestLauncher_Pkger(t *testing.T) {
 )

 impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(initialPkg),
+pkger.ApplyWithTemplate(initialPkg),
 pkger.ApplyWithStackID(stack.ID),
 )
 require.NoError(t, err)
@@ -779,7 +779,7 @@ func TestLauncher_Pkger(t *testing.T) {
 require.Len(t, summary.NotificationRules, 1)
 assert.NotZero(t, summary.NotificationRules[0].ID)
 assert.Equal(t, "rule_0", summary.NotificationRules[0].Name)
-assert.Equal(t, initialEndpointPkgName, summary.NotificationRules[0].EndpointPkgName)
+assert.Equal(t, initialEndpointPkgName, summary.NotificationRules[0].EndpointMetaName)
 assert.Equal(t, "init desc", summary.NotificationRules[0].Description)

 require.Len(t, summary.Tasks, 1)
@@ -868,7 +868,7 @@ func TestLauncher_Pkger(t *testing.T) {
 newVariableObject(initialSum.Variables[0].MetaName, updateVariableName, ""),
 )
 impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(updatedPkg),
+pkger.ApplyWithTemplate(updatedPkg),
 pkger.ApplyWithStackID(stack.ID),
 )
 require.NoError(t, err)
@@ -985,7 +985,7 @@ func TestLauncher_Pkger(t *testing.T) {
 newVariableObject("z-var-rolls-back", "", ""),
 )
 _, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(pkgWithDelete),
+pkger.ApplyWithTemplate(pkgWithDelete),
 pkger.ApplyWithStackID(stack.ID),
 )
 require.Error(t, err)
@@ -1069,7 +1069,7 @@ func TestLauncher_Pkger(t *testing.T) {
 newVariableObject("non-existent-var", "", ""),
 )
 impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(allNewResourcesPkg),
+pkger.ApplyWithTemplate(allNewResourcesPkg),
 pkger.ApplyWithStackID(stack.ID),
 )
 require.NoError(t, err)
@@ -1178,7 +1178,7 @@ func TestLauncher_Pkger(t *testing.T) {
 })

 t.Run("apply should handle cases where users have changed platform data", func(t *testing.T) {
-initializeStackPkg := func(t *testing.T, pkg *pkger.Pkg) (influxdb.ID, func(), pkger.Summary) {
+initializeStackPkg := func(t *testing.T, pkg *pkger.Template) (influxdb.ID, func(), pkger.Summary) {
 t.Helper()

 stack, cleanup := newStackFn(t, pkger.Stack{})
@@ -1189,7 +1189,7 @@ func TestLauncher_Pkger(t *testing.T) {
 }()

 impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(pkg),
+pkger.ApplyWithTemplate(pkg),
 pkger.ApplyWithStackID(stack.ID),
 )
 require.NoError(t, err)
@@ -1226,7 +1226,7 @@ func TestLauncher_Pkger(t *testing.T) {
 t.Run("should create new resource when attempting to update", func(t *testing.T) {
 testUserDeletedVariable(t, func(t *testing.T, stackID influxdb.ID, initialVarObj pkger.Object, initialSum pkger.Summary) {
 pkg := newTemplate(initialVarObj)
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stackID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID))
 require.NoError(t, err)

 updateSum := impact.Summary
@@ -1264,7 +1264,7 @@ func TestLauncher_Pkger(t *testing.T) {
 t.Run("should create new resource when attempting to update", func(t *testing.T) {
 testUserDeletedBucket(t, func(t *testing.T, stackID influxdb.ID, initialObj pkger.Object, initialSum pkger.Summary) {
 pkg := newTemplate(initialObj)
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stackID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID))
 require.NoError(t, err)

 updateSum := impact.Summary
@@ -1302,7 +1302,7 @@ func TestLauncher_Pkger(t *testing.T) {
 t.Run("should create new resource when attempting to update", func(t *testing.T) {
 testUserDeletedCheck(t, func(t *testing.T, stackID influxdb.ID, initialObj pkger.Object, initialSum pkger.Summary) {
 pkg := newTemplate(initialObj)
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stackID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID))
 require.NoError(t, err)

 updateSum := impact.Summary
@@ -1341,7 +1341,7 @@ func TestLauncher_Pkger(t *testing.T) {
 t.Run("should create new resource when attempting to update", func(t *testing.T) {
 testUserDeletedDashboard(t, func(t *testing.T, stackID influxdb.ID, initialObj pkger.Object, initialSum pkger.Summary) {
 pkg := newTemplate(initialObj)
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stackID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID))
 require.NoError(t, err)

 updateSum := impact.Summary
@@ -1379,7 +1379,7 @@ func TestLauncher_Pkger(t *testing.T) {
 t.Run("should create new resource when attempting to update", func(t *testing.T) {
 testUserDeletedLabel(t, func(t *testing.T, stackID influxdb.ID, initialObj pkger.Object, initialSum pkger.Summary) {
 pkg := newTemplate(initialObj)
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stackID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID))
 require.NoError(t, err)

 updateSum := impact.Summary
@@ -1417,7 +1417,7 @@ func TestLauncher_Pkger(t *testing.T) {
 t.Run("should create new resource when attempting to update", func(t *testing.T) {
 testUserDeletedEndpoint(t, func(t *testing.T, stackID influxdb.ID, initialObj pkger.Object, initialSum pkger.Summary) {
 pkg := newTemplate(initialObj)
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stackID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID))
 require.NoError(t, err)

 updateSum := impact.Summary
@@ -1459,7 +1459,7 @@ func TestLauncher_Pkger(t *testing.T) {
 t.Run("should create new resource when attempting to update", func(t *testing.T) {
 testUserDeletedRule(t, func(t *testing.T, stackID influxdb.ID, initialObjects []pkger.Object, initialSum pkger.Summary) {
 pkg := newTemplate(initialObjects...)
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stackID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID))
 require.NoError(t, err)

 updateSum := impact.Summary
@@ -1497,7 +1497,7 @@ func TestLauncher_Pkger(t *testing.T) {
 t.Run("should create new resource when attempting to update", func(t *testing.T) {
 testUserDeletedTask(t, func(t *testing.T, stackID influxdb.ID, initialObj pkger.Object, initialSum pkger.Summary) {
 pkg := newTemplate(initialObj)
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stackID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID))
 require.NoError(t, err)

 updateSum := impact.Summary
@@ -1535,7 +1535,7 @@ func TestLauncher_Pkger(t *testing.T) {
 t.Run("should create new resource when attempting to update", func(t *testing.T) {
 testUserDeletedTelegraf(t, func(t *testing.T, stackID influxdb.ID, initialObj pkger.Object, initialSum pkger.Summary) {
 pkg := newTemplate(initialObj)
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stackID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID))
 require.NoError(t, err)

 updateSum := impact.Summary
@@ -1562,7 +1562,7 @@ func TestLauncher_Pkger(t *testing.T) {

 impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
 pkger.ApplyWithStackID(stack.ID),
-pkger.ApplyWithPkg(newTemplate(newVariableObject("var", "", ""))),
+pkger.ApplyWithTemplate(newTemplate(newVariableObject("var", "", ""))),
 )
 require.NoError(t, err)
@@ -1573,7 +1573,7 @@ func TestLauncher_Pkger(t *testing.T) {

 impact, err = svc.Apply(ctx, l.Org.ID, l.User.ID,
 pkger.ApplyWithStackID(stack.ID),
-pkger.ApplyWithPkg(newTemplate(newVariableObject("var", "", "", "selected"))),
+pkger.ApplyWithTemplate(newTemplate(newVariableObject("var", "", "", "selected"))),
 )
 require.NoError(t, err)
@@ -1596,7 +1596,7 @@ func TestLauncher_Pkger(t *testing.T) {
 variablePkgName = "laces-out-dan"
 )

-defaultPkgFn := func(*testing.T) *pkger.Pkg {
+defaultPkgFn := func(*testing.T) *pkger.Template {
 return newTemplate(
 newBucketObject(bucketPkgName, "", ""),
 newCheckDeadmanObject(t, checkPkgName, "", time.Hour),
@@ -1612,7 +1612,7 @@ func TestLauncher_Pkger(t *testing.T) {

 tests := []struct {
 name string
-pkgFn func(t *testing.T) *pkger.Pkg
+pkgFn func(t *testing.T) *pkger.Template
 applyOpts []pkger.ApplyOptFn
 assertFn func(t *testing.T, impact pkger.ImpactSummary)
 }{
@@ -1717,7 +1717,7 @@ func TestLauncher_Pkger(t *testing.T) {
 },
 {
 name: "skip label and assoications should be dropped",
-pkgFn: func(t *testing.T) *pkger.Pkg {
+pkgFn: func(t *testing.T) *pkger.Template {
 objs := []pkger.Object{
 newBucketObject(bucketPkgName, "", ""),
 newCheckDeadmanObject(t, checkPkgName, "", time.Hour),
@@ -1777,7 +1777,7 @@ func TestLauncher_Pkger(t *testing.T) {
 impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
 append(
 tt.applyOpts,
-pkger.ApplyWithPkg(tt.pkgFn(t)),
+pkger.ApplyWithTemplate(tt.pkgFn(t)),
 pkger.ApplyWithStackID(stack.ID),
 )...,
 )
@@ -1835,7 +1835,7 @@ func TestLauncher_Pkger(t *testing.T) {
 )

 impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(initialPkg),
+pkger.ApplyWithTemplate(initialPkg),
 pkger.ApplyWithStackID(stack.ID),
 )
 require.NoError(t, err)
@@ -1878,7 +1878,7 @@ func TestLauncher_Pkger(t *testing.T) {
 require.Len(t, summary.NotificationRules, 1)
 assert.NotZero(t, summary.NotificationRules[0].ID)
 assert.Equal(t, "rule_0", summary.NotificationRules[0].Name)
-assert.Equal(t, initialEndpointPkgName, summary.NotificationRules[0].EndpointPkgName)
+assert.Equal(t, initialEndpointPkgName, summary.NotificationRules[0].EndpointMetaName)
 assert.Equal(t, "init desc", summary.NotificationRules[0].Description)
 hasAssociation(t, summary.NotificationRules[0].LabelAssociations)
@@ -1973,7 +1973,7 @@ func TestLauncher_Pkger(t *testing.T) {
 require.Len(t, sum.NotificationRules, 1, "missing required rules")
 assert.Equal(t, initialSum.NotificationRules[0].MetaName, sum.NotificationRules[0].MetaName)
 assert.Equal(t, initialSum.NotificationRules[0].Name, sum.NotificationRules[0].Name)
-assert.Equal(t, initialSum.NotificationRules[0].EndpointPkgName, sum.NotificationRules[0].EndpointPkgName)
+assert.Equal(t, initialSum.NotificationRules[0].EndpointMetaName, sum.NotificationRules[0].EndpointMetaName)
 assert.Equal(t, initialSum.NotificationRules[0].EndpointType, sum.NotificationRules[0].EndpointType)
 hasAssociation(t, sum.NotificationRules[0].LabelAssociations)
@@ -2018,7 +2018,7 @@ func TestLauncher_Pkger(t *testing.T) {

 pkg := newTemplate(pkger.DashboardToObject("", dash))

-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg))
 require.NoError(t, err)

 defer deleteStackFn(t, impact.StackID)
@@ -2057,7 +2057,7 @@ func TestLauncher_Pkger(t *testing.T) {
 })
 pkg := newTemplate(bktObj, labelObj)

-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg), pkger.ApplyWithStackID(stack.ID))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stack.ID))
 require.NoError(t, err)

 require.Len(t, impact.Summary.Labels, 1)
@@ -2151,7 +2151,7 @@ func TestLauncher_Pkger(t *testing.T) {
 pkger.WithVariableSVC(l.VariableService(t)),
 )

-_, err = svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(newCompletePkg(t)))
+_, err = svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(newCompletePkg(t)))
 require.Error(t, err)

 bkts, _, err := l.BucketService(t).FindBuckets(ctx, influxdb.BucketFilter{OrganizationID: &l.Org.ID})
@@ -2226,7 +2226,7 @@ func TestLauncher_Pkger(t *testing.T) {
 }

 t.Run("dry run a template with no existing resources", func(t *testing.T) {
-impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(newCompletePkg(t)))
+impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(newCompletePkg(t)))
 require.NoError(t, err)

 sum, diff := impact.Summary, impact.Diff
@@ -2338,7 +2338,7 @@ spec:
 require.NoError(t, err)

 impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(pkg),
+pkger.ApplyWithTemplate(pkg),
 pkger.ApplyWithEnvRefs(map[string]string{
 "bkt-1-name-ref": "new-bkt-name",
 "label-1-name-ref": "new-label-name",
@@ -2598,7 +2598,7 @@ spec:
 })
 template := newTemplate(obj)

-impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(template))
+impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template))
 require.NoError(t, err)

 diff := impact.Diff.Dashboards
@@ -2619,7 +2619,7 @@ spec:

 t.Run("apply a template of all new resources", func(t *testing.T) {
 // this initial test is also setup for the sub tests
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(newCompletePkg(t)))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(newCompletePkg(t)))
 require.NoError(t, err)
 defer deleteStackFn(t, impact.StackID)
@@ -2673,7 +2673,7 @@ spec:
 assert.NotZero(t, rule.ID)
 assert.Equal(t, "rule_0", rule.Name)
 assert.Equal(t, pkger.SafeID(endpoints[0].NotificationEndpoint.GetID()), rule.EndpointID)
-assert.Equal(t, "http-none-auth-notification-endpoint", rule.EndpointPkgName)
+assert.Equal(t, "http-none-auth-notification-endpoint", rule.EndpointMetaName)
 assert.Equalf(t, "http", rule.EndpointType, "rule: %+v", rule)

 require.Len(t, sum1.Tasks, 1)
@@ -2708,14 +2708,14 @@ spec:

 newSumMapping := func(id pkger.SafeID, pkgName, name string, rt influxdb.ResourceType) pkger.SummaryLabelMapping {
 return pkger.SummaryLabelMapping{
-Status: pkger.StateStatusNew,
-ResourceID: id,
-ResourceType: rt,
-ResourcePkgName: pkgName,
-ResourceName: name,
-LabelPkgName: labels[0].MetaName,
-LabelName: labels[0].Name,
-LabelID: labels[0].ID,
+Status: pkger.StateStatusNew,
+ResourceID: id,
+ResourceType: rt,
+ResourceMetaName: pkgName,
+ResourceName: name,
+LabelMetaName: labels[0].MetaName,
+LabelName: labels[0].Name,
+LabelID: labels[0].ID,
 }
 }
@@ -2800,7 +2800,7 @@ spec:
 rule := sum.NotificationRules[0]
 assert.Equal(t, "rule_0", rule.Name)
 assert.Equal(t, pkger.SafeID(endpoints[0].NotificationEndpoint.GetID()), rule.EndpointID)
-assert.NotEmpty(t, rule.EndpointPkgName)
+assert.NotEmpty(t, rule.EndpointMetaName)

 require.Len(t, sum.Tasks, 1)
 task := sum.Tasks[0]
@@ -2827,14 +2827,14 @@ spec:

 newSumMapping := func(id pkger.SafeID, pkgName, name string, rt influxdb.ResourceType) pkger.SummaryLabelMapping {
 return pkger.SummaryLabelMapping{
-Status: pkger.StateStatusNew,
-ResourceID: id,
-ResourceType: rt,
-ResourcePkgName: pkgName,
-ResourceName: name,
-LabelPkgName: labels[0].MetaName,
-LabelName: labels[0].Name,
-LabelID: labels[0].ID,
+Status: pkger.StateStatusNew,
+ResourceID: id,
+ResourceType: rt,
+ResourceMetaName: pkgName,
+ResourceName: name,
+LabelMetaName: labels[0].MetaName,
+LabelName: labels[0].Name,
+LabelID: labels[0].ID,
 }
 }
@@ -2945,7 +2945,7 @@ spec:
 t.Run("pkg with same bkt-var-label does nto create new resources for them", func(t *testing.T) {
 // validate the new package doesn't create new resources for bkts/labels/vars
 // since names collide.
-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(newCompletePkg(t)))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(newCompletePkg(t)))
 require.NoError(t, err)

 sum2 := impact.Summary
@@ -2965,7 +2965,7 @@ spec:
 pkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgStr))
 require.NoError(t, err)

-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg))
 require.NoError(t, err)
 return impact.Summary
 }
@@ -3110,7 +3110,7 @@ spec:
 newRule := newSum.NotificationRules[0]
 assert.Equal(t, "new rule name", newRule.Name)
 assert.Zero(t, newRule.EndpointID)
-assert.NotEmpty(t, newRule.EndpointPkgName)
+assert.NotEmpty(t, newRule.EndpointMetaName)
 hasLabelAssociations(t, newRule.LabelAssociations, 1, "label-1")

 require.Len(t, newSum.Tasks, 1)
@@ -3163,7 +3163,7 @@ spec:
 pkger.WithVariableSVC(l.VariableService(t)),
 )

-_, err = svc.Apply(ctx, l.Org.ID, 0, pkger.ApplyWithPkg(updatePkg))
+_, err = svc.Apply(ctx, l.Org.ID, 0, pkger.ApplyWithTemplate(updatePkg))
 require.Error(t, err)

 bkt, err := l.BucketService(t).FindBucketByID(ctx, influxdb.ID(sum1Bkts[0].ID))
@@ -3244,7 +3244,7 @@ spec:
 pkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgStr))
 require.NoError(t, err)

-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg))
 require.NoError(t, err)
 assert.NotZero(t, impact.StackID)
@@ -3258,7 +3258,7 @@ spec:
 template := newTemplate(newTelegrafObject("with_underscore-is-bad", "", ""))

 _, err := svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(template),
+pkger.ApplyWithTemplate(template),
 pkger.ApplyWithStackID(stack.ID),
 )
 require.Error(t, err)
@@ -3275,7 +3275,7 @@ spec:
 t.Run("applying a pkg without a stack will have a stack created for it", func(t *testing.T) {
 pkg := newTemplate(newBucketObject("room", "for", "more"))

-impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg))
+impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg))
 require.NoError(t, err)

 require.NotZero(t, impact.StackID)
@@ -3399,7 +3399,7 @@ spec:
 pkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgStr))
 require.NoError(t, err)

-impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithPkg(pkg))
+impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg))
 require.NoError(t, err)
 assert.Zero(t, impact.StackID)
@@ -3439,7 +3439,7 @@ spec:
 assert.Equal(t, expectedMissingEnvs, sum.MissingEnvs)

 impact, err = svc.Apply(ctx, l.Org.ID, l.User.ID,
-pkger.ApplyWithPkg(pkg),
+pkger.ApplyWithTemplate(pkg),
 pkger.ApplyWithEnvRefs(map[string]string{
 "bkt-1-name-ref": "rucket_threeve",
 "check-1-name-ref": "check_threeve",
@@ -3463,7 +3463,7 @@ spec:
 assert.Equal(t, "endpoint_threeve", sum.NotificationEndpoints[0].NotificationEndpoint.GetName())
 assert.Equal(t, "label_threeve", sum.Labels[0].Name)
 assert.Equal(t, "rule_threeve", sum.NotificationRules[0].Name)
-assert.Equal(t, "endpoint_threeve", sum.NotificationRules[0].EndpointPkgName)
+assert.Equal(t, "endpoint_threeve", sum.NotificationRules[0].EndpointMetaName)
 assert.Equal(t, "telegraf_threeve", sum.TelegrafConfigs[0].TelegrafConfig.Name)
 assert.Equal(t, "task_threeve", sum.Tasks[0].Name)
 assert.Equal(t, "var_threeve", sum.Variables[0].Name)
@@ -3471,7 +3471,7 @@ spec:
 })
 }

-func newCompletePkg(t *testing.T) *pkger.Pkg {
+func newCompletePkg(t *testing.T) *pkger.Template {
 t.Helper()

 pkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgYMLStr))
@@ -21,7 +21,7 @@ file and see a summary of its contents:
 The parser will validate all contents of the package and provide any
 and all fields/entries that failed validation.

-If you wish to use the Pkg type in your transport layer and let the
+If you wish to use the Template type in your transport layer and let the
 the transport layer manage the decoding, then you can run the following
 to validate the package after the raw decoding is done:
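The snippet the doc refers to falls outside this hunk. A minimal caller-side sketch, assuming the transport layer decodes the request body straight into a Template and using the Validate method seen later in this commit:

    // Hypothetical handler: decode first, then validate explicitly.
    var tmpl pkger.Template
    if err := json.NewDecoder(r.Body).Decode(&tmpl); err != nil {
        return err // malformed body
    }
    if err := tmpl.Validate(); err != nil {
        return err // reports every field/entry that failed validation
    }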
@@ -10,7 +10,7 @@ import (
 "github.com/influxdata/influxdb/v2/pkg/httpc"
 )

-// HTTPRemoteService provides an http client that is fluent in all things pkger.
+// HTTPRemoteService provides an http client that is fluent in all things template.
 type HTTPRemoteService struct {
 Client *httpc.Client
 }
@@ -112,8 +112,8 @@ func (s *HTTPRemoteService) UpdateStack(ctx context.Context, upd StackUpdate) (S
 return convertRespStackToStack(respBody)
 }

-// Export will produce a pkg from the parameters provided.
-func (s *HTTPRemoteService) Export(ctx context.Context, opts ...ExportOptFn) (*Pkg, error) {
+// Export will produce a template from the parameters provided.
+func (s *HTTPRemoteService) Export(ctx context.Context, opts ...ExportOptFn) (*Template, error) {
 opt, err := exportOptFromOptFns(opts)
 if err != nil {
 return nil, err
@@ -139,12 +139,12 @@ func (s *HTTPRemoteService) Export(ctx context.Context, opts ...ExportOptFn) (*P
 Resources: opt.Resources,
 }

-var newPkg *Pkg
+var newTemplate *Template
 err = s.Client.
 PostJSON(reqBody, RoutePrefixTemplates, "/export").
 Decode(func(resp *http.Response) error {
-pkg, err := Parse(EncodingJSON, FromReader(resp.Body, "export"))
-newPkg = pkg
+t, err := Parse(EncodingJSON, FromReader(resp.Body, "export"))
+newTemplate = t
 return err
 }).
 Do(ctx)
@@ -152,22 +152,22 @@ func (s *HTTPRemoteService) Export(ctx context.Context, opts ...ExportOptFn) (*P
 return nil, err
 }

-if err := newPkg.Validate(ValidWithoutResources()); err != nil {
+if err := newTemplate.Validate(ValidWithoutResources()); err != nil {
 return nil, err
 }
-return newPkg, nil
+return newTemplate, nil
 }

-// DryRun provides a dry run of the pkg application. The pkg will be marked verified
+// DryRun provides a dry run of the template application. The template will be marked verified
 // for later calls to Apply. This func will be run on an Apply if it has not been run
 // already.
 func (s *HTTPRemoteService) DryRun(ctx context.Context, orgID, userID influxdb.ID, opts ...ApplyOptFn) (ImpactSummary, error) {
 return s.apply(ctx, orgID, true, opts...)
 }

-// Apply will apply all the resources identified in the provided pkg. The entire pkg will be applied
-// in its entirety. If a failure happens midway then the entire pkg will be rolled back to the state
-// from before the pkg was applied.
+// Apply will apply all the resources identified in the provided template. The entire template will be applied
+// in its entirety. If a failure happens midway then the entire template will be rolled back to the state
+// from before the template was applied.
 func (s *HTTPRemoteService) Apply(ctx context.Context, orgID, userID influxdb.ID, opts ...ApplyOptFn) (ImpactSummary, error) {
 return s.apply(ctx, orgID, false, opts...)
 }
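A condensed usage sketch of the dry-run-then-apply flow documented above, written from a caller's point of view with the option constructors this commit renames (ApplyWithTemplate, ApplyWithStackID); svc, ctx, orgID, userID, template, and stackID are assumed to come from the caller:

    // Dry run: parse/verify the template and report the planned diff
    // without mutating platform state.
    impact, err := svc.DryRun(ctx, orgID, userID,
        pkger.ApplyWithTemplate(template),
        pkger.ApplyWithStackID(stackID),
    )
    if err != nil {
        return err
    }
    _ = impact.Diff // inspect planned changes

    // Apply with the same options; a mid-apply failure rolls everything back.
    impact, err = svc.Apply(ctx, orgID, userID,
        pkger.ApplyWithTemplate(template),
        pkger.ApplyWithStackID(stackID),
    )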
@@ -175,15 +175,15 @@ func (s *HTTPRemoteService) Apply(ctx context.Context, orgID, userID influxdb.ID
 func (s *HTTPRemoteService) apply(ctx context.Context, orgID influxdb.ID, dryRun bool, opts ...ApplyOptFn) (ImpactSummary, error) {
 opt := applyOptFromOptFns(opts...)

-var rawPkg ReqRawTemplate
-for _, pkg := range opt.Pkgs {
-b, err := pkg.Encode(EncodingJSON)
+var rawTemplate ReqRawTemplate
+for _, t := range opt.Templates {
+b, err := t.Encode(EncodingJSON)
 if err != nil {
 return ImpactSummary{}, err
 }
-rawPkg.Pkg = b
-rawPkg.Sources = pkg.sources
-rawPkg.ContentType = EncodingJSON.String()
+rawTemplate.Template = b
+rawTemplate.Sources = t.sources
+rawTemplate.ContentType = EncodingJSON.String()
 }

 reqBody := ReqApply{
@@ -191,7 +191,7 @@ func (s *HTTPRemoteService) apply(ctx context.Context, orgID influxdb.ID, dryRun
 DryRun: dryRun,
 EnvRefs: opt.EnvRefs,
 Secrets: opt.MissingSecrets,
-RawTemplate: rawPkg,
+RawTemplate: rawTemplate,
 }
 if opt.StackID != 0 {
 stackID := opt.StackID.String()
@@ -198,7 +198,7 @@ func (s *HTTPServerPackages) exportStack(w http.ResponseWriter, r *http.Request)
 return
 }

-encoding := pkgEncoding(r.Header.Get("Accept"))
+encoding := templateEncoding(r.Header.Get("Accept"))

 b, err := pkg.Encode(encoding)
 if err != nil {
@@ -321,7 +321,7 @@ func (s *HTTPServerPackages) export(w http.ResponseWriter, r *http.Request) {
 }

 var enc encoder
-switch pkgEncoding(r.Header.Get("Accept")) {
+switch templateEncoding(r.Header.Get("Accept")) {
 case EncodingYAML:
 enc = yaml.NewEncoder(w)
 w.Header().Set("Content-Type", "application/x-yaml")
@@ -361,7 +361,7 @@ func (s *HTTPServerPackages) apply(w http.ResponseWriter, r *http.Request) {
 }
 }

-parsedPkg, err := reqBody.Pkgs(encoding)
+parsedPkg, err := reqBody.Templates(encoding)
 if err != nil {
 s.api.Err(w, r, &influxdb.Error{
 Code: influxdb.EUnprocessableEntity,
@@ -378,7 +378,7 @@ func (s *HTTPServerPackages) apply(w http.ResponseWriter, r *http.Request) {

 applyOpts := []ApplyOptFn{
 ApplyWithEnvRefs(reqBody.EnvRefs),
-ApplyWithPkg(parsedPkg),
+ApplyWithTemplate(parsedPkg),
 ApplyWithStackID(stackID),
 }
 for _, a := range actions.SkipResources {
@@ -159,7 +159,7 @@ func TestPkgerHTTPServer(t *testing.T) {
 for _, o := range opts {
 o(&opt)
 }
-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -227,7 +227,7 @@ func TestPkgerHTTPServer(t *testing.T) {
 for _, o := range opts {
 o(&opt)
 }
-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -297,7 +297,7 @@ func TestPkgerHTTPServer(t *testing.T) {
 return pkger.ReqRawTemplate{
 ContentType: pkger.EncodingJSON.String(),
 Sources: pkg.Sources(),
-Pkg: pkgBytes,
+Template: pkgBytes,
 }
 }
@@ -347,7 +347,7 @@ func TestPkgerHTTPServer(t *testing.T) {
 for _, o := range opts {
 o(&opt)
 }
-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -432,7 +432,7 @@ func TestPkgerHTTPServer(t *testing.T) {
 for _, o := range opts {
 o(&opt)
 }
-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -465,7 +465,7 @@ func TestPkgerHTTPServer(t *testing.T) {
 o(&opt)
 }

-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -1127,7 +1127,7 @@ spec:
 return pkger.ReqRawTemplate{
 ContentType: encoding.String(),
 Sources: pkg.Sources(),
-Pkg: b,
+Template: b,
 }
 }
@@ -1195,7 +1195,7 @@ func (f *fakeSVC) UpdateStack(ctx context.Context, upd pkger.StackUpdate) (pkger
 panic("not implemented")
 }

-func (f *fakeSVC) Export(ctx context.Context, setters ...pkger.ExportOptFn) (*pkger.Pkg, error) {
+func (f *fakeSVC) Export(ctx context.Context, setters ...pkger.ExportOptFn) (*pkger.Template, error) {
 panic("not implemented")
 }
@@ -66,7 +66,7 @@ type (

 // RespStackResource is the response for a stack resource. This type exists
 // to decouple the internal service implementation from the deprecates usage
-// of pkgs in the API. We could add a custom UnmarshalJSON method, but
+// of templates in the API. We could add a custom UnmarshalJSON method, but
 // I would rather keep it obvious and explicit with a separate field.
 RespStackResource struct {
 APIVersion string `json:"apiVersion"`
@@ -97,7 +97,7 @@ func (r *ReqExport) OK() error {
 return nil
 }

-// RespExport is a response body for the create pkg endpoint.
+// RespExport is a response body for the create template endpoint.
 type RespExport []Object

 func (s *HTTPServerTemplates) export(w http.ResponseWriter, r *http.Request) {
@@ -135,19 +135,19 @@ func (s *HTTPServerTemplates) export(w http.ResponseWriter, r *http.Request) {
 opts = append(opts, ExportWithStackID(*stackID))
 }

-newPkg, err := s.svc.Export(r.Context(), opts...)
+newTemplate, err := s.svc.Export(r.Context(), opts...)
 if err != nil {
 s.api.Err(w, r, err)
 return
 }

-resp := RespExport(newPkg.Objects)
+resp := RespExport(newTemplate.Objects)
 if resp == nil {
 resp = []Object{}
 }

 var enc encoder
-switch pkgEncoding(r.Header.Get("Accept")) {
+switch templateEncoding(r.Header.Get("Accept")) {
 case EncodingYAML:
 enc = yaml.NewEncoder(w)
 w.Header().Set("Content-Type", "application/x-yaml")
@@ -175,7 +175,7 @@ func (p ReqTemplateRemote) Encoding() Encoding {
 type ReqRawTemplate struct {
 ContentType string `json:"contentType" yaml:"contentType"`
 Sources []string `json:"sources" yaml:"sources"`
-Pkg json.RawMessage `json:"contents" yaml:"contents"`
+Template json.RawMessage `json:"contents" yaml:"contents"`
 }

 func (p ReqRawTemplate) Encoding() Encoding {
@@ -193,7 +193,7 @@ type ReqRawAction struct {
 Properties json.RawMessage `json:"properties"`
 }

-// ReqApply is the request body for a json or yaml body for the apply pkg endpoint.
+// ReqApply is the request body for a json or yaml body for the apply template endpoint.
 type ReqApply struct {
 DryRun bool `json:"dryRun" yaml:"dryRun"`
 OrgID string `json:"orgID" yaml:"orgID"`
@@ -216,58 +216,52 @@ type ReqApply struct {
 RawActions []ReqRawAction `json:"actions"`
 }

-// Pkgs returns all pkgs associated with the request.
-func (r ReqApply) Pkgs(encoding Encoding) (*Pkg, error) {
-var rawPkgs []*Pkg
+// Templates returns all templates associated with the request.
+func (r ReqApply) Templates(encoding Encoding) (*Template, error) {
+var rawTemplates []*Template
 for _, rem := range r.Remotes {
 if rem.URL == "" {
 continue
 }
-pkg, err := Parse(rem.Encoding(), FromHTTPRequest(rem.URL), ValidSkipParseError())
+template, err := Parse(rem.Encoding(), FromHTTPRequest(rem.URL), ValidSkipParseError())
 if err != nil {
-return nil, &influxdb.Error{
-Code: influxdb.EUnprocessableEntity,
-Msg: fmt.Sprintf("pkg from url[%s] had an issue: %s", rem.URL, err.Error()),
-}
+msg := fmt.Sprintf("template from url[%s] had an issue: %s", rem.URL, err.Error())
+return nil, influxErr(influxdb.EUnprocessableEntity, msg)
 }
-rawPkgs = append(rawPkgs, pkg)
+rawTemplates = append(rawTemplates, template)
 }

-for i, rawPkg := range append(r.RawPkgs, r.RawPkg) {
-if rawPkg == nil {
+for i, rawTemplate := range append(r.RawPkgs, r.RawPkg) {
+if rawTemplate == nil {
 continue
 }

-pkg, err := Parse(encoding, FromReader(bytes.NewReader(rawPkg)), ValidSkipParseError())
+template, err := Parse(encoding, FromReader(bytes.NewReader(rawTemplate)), ValidSkipParseError())
 if err != nil {
-return nil, &influxdb.Error{
-Code: influxdb.EUnprocessableEntity,
-Msg: fmt.Sprintf("pkg[%d] had an issue: %s", i, err.Error()),
-}
+msg := fmt.Sprintf("template[%d] had an issue: %s", i, err.Error())
+return nil, influxErr(influxdb.EUnprocessableEntity, msg)
 }
-rawPkgs = append(rawPkgs, pkg)
+rawTemplates = append(rawTemplates, template)
 }

 for i, rawTmpl := range append(r.RawTemplates, r.RawTemplate) {
-if rawTmpl.Pkg == nil {
+if rawTmpl.Template == nil {
 continue
 }
 enc := encoding
 if sourceEncoding := rawTmpl.Encoding(); sourceEncoding != EncodingSource {
 enc = sourceEncoding
 }
-pkg, err := Parse(enc, FromReader(bytes.NewReader(rawTmpl.Pkg), rawTmpl.Sources...), ValidSkipParseError())
+template, err := Parse(enc, FromReader(bytes.NewReader(rawTmpl.Template), rawTmpl.Sources...), ValidSkipParseError())
 if err != nil {
 sources := formatSources(rawTmpl.Sources)
-return nil, &influxdb.Error{
-Code: influxdb.EUnprocessableEntity,
-Msg: fmt.Sprintf("pkg[%d] from source(s) %q had an issue: %s", i, sources, err.Error()),
-}
+msg := fmt.Sprintf("template[%d] from source(s) %q had an issue: %s", i, sources, err.Error())
+return nil, influxErr(influxdb.EUnprocessableEntity, msg)
 }
-rawPkgs = append(rawPkgs, pkg)
+rawTemplates = append(rawTemplates, template)
 }

-return Combine(rawPkgs, ValidWithoutResources(), ValidSkipParseError())
+return Combine(rawTemplates, ValidWithoutResources(), ValidSkipParseError())
 }

 type actionType string
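For orientation, a rough sketch of how a client builds this request, mirroring HTTPRemoteService.apply earlier in the commit (encode the template, then attach it as the raw template); the fields shown come from the struct tags above, other ReqApply fields are omitted, and error handling is trimmed:

    // Encode the parsed template and wrap it in the apply request body.
    b, err := template.Encode(pkger.EncodingJSON)
    if err != nil {
        return err
    }
    reqBody := pkger.ReqApply{
        OrgID:  orgID.String(),
        DryRun: dryRun,
        RawTemplate: pkger.ReqRawTemplate{
            ContentType: pkger.EncodingJSON.String(),
            Sources:     template.Sources(),
            Template:    b,
        },
    }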
@@ -330,7 +324,7 @@ func (r ReqApply) validActions() (struct {
 return out, nil
 }

-// RespApply is the response body for the apply pkg endpoint.
+// RespApply is the response body for the apply template endpoint.
 type RespApply struct {
 Sources []string `json:"sources" yaml:"sources"`
 StackID string `json:"stackID" yaml:"stackID"`
@@ -368,7 +362,7 @@ func (s *HTTPServerTemplates) apply(w http.ResponseWriter, r *http.Request) {
 }
 }

-parsedPkg, err := reqBody.Pkgs(encoding)
+parsedTemplate, err := reqBody.Templates(encoding)
 if err != nil {
 s.api.Err(w, r, &influxdb.Error{
 Code: influxdb.EUnprocessableEntity,
@@ -385,7 +379,7 @@ func (s *HTTPServerTemplates) apply(w http.ResponseWriter, r *http.Request) {

 applyOpts := []ApplyOptFn{
 ApplyWithEnvRefs(reqBody.EnvRefs),
-ApplyWithPkg(parsedPkg),
+ApplyWithTemplate(parsedTemplate),
 ApplyWithStackID(stackID),
 }
 for _, a := range actions.SkipResources {
@@ -465,7 +459,7 @@ func formatSources(sources []string) string {
 }

 func decodeWithEncoding(r *http.Request, v interface{}) (Encoding, error) {
-encoding := pkgEncoding(r.Header.Get("Content-Type"))
+encoding := templateEncoding(r.Header.Get("Content-Type"))

 var dec interface{ Decode(interface{}) error }
 switch encoding {
@@ -480,7 +474,7 @@ func decodeWithEncoding(r *http.Request, v interface{}) (Encoding, error) {
 return encoding, dec.Decode(v)
 }

-func pkgEncoding(contentType string) Encoding {
+func templateEncoding(contentType string) Encoding {
 switch contentType {
 case "application/x-jsonnet":
 return EncodingJsonnet
@@ -151,7 +151,7 @@ func TestPkgerHTTPServerTemplate(t *testing.T) {
 for _, o := range opts {
 o(&opt)
 }
-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -219,7 +219,7 @@ func TestPkgerHTTPServerTemplate(t *testing.T) {
 for _, o := range opts {
 o(&opt)
 }
-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -289,7 +289,7 @@ func TestPkgerHTTPServerTemplate(t *testing.T) {
 return pkger.ReqRawTemplate{
 ContentType: pkger.EncodingJSON.String(),
 Sources: pkg.Sources(),
-Pkg: pkgBytes,
+Template: pkgBytes,
 }
 }
@@ -339,7 +339,7 @@ func TestPkgerHTTPServerTemplate(t *testing.T) {
 for _, o := range opts {
 o(&opt)
 }
-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -424,7 +424,7 @@ func TestPkgerHTTPServerTemplate(t *testing.T) {
 for _, o := range opts {
 o(&opt)
 }
-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -457,7 +457,7 @@ func TestPkgerHTTPServerTemplate(t *testing.T) {
 o(&opt)
 }

-pkg, err := pkger.Combine(opt.Pkgs)
+pkg, err := pkger.Combine(opt.Templates)
 if err != nil {
 return pkger.ImpactSummary{}, err
 }
@@ -589,9 +589,9 @@ type (
 Description string `json:"description"`

 // These fields represent the relationship of the rule to the endpoint.
-EndpointID SafeID `json:"endpointID"`
-EndpointPkgName string `json:"endpointTemplateMetaName"`
-EndpointType string `json:"endpointType"`
+EndpointID SafeID `json:"endpointID"`
+EndpointMetaName string `json:"endpointTemplateMetaName"`
+EndpointType string `json:"endpointType"`

 Every string `json:"every"`
 Offset string `json:"offset"`
@@ -632,15 +632,15 @@ type SummaryLabel struct {

 // SummaryLabelMapping provides a summary of a label mapped with a single resource.
 type SummaryLabelMapping struct {
-exists bool
-Status StateStatus `json:"status,omitempty"`
-ResourceID SafeID `json:"resourceID"`
-ResourcePkgName string `json:"resourceTemplateMetaName"`
-ResourceName string `json:"resourceName"`
-ResourceType influxdb.ResourceType `json:"resourceType"`
-LabelPkgName string `json:"labelTemplateMetaName"`
-LabelName string `json:"labelName"`
-LabelID SafeID `json:"labelID"`
+exists bool
+Status StateStatus `json:"status,omitempty"`
+ResourceID SafeID `json:"resourceID"`
+ResourceMetaName string `json:"resourceTemplateMetaName"`
+ResourceName string `json:"resourceName"`
+ResourceType influxdb.ResourceType `json:"resourceType"`
+LabelMetaName string `json:"labelTemplateMetaName"`
+LabelName string `json:"labelName"`
+LabelID SafeID `json:"labelID"`
 }

 // SummaryReference informs the consumer of required references for
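Note that only the Go field names change here (ResourcePkgName to ResourceMetaName, LabelPkgName to LabelMetaName); the JSON tags are untouched, so the wire format stays the same. With illustrative placeholder values, a marshaled mapping still looks roughly like:

    {
      "status": "new",
      "resourceID": "0000000000000001",
      "resourceTemplateMetaName": "bucket-1",
      "resourceName": "bucket 1",
      "resourceType": "buckets",
      "labelTemplateMetaName": "label-1",
      "labelName": "label 1",
      "labelID": "0000000000000002"
    }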
@@ -13,15 +13,15 @@ import (
 func TestPkg(t *testing.T) {
 	t.Run("Summary", func(t *testing.T) {
 		t.Run("buckets returned in asc order by name", func(t *testing.T) {
-			pkg := Pkg{
+			pkg := Template{
 				mBuckets: map[string]*bucket{
 					"buck_2": {
 						Description: "desc2",
-						identity:       identity{name: &references{val: "pkgName2"}, displayName: &references{val: "name2"}},
+						identity:       identity{name: &references{val: "metaName2"}, displayName: &references{val: "name2"}},
 						RetentionRules: retentionRules{newRetentionRule(2 * time.Hour)},
 					},
 					"buck_1": {
-						identity:       identity{name: &references{val: "pkgName1"}, displayName: &references{val: "name1"}},
+						identity:       identity{name: &references{val: "metaName1"}, displayName: &references{val: "name1"}},
 						Description:    "desc1",
 						RetentionRules: retentionRules{newRetentionRule(time.Hour)},
 					},

@@ -35,14 +35,14 @@ func TestPkg(t *testing.T) {
 				assert.Zero(t, buck.ID)
 				assert.Zero(t, buck.OrgID)
 				assert.Equal(t, "desc"+strconv.Itoa(i), buck.Description)
-				assert.Equal(t, "pkgName"+strconv.Itoa(i), buck.MetaName)
+				assert.Equal(t, "metaName"+strconv.Itoa(i), buck.MetaName)
 				assert.Equal(t, "name"+strconv.Itoa(i), buck.Name)
 				assert.Equal(t, time.Duration(i)*time.Hour, buck.RetentionPeriod)
 			}
 		})

 		t.Run("labels returned in asc order by name", func(t *testing.T) {
-			pkg := Pkg{
+			pkg := Template{
 				mLabels: map[string]*label{
 					"2": {
 						identity: identity{name: &references{val: "pkgName2"}, displayName: &references{val: "name2"}},

@@ -94,19 +94,19 @@ func TestPkg(t *testing.T) {
 			}
 			bucket1.labels = append(bucket1.labels, label1)

-			pkg := Pkg{
-				mBuckets: map[string]*bucket{bucket1.PkgName(): bucket1},
-				mLabels:  map[string]*label{label1.PkgName(): label1},
+			pkg := Template{
+				mBuckets: map[string]*bucket{bucket1.MetaName(): bucket1},
+				mLabels:  map[string]*label{label1.MetaName(): label1},
 			}

 			summary := pkg.Summary()

 			require.Len(t, summary.LabelMappings, 1)
 			mapping1 := summary.LabelMappings[0]
-			assert.Equal(t, bucket1.PkgName(), mapping1.ResourcePkgName)
+			assert.Equal(t, bucket1.MetaName(), mapping1.ResourceMetaName)
 			assert.Equal(t, bucket1.Name(), mapping1.ResourceName)
 			assert.Equal(t, influxdb.BucketsResourceType, mapping1.ResourceType)
-			assert.Equal(t, label1.PkgName(), mapping1.LabelPkgName)
+			assert.Equal(t, label1.MetaName(), mapping1.LabelMetaName)
 			assert.Equal(t, label1.Name(), mapping1.LabelName)
 		})
 	})

@@ -490,7 +490,7 @@ func TestPkg(t *testing.T) {

 	for _, tt := range tests {
 		fn := func(t *testing.T) {
-			testfileRunner(t, tt.pkgFile, func(t *testing.T, pkg *Pkg) {
+			testfileRunner(t, tt.pkgFile, func(t *testing.T, pkg *Template) {
 				contained := pkg.Contains(tt.kind, tt.validName)
 				assert.True(t, contained)
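The ordering assertions above lean on Summary() sorting each collection by MetaName. A hedged consumer-side sketch, assuming the Summary struct exposes the Buckets slice these tests iterate:

	sum := tmpl.Summary() // tmpl is a *pkger.Template from Parse or Combine
	for _, b := range sum.Buckets {
		// buckets arrive in ascending MetaName order, e.g. "metaName1" before "metaName2"
		fmt.Println(b.MetaName, b.Name)
	}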
pkger/parser.go
@@ -65,14 +65,14 @@ func (e Encoding) String() string {
 var ErrInvalidEncoding = errors.New("invalid encoding provided")

 // Parse parses a pkg defined by the encoding and readerFns. As of writing this
-// we can parse both a YAML, JSON, and Jsonnet formats of the Pkg model.
-func Parse(encoding Encoding, readerFn ReaderFn, opts ...ValidateOptFn) (*Pkg, error) {
+// we can parse both a YAML, JSON, and Jsonnet formats of the Template model.
+func Parse(encoding Encoding, readerFn ReaderFn, opts ...ValidateOptFn) (*Template, error) {
 	r, source, err := readerFn()
 	if err != nil {
 		return nil, err
 	}

-	var pkgFn func(io.Reader, ...ValidateOptFn) (*Pkg, error)
+	var pkgFn func(io.Reader, ...ValidateOptFn) (*Template, error)
 	switch encoding {
 	case EncodingJSON:
 		pkgFn = parseJSON
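A minimal usage sketch of the renamed entry point; the file path is illustrative, while FromFile and EncodingJSON both appear elsewhere in this change:

	tmpl, err := pkger.Parse(pkger.EncodingJSON, pkger.FromFile("template.json"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(tmpl.Objects), "objects parsed")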
@@ -200,15 +200,15 @@ func normalizeGithubURLToContent(addr string) string {
 	return u.String()
 }

-func parseJSON(r io.Reader, opts ...ValidateOptFn) (*Pkg, error) {
+func parseJSON(r io.Reader, opts ...ValidateOptFn) (*Template, error) {
 	return parse(json.NewDecoder(r), opts...)
 }

-func parseJsonnet(r io.Reader, opts ...ValidateOptFn) (*Pkg, error) {
+func parseJsonnet(r io.Reader, opts ...ValidateOptFn) (*Template, error) {
 	return parse(jsonnet.NewDecoder(r), opts...)
 }

-func parseSource(r io.Reader, opts ...ValidateOptFn) (*Pkg, error) {
+func parseSource(r io.Reader, opts ...ValidateOptFn) (*Template, error) {
 	var b []byte
 	if byter, ok := r.(interface{ Bytes() []byte }); ok {
 		b = byter.Bytes()
@@ -235,10 +235,10 @@ func parseSource(r io.Reader, opts ...ValidateOptFn) (*Pkg, error) {
 	}
 }

-func parseYAML(r io.Reader, opts ...ValidateOptFn) (*Pkg, error) {
+func parseYAML(r io.Reader, opts ...ValidateOptFn) (*Template, error) {
 	dec := yaml.NewDecoder(r)

-	var pkg Pkg
+	var pkg Template
 	for {
 		// forced to use this for loop b/c the yaml dependency does not
 		// decode multi documents.
@@ -264,8 +264,8 @@ type decoder interface {
 	Decode(interface{}) error
 }

-func parse(dec decoder, opts ...ValidateOptFn) (*Pkg, error) {
-	var pkg Pkg
+func parse(dec decoder, opts ...ValidateOptFn) (*Template, error) {
+	var pkg Template
 	if err := dec.Decode(&pkg.Objects); err != nil {
 		return nil, err
 	}
@@ -333,13 +333,13 @@ func (k Object) SetMetadataName(name string) {
 	k.Metadata[fieldName] = name
 }

-// Pkg is the model for a package. The resources are more generic that one might
+// Template is the model for a package. The resources are more generic that one might
 // expect at first glance. This was done on purpose. The way json/yaml/toml or
 // w/e scripting you want to use, can have very different ways of parsing. The
 // different parsers are limited for the parsers that do not come from the std
 // lib (looking at you yaml/v2). This allows us to parse it and leave the matching
 // to another power, the graphing of the package is handled within itself.
-type Pkg struct {
+type Template struct {
 	Objects []Object `json:"-" yaml:"-"`
 	sources []string
@@ -361,9 +361,9 @@ type Pkg struct {
 }

 // Encode is a helper for encoding the pkg correctly.
-func (p *Pkg) Encode(encoding Encoding) ([]byte, error) {
+func (p *Template) Encode(encoding Encoding) ([]byte, error) {
 	if p == nil {
-		panic("attempted to encode a nil Pkg")
+		panic("attempted to encode a nil Template")
 	}

 	var (
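Encode is the counterpart to Parse for a chosen encoding, which later hunks rely on when templates are persisted or shipped over HTTP. A hedged round-trip sketch (the file name is invented):

	b, err := tmpl.Encode(pkger.EncodingJSON)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("exported.json", b, 0644); err != nil {
		log.Fatal(err)
	}
	// the written file can be fed back through pkger.Parse(pkger.EncodingJSON, pkger.FromFile("exported.json"))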
@@ -391,7 +391,7 @@ func (p *Pkg) Encode(encoding Encoding) ([]byte, error) {
 	return buf.Bytes(), nil
 }

-func (p *Pkg) Sources() []string {
+func (p *Template) Sources() []string {
 	// note: we prevent the internal field from being changed by enabling access
 	// to the sources via the exported method here.
 	return p.sources
@@ -400,7 +400,7 @@ func (p *Pkg) Sources() []string {
 // Summary returns a package Summary that describes all the resources and
 // associations the pkg contains. It is very useful for informing users of
 // the changes that will take place when this pkg would be applied.
-func (p *Pkg) Summary() Summary {
+func (p *Template) Summary() Summary {
 	// ensure zero values for arrays aren't returned, but instead
 	// we always returning an initialized slice.
 	sum := Summary{
@@ -458,7 +458,7 @@ func (p *Pkg) Summary() Summary {
 	return sum
 }

-func (p *Pkg) applyEnvRefs(envRefs map[string]string) error {
+func (p *Template) applyEnvRefs(envRefs map[string]string) error {
 	if len(envRefs) == 0 {
 		return nil
 	}
@@ -474,7 +474,7 @@ func (p *Pkg) applyEnvRefs(envRefs map[string]string) error {
 	return p.Validate()
 }

-func (p *Pkg) applySecrets(secrets map[string]string) {
+func (p *Template) applySecrets(secrets map[string]string) {
 	for k := range secrets {
 		p.mSecrets[k] = true
 	}
@@ -482,7 +482,7 @@ func (p *Pkg) applySecrets(secrets map[string]string) {

 // Contains identifies if a pkg contains a given object identified
 // by its kind and metadata.Name (MetaName) field.
-func (p *Pkg) Contains(k Kind, pkgName string) bool {
+func (p *Template) Contains(k Kind, pkgName string) bool {
 	switch k {
 	case KindBucket:
 		_, ok := p.mBuckets[pkgName]
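Contains answers membership by kind plus metadata.name, which the parser tests exercise above. A small sketch with an invented name:

	if tmpl.Contains(pkger.KindBucket, "rucket-1") {
		// the template declares a bucket whose metadata.name is "rucket-1"
	}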
@@ -517,8 +517,8 @@ func (p *Pkg) Contains(k Kind, pkgName string) bool {

 // Combine combines pkgs together. Is useful when you want to take multiple disparate pkgs
 // and compile them into one to take advantage of the parser and service guarantees.
-func Combine(pkgs []*Pkg, validationOpts ...ValidateOptFn) (*Pkg, error) {
-	newPkg := new(Pkg)
+func Combine(pkgs []*Template, validationOpts ...ValidateOptFn) (*Template, error) {
+	newPkg := new(Template)
 	for _, p := range pkgs {
 		if len(p.Objects) == 0 {
 			continue
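Combine stitches several parsed templates into one before validation or apply. A hedged sketch that reuses the ValidWithoutResources option the service layer passes later in this diff (file names invented):

	a, err := pkger.Parse(pkger.EncodingJSON, pkger.FromFile("buckets.json"))
	if err != nil {
		log.Fatal(err)
	}
	b, err := pkger.Parse(pkger.EncodingJSON, pkger.FromFile("dashboards.json"))
	if err != nil {
		log.Fatal(err)
	}
	combined, err := pkger.Combine([]*pkger.Template{a, b}, pkger.ValidWithoutResources())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(combined.Objects))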
@@ -559,7 +559,7 @@ func ValidSkipParseError() ValidateOptFn {
 }

 // Validate will graph all resources and validate every thing is in a useful form.
-func (p *Pkg) Validate(opts ...ValidateOptFn) error {
+func (p *Template) Validate(opts ...ValidateOptFn) error {
 	opt := &validateOpt{minResources: true}
 	for _, o := range opts {
 		o(opt)
@ -590,29 +590,29 @@ func (p *Pkg) Validate(opts ...ValidateOptFn) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Pkg) buckets() []*bucket {
|
||||
func (p *Template) buckets() []*bucket {
|
||||
buckets := make([]*bucket, 0, len(p.mBuckets))
|
||||
for _, b := range p.mBuckets {
|
||||
buckets = append(buckets, b)
|
||||
}
|
||||
|
||||
sort.Slice(buckets, func(i, j int) bool { return buckets[i].PkgName() < buckets[j].PkgName() })
|
||||
sort.Slice(buckets, func(i, j int) bool { return buckets[i].MetaName() < buckets[j].MetaName() })
|
||||
|
||||
return buckets
|
||||
}
|
||||
|
||||
func (p *Pkg) checks() []*check {
|
||||
func (p *Template) checks() []*check {
|
||||
checks := make([]*check, 0, len(p.mChecks))
|
||||
for _, c := range p.mChecks {
|
||||
checks = append(checks, c)
|
||||
}
|
||||
|
||||
sort.Slice(checks, func(i, j int) bool { return checks[i].PkgName() < checks[j].PkgName() })
|
||||
sort.Slice(checks, func(i, j int) bool { return checks[i].MetaName() < checks[j].MetaName() })
|
||||
|
||||
return checks
|
||||
}
|
||||
|
||||
func (p *Pkg) labels() []*label {
|
||||
func (p *Template) labels() []*label {
|
||||
labels := make(sortedLabels, 0, len(p.mLabels))
|
||||
for _, l := range p.mLabels {
|
||||
labels = append(labels, l)
|
||||
|
@ -623,16 +623,16 @@ func (p *Pkg) labels() []*label {
|
|||
return labels
|
||||
}
|
||||
|
||||
func (p *Pkg) dashboards() []*dashboard {
|
||||
func (p *Template) dashboards() []*dashboard {
|
||||
dashes := make([]*dashboard, 0, len(p.mDashboards))
|
||||
for _, d := range p.mDashboards {
|
||||
dashes = append(dashes, d)
|
||||
}
|
||||
sort.Slice(dashes, func(i, j int) bool { return dashes[i].PkgName() < dashes[j].PkgName() })
|
||||
sort.Slice(dashes, func(i, j int) bool { return dashes[i].MetaName() < dashes[j].MetaName() })
|
||||
return dashes
|
||||
}
|
||||
|
||||
func (p *Pkg) notificationEndpoints() []*notificationEndpoint {
|
||||
func (p *Template) notificationEndpoints() []*notificationEndpoint {
|
||||
endpoints := make([]*notificationEndpoint, 0, len(p.mNotificationEndpoints))
|
||||
for _, e := range p.mNotificationEndpoints {
|
||||
endpoints = append(endpoints, e)
|
||||
|
@ -640,23 +640,23 @@ func (p *Pkg) notificationEndpoints() []*notificationEndpoint {
|
|||
sort.Slice(endpoints, func(i, j int) bool {
|
||||
ei, ej := endpoints[i], endpoints[j]
|
||||
if ei.kind == ej.kind {
|
||||
return ei.PkgName() < ej.PkgName()
|
||||
return ei.MetaName() < ej.MetaName()
|
||||
}
|
||||
return ei.kind < ej.kind
|
||||
})
|
||||
return endpoints
|
||||
}
|
||||
|
||||
func (p *Pkg) notificationRules() []*notificationRule {
|
||||
func (p *Template) notificationRules() []*notificationRule {
|
||||
rules := make([]*notificationRule, 0, len(p.mNotificationRules))
|
||||
for _, r := range p.mNotificationRules {
|
||||
rules = append(rules, r)
|
||||
}
|
||||
sort.Slice(rules, func(i, j int) bool { return rules[i].PkgName() < rules[j].PkgName() })
|
||||
sort.Slice(rules, func(i, j int) bool { return rules[i].MetaName() < rules[j].MetaName() })
|
||||
return rules
|
||||
}
|
||||
|
||||
func (p *Pkg) missingEnvRefs() []string {
|
||||
func (p *Template) missingEnvRefs() []string {
|
||||
envRefs := make([]string, 0)
|
||||
for envRef, matching := range p.mEnv {
|
||||
if !matching {
|
||||
|
@ -667,7 +667,7 @@ func (p *Pkg) missingEnvRefs() []string {
|
|||
return envRefs
|
||||
}
|
||||
|
||||
func (p *Pkg) missingSecrets() []string {
|
||||
func (p *Template) missingSecrets() []string {
|
||||
secrets := make([]string, 0, len(p.mSecrets))
|
||||
for secret, foundInPlatform := range p.mSecrets {
|
||||
if foundInPlatform {
|
||||
|
@ -678,36 +678,36 @@ func (p *Pkg) missingSecrets() []string {
|
|||
return secrets
|
||||
}
|
||||
|
||||
func (p *Pkg) tasks() []*task {
|
||||
func (p *Template) tasks() []*task {
|
||||
tasks := make([]*task, 0, len(p.mTasks))
|
||||
for _, t := range p.mTasks {
|
||||
tasks = append(tasks, t)
|
||||
}
|
||||
|
||||
sort.Slice(tasks, func(i, j int) bool { return tasks[i].PkgName() < tasks[j].PkgName() })
|
||||
sort.Slice(tasks, func(i, j int) bool { return tasks[i].MetaName() < tasks[j].MetaName() })
|
||||
|
||||
return tasks
|
||||
}
|
||||
|
||||
func (p *Pkg) telegrafs() []*telegraf {
|
||||
func (p *Template) telegrafs() []*telegraf {
|
||||
teles := make([]*telegraf, 0, len(p.mTelegrafs))
|
||||
for _, t := range p.mTelegrafs {
|
||||
t.config.Name = t.Name()
|
||||
teles = append(teles, t)
|
||||
}
|
||||
|
||||
sort.Slice(teles, func(i, j int) bool { return teles[i].PkgName() < teles[j].PkgName() })
|
||||
sort.Slice(teles, func(i, j int) bool { return teles[i].MetaName() < teles[j].MetaName() })
|
||||
|
||||
return teles
|
||||
}
|
||||
|
||||
func (p *Pkg) variables() []*variable {
|
||||
func (p *Template) variables() []*variable {
|
||||
vars := make([]*variable, 0, len(p.mVariables))
|
||||
for _, v := range p.mVariables {
|
||||
vars = append(vars, v)
|
||||
}
|
||||
|
||||
sort.Slice(vars, func(i, j int) bool { return vars[i].PkgName() < vars[j].PkgName() })
|
||||
sort.Slice(vars, func(i, j int) bool { return vars[i].MetaName() < vars[j].MetaName() })
|
||||
|
||||
return vars
|
||||
}
|
||||
|
@ -716,7 +716,7 @@ func (p *Pkg) variables() []*variable {
|
|||
// valid pairs of labels and resources of which all have IDs.
|
||||
// If a resource does not exist yet, a label mapping will not
|
||||
// be returned for it.
|
||||
func (p *Pkg) labelMappings() []SummaryLabelMapping {
|
||||
func (p *Template) labelMappings() []SummaryLabelMapping {
|
||||
labels := p.mLabels
|
||||
mappings := make([]SummaryLabelMapping, 0, len(labels))
|
||||
for _, l := range labels {
|
||||
|
@ -744,7 +744,7 @@ func (p *Pkg) labelMappings() []SummaryLabelMapping {
|
|||
return mappings
|
||||
}
|
||||
|
||||
func (p *Pkg) validResources() error {
|
||||
func (p *Template) validResources() error {
|
||||
if len(p.Objects) > 0 {
|
||||
return nil
|
||||
}
|
||||
|
@ -761,7 +761,7 @@ func (p *Pkg) validResources() error {
|
|||
return &err
|
||||
}
|
||||
|
||||
func (p *Pkg) graphResources() error {
|
||||
func (p *Template) graphResources() error {
|
||||
p.mEnv = make(map[string]bool)
|
||||
p.mSecrets = make(map[string]bool)
|
||||
|
||||
|
@ -796,7 +796,7 @@ func (p *Pkg) graphResources() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Pkg) graphBuckets() *parseErr {
|
||||
func (p *Template) graphBuckets() *parseErr {
|
||||
p.mBuckets = make(map[string]*bucket)
|
||||
tracker := p.trackNames(true)
|
||||
return p.eachResource(KindBucket, func(o Object) []validationErr {
|
||||
|
@ -823,18 +823,18 @@ func (p *Pkg) graphBuckets() *parseErr {
|
|||
|
||||
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
|
||||
bkt.labels = append(bkt.labels, l)
|
||||
p.mLabels[l.PkgName()].setMapping(bkt, false)
|
||||
p.mLabels[l.MetaName()].setMapping(bkt, false)
|
||||
return nil
|
||||
})
|
||||
sort.Sort(bkt.labels)
|
||||
|
||||
p.mBuckets[bkt.PkgName()] = bkt
|
||||
p.mBuckets[bkt.MetaName()] = bkt
|
||||
|
||||
return append(failures, bkt.valid()...)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Pkg) graphLabels() *parseErr {
|
||||
func (p *Template) graphLabels() *parseErr {
|
||||
p.mLabels = make(map[string]*label)
|
||||
tracker := p.trackNames(true)
|
||||
return p.eachResource(KindLabel, func(o Object) []validationErr {
|
||||
|
@ -848,14 +848,14 @@ func (p *Pkg) graphLabels() *parseErr {
|
|||
Color: o.Spec.stringShort(fieldLabelColor),
|
||||
Description: o.Spec.stringShort(fieldDescription),
|
||||
}
|
||||
p.mLabels[l.PkgName()] = l
|
||||
p.mLabels[l.MetaName()] = l
|
||||
p.setRefs(l.name, l.displayName)
|
||||
|
||||
return l.valid()
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Pkg) graphChecks() *parseErr {
|
||||
func (p *Template) graphChecks() *parseErr {
|
||||
p.mChecks = make(map[string]*check)
|
||||
tracker := p.trackNames(true)
|
||||
|
||||
|
@ -907,12 +907,12 @@ func (p *Pkg) graphChecks() *parseErr {
|
|||
|
||||
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
|
||||
ch.labels = append(ch.labels, l)
|
||||
p.mLabels[l.PkgName()].setMapping(ch, false)
|
||||
p.mLabels[l.MetaName()].setMapping(ch, false)
|
||||
return nil
|
||||
})
|
||||
sort.Sort(ch.labels)
|
||||
|
||||
p.mChecks[ch.PkgName()] = ch
|
||||
p.mChecks[ch.MetaName()] = ch
|
||||
p.setRefs(ch.name, ch.displayName)
|
||||
return append(failures, ch.valid()...)
|
||||
})
|
||||
|
@ -926,7 +926,7 @@ func (p *Pkg) graphChecks() *parseErr {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Pkg) graphDashboards() *parseErr {
|
||||
func (p *Template) graphDashboards() *parseErr {
|
||||
p.mDashboards = make(map[string]*dashboard)
|
||||
tracker := p.trackNames(false)
|
||||
return p.eachResource(KindDashboard, func(o Object) []validationErr {
|
||||
|
@ -942,7 +942,7 @@ func (p *Pkg) graphDashboards() *parseErr {
|
|||
|
||||
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
|
||||
dash.labels = append(dash.labels, l)
|
||||
p.mLabels[l.PkgName()].setMapping(dash, false)
|
||||
p.mLabels[l.MetaName()].setMapping(dash, false)
|
||||
return nil
|
||||
})
|
||||
sort.Sort(dash.labels)
|
||||
|
@ -962,14 +962,14 @@ func (p *Pkg) graphDashboards() *parseErr {
|
|||
dash.Charts = append(dash.Charts, ch)
|
||||
}
|
||||
|
||||
p.mDashboards[dash.PkgName()] = dash
|
||||
p.mDashboards[dash.MetaName()] = dash
|
||||
p.setRefs(dash.name, dash.displayName)
|
||||
|
||||
return append(failures, dash.valid()...)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Pkg) graphNotificationEndpoints() *parseErr {
|
||||
func (p *Template) graphNotificationEndpoints() *parseErr {
|
||||
p.mNotificationEndpoints = make(map[string]*notificationEndpoint)
|
||||
tracker := p.trackNames(true)
|
||||
|
||||
|
@ -1014,7 +1014,7 @@ func (p *Pkg) graphNotificationEndpoints() *parseErr {
|
|||
}
|
||||
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
|
||||
endpoint.labels = append(endpoint.labels, l)
|
||||
p.mLabels[l.PkgName()].setMapping(endpoint, false)
|
||||
p.mLabels[l.MetaName()].setMapping(endpoint, false)
|
||||
return nil
|
||||
})
|
||||
sort.Sort(endpoint.labels)
|
||||
|
@ -1028,7 +1028,7 @@ func (p *Pkg) graphNotificationEndpoints() *parseErr {
|
|||
endpoint.username,
|
||||
)
|
||||
|
||||
p.mNotificationEndpoints[endpoint.PkgName()] = endpoint
|
||||
p.mNotificationEndpoints[endpoint.MetaName()] = endpoint
|
||||
return append(failures, endpoint.valid()...)
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -1041,7 +1041,7 @@ func (p *Pkg) graphNotificationEndpoints() *parseErr {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Pkg) graphNotificationRules() *parseErr {
|
||||
func (p *Template) graphNotificationRules() *parseErr {
|
||||
p.mNotificationRules = make(map[string]*notificationRule)
|
||||
tracker := p.trackNames(false)
|
||||
return p.eachResource(KindNotificationRule, func(o Object) []validationErr {
|
||||
|
@ -1080,18 +1080,18 @@ func (p *Pkg) graphNotificationRules() *parseErr {
|
|||
|
||||
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
|
||||
rule.labels = append(rule.labels, l)
|
||||
p.mLabels[l.PkgName()].setMapping(rule, false)
|
||||
p.mLabels[l.MetaName()].setMapping(rule, false)
|
||||
return nil
|
||||
})
|
||||
sort.Sort(rule.labels)
|
||||
|
||||
p.mNotificationRules[rule.PkgName()] = rule
|
||||
p.mNotificationRules[rule.MetaName()] = rule
|
||||
p.setRefs(rule.name, rule.displayName, rule.endpointName)
|
||||
return append(failures, rule.valid()...)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Pkg) graphTasks() *parseErr {
|
||||
func (p *Template) graphTasks() *parseErr {
|
||||
p.mTasks = make(map[string]*task)
|
||||
tracker := p.trackNames(false)
|
||||
return p.eachResource(KindTask, func(o Object) []validationErr {
|
||||
|
@ -1112,18 +1112,18 @@ func (p *Pkg) graphTasks() *parseErr {
|
|||
|
||||
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
|
||||
t.labels = append(t.labels, l)
|
||||
p.mLabels[l.PkgName()].setMapping(t, false)
|
||||
p.mLabels[l.MetaName()].setMapping(t, false)
|
||||
return nil
|
||||
})
|
||||
sort.Sort(t.labels)
|
||||
|
||||
p.mTasks[t.PkgName()] = t
|
||||
p.mTasks[t.MetaName()] = t
|
||||
p.setRefs(t.name, t.displayName)
|
||||
return append(failures, t.valid()...)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Pkg) graphTelegrafs() *parseErr {
|
||||
func (p *Template) graphTelegrafs() *parseErr {
|
||||
p.mTelegrafs = make(map[string]*telegraf)
|
||||
tracker := p.trackNames(false)
|
||||
return p.eachResource(KindTelegraf, func(o Object) []validationErr {
|
||||
|
@ -1140,19 +1140,19 @@ func (p *Pkg) graphTelegrafs() *parseErr {
|
|||
|
||||
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
|
||||
tele.labels = append(tele.labels, l)
|
||||
p.mLabels[l.PkgName()].setMapping(tele, false)
|
||||
p.mLabels[l.MetaName()].setMapping(tele, false)
|
||||
return nil
|
||||
})
|
||||
sort.Sort(tele.labels)
|
||||
|
||||
p.mTelegrafs[tele.PkgName()] = tele
|
||||
p.mTelegrafs[tele.MetaName()] = tele
|
||||
p.setRefs(tele.name, tele.displayName)
|
||||
|
||||
return append(failures, tele.valid()...)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Pkg) graphVariables() *parseErr {
|
||||
func (p *Template) graphVariables() *parseErr {
|
||||
p.mVariables = make(map[string]*variable)
|
||||
tracker := p.trackNames(true)
|
||||
return p.eachResource(KindVariable, func(o Object) []validationErr {
|
||||
|
@ -1179,12 +1179,12 @@ func (p *Pkg) graphVariables() *parseErr {
|
|||
|
||||
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
|
||||
newVar.labels = append(newVar.labels, l)
|
||||
p.mLabels[l.PkgName()].setMapping(newVar, false)
|
||||
p.mLabels[l.MetaName()].setMapping(newVar, false)
|
||||
return nil
|
||||
})
|
||||
sort.Sort(newVar.labels)
|
||||
|
||||
p.mVariables[newVar.PkgName()] = newVar
|
||||
p.mVariables[newVar.MetaName()] = newVar
|
||||
p.setRefs(newVar.name, newVar.displayName)
|
||||
p.setRefs(newVar.selected...)
|
||||
|
||||
|
@ -1192,7 +1192,7 @@ func (p *Pkg) graphVariables() *parseErr {
|
|||
})
|
||||
}
|
||||
|
||||
func (p *Pkg) eachResource(resourceKind Kind, fn func(o Object) []validationErr) *parseErr {
|
||||
func (p *Template) eachResource(resourceKind Kind, fn func(o Object) []validationErr) *parseErr {
|
||||
var pErr parseErr
|
||||
for i, k := range p.Objects {
|
||||
if err := k.Kind.OK(); err != nil {
|
||||
|
@ -1268,7 +1268,7 @@ func (p *Pkg) eachResource(resourceKind Kind, fn func(o Object) []validationErr)
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Pkg) parseNestedLabels(r Resource, fn func(lb *label) error) []validationErr {
|
||||
func (p *Template) parseNestedLabels(r Resource, fn func(lb *label) error) []validationErr {
|
||||
nestedLabels := make(map[string]*label)
|
||||
|
||||
var failures []validationErr
|
||||
|
@ -1290,7 +1290,7 @@ func (p *Pkg) parseNestedLabels(r Resource, fn func(lb *label) error) []validati
|
|||
return failures
|
||||
}
|
||||
|
||||
func (p *Pkg) parseNestedLabel(nr Resource, fn func(lb *label) error) *validationErr {
|
||||
func (p *Template) parseNestedLabel(nr Resource, fn func(lb *label) error) *validationErr {
|
||||
k, err := nr.kind()
|
||||
if err != nil {
|
||||
return &validationErr{
|
||||
|
@ -1325,7 +1325,7 @@ func (p *Pkg) parseNestedLabel(nr Resource, fn func(lb *label) error) *validatio
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Pkg) trackNames(resourceUniqueByName bool) func(Object) (identity, []validationErr) {
|
||||
func (p *Template) trackNames(resourceUniqueByName bool) func(Object) (identity, []validationErr) {
|
||||
mPkgNames := make(map[string]bool)
|
||||
uniqNames := make(map[string]bool)
|
||||
return func(o Object) (identity, []validationErr) {
|
||||
|
@ -1364,7 +1364,7 @@ func (p *Pkg) trackNames(resourceUniqueByName bool) func(Object) (identity, []va
|
|||
}
|
||||
}
|
||||
|
||||
func (p *Pkg) getRefWithKnownEnvs(r Resource, field string) *references {
|
||||
func (p *Template) getRefWithKnownEnvs(r Resource, field string) *references {
|
||||
nameRef := r.references(field)
|
||||
if v, ok := p.mEnvVals[nameRef.EnvRef]; ok {
|
||||
nameRef.val = v
|
||||
|
@ -1372,7 +1372,7 @@ func (p *Pkg) getRefWithKnownEnvs(r Resource, field string) *references {
|
|||
return nameRef
|
||||
}
|
||||
|
||||
func (p *Pkg) setRefs(refs ...*references) {
|
||||
func (p *Template) setRefs(refs ...*references) {
|
||||
for _, ref := range refs {
|
||||
if ref.Secret != "" {
|
||||
p.mSecrets[ref.Secret] = false
|
||||
|
|
|
@ -28,7 +28,7 @@ func (i *identity) Name() string {
|
|||
return i.name.String()
|
||||
}
|
||||
|
||||
func (i *identity) PkgName() string {
|
||||
func (i *identity) MetaName() string {
|
||||
return i.name.String()
|
||||
}
|
||||
|
||||
|
@ -90,7 +90,7 @@ type bucket struct {
|
|||
func (b *bucket) summarize() SummaryBucket {
|
||||
return SummaryBucket{
|
||||
Name: b.Name(),
|
||||
MetaName: b.PkgName(),
|
||||
MetaName: b.MetaName(),
|
||||
Description: b.Description,
|
||||
RetentionPeriod: b.RetentionRules.RP(),
|
||||
LabelAssociations: toSummaryLabels(b.labels...),
|
||||
|
@ -248,7 +248,7 @@ func (c *check) summarize() SummaryCheck {
|
|||
}
|
||||
|
||||
sum := SummaryCheck{
|
||||
MetaName: c.PkgName(),
|
||||
MetaName: c.MetaName(),
|
||||
Status: c.Status(),
|
||||
LabelAssociations: toSummaryLabels(c.labels...),
|
||||
EnvReferences: summarizeCommonReferences(c.identity, c.labels),
|
||||
|
@ -463,7 +463,7 @@ func (d *dashboard) ResourceType() influxdb.ResourceType {
|
|||
|
||||
func (d *dashboard) summarize() SummaryDashboard {
|
||||
iDash := SummaryDashboard{
|
||||
MetaName: d.PkgName(),
|
||||
MetaName: d.MetaName(),
|
||||
Name: d.Name(),
|
||||
Description: d.Description,
|
||||
LabelAssociations: toSummaryLabels(d.labels...),
|
||||
|
@ -1071,9 +1071,9 @@ type assocMapVal struct {
|
|||
}
|
||||
|
||||
func (l assocMapVal) PkgName() string {
|
||||
t, ok := l.v.(interface{ PkgName() string })
|
||||
t, ok := l.v.(interface{ MetaName() string })
|
||||
if ok {
|
||||
return t.PkgName()
|
||||
return t.MetaName()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
@ -1131,7 +1131,7 @@ type label struct {
|
|||
|
||||
func (l *label) summarize() SummaryLabel {
|
||||
return SummaryLabel{
|
||||
MetaName: l.PkgName(),
|
||||
MetaName: l.MetaName(),
|
||||
Name: l.Name(),
|
||||
Properties: struct {
|
||||
Color string `json:"color"`
|
||||
|
@ -1153,13 +1153,13 @@ func (l *label) mappingSummary() []SummaryLabelMapping {
|
|||
status = StateStatusExists
|
||||
}
|
||||
mappings = append(mappings, SummaryLabelMapping{
|
||||
exists: v.exists,
|
||||
Status: status,
|
||||
ResourcePkgName: v.PkgName(),
|
||||
ResourceName: resource.name,
|
||||
ResourceType: resource.resType,
|
||||
LabelPkgName: l.PkgName(),
|
||||
LabelName: l.Name(),
|
||||
exists: v.exists,
|
||||
Status: status,
|
||||
ResourceMetaName: v.PkgName(),
|
||||
ResourceName: resource.name,
|
||||
ResourceType: resource.resType,
|
||||
LabelMetaName: l.MetaName(),
|
||||
LabelName: l.Name(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -1207,7 +1207,7 @@ func (s sortedLabels) Len() int {
|
|||
}
|
||||
|
||||
func (s sortedLabels) Less(i, j int) bool {
|
||||
return s[i].PkgName() < s[j].PkgName()
|
||||
return s[i].MetaName() < s[j].MetaName()
|
||||
}
|
||||
|
||||
func (s sortedLabels) Swap(i, j int) {
|
||||
|
@ -1284,7 +1284,7 @@ func (n *notificationEndpoint) base() endpoint.Base {
|
|||
func (n *notificationEndpoint) summarize() SummaryNotificationEndpoint {
|
||||
base := n.base()
|
||||
sum := SummaryNotificationEndpoint{
|
||||
MetaName: n.PkgName(),
|
||||
MetaName: n.MetaName(),
|
||||
LabelAssociations: toSummaryLabels(n.labels...),
|
||||
EnvReferences: summarizeCommonReferences(n.identity, n.labels),
|
||||
}
|
||||
|
@ -1467,9 +1467,9 @@ func (r *notificationRule) Status() influxdb.Status {
|
|||
return influxdb.Status(r.status)
|
||||
}
|
||||
|
||||
func (r *notificationRule) endpointPkgName() string {
|
||||
func (r *notificationRule) endpointMetaName() string {
|
||||
if r.associatedEndpoint != nil {
|
||||
return r.associatedEndpoint.PkgName()
|
||||
return r.associatedEndpoint.MetaName()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
@ -1477,7 +1477,7 @@ func (r *notificationRule) endpointPkgName() string {
|
|||
func (r *notificationRule) summarize() SummaryNotificationRule {
|
||||
var endpointPkgName, endpointType string
|
||||
if r.associatedEndpoint != nil {
|
||||
endpointPkgName = r.associatedEndpoint.PkgName()
|
||||
endpointPkgName = r.associatedEndpoint.MetaName()
|
||||
endpointType = r.associatedEndpoint.kind.String()
|
||||
}
|
||||
|
||||
|
@ -1487,9 +1487,9 @@ func (r *notificationRule) summarize() SummaryNotificationRule {
|
|||
}
|
||||
|
||||
return SummaryNotificationRule{
|
||||
MetaName: r.PkgName(),
|
||||
MetaName: r.MetaName(),
|
||||
Name: r.Name(),
|
||||
EndpointPkgName: endpointPkgName,
|
||||
EndpointMetaName: endpointPkgName,
|
||||
EndpointType: endpointType,
|
||||
Description: r.description,
|
||||
Every: r.every.String(),
|
||||
|
@ -1721,7 +1721,7 @@ func (t *task) flux() string {
|
|||
|
||||
func (t *task) summarize() SummaryTask {
|
||||
return SummaryTask{
|
||||
MetaName: t.PkgName(),
|
||||
MetaName: t.MetaName(),
|
||||
Name: t.Name(),
|
||||
Cron: t.cron,
|
||||
Description: t.description,
|
||||
|
@ -1856,7 +1856,7 @@ func (t *telegraf) summarize() SummaryTelegraf {
|
|||
cfg := t.config
|
||||
cfg.Name = t.Name()
|
||||
return SummaryTelegraf{
|
||||
MetaName: t.PkgName(),
|
||||
MetaName: t.MetaName(),
|
||||
TelegrafConfig: cfg,
|
||||
LabelAssociations: toSummaryLabels(t.labels...),
|
||||
EnvReferences: summarizeCommonReferences(t.identity, t.labels),
|
||||
|
@ -1935,7 +1935,7 @@ func (v *variable) summarize() SummaryVariable {
|
|||
}
|
||||
|
||||
return SummaryVariable{
|
||||
MetaName: v.PkgName(),
|
||||
MetaName: v.MetaName(),
|
||||
Name: v.Name(),
|
||||
Description: v.Description,
|
||||
Selected: v.Selected(),
|
||||
|
(file diff suppressed because it is too large)

pkger/service.go
@@ -27,9 +27,9 @@ const APIVersion = "influxdata.com/v2alpha1"

 type (
 	// Stack is an identifier for stateful application of a package(s). This stack
-	// will map created resources from the pkg(s) to existing resources on the
-	// platform. This stack is updated only after side effects of applying a pkg.
-	// If the pkg is applied, and no changes are had, then the stack is not updated.
+	// will map created resources from the template(s) to existing resources on the
+	// platform. This stack is updated only after side effects of applying a template.
+	// If the template is applied, and no changes are had, then the stack is not updated.
 	Stack struct {
 		ID    influxdb.ID
 		OrgID influxdb.ID
@@ -43,7 +43,7 @@ type (
 	}

 	// StackResource is a record for an individual resource side effect genereated from
-	// applying a pkg.
+	// applying a template.
 	StackResource struct {
 		APIVersion string
 		ID         influxdb.ID
@@ -85,7 +85,7 @@ type SVC interface {
 	ReadStack(ctx context.Context, id influxdb.ID) (Stack, error)
 	UpdateStack(ctx context.Context, upd StackUpdate) (Stack, error)

-	Export(ctx context.Context, opts ...ExportOptFn) (*Pkg, error)
+	Export(ctx context.Context, opts ...ExportOptFn) (*Template, error)
 	DryRun(ctx context.Context, orgID, userID influxdb.ID, opts ...ApplyOptFn) (ImpactSummary, error)
 	Apply(ctx context.Context, orgID, userID influxdb.ID, opts ...ApplyOptFn) (ImpactSummary, error)
 }
@@ -238,7 +238,7 @@ type Store interface {
 	DeleteStack(ctx context.Context, id influxdb.ID) error
 }

-// Service provides the pkger business logic including all the dependencies to make
+// Service provides the template business logic including all the dependencies to make
 // this resource sausage.
 type Service struct {
 	log *zap.Logger
@@ -266,7 +266,7 @@ type Service struct {

 var _ SVC = (*Service)(nil)

-// NewService is a constructor for a pkger Service.
+// NewService is a constructor for a template Service.
 func NewService(opts ...ServiceSetterFn) *Service {
 	opt := &serviceOpt{
 		logger: zap.NewNop(),
@@ -348,8 +348,8 @@ func (s *Service) DeleteStack(ctx context.Context, identifiers struct{ OrgID, Us
 		}
 	}

-	// providing empty Pkg will remove all applied resources
-	state, err := s.dryRun(ctx, identifiers.OrgID, new(Pkg), applyOptFromOptFns(ApplyWithStackID(identifiers.StackID)))
+	// providing empty template will remove all applied resources
+	state, err := s.dryRun(ctx, identifiers.OrgID, new(Template), applyOptFromOptFns(ApplyWithStackID(identifiers.StackID)))
 	if err != nil {
 		return err
 	}
@@ -443,7 +443,7 @@ func (s *Service) applyStackUpdate(existing Stack, upd StackUpdate) Stack {
 }

 type (
-	// ExportOptFn is a functional input for setting the pkg fields.
+	// ExportOptFn is a functional input for setting the template fields.
 	ExportOptFn func(opt *ExportOpt) error

 	// ExportOpt are the options for creating a new package.
@@ -511,7 +511,7 @@ func exportOptFromOptFns(opts []ExportOptFn) (ExportOpt, error) {
 }

 // Export will produce a templates from the parameters provided.
-func (s *Service) Export(ctx context.Context, setters ...ExportOptFn) (*Pkg, error) {
+func (s *Service) Export(ctx context.Context, setters ...ExportOptFn) (*Template, error) {
 	opt, err := exportOptFromOptFns(setters)
 	if err != nil {
 		return nil, err
@@ -556,12 +556,12 @@ func (s *Service) Export(ctx context.Context, setters ...ExportOptFn) (*Pkg, err
 		return nil, internalErr(err)
 	}

-	pkg := &Pkg{Objects: exporter.Objects()}
-	if err := pkg.Validate(ValidWithoutResources()); err != nil {
+	template := &Template{Objects: exporter.Objects()}
+	if err := template.Validate(ValidWithoutResources()); err != nil {
 		return nil, failedValidationErr(err)
 	}

-	return pkg, nil
+	return template, nil
 }

 func (s *Service) cloneOrgResources(ctx context.Context, orgID influxdb.ID, resourceKinds []Kind) ([]ResourceToClone, error) {
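A hedged sketch of the renamed Export surface; the concrete ExportOptFn setters are elided since they are not part of this hunk, and svc stands in for any pkger.SVC implementation:

	tmpl, err := svc.Export(ctx, exportOpts...) // exportOpts is a []pkger.ExportOptFn
	if err != nil {
		log.Fatal(err)
	}
	b, err := tmpl.Encode(pkger.EncodingJSON)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))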
@@ -827,7 +827,7 @@ func (s *Service) filterOrgResourceKinds(resourceKindFilters []Kind) []struct {
 	return resourceTypeGens
 }

-// ImpactSummary represents the impact the application of a pkg will have on the system.
+// ImpactSummary represents the impact the application of a template will have on the system.
 type ImpactSummary struct {
 	Sources []string
 	StackID influxdb.ID
@@ -835,50 +835,50 @@ type ImpactSummary struct {
 	Summary Summary
 }

-// DryRun provides a dry run of the pkg application. The pkg will be marked verified
+// DryRun provides a dry run of the template application. The template will be marked verified
 // for later calls to Apply. This func will be run on an Apply if it has not been run
 // already.
 func (s *Service) DryRun(ctx context.Context, orgID, userID influxdb.ID, opts ...ApplyOptFn) (ImpactSummary, error) {
 	opt := applyOptFromOptFns(opts...)
-	pkg, err := s.pkgFromApplyOpts(ctx, opt)
+	template, err := s.templateFromApplyOpts(ctx, opt)
 	if err != nil {
 		return ImpactSummary{}, err
 	}

-	state, err := s.dryRun(ctx, orgID, pkg, opt)
+	state, err := s.dryRun(ctx, orgID, template, opt)
 	if err != nil {
 		return ImpactSummary{}, err
 	}

 	return ImpactSummary{
-		Sources: pkg.sources,
+		Sources: template.sources,
 		StackID: opt.StackID,
 		Diff:    state.diff(),
-		Summary: newSummaryFromStatePkg(state, pkg),
+		Summary: newSummaryFromStateTemplate(state, template),
 	}, nil
 }

-func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opt ApplyOpt) (*stateCoordinator, error) {
+func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, template *Template, opt ApplyOpt) (*stateCoordinator, error) {
 	// so here's the deal, when we have issues with the parsing validation, we
 	// continue to do the diff anyhow. any resource that does not have a name
 	// will be skipped, and won't bleed into the dry run here. We can now return
 	// a error (parseErr) and valid diff/summary.
 	var parseErr error
-	err := pkg.Validate(ValidWithoutResources())
+	err := template.Validate(ValidWithoutResources())
 	if err != nil && !IsParseErr(err) {
 		return nil, internalErr(err)
 	}
 	parseErr = err

 	if len(opt.EnvRefs) > 0 {
-		err := pkg.applyEnvRefs(opt.EnvRefs)
+		err := template.applyEnvRefs(opt.EnvRefs)
 		if err != nil && !IsParseErr(err) {
 			return nil, internalErr(err)
 		}
 		parseErr = err
 	}

-	state := newStateCoordinator(pkg, resourceActions{
+	state := newStateCoordinator(template, resourceActions{
 		skipKinds:     opt.KindsToSkip,
 		skipResources: opt.ResourcesToSkip,
 	})
@ -889,7 +889,7 @@ func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opt A
|
|||
}
|
||||
}
|
||||
|
||||
if err := s.dryRunSecrets(ctx, orgID, pkg); err != nil {
|
||||
if err := s.dryRunSecrets(ctx, orgID, template); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -972,13 +972,13 @@ func (s *Service) dryRunDashboards(ctx context.Context, orgID influxdb.ID, dashs
|
|||
}
|
||||
|
||||
func (s *Service) dryRunLabels(ctx context.Context, orgID influxdb.ID, labels map[string]*stateLabel) {
|
||||
for _, pkgLabel := range labels {
|
||||
pkgLabel.orgID = orgID
|
||||
existingLabel, _ := s.findLabel(ctx, orgID, pkgLabel)
|
||||
if IsNew(pkgLabel.stateStatus) && existingLabel != nil {
|
||||
pkgLabel.stateStatus = StateStatusExists
|
||||
for _, l := range labels {
|
||||
l.orgID = orgID
|
||||
existingLabel, _ := s.findLabel(ctx, orgID, l)
|
||||
if IsNew(l.stateStatus) && existingLabel != nil {
|
||||
l.stateStatus = StateStatusExists
|
||||
}
|
||||
pkgLabel.existing = existingLabel
|
||||
l.existing = existingLabel
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1034,9 +1034,9 @@ func (s *Service) dryRunNotificationRules(ctx context.Context, orgID influxdb.ID
|
|||
continue
|
||||
}
|
||||
|
||||
e, ok := endpoints[r.parserRule.endpointPkgName()]
|
||||
e, ok := endpoints[r.parserRule.endpointMetaName()]
|
||||
if !IsRemoval(r.stateStatus) && !ok {
|
||||
err := fmt.Errorf("failed to find notification endpoint %q dependency for notification rule %q", r.parserRule.endpointName, r.parserRule.PkgName())
|
||||
err := fmt.Errorf("failed to find notification endpoint %q dependency for notification rule %q", r.parserRule.endpointName, r.parserRule.MetaName())
|
||||
return &influxdb.Error{
|
||||
Code: influxdb.EUnprocessableEntity,
|
||||
Err: err,
|
||||
|
@ -1048,9 +1048,9 @@ func (s *Service) dryRunNotificationRules(ctx context.Context, orgID influxdb.ID
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) dryRunSecrets(ctx context.Context, orgID influxdb.ID, pkg *Pkg) error {
|
||||
pkgSecrets := pkg.mSecrets
|
||||
if len(pkgSecrets) == 0 {
|
||||
func (s *Service) dryRunSecrets(ctx context.Context, orgID influxdb.ID, template *Template) error {
|
||||
templateSecrets := template.mSecrets
|
||||
if len(templateSecrets) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1060,7 +1060,7 @@ func (s *Service) dryRunSecrets(ctx context.Context, orgID influxdb.ID, pkg *Pkg
|
|||
}
|
||||
|
||||
for _, secret := range existingSecrets {
|
||||
pkgSecrets[secret] = true // marked true since it exists in the platform
|
||||
templateSecrets[secret] = true // marked true since it exists in the platform
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -1223,11 +1223,11 @@ func (s *Service) dryRunResourceLabelMapping(ctx context.Context, state *stateCo
|
|||
}) ([]stateLabelMapping, error) {
|
||||
|
||||
ident := associatedResource.stateIdentity()
|
||||
pkgResourceLabels := associatedResource.labels()
|
||||
templateResourceLabels := associatedResource.labels()
|
||||
|
||||
var mappings []stateLabelMapping
|
||||
if !ident.exists() {
|
||||
for _, l := range pkgResourceLabels {
|
||||
for _, l := range templateResourceLabels {
|
||||
mappings = append(mappings, stateLabelMapping{
|
||||
status: StateStatusNew,
|
||||
resource: associatedResource,
|
||||
|
@ -1246,11 +1246,11 @@ func (s *Service) dryRunResourceLabelMapping(ctx context.Context, state *stateCo
|
|||
return nil, ierrors.Wrap(err, msgFmt)
|
||||
}
|
||||
|
||||
pkgLabels := labelSlcToMap(pkgResourceLabels)
|
||||
templateLabels := labelSlcToMap(templateResourceLabels)
|
||||
for _, l := range existingLabels {
|
||||
// if label is found in state then we track the mapping and mark it existing
|
||||
// otherwise we continue on
|
||||
delete(pkgLabels, l.Name)
|
||||
delete(templateLabels, l.Name)
|
||||
if sLabel, ok := stateLabelsByResName[l.Name]; ok {
|
||||
mappings = append(mappings, stateLabelMapping{
|
||||
status: StateStatusExists,
|
||||
|
@ -1261,8 +1261,8 @@ func (s *Service) dryRunResourceLabelMapping(ctx context.Context, state *stateCo
|
|||
}
|
||||
|
||||
// now we add labels that do not exist
|
||||
for _, l := range pkgLabels {
|
||||
stLabel, found := state.getLabelByPkgName(l.PkgName())
|
||||
for _, l := range templateLabels {
|
||||
stLabel, found := state.getLabelByMetaName(l.MetaName())
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
|
@@ -1289,7 +1289,7 @@ func (s *Service) addStackState(ctx context.Context, stackID influxdb.ID, state
 type (
 	// ApplyOpt is an option for applying a package.
 	ApplyOpt struct {
-		Pkgs           []*Pkg
+		Templates      []*Template
 		EnvRefs        map[string]string
 		MissingSecrets map[string]string
 		StackID        influxdb.ID
@@ -1297,14 +1297,14 @@ type (
 		KindsToSkip map[Kind]bool
 	}

-	// ActionSkipResource provides an action from the consumer to use the pkg with
-	// modifications to the resource kind and pkg name that will be applied.
+	// ActionSkipResource provides an action from the consumer to use the template with
+	// modifications to the resource kind and template name that will be applied.
 	ActionSkipResource struct {
 		Kind     Kind   `json:"kind"`
 		MetaName string `json:"resourceTemplateName"`
 	}

-	// ActionSkipKind provides an action from the consumer to use the pkg with
+	// ActionSkipKind provides an action from the consumer to use the template with
 	// modifications to the resource kinds will be applied.
 	ActionSkipKind struct {
 		Kind Kind `json:"kind"`
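A small sketch of skipping an entire kind during apply, using the ActionSkipKind shape above; the dashboard choice and the svc/tmpl variables are illustrative:

	impact, err := svc.Apply(ctx, orgID, userID,
		pkger.ApplyWithTemplate(tmpl),
		pkger.ApplyWithKindSkip(pkger.ActionSkipKind{Kind: pkger.KindDashboard}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = impact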
@@ -1314,17 +1314,17 @@ type (
 	ApplyOptFn func(opt *ApplyOpt)
 )

-// ApplyWithEnvRefs provides env refs to saturate the missing reference fields in the pkg.
+// ApplyWithEnvRefs provides env refs to saturate the missing reference fields in the template.
 func ApplyWithEnvRefs(envRefs map[string]string) ApplyOptFn {
 	return func(o *ApplyOpt) {
 		o.EnvRefs = envRefs
 	}
 }

-// ApplyWithPkg provides a pkg to the application/dry run.
-func ApplyWithPkg(pkg *Pkg) ApplyOptFn {
+// ApplyWithTemplate provides a template to the application/dry run.
+func ApplyWithTemplate(template *Template) ApplyOptFn {
 	return func(opt *ApplyOpt) {
-		opt.Pkgs = append(opt.Pkgs, pkg)
+		opt.Templates = append(opt.Templates, template)
 	}
 }
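Putting the renamed option funcs together, a hedged end-to-end sketch of a dry run followed by an apply; the IDs, env ref key, and file path are placeholders, not values from this change:

	tmpl, err := pkger.Parse(pkger.EncodingJSON, pkger.FromFile("template.json"))
	if err != nil {
		log.Fatal(err)
	}

	opts := []pkger.ApplyOptFn{
		pkger.ApplyWithTemplate(tmpl),
		pkger.ApplyWithEnvRefs(map[string]string{"bucket-name": "my-bucket"}),
		pkger.ApplyWithStackID(stackID),
	}

	if _, err := svc.DryRun(ctx, orgID, userID, opts...); err != nil {
		log.Fatal(err)
	}
	impact, err := svc.Apply(ctx, orgID, userID, opts...)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(impact.StackID, impact.Sources)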
@@ -1364,14 +1364,14 @@ func ApplyWithKindSkip(action ActionSkipKind) ApplyOptFn {
 	}
 }

-// ApplyWithSecrets provides secrets to the platform that the pkg will need.
+// ApplyWithSecrets provides secrets to the platform that the template will need.
 func ApplyWithSecrets(secrets map[string]string) ApplyOptFn {
 	return func(o *ApplyOpt) {
 		o.MissingSecrets = secrets
 	}
 }

-// ApplyWithStackID associates the application of a pkg with a stack.
+// ApplyWithStackID associates the application of a template with a stack.
 func ApplyWithStackID(stackID influxdb.ID) ApplyOptFn {
 	return func(o *ApplyOpt) {
 		o.StackID = stackID
@@ -1386,26 +1386,26 @@ func applyOptFromOptFns(opts ...ApplyOptFn) ApplyOpt {
 	return opt
 }

-// Apply will apply all the resources identified in the provided pkg. The entire pkg will be applied
-// in its entirety. If a failure happens midway then the entire pkg will be rolled back to the state
-// from before the pkg were applied.
+// Apply will apply all the resources identified in the provided template. The entire template will be applied
+// in its entirety. If a failure happens midway then the entire template will be rolled back to the state
+// from before the template were applied.
 func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, opts ...ApplyOptFn) (impact ImpactSummary, e error) {
 	opt := applyOptFromOptFns(opts...)

-	pkg, err := s.pkgFromApplyOpts(ctx, opt)
+	template, err := s.templateFromApplyOpts(ctx, opt)
 	if err != nil {
 		return ImpactSummary{}, err
 	}

-	if err := pkg.Validate(ValidWithoutResources()); err != nil {
+	if err := template.Validate(ValidWithoutResources()); err != nil {
 		return ImpactSummary{}, failedValidationErr(err)
 	}

-	if err := pkg.applyEnvRefs(opt.EnvRefs); err != nil {
+	if err := template.applyEnvRefs(opt.EnvRefs); err != nil {
 		return ImpactSummary{}, failedValidationErr(err)
 	}

-	state, err := s.dryRun(ctx, orgID, pkg, opt)
+	state, err := s.dryRun(ctx, orgID, template, opt)
 	if err != nil {
 		return ImpactSummary{}, err
 	}
@ -1431,7 +1431,7 @@ func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, opts ...
|
|||
}
|
||||
}
|
||||
|
||||
err := updateStackFn(ctx, stackID, state, pkg.Sources())
|
||||
err := updateStackFn(ctx, stackID, state, template.Sources())
|
||||
if err != nil {
|
||||
s.log.Error("failed to update stack", zap.Error(err))
|
||||
}
|
||||
|
@ -1445,13 +1445,13 @@ func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, opts ...
|
|||
return ImpactSummary{}, err
|
||||
}
|
||||
|
||||
pkg.applySecrets(opt.MissingSecrets)
|
||||
template.applySecrets(opt.MissingSecrets)
|
||||
|
||||
return ImpactSummary{
|
||||
Sources: pkg.sources,
|
||||
Sources: template.sources,
|
||||
StackID: stackID,
|
||||
Diff: state.diff(),
|
||||
Summary: newSummaryFromStatePkg(state, pkg),
|
||||
Summary: newSummaryFromStateTemplate(state, template),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -1472,8 +1472,8 @@ func (s *Service) applyState(ctx context.Context, coordinator *rollbackCoordinat
|
|||
// rely on the primary resources having been created.
|
||||
appliers := [][]applier{
|
||||
{
|
||||
// adds secrets that are referenced it the pkg, this allows user to
|
||||
// provide data that does not rest in the pkg.
|
||||
// adds secrets that are referenced it the template, this allows user to
|
||||
// provide data that does not rest in the template.
|
||||
s.applySecrets(missingSecrets),
|
||||
},
|
||||
{
|
||||
|
@ -1536,7 +1536,7 @@ func (s *Service) applyBuckets(ctx context.Context, buckets []*stateBucket) appl
|
|||
influxBucket, err := s.applyBucket(ctx, b)
|
||||
if err != nil {
|
||||
return &applyErrBody{
|
||||
name: b.parserBkt.PkgName(),
|
||||
name: b.parserBkt.MetaName(),
|
||||
msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
@ -1655,7 +1655,7 @@ func (s *Service) applyChecks(ctx context.Context, checks []*stateCheck) applier
|
|||
influxCheck, err := s.applyCheck(ctx, c, userID)
|
||||
if err != nil {
|
||||
return &applyErrBody{
|
||||
name: c.parserCheck.PkgName(),
|
||||
name: c.parserCheck.MetaName(),
|
||||
msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
@ -1770,7 +1770,7 @@ func (s *Service) applyDashboards(ctx context.Context, dashboards []*stateDashbo
|
|||
influxBucket, err := s.applyDashboard(ctx, d)
|
||||
if err != nil {
|
||||
return &applyErrBody{
|
||||
name: d.parserDash.PkgName(),
|
||||
name: d.parserDash.MetaName(),
|
||||
msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
@ -1911,7 +1911,7 @@ func (s *Service) applyLabels(ctx context.Context, labels []*stateLabel) applier
|
|||
influxLabel, err := s.applyLabel(ctx, l)
|
||||
if err != nil {
|
||||
return &applyErrBody{
|
||||
name: l.parserLabel.PkgName(),
|
||||
name: l.parserLabel.MetaName(),
|
||||
msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
@ -2014,7 +2014,7 @@ func (s *Service) applyNotificationEndpoints(ctx context.Context, userID influxd
|
|||
influxEndpoint, err := s.applyNotificationEndpoint(ctx, endpoint, userID)
|
||||
if err != nil {
|
||||
return &applyErrBody{
|
||||
name: endpoint.parserEndpoint.PkgName(),
|
||||
name: endpoint.parserEndpoint.MetaName(),
|
||||
msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
@ -2136,7 +2136,7 @@ func (s *Service) rollbackNotificationEndpoints(ctx context.Context, userID infl
|
|||
func (s *Service) applyNotificationGenerator(ctx context.Context, userID influxdb.ID, rules []*stateRule, stateEndpoints []*stateEndpoint) (endpointApplier applier, ruleApplier applier, err error) {
|
||||
mEndpoints := make(map[string]*stateEndpoint)
|
||||
for _, e := range stateEndpoints {
|
||||
mEndpoints[e.parserEndpoint.PkgName()] = e
|
||||
mEndpoints[e.parserEndpoint.MetaName()] = e
|
||||
}
|
||||
|
||||
var errs applyErrs
|
||||
|
@ -2144,11 +2144,11 @@ func (s *Service) applyNotificationGenerator(ctx context.Context, userID influxd
|
|||
if IsRemoval(r.stateStatus) {
|
||||
continue
|
||||
}
|
||||
v, ok := mEndpoints[r.endpointPkgName()]
|
||||
v, ok := mEndpoints[r.endpointTemplateName()]
|
||||
if !ok {
|
||||
errs = append(errs, &applyErrBody{
|
||||
name: r.parserRule.PkgName(),
|
||||
msg: fmt.Sprintf("notification rule endpoint dependency does not exist; endpointName=%q", r.parserRule.associatedEndpoint.PkgName()),
|
||||
name: r.parserRule.MetaName(),
|
||||
msg: fmt.Sprintf("notification rule endpoint dependency does not exist; endpointName=%q", r.parserRule.associatedEndpoint.MetaName()),
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
@ -2193,7 +2193,7 @@ func (s *Service) applyNotificationRules(ctx context.Context, userID influxdb.ID
|
|||
influxRule, err := s.applyNotificationRule(ctx, rule, userID)
|
||||
if err != nil {
|
||||
return &applyErrBody{
|
||||
name: rule.parserRule.PkgName(),
|
||||
name: rule.parserRule.MetaName(),
|
||||
msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
@ -2278,7 +2278,7 @@ func (s *Service) rollbackNotificationRules(ctx context.Context, userID influxdb
|
|||
// 1. we have no ability to find status via the Service, only to set it...
|
||||
// 2. we have no way of inspecting an existing rule and pulling status from it
|
||||
// 3. since this is a fallback condition, we set things to inactive as a user
|
||||
// is likely to follow up this failure by fixing their pkg up then reapplying
|
||||
// is likely to follow up this failure by fixing their template up then reapplying
|
||||
unknownStatus := influxdb.Inactive
|
||||
|
||||
var err error
|
||||
|
@ -2381,7 +2381,7 @@ func (s *Service) applyTasks(ctx context.Context, tasks []*stateTask) applier {
|
|||
newTask, err := s.applyTask(ctx, userID, t)
|
||||
if err != nil {
|
||||
return &applyErrBody{
|
||||
name: t.parserTask.PkgName(),
|
||||
name: t.parserTask.MetaName(),
|
||||
msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
@ -2547,7 +2547,7 @@ func (s *Service) applyTelegrafs(ctx context.Context, userID influxdb.ID, teles
|
|||
existing, err := s.applyTelegrafConfig(ctx, userID, t)
|
||||
if err != nil {
|
||||
return &applyErrBody{
|
||||
name: t.parserTelegraf.PkgName(),
|
||||
name: t.parserTelegraf.MetaName(),
|
||||
msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
@ -2652,7 +2652,7 @@ func (s *Service) applyVariables(ctx context.Context, vars []*stateVariable) app
|
|||
influxVar, err := s.applyVariable(ctx, v)
|
||||
if err != nil {
|
||||
return &applyErrBody{
|
||||
name: v.parserVar.PkgName(),
|
||||
name: v.parserVar.MetaName(),
|
||||
msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
@@ -2896,25 +2896,25 @@ func (s *Service) rollbackLabelMappings(ctx context.Context, mappings []stateLab
 	return nil
 }

-func (s *Service) pkgFromApplyOpts(ctx context.Context, opt ApplyOpt) (*Pkg, error) {
+func (s *Service) templateFromApplyOpts(ctx context.Context, opt ApplyOpt) (*Template, error) {
 	if opt.StackID != 0 {
-		remotePkgs, err := s.getStackRemotePackages(ctx, opt.StackID)
+		remotes, err := s.getStackRemoteTemplates(ctx, opt.StackID)
 		if err != nil {
 			return nil, err
 		}
-		opt.Pkgs = append(opt.Pkgs, remotePkgs...)
+		opt.Templates = append(opt.Templates, remotes...)
 	}

-	return Combine(opt.Pkgs, ValidWithoutResources())
+	return Combine(opt.Templates, ValidWithoutResources())
 }

-func (s *Service) getStackRemotePackages(ctx context.Context, stackID influxdb.ID) ([]*Pkg, error) {
+func (s *Service) getStackRemoteTemplates(ctx context.Context, stackID influxdb.ID) ([]*Template, error) {
 	stack, err := s.store.ReadStackByID(ctx, stackID)
 	if err != nil {
 		return nil, err
 	}

-	var remotePkgs []*Pkg
+	var remotes []*Template
 	for _, rawURL := range stack.TemplateURLs {
 		u, err := url.Parse(rawURL)
 		if err != nil {
@@ -2940,13 +2940,13 @@ func (s *Service) getStackRemotePackages(ctx context.Context, stackID influxdb.I
 			readerFn = FromFile(u.Path)
 		}

-		pkg, err := Parse(encoding, readerFn)
+		template, err := Parse(encoding, readerFn)
 		if err != nil {
 			return nil, err
 		}
-		remotePkgs = append(remotePkgs, pkg)
+		remotes = append(remotes, template)
 	}
-	return remotePkgs, nil
+	return remotes, nil
 }

 func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.ID, state *stateCoordinator, sources []string) error {
@@ -2964,7 +2964,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
 			APIVersion: APIVersion,
 			ID: b.ID(),
 			Kind: KindBucket,
-			MetaName: b.parserBkt.PkgName(),
+			MetaName: b.parserBkt.MetaName(),
 			Associations: stateLabelsToStackAssociations(b.labels()),
 		})
 	}
@@ -2976,7 +2976,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
 			APIVersion: APIVersion,
 			ID: c.ID(),
 			Kind: KindCheck,
-			MetaName: c.parserCheck.PkgName(),
+			MetaName: c.parserCheck.MetaName(),
 			Associations: stateLabelsToStackAssociations(c.labels()),
 		})
 	}
@@ -2988,7 +2988,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
 			APIVersion: APIVersion,
 			ID: d.ID(),
 			Kind: KindDashboard,
-			MetaName: d.parserDash.PkgName(),
+			MetaName: d.parserDash.MetaName(),
 			Associations: stateLabelsToStackAssociations(d.labels()),
 		})
 	}
@@ -3000,7 +3000,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
 			APIVersion: APIVersion,
 			ID: n.ID(),
 			Kind: KindNotificationEndpoint,
-			MetaName: n.parserEndpoint.PkgName(),
+			MetaName: n.parserEndpoint.MetaName(),
 			Associations: stateLabelsToStackAssociations(n.labels()),
 		})
 	}
@@ -3012,7 +3012,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
 			APIVersion: APIVersion,
 			ID: l.ID(),
 			Kind: KindLabel,
-			MetaName: l.parserLabel.PkgName(),
+			MetaName: l.parserLabel.MetaName(),
 		})
 	}
 	for _, r := range state.mRules {
@@ -3023,7 +3023,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
 			APIVersion: APIVersion,
 			ID: r.ID(),
 			Kind: KindNotificationRule,
-			MetaName: r.parserRule.PkgName(),
+			MetaName: r.parserRule.MetaName(),
 			Associations: append(
 				stateLabelsToStackAssociations(r.labels()),
 				r.endpointAssociation(),
@@ -3038,7 +3038,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
 			APIVersion: APIVersion,
 			ID: t.ID(),
 			Kind: KindTask,
-			MetaName: t.parserTask.PkgName(),
+			MetaName: t.parserTask.MetaName(),
 			Associations: stateLabelsToStackAssociations(t.labels()),
 		})
 	}
@@ -3050,7 +3050,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
 			APIVersion: APIVersion,
 			ID: t.ID(),
 			Kind: KindTelegraf,
-			MetaName: t.parserTelegraf.PkgName(),
+			MetaName: t.parserTelegraf.MetaName(),
 			Associations: stateLabelsToStackAssociations(t.labels()),
 		})
 	}
@@ -3062,7 +3062,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
 			APIVersion: APIVersion,
 			ID: v.ID(),
 			Kind: KindVariable,
-			MetaName: v.parserVar.PkgName(),
+			MetaName: v.parserVar.MetaName(),
 			Associations: stateLabelsToStackAssociations(v.labels()),
 		})
 	}
@@ -3080,11 +3080,11 @@ func (s *Service) updateStackAfterRollback(ctx context.Context, stackID influxdb
 	}

 	type key struct {
-		k       Kind
-		pkgName string
+		k        Kind
+		metaName string
 	}
-	newKey := func(k Kind, pkgName string) key {
-		return key{k: k, pkgName: pkgName}
+	newKey := func(k Kind, metaName string) key {
+		return key{k: k, metaName: metaName}
 	}

 	existingResources := make(map[key]*StackResource)
@@ -3099,42 +3099,42 @@ func (s *Service) updateStackAfterRollback(ctx context.Context, stackID influxdb
 	// when resource is not to be removed this is a nothing burger, as it should be
 	// rolled back to previous state.
 	for _, b := range state.mBuckets {
-		res, ok := existingResources[newKey(KindBucket, b.parserBkt.PkgName())]
+		res, ok := existingResources[newKey(KindBucket, b.parserBkt.MetaName())]
 		if ok && res.ID != b.ID() {
 			hasChanges = true
 			res.ID = b.existing.ID
 		}
 	}
 	for _, c := range state.mChecks {
-		res, ok := existingResources[newKey(KindCheck, c.parserCheck.PkgName())]
+		res, ok := existingResources[newKey(KindCheck, c.parserCheck.MetaName())]
 		if ok && res.ID != c.ID() {
 			hasChanges = true
 			res.ID = c.existing.GetID()
 		}
 	}
 	for _, d := range state.mDashboards {
-		res, ok := existingResources[newKey(KindDashboard, d.parserDash.PkgName())]
+		res, ok := existingResources[newKey(KindDashboard, d.parserDash.MetaName())]
 		if ok && res.ID != d.ID() {
 			hasChanges = true
 			res.ID = d.existing.ID
 		}
 	}
 	for _, e := range state.mEndpoints {
-		res, ok := existingResources[newKey(KindNotificationEndpoint, e.parserEndpoint.PkgName())]
+		res, ok := existingResources[newKey(KindNotificationEndpoint, e.parserEndpoint.MetaName())]
 		if ok && res.ID != e.ID() {
 			hasChanges = true
 			res.ID = e.existing.GetID()
 		}
 	}
 	for _, l := range state.mLabels {
-		res, ok := existingResources[newKey(KindLabel, l.parserLabel.PkgName())]
+		res, ok := existingResources[newKey(KindLabel, l.parserLabel.MetaName())]
 		if ok && res.ID != l.ID() {
 			hasChanges = true
 			res.ID = l.existing.ID
 		}
 	}
 	for _, r := range state.mRules {
-		res, ok := existingResources[newKey(KindNotificationRule, r.parserRule.PkgName())]
+		res, ok := existingResources[newKey(KindNotificationRule, r.parserRule.MetaName())]
 		if !ok {
 			continue
 		}
@@ -3161,21 +3161,21 @@ func (s *Service) updateStackAfterRollback(ctx context.Context, stackID influxdb
 		}
 	}
 	for _, t := range state.mTasks {
-		res, ok := existingResources[newKey(KindTask, t.parserTask.PkgName())]
+		res, ok := existingResources[newKey(KindTask, t.parserTask.MetaName())]
 		if ok && res.ID != t.ID() {
 			hasChanges = true
 			res.ID = t.existing.ID
 		}
 	}
 	for _, t := range state.mTelegrafs {
-		res, ok := existingResources[newKey(KindTelegraf, t.parserTelegraf.PkgName())]
+		res, ok := existingResources[newKey(KindTelegraf, t.parserTelegraf.MetaName())]
 		if ok && res.ID != t.ID() {
 			hasChanges = true
 			res.ID = t.existing.ID
 		}
 	}
 	for _, v := range state.mVariables {
-		res, ok := existingResources[newKey(KindVariable, v.parserVar.PkgName())]
+		res, ok := existingResources[newKey(KindVariable, v.parserVar.MetaName())]
 		if ok && res.ID != v.ID() {
 			hasChanges = true
 			res.ID = v.existing.ID
@@ -3235,10 +3235,10 @@ func (s *Service) getAllPlatformVariables(ctx context.Context, orgID influxdb.ID
 	return existingVars, nil
 }

-func newSummaryFromStatePkg(state *stateCoordinator, pkg *Pkg) Summary {
+func newSummaryFromStateTemplate(state *stateCoordinator, template *Template) Summary {
 	stateSum := state.summary()
-	stateSum.MissingEnvs = pkg.missingEnvRefs()
-	stateSum.MissingSecrets = pkg.missingSecrets()
+	stateSum.MissingEnvs = template.missingEnvRefs()
+	stateSum.MissingSecrets = template.missingSecrets()
 	return stateSum
 }

@@ -3247,7 +3247,7 @@ func stateLabelsToStackAssociations(stateLabels []*stateLabel) []StackResourceAs
 	for _, l := range stateLabels {
 		out = append(out, StackResourceAssociation{
 			Kind: KindLabel,
-			MetaName: l.parserLabel.PkgName(),
+			MetaName: l.parserLabel.MetaName(),
 		})
 	}
 	return out
@@ -3256,7 +3256,7 @@ func stateLabelsToStackAssociations(stateLabels []*stateLabel) []StackResourceAs
 func applyFailErr(method string, ident stateIdentity, err error) error {
 	v := ident.id.String()
 	if v == "" {
-		v = ident.pkgName
+		v = ident.metaName
 	}
 	msg := fmt.Sprintf("failed to %s %s[%q]", method, ident.resourceType, v)
 	return ierrors.Wrap(err, msg)
@@ -3469,7 +3469,7 @@ func (a applyErrs) toError(resType, msg string) error {
 	}
 	errMsg := fmt.Sprintf(`resource_type=%q err=%q`, resType, msg)
 	for _, e := range a {
-		errMsg += fmt.Sprintf("\n\tpkg_name=%q err_msg=%q", e.name, e.msg)
+		errMsg += fmt.Sprintf("\n\tmetadata_name=%q err_msg=%q", e.name, e.msg)
 	}
 	return errors.New(errMsg)
 }
@@ -78,7 +78,7 @@ func (s *authMW) UpdateStack(ctx context.Context, upd StackUpdate) (Stack, error
 	return s.next.UpdateStack(ctx, upd)
 }

-func (s *authMW) Export(ctx context.Context, opts ...ExportOptFn) (*Pkg, error) {
+func (s *authMW) Export(ctx context.Context, opts ...ExportOptFn) (*Template, error) {
 	opt, err := exportOptFromOptFns(opts)
 	if err != nil {
 		return nil, err
@@ -123,14 +123,14 @@ func (s *loggingMW) UpdateStack(ctx context.Context, upd StackUpdate) (_ Stack,
 	return s.next.UpdateStack(ctx, upd)
 }

-func (s *loggingMW) Export(ctx context.Context, opts ...ExportOptFn) (pkg *Pkg, err error) {
+func (s *loggingMW) Export(ctx context.Context, opts ...ExportOptFn) (template *Template, err error) {
 	defer func(start time.Time) {
 		dur := zap.Duration("took", time.Since(start))
 		if err != nil {
-			s.logger.Error("failed to create pkg", zap.Error(err), dur)
+			s.logger.Error("failed to export template", zap.Error(err), dur)
 			return
 		}
-		s.logger.Info("pkg create", append(s.summaryLogFields(pkg.Summary()), dur)...)
+		s.logger.Info("failed to export template", append(s.summaryLogFields(template.Summary()), dur)...)
 	}(time.Now())
 	return s.next.Export(ctx, opts...)
 }
@@ -139,7 +139,7 @@ func (s *loggingMW) DryRun(ctx context.Context, orgID, userID influxdb.ID, opts
 	defer func(start time.Time) {
 		dur := zap.Duration("took", time.Since(start))
 		if err != nil {
-			s.logger.Error("failed to dry run pkg",
+			s.logger.Error("failed to dry run template",
 				zap.String("orgID", orgID.String()),
 				zap.String("userID", userID.String()),
 				zap.Error(err),
@@ -158,7 +158,7 @@ func (s *loggingMW) DryRun(ctx context.Context, orgID, userID influxdb.ID, opts
 			fields = append(fields, zap.Stringer("stackID", opt.StackID))
 		}
 		fields = append(fields, dur)
-		s.logger.Info("pkg dry run successful", fields...)
+		s.logger.Info("template dry run successful", fields...)
 	}(time.Now())
 	return s.next.DryRun(ctx, orgID, userID, opts...)
 }
@@ -167,7 +167,7 @@ func (s *loggingMW) Apply(ctx context.Context, orgID, userID influxdb.ID, opts .
 	defer func(start time.Time) {
 		dur := zap.Duration("took", time.Since(start))
 		if err != nil {
-			s.logger.Error("failed to apply pkg",
+			s.logger.Error("failed to apply template",
 				zap.String("orgID", orgID.String()),
 				zap.String("userID", userID.String()),
 				zap.Error(err),
@@ -183,7 +183,7 @@ func (s *loggingMW) Apply(ctx context.Context, orgID, userID influxdb.ID, opts .
 			fields = append(fields, zap.Stringer("stackID", opt.StackID))
 		}
 		fields = append(fields, dur)
-		s.logger.Info("pkg apply successful", fields...)
+		s.logger.Info("template apply successful", fields...)
 	}(time.Now())
 	return s.next.Apply(ctx, orgID, userID, opts...)
 }
@@ -61,21 +61,21 @@ func (s *mwMetrics) UpdateStack(ctx context.Context, upd StackUpdate) (Stack, er
 	return stack, rec(err)
 }

-func (s *mwMetrics) Export(ctx context.Context, opts ...ExportOptFn) (*Pkg, error) {
-	rec := s.rec.Record("create_pkg")
+func (s *mwMetrics) Export(ctx context.Context, opts ...ExportOptFn) (*Template, error) {
+	rec := s.rec.Record("export")
 	opt, err := exportOptFromOptFns(opts)
 	if err != nil {
 		return nil, rec(err)
 	}

-	pkg, err := s.next.Export(ctx, opts...)
+	template, err := s.next.Export(ctx, opts...)
 	if err != nil {
 		return nil, err
 	}

-	return pkg, rec(err, metric.RecordAdditional(map[string]interface{}{
+	return template, rec(err, metric.RecordAdditional(map[string]interface{}{
 		"num_org_ids": len(opt.OrgIDs),
-		"summary": pkg.Summary(),
+		"summary": template.Summary(),
 		"by_stack": opt.StackID != 0,
 	}))
 }
@@ -23,7 +23,7 @@ type stateCoordinator struct {
 	labelMappingsToRemove []stateLabelMappingForRemoval
 }

-func newStateCoordinator(pkg *Pkg, acts resourceActions) *stateCoordinator {
+func newStateCoordinator(template *Template, acts resourceActions) *stateCoordinator {
 	state := stateCoordinator{
 		mBuckets: make(map[string]*stateBucket),
 		mChecks: make(map[string]*stateCheck),
@@ -40,93 +40,93 @@ func newStateCoordinator(pkg *Pkg, acts resourceActions) *stateCoordinator {
 	// when a label is skipped by an action, this will still be accurate
 	// for hte individual labels, and cascades to the resources that are
 	// associated to a label.
-	for _, pkgLabel := range pkg.labels() {
-		if acts.skipResource(KindLabel, pkgLabel.PkgName()) {
+	for _, l := range template.labels() {
+		if acts.skipResource(KindLabel, l.MetaName()) {
 			continue
 		}
-		state.mLabels[pkgLabel.PkgName()] = &stateLabel{
-			parserLabel: pkgLabel,
+		state.mLabels[l.MetaName()] = &stateLabel{
+			parserLabel: l,
 			stateStatus: StateStatusNew,
 		}
 	}
-	for _, pkgBkt := range pkg.buckets() {
-		if acts.skipResource(KindBucket, pkgBkt.PkgName()) {
+	for _, b := range template.buckets() {
+		if acts.skipResource(KindBucket, b.MetaName()) {
 			continue
 		}
-		state.mBuckets[pkgBkt.PkgName()] = &stateBucket{
-			parserBkt: pkgBkt,
+		state.mBuckets[b.MetaName()] = &stateBucket{
+			parserBkt: b,
 			stateStatus: StateStatusNew,
-			labelAssociations: state.pkgToStateLabels(pkgBkt.labels),
+			labelAssociations: state.templateToStateLabels(b.labels),
 		}
 	}
-	for _, pkgCheck := range pkg.checks() {
-		if acts.skipResource(KindCheck, pkgCheck.PkgName()) {
+	for _, c := range template.checks() {
+		if acts.skipResource(KindCheck, c.MetaName()) {
 			continue
 		}
-		state.mChecks[pkgCheck.PkgName()] = &stateCheck{
-			parserCheck: pkgCheck,
+		state.mChecks[c.MetaName()] = &stateCheck{
+			parserCheck: c,
 			stateStatus: StateStatusNew,
-			labelAssociations: state.pkgToStateLabels(pkgCheck.labels),
+			labelAssociations: state.templateToStateLabels(c.labels),
 		}
 	}
-	for _, pkgDash := range pkg.dashboards() {
-		if acts.skipResource(KindDashboard, pkgDash.PkgName()) {
+	for _, d := range template.dashboards() {
+		if acts.skipResource(KindDashboard, d.MetaName()) {
 			continue
 		}
-		state.mDashboards[pkgDash.PkgName()] = &stateDashboard{
-			parserDash: pkgDash,
+		state.mDashboards[d.MetaName()] = &stateDashboard{
+			parserDash: d,
 			stateStatus: StateStatusNew,
-			labelAssociations: state.pkgToStateLabels(pkgDash.labels),
+			labelAssociations: state.templateToStateLabels(d.labels),
 		}
 	}
-	for _, pkgEndpoint := range pkg.notificationEndpoints() {
-		if acts.skipResource(KindNotificationEndpoint, pkgEndpoint.PkgName()) {
+	for _, e := range template.notificationEndpoints() {
+		if acts.skipResource(KindNotificationEndpoint, e.MetaName()) {
 			continue
 		}
-		state.mEndpoints[pkgEndpoint.PkgName()] = &stateEndpoint{
-			parserEndpoint: pkgEndpoint,
+		state.mEndpoints[e.MetaName()] = &stateEndpoint{
+			parserEndpoint: e,
 			stateStatus: StateStatusNew,
-			labelAssociations: state.pkgToStateLabels(pkgEndpoint.labels),
+			labelAssociations: state.templateToStateLabels(e.labels),
 		}
 	}
-	for _, pkgRule := range pkg.notificationRules() {
-		if acts.skipResource(KindNotificationRule, pkgRule.PkgName()) {
+	for _, r := range template.notificationRules() {
+		if acts.skipResource(KindNotificationRule, r.MetaName()) {
 			continue
 		}
-		state.mRules[pkgRule.PkgName()] = &stateRule{
-			parserRule: pkgRule,
+		state.mRules[r.MetaName()] = &stateRule{
+			parserRule: r,
 			stateStatus: StateStatusNew,
-			labelAssociations: state.pkgToStateLabels(pkgRule.labels),
+			labelAssociations: state.templateToStateLabels(r.labels),
 		}
 	}
-	for _, pkgTask := range pkg.tasks() {
-		if acts.skipResource(KindTask, pkgTask.PkgName()) {
+	for _, task := range template.tasks() {
+		if acts.skipResource(KindTask, task.MetaName()) {
 			continue
 		}
-		state.mTasks[pkgTask.PkgName()] = &stateTask{
-			parserTask: pkgTask,
+		state.mTasks[task.MetaName()] = &stateTask{
+			parserTask: task,
 			stateStatus: StateStatusNew,
-			labelAssociations: state.pkgToStateLabels(pkgTask.labels),
+			labelAssociations: state.templateToStateLabels(task.labels),
 		}
 	}
-	for _, pkgTele := range pkg.telegrafs() {
-		if acts.skipResource(KindTelegraf, pkgTele.PkgName()) {
+	for _, tele := range template.telegrafs() {
+		if acts.skipResource(KindTelegraf, tele.MetaName()) {
 			continue
 		}
-		state.mTelegrafs[pkgTele.PkgName()] = &stateTelegraf{
-			parserTelegraf: pkgTele,
+		state.mTelegrafs[tele.MetaName()] = &stateTelegraf{
+			parserTelegraf: tele,
 			stateStatus: StateStatusNew,
-			labelAssociations: state.pkgToStateLabels(pkgTele.labels),
+			labelAssociations: state.templateToStateLabels(tele.labels),
 		}
 	}
-	for _, pkgVar := range pkg.variables() {
-		if acts.skipResource(KindVariable, pkgVar.PkgName()) {
+	for _, v := range template.variables() {
+		if acts.skipResource(KindVariable, v.MetaName()) {
 			continue
 		}
-		state.mVariables[pkgVar.PkgName()] = &stateVariable{
-			parserVar: pkgVar,
+		state.mVariables[v.MetaName()] = &stateVariable{
+			parserVar: v,
 			stateStatus: StateStatusNew,
-			labelAssociations: state.pkgToStateLabels(pkgVar.labels),
+			labelAssociations: state.templateToStateLabels(v.labels),
 		}
 	}

@@ -397,8 +397,8 @@ func (s *stateCoordinator) summary() Summary {
 		if n.ResourceType != m.ResourceType {
 			return n.ResourceType < m.ResourceType
 		}
-		if n.ResourcePkgName != m.ResourcePkgName {
-			return n.ResourcePkgName < m.ResourcePkgName
+		if n.ResourceMetaName != m.ResourceMetaName {
+			return n.ResourceMetaName < m.ResourceMetaName
 		}
 		return n.LabelName < m.LabelName
 	})
@@ -406,15 +406,15 @@ func (s *stateCoordinator) summary() Summary {
 	return sum
 }

-func (s *stateCoordinator) getLabelByPkgName(pkgName string) (*stateLabel, bool) {
-	l, ok := s.mLabels[pkgName]
+func (s *stateCoordinator) getLabelByMetaName(metaName string) (*stateLabel, bool) {
+	l, ok := s.mLabels[metaName]
 	return l, ok
 }

-func (s *stateCoordinator) pkgToStateLabels(labels []*label) []*stateLabel {
+func (s *stateCoordinator) templateToStateLabels(labels []*label) []*stateLabel {
 	var out []*stateLabel
 	for _, l := range labels {
-		stLabel, found := s.getLabelByPkgName(l.PkgName())
+		stLabel, found := s.getLabelByMetaName(l.MetaName())
 		if !found {
 			continue
 		}
@ -445,10 +445,10 @@ func (s *stateCoordinator) reconcileStackResources(stackResources []StackResourc
|
|||
}
|
||||
|
||||
func (s *stateCoordinator) reconcileLabelMappings(stackResources []StackResource) {
|
||||
mLabelPkgNameToID := make(map[string]influxdb.ID)
|
||||
mLabelMetaNameToID := make(map[string]influxdb.ID)
|
||||
for _, r := range stackResources {
|
||||
if r.Kind.is(KindLabel) {
|
||||
mLabelPkgNameToID[r.MetaName] = r.ID
|
||||
mLabelMetaNameToID[r.MetaName] = r.ID
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -472,7 +472,7 @@ func (s *stateCoordinator) reconcileLabelMappings(stackResources []StackResource
|
|||
// we want to keep associations that are from previous application and are not changing
|
||||
delete(mStackAss, StackResourceAssociation{
|
||||
Kind: KindLabel,
|
||||
MetaName: l.parserLabel.PkgName(),
|
||||
MetaName: l.parserLabel.MetaName(),
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -480,11 +480,11 @@ func (s *stateCoordinator) reconcileLabelMappings(stackResources []StackResource
|
|||
// state fall into here and are marked for removal.
|
||||
for assForRemoval := range mStackAss {
|
||||
s.labelMappingsToRemove = append(s.labelMappingsToRemove, stateLabelMappingForRemoval{
|
||||
LabelPkgName: assForRemoval.MetaName,
|
||||
LabelID: mLabelPkgNameToID[assForRemoval.MetaName],
|
||||
ResourceID: r.ID,
|
||||
ResourcePkgName: r.MetaName,
|
||||
ResourceType: r.Kind.ResourceType(),
|
||||
LabelMetaName: assForRemoval.MetaName,
|
||||
LabelID: mLabelMetaNameToID[assForRemoval.MetaName],
|
||||
ResourceID: r.ID,
|
||||
ResourceMetaName: r.MetaName,
|
||||
ResourceType: r.Kind.ResourceType(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -503,45 +503,45 @@ func (s *stateCoordinator) reconcileNotificationDependencies(stackResources []St
|
|||
}
|
||||
}
|
||||
|
||||
func (s *stateCoordinator) get(k Kind, pkgName string) (interface{}, bool) {
|
||||
func (s *stateCoordinator) get(k Kind, metaName string) (interface{}, bool) {
|
||||
switch k {
|
||||
case KindBucket:
|
||||
v, ok := s.mBuckets[pkgName]
|
||||
v, ok := s.mBuckets[metaName]
|
||||
return v, ok
|
||||
case KindCheck, KindCheckDeadman, KindCheckThreshold:
|
||||
v, ok := s.mChecks[pkgName]
|
||||
v, ok := s.mChecks[metaName]
|
||||
return v, ok
|
||||
case KindDashboard:
|
||||
v, ok := s.mDashboards[pkgName]
|
||||
v, ok := s.mDashboards[metaName]
|
||||
return v, ok
|
||||
case KindLabel:
|
||||
v, ok := s.mLabels[pkgName]
|
||||
v, ok := s.mLabels[metaName]
|
||||
return v, ok
|
||||
case KindNotificationEndpoint,
|
||||
KindNotificationEndpointHTTP,
|
||||
KindNotificationEndpointPagerDuty,
|
||||
KindNotificationEndpointSlack:
|
||||
v, ok := s.mEndpoints[pkgName]
|
||||
v, ok := s.mEndpoints[metaName]
|
||||
return v, ok
|
||||
case KindNotificationRule:
|
||||
v, ok := s.mRules[pkgName]
|
||||
v, ok := s.mRules[metaName]
|
||||
return v, ok
|
||||
case KindTask:
|
||||
v, ok := s.mTasks[pkgName]
|
||||
v, ok := s.mTasks[metaName]
|
||||
return v, ok
|
||||
case KindTelegraf:
|
||||
v, ok := s.mTelegrafs[pkgName]
|
||||
v, ok := s.mTelegrafs[metaName]
|
||||
return v, ok
|
||||
case KindVariable:
|
||||
v, ok := s.mVariables[pkgName]
|
||||
v, ok := s.mVariables[metaName]
|
||||
return v, ok
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stateCoordinator) labelAssociations(k Kind, pkgName string) []*stateLabel {
|
||||
v, _ := s.get(k, pkgName)
|
||||
func (s *stateCoordinator) labelAssociations(k Kind, metaName string) []*stateLabel {
|
||||
v, _ := s.get(k, metaName)
|
||||
labeler, ok := v.(interface {
|
||||
labels() []*stateLabel
|
||||
})
|
||||
|
@ -552,14 +552,14 @@ func (s *stateCoordinator) labelAssociations(k Kind, pkgName string) []*stateLab
|
|||
return labeler.labels()
|
||||
}
|
||||
|
||||
func (s *stateCoordinator) Contains(k Kind, pkgName string) bool {
|
||||
_, ok := s.get(k, pkgName)
|
||||
func (s *stateCoordinator) Contains(k Kind, metaName string) bool {
|
||||
_, ok := s.get(k, metaName)
|
||||
return ok
|
||||
}
|
||||
|
||||
// setObjectID sets the id for the resource graphed from the object the key identifies.
|
||||
func (s *stateCoordinator) setObjectID(k Kind, pkgName string, id influxdb.ID) {
|
||||
idSetFn, ok := s.getObjectIDSetter(k, pkgName)
|
||||
func (s *stateCoordinator) setObjectID(k Kind, metaName string, id influxdb.ID) {
|
||||
idSetFn, ok := s.getObjectIDSetter(k, metaName)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
@ -567,35 +567,35 @@ func (s *stateCoordinator) setObjectID(k Kind, pkgName string, id influxdb.ID) {
|
|||
}
|
||||
|
||||
// addObjectForRemoval sets the id for the resource graphed from the object the key identifies.
|
||||
// The pkgName and kind are used as the unique identifier, when calling this it will
|
||||
// The metaName and kind are used as the unique identifier, when calling this it will
|
||||
// overwrite any existing value if one exists. If desired, check for the value by using
|
||||
// the Contains method.
|
||||
func (s *stateCoordinator) addObjectForRemoval(k Kind, pkgName string, id influxdb.ID) {
|
||||
func (s *stateCoordinator) addObjectForRemoval(k Kind, metaName string, id influxdb.ID) {
|
||||
newIdentity := identity{
|
||||
name: &references{val: pkgName},
|
||||
name: &references{val: metaName},
|
||||
}
|
||||
|
||||
switch k {
|
||||
case KindBucket:
|
||||
s.mBuckets[pkgName] = &stateBucket{
|
||||
s.mBuckets[metaName] = &stateBucket{
|
||||
id: id,
|
||||
parserBkt: &bucket{identity: newIdentity},
|
||||
stateStatus: StateStatusRemove,
|
||||
}
|
||||
case KindCheck, KindCheckDeadman, KindCheckThreshold:
|
||||
s.mChecks[pkgName] = &stateCheck{
|
||||
s.mChecks[metaName] = &stateCheck{
|
||||
id: id,
|
||||
parserCheck: &check{identity: newIdentity},
|
||||
stateStatus: StateStatusRemove,
|
||||
}
|
||||
case KindDashboard:
|
||||
s.mDashboards[pkgName] = &stateDashboard{
|
||||
s.mDashboards[metaName] = &stateDashboard{
|
||||
id: id,
|
||||
parserDash: &dashboard{identity: newIdentity},
|
||||
stateStatus: StateStatusRemove,
|
||||
}
|
||||
case KindLabel:
|
||||
s.mLabels[pkgName] = &stateLabel{
|
||||
s.mLabels[metaName] = &stateLabel{
|
||||
id: id,
|
||||
parserLabel: &label{identity: newIdentity},
|
||||
stateStatus: StateStatusRemove,
|
||||
|
@ -604,31 +604,31 @@ func (s *stateCoordinator) addObjectForRemoval(k Kind, pkgName string, id influx
|
|||
KindNotificationEndpointHTTP,
|
||||
KindNotificationEndpointPagerDuty,
|
||||
KindNotificationEndpointSlack:
|
||||
s.mEndpoints[pkgName] = &stateEndpoint{
|
||||
s.mEndpoints[metaName] = &stateEndpoint{
|
||||
id: id,
|
||||
parserEndpoint: ¬ificationEndpoint{identity: newIdentity},
|
||||
stateStatus: StateStatusRemove,
|
||||
}
|
||||
case KindNotificationRule:
|
||||
s.mRules[pkgName] = &stateRule{
|
||||
s.mRules[metaName] = &stateRule{
|
||||
id: id,
|
||||
parserRule: ¬ificationRule{identity: newIdentity},
|
||||
stateStatus: StateStatusRemove,
|
||||
}
|
||||
case KindTask:
|
||||
s.mTasks[pkgName] = &stateTask{
|
||||
s.mTasks[metaName] = &stateTask{
|
||||
id: id,
|
||||
parserTask: &task{identity: newIdentity},
|
||||
stateStatus: StateStatusRemove,
|
||||
}
|
||||
case KindTelegraf:
|
||||
s.mTelegrafs[pkgName] = &stateTelegraf{
|
||||
s.mTelegrafs[metaName] = &stateTelegraf{
|
||||
id: id,
|
||||
parserTelegraf: &telegraf{identity: newIdentity},
|
||||
stateStatus: StateStatusRemove,
|
||||
}
|
||||
case KindVariable:
|
||||
s.mVariables[pkgName] = &stateVariable{
|
||||
s.mVariables[metaName] = &stateVariable{
|
||||
id: id,
|
||||
parserVar: &variable{identity: newIdentity},
|
||||
stateStatus: StateStatusRemove,
|
||||
|
@ -636,28 +636,28 @@ func (s *stateCoordinator) addObjectForRemoval(k Kind, pkgName string, id influx
|
|||
}
|
||||
}
|
||||
|
||||
func (s *stateCoordinator) getObjectIDSetter(k Kind, pkgName string) (func(influxdb.ID), bool) {
|
||||
func (s *stateCoordinator) getObjectIDSetter(k Kind, metaName string) (func(influxdb.ID), bool) {
|
||||
switch k {
|
||||
case KindBucket:
|
||||
r, ok := s.mBuckets[pkgName]
|
||||
r, ok := s.mBuckets[metaName]
|
||||
return func(id influxdb.ID) {
|
||||
r.id = id
|
||||
r.stateStatus = StateStatusExists
|
||||
}, ok
|
||||
case KindCheck, KindCheckDeadman, KindCheckThreshold:
|
||||
r, ok := s.mChecks[pkgName]
|
||||
r, ok := s.mChecks[metaName]
|
||||
return func(id influxdb.ID) {
|
||||
r.id = id
|
||||
r.stateStatus = StateStatusExists
|
||||
}, ok
|
||||
case KindDashboard:
|
||||
r, ok := s.mDashboards[pkgName]
|
||||
r, ok := s.mDashboards[metaName]
|
||||
return func(id influxdb.ID) {
|
||||
r.id = id
|
||||
r.stateStatus = StateStatusExists
|
||||
}, ok
|
||||
case KindLabel:
|
||||
r, ok := s.mLabels[pkgName]
|
||||
r, ok := s.mLabels[metaName]
|
||||
return func(id influxdb.ID) {
|
||||
r.id = id
|
||||
r.stateStatus = StateStatusExists
|
||||
|
@ -666,31 +666,31 @@ func (s *stateCoordinator) getObjectIDSetter(k Kind, pkgName string) (func(influ
|
|||
KindNotificationEndpointHTTP,
|
||||
KindNotificationEndpointPagerDuty,
|
||||
KindNotificationEndpointSlack:
|
||||
r, ok := s.mEndpoints[pkgName]
|
||||
r, ok := s.mEndpoints[metaName]
|
||||
return func(id influxdb.ID) {
|
||||
r.id = id
|
||||
r.stateStatus = StateStatusExists
|
||||
}, ok
|
||||
case KindNotificationRule:
|
||||
r, ok := s.mRules[pkgName]
|
||||
r, ok := s.mRules[metaName]
|
||||
return func(id influxdb.ID) {
|
||||
r.id = id
|
||||
r.stateStatus = StateStatusExists
|
||||
}, ok
|
||||
case KindTask:
|
||||
r, ok := s.mTasks[pkgName]
|
||||
r, ok := s.mTasks[metaName]
|
||||
return func(id influxdb.ID) {
|
||||
r.id = id
|
||||
r.stateStatus = StateStatusExists
|
||||
}, ok
|
||||
case KindTelegraf:
|
||||
r, ok := s.mTelegrafs[pkgName]
|
||||
r, ok := s.mTelegrafs[metaName]
|
||||
return func(id influxdb.ID) {
|
||||
r.id = id
|
||||
r.stateStatus = StateStatusExists
|
||||
}, ok
|
||||
case KindVariable:
|
||||
r, ok := s.mVariables[pkgName]
|
||||
r, ok := s.mVariables[metaName]
|
||||
return func(id influxdb.ID) {
|
||||
r.id = id
|
||||
r.stateStatus = StateStatusExists
|
||||
|
@ -703,7 +703,7 @@ func (s *stateCoordinator) getObjectIDSetter(k Kind, pkgName string) (func(influ
|
|||
type stateIdentity struct {
|
||||
id influxdb.ID
|
||||
name string
|
||||
pkgName string
|
||||
metaName string
|
||||
resourceType influxdb.ResourceType
|
||||
stateStatus StateStatus
|
||||
}
|
||||
|
@ -726,7 +726,7 @@ func (b *stateBucket) diffBucket() DiffBucket {
|
|||
DiffIdentifier: DiffIdentifier{
|
||||
ID: SafeID(b.ID()),
|
||||
StateStatus: b.stateStatus,
|
||||
MetaName: b.parserBkt.PkgName(),
|
||||
MetaName: b.parserBkt.MetaName(),
|
||||
},
|
||||
New: DiffBucketValues{
|
||||
Name: b.parserBkt.Name(),
|
||||
|
@ -781,7 +781,7 @@ func (b *stateBucket) stateIdentity() stateIdentity {
|
|||
return stateIdentity{
|
||||
id: b.ID(),
|
||||
name: b.parserBkt.Name(),
|
||||
pkgName: b.parserBkt.PkgName(),
|
||||
metaName: b.parserBkt.MetaName(),
|
||||
resourceType: b.resourceType(),
|
||||
stateStatus: b.stateStatus,
|
||||
}
|
||||
|
@ -823,7 +823,7 @@ func (c *stateCheck) stateIdentity() stateIdentity {
|
|||
return stateIdentity{
|
||||
id: c.ID(),
|
||||
name: c.parserCheck.Name(),
|
||||
pkgName: c.parserCheck.PkgName(),
|
||||
metaName: c.parserCheck.MetaName(),
|
||||
resourceType: c.resourceType(),
|
||||
stateStatus: c.stateStatus,
|
||||
}
|
||||
|
@ -834,7 +834,7 @@ func (c *stateCheck) diffCheck() DiffCheck {
|
|||
DiffIdentifier: DiffIdentifier{
|
||||
ID: SafeID(c.ID()),
|
||||
StateStatus: c.stateStatus,
|
||||
MetaName: c.parserCheck.PkgName(),
|
||||
MetaName: c.parserCheck.MetaName(),
|
||||
},
|
||||
}
|
||||
if newCheck := c.summarize(); newCheck.Check != nil {
|
||||
|
@ -887,7 +887,7 @@ func (d *stateDashboard) stateIdentity() stateIdentity {
|
|||
return stateIdentity{
|
||||
id: d.ID(),
|
||||
name: d.parserDash.Name(),
|
||||
pkgName: d.parserDash.PkgName(),
|
||||
metaName: d.parserDash.MetaName(),
|
||||
resourceType: d.resourceType(),
|
||||
stateStatus: d.stateStatus,
|
||||
}
|
||||
|
@ -898,7 +898,7 @@ func (d *stateDashboard) diffDashboard() DiffDashboard {
|
|||
DiffIdentifier: DiffIdentifier{
|
||||
ID: SafeID(d.ID()),
|
||||
StateStatus: d.stateStatus,
|
||||
MetaName: d.parserDash.PkgName(),
|
||||
MetaName: d.parserDash.MetaName(),
|
||||
},
|
||||
New: DiffDashboardValues{
|
||||
Name: d.parserDash.Name(),
|
||||
|
@ -966,7 +966,7 @@ func (l *stateLabel) diffLabel() DiffLabel {
|
|||
DiffIdentifier: DiffIdentifier{
|
||||
ID: SafeID(l.ID()),
|
||||
StateStatus: l.stateStatus,
|
||||
MetaName: l.parserLabel.PkgName(),
|
||||
MetaName: l.parserLabel.MetaName(),
|
||||
},
|
||||
New: DiffLabelValues{
|
||||
Name: l.parserLabel.Name(),
|
||||
|
@@ -1002,8 +1002,8 @@ func (l *stateLabel) Name() string {
 	return l.parserLabel.Name()
 }

-func (l *stateLabel) PkgName() string {
-	return l.parserLabel.PkgName()
+func (l *stateLabel) MetaName() string {
+	return l.parserLabel.MetaName()
 }

 func (l *stateLabel) shouldApply() bool {
@ -1046,10 +1046,10 @@ func (lm stateLabelMapping) diffLabelMapping() DiffLabelMapping {
|
|||
StateStatus: lm.status,
|
||||
ResType: ident.resourceType,
|
||||
ResID: SafeID(ident.id),
|
||||
ResMetaName: ident.pkgName,
|
||||
ResMetaName: ident.metaName,
|
||||
ResName: ident.name,
|
||||
LabelID: SafeID(lm.label.ID()),
|
||||
LabelMetaName: lm.label.parserLabel.PkgName(),
|
||||
LabelMetaName: lm.label.parserLabel.MetaName(),
|
||||
LabelName: lm.label.parserLabel.Name(),
|
||||
}
|
||||
}
|
||||
|
@@ -1057,14 +1057,14 @@ func (lm stateLabelMapping) diffLabelMapping() DiffLabelMapping {
 func (lm stateLabelMapping) summarize() SummaryLabelMapping {
 	ident := lm.resource.stateIdentity()
 	return SummaryLabelMapping{
-		Status:          lm.status,
-		ResourceID:      SafeID(ident.id),
-		ResourcePkgName: ident.pkgName,
-		ResourceName:    ident.name,
-		ResourceType:    ident.resourceType,
-		LabelPkgName:    lm.label.parserLabel.PkgName(),
-		LabelName:       lm.label.parserLabel.Name(),
-		LabelID:         SafeID(lm.label.ID()),
+		Status:           lm.status,
+		ResourceID:       SafeID(ident.id),
+		ResourceMetaName: ident.metaName,
+		ResourceName:     ident.name,
+		ResourceType:     ident.resourceType,
+		LabelMetaName:    lm.label.parserLabel.MetaName(),
+		LabelName:        lm.label.parserLabel.Name(),
+		LabelID:          SafeID(lm.label.ID()),
 	}
 }

@@ -1078,11 +1078,11 @@ func stateLabelMappingToInfluxLabelMapping(mapping stateLabelMapping) influxdb.L
 }

 type stateLabelMappingForRemoval struct {
-	LabelID         influxdb.ID
-	LabelPkgName    string
-	ResourceID      influxdb.ID
-	ResourcePkgName string
-	ResourceType    influxdb.ResourceType
+	LabelID          influxdb.ID
+	LabelMetaName    string
+	ResourceID       influxdb.ID
+	ResourceMetaName string
+	ResourceType     influxdb.ResourceType
 }

 func (m *stateLabelMappingForRemoval) diffLabelMapping() DiffLabelMapping {
@@ -1090,9 +1090,9 @@ func (m *stateLabelMappingForRemoval) diffLabelMapping() DiffLabelMapping {
 		StateStatus: StateStatusRemove,
 		ResType: m.ResourceType,
 		ResID: SafeID(m.ResourceID),
-		ResMetaName: m.ResourcePkgName,
+		ResMetaName: m.ResourceMetaName,
 		LabelID: SafeID(m.LabelID),
-		LabelMetaName: m.LabelPkgName,
+		LabelMetaName: m.LabelMetaName,
 	}
 }

@ -1117,7 +1117,7 @@ func (e *stateEndpoint) diffEndpoint() DiffNotificationEndpoint {
|
|||
DiffIdentifier: DiffIdentifier{
|
||||
ID: SafeID(e.ID()),
|
||||
StateStatus: e.stateStatus,
|
||||
MetaName: e.parserEndpoint.PkgName(),
|
||||
MetaName: e.parserEndpoint.MetaName(),
|
||||
},
|
||||
}
|
||||
if sum := e.summarize(); sum.NotificationEndpoint != nil {
|
||||
|
@ -1143,7 +1143,7 @@ func (e *stateEndpoint) stateIdentity() stateIdentity {
|
|||
return stateIdentity{
|
||||
id: e.ID(),
|
||||
name: e.parserEndpoint.Name(),
|
||||
pkgName: e.parserEndpoint.PkgName(),
|
||||
metaName: e.parserEndpoint.MetaName(),
|
||||
resourceType: e.resourceType(),
|
||||
stateStatus: e.stateStatus,
|
||||
}
|
||||
|
@ -1188,7 +1188,7 @@ func (r *stateRule) endpointAssociation() StackResourceAssociation {
|
|||
}
|
||||
return StackResourceAssociation{
|
||||
Kind: KindNotificationEndpoint,
|
||||
MetaName: r.endpointPkgName(),
|
||||
MetaName: r.endpointTemplateName(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1197,12 +1197,12 @@ func (r *stateRule) diffRule() DiffNotificationRule {
|
|||
DiffIdentifier: DiffIdentifier{
|
||||
ID: SafeID(r.ID()),
|
||||
StateStatus: r.stateStatus,
|
||||
MetaName: r.parserRule.PkgName(),
|
||||
MetaName: r.parserRule.MetaName(),
|
||||
},
|
||||
New: DiffNotificationRuleValues{
|
||||
Name: r.parserRule.Name(),
|
||||
Description: r.parserRule.description,
|
||||
EndpointName: r.endpointPkgName(),
|
||||
EndpointName: r.endpointTemplateName(),
|
||||
EndpointID: SafeID(r.endpointID()),
|
||||
EndpointType: r.endpointType(),
|
||||
Every: r.parserRule.every.String(),
|
||||
|
@@ -1269,9 +1269,9 @@ func (r *stateRule) endpointID() influxdb.ID {
 	return 0
 }

-func (r *stateRule) endpointPkgName() string {
+func (r *stateRule) endpointTemplateName() string {
 	if r.associatedEndpoint != nil && r.associatedEndpoint.parserEndpoint != nil {
-		return r.associatedEndpoint.parserEndpoint.PkgName()
+		return r.associatedEndpoint.parserEndpoint.MetaName()
 	}
 	return ""
 }
@ -1295,7 +1295,7 @@ func (r *stateRule) stateIdentity() stateIdentity {
|
|||
return stateIdentity{
|
||||
id: r.ID(),
|
||||
name: r.parserRule.Name(),
|
||||
pkgName: r.parserRule.PkgName(),
|
||||
metaName: r.parserRule.MetaName(),
|
||||
resourceType: r.resourceType(),
|
||||
stateStatus: r.stateStatus,
|
||||
}
|
||||
|
@ -1305,7 +1305,7 @@ func (r *stateRule) summarize() SummaryNotificationRule {
|
|||
sum := r.parserRule.summarize()
|
||||
sum.ID = SafeID(r.id)
|
||||
sum.EndpointID = SafeID(r.associatedEndpoint.ID())
|
||||
sum.EndpointPkgName = r.associatedEndpoint.parserEndpoint.PkgName()
|
||||
sum.EndpointMetaName = r.associatedEndpoint.parserEndpoint.MetaName()
|
||||
sum.EndpointType = r.associatedEndpoint.parserEndpoint.kind.String()
|
||||
sum.LabelAssociations = stateToSummaryLabels(r.labelAssociations)
|
||||
return sum
|
||||
|
@ -1352,7 +1352,7 @@ func (t *stateTask) diffTask() DiffTask {
|
|||
DiffIdentifier: DiffIdentifier{
|
||||
ID: SafeID(t.ID()),
|
||||
StateStatus: t.stateStatus,
|
||||
MetaName: t.parserTask.PkgName(),
|
||||
MetaName: t.parserTask.MetaName(),
|
||||
},
|
||||
New: DiffTaskValues{
|
||||
Name: t.parserTask.Name(),
|
||||
|
@ -1394,7 +1394,7 @@ func (t *stateTask) stateIdentity() stateIdentity {
|
|||
return stateIdentity{
|
||||
id: t.ID(),
|
||||
name: t.parserTask.Name(),
|
||||
pkgName: t.parserTask.PkgName(),
|
||||
metaName: t.parserTask.MetaName(),
|
||||
resourceType: t.resourceType(),
|
||||
stateStatus: t.stateStatus,
|
||||
}
|
||||
|
@ -1428,7 +1428,7 @@ func (t *stateTelegraf) diffTelegraf() DiffTelegraf {
|
|||
DiffIdentifier: DiffIdentifier{
|
||||
ID: SafeID(t.ID()),
|
||||
StateStatus: t.stateStatus,
|
||||
MetaName: t.parserTelegraf.PkgName(),
|
||||
MetaName: t.parserTelegraf.MetaName(),
|
||||
},
|
||||
New: t.parserTelegraf.config,
|
||||
Old: t.existing,
|
||||
|
@ -1447,7 +1447,7 @@ func (t *stateTelegraf) stateIdentity() stateIdentity {
|
|||
return stateIdentity{
|
||||
id: t.ID(),
|
||||
name: t.parserTelegraf.Name(),
|
||||
pkgName: t.parserTelegraf.PkgName(),
|
||||
metaName: t.parserTelegraf.MetaName(),
|
||||
resourceType: t.resourceType(),
|
||||
stateStatus: t.stateStatus,
|
||||
}
|
||||
|
@ -1482,7 +1482,7 @@ func (v *stateVariable) diffVariable() DiffVariable {
|
|||
DiffIdentifier: DiffIdentifier{
|
||||
ID: SafeID(v.ID()),
|
||||
StateStatus: v.stateStatus,
|
||||
MetaName: v.parserVar.PkgName(),
|
||||
MetaName: v.parserVar.MetaName(),
|
||||
},
|
||||
New: DiffVariableValues{
|
||||
Name: v.parserVar.Name(),
|
||||
|
@ -1522,7 +1522,7 @@ func (v *stateVariable) stateIdentity() stateIdentity {
|
|||
return stateIdentity{
|
||||
id: v.ID(),
|
||||
name: v.parserVar.Name(),
|
||||
pkgName: v.parserVar.PkgName(),
|
||||
metaName: v.parserVar.MetaName(),
|
||||
resourceType: v.resourceType(),
|
||||
stateStatus: v.stateStatus,
|
||||
}
|
||||
|
|
|
@ -96,7 +96,7 @@ func TestService(t *testing.T) {
|
|||
skipResOpts = append(skipResOpts, ApplyWithResourceSkip(asr))
|
||||
}
|
||||
|
||||
testfileRunner(t, fields.path, func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, fields.path, func(t *testing.T, template *Template) {
|
||||
t.Helper()
|
||||
|
||||
tests := []struct {
|
||||
|
@ -133,7 +133,7 @@ func TestService(t *testing.T) {
|
|||
context.TODO(),
|
||||
influxdb.ID(100),
|
||||
0,
|
||||
append(tt.applyOpts, ApplyWithPkg(pkg))...,
|
||||
append(tt.applyOpts, ApplyWithTemplate(template))...,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -146,7 +146,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
t.Run("buckets", func(t *testing.T) {
|
||||
t.Run("single bucket updated", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, template *Template) {
|
||||
fakeBktSVC := mock.NewBucketService()
|
||||
fakeBktSVC.FindBucketByNameFn = func(_ context.Context, orgID influxdb.ID, name string) (*influxdb.Bucket, error) {
|
||||
if name != "rucket-11" {
|
||||
|
@ -162,7 +162,7 @@ func TestService(t *testing.T) {
|
|||
}
|
||||
svc := newTestService(WithBucketSVC(fakeBktSVC))
|
||||
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, impact.Diff.Buckets, 2)
|
||||
|
@ -190,14 +190,14 @@ func TestService(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("single bucket new", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/bucket.json", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/bucket.json", func(t *testing.T, template *Template) {
|
||||
fakeBktSVC := mock.NewBucketService()
|
||||
fakeBktSVC.FindBucketByNameFn = func(_ context.Context, orgID influxdb.ID, name string) (*influxdb.Bucket, error) {
|
||||
return nil, errors.New("not found")
|
||||
}
|
||||
svc := newTestService(WithBucketSVC(fakeBktSVC))
|
||||
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, impact.Diff.Buckets, 2)
|
||||
|
@ -240,7 +240,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
t.Run("checks", func(t *testing.T) {
|
||||
t.Run("mixed update and creates", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/checks.yml", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/checks.yml", func(t *testing.T, template *Template) {
|
||||
fakeCheckSVC := mock.NewCheckService()
|
||||
id := influxdb.ID(1)
|
||||
existing := &icheck.Deadman{
|
||||
|
@ -259,7 +259,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
svc := newTestService(WithCheckSVC(fakeCheckSVC))
|
||||
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
checks := impact.Diff.Checks
|
||||
|
@ -324,7 +324,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
t.Run("labels", func(t *testing.T) {
|
||||
t.Run("two labels updated", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/label.json", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/label.json", func(t *testing.T, template *Template) {
|
||||
fakeLabelSVC := mock.NewLabelService()
|
||||
fakeLabelSVC.FindLabelsFn = func(_ context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) {
|
||||
return []*influxdb.Label{
|
||||
|
@ -340,7 +340,7 @@ func TestService(t *testing.T) {
|
|||
}
|
||||
svc := newTestService(WithLabelSVC(fakeLabelSVC))
|
||||
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, impact.Diff.Labels, 3)
|
||||
|
@ -374,14 +374,14 @@ func TestService(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("two labels created", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/label.yml", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/label.yml", func(t *testing.T, template *Template) {
|
||||
fakeLabelSVC := mock.NewLabelService()
|
||||
fakeLabelSVC.FindLabelsFn = func(_ context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) {
|
||||
return nil, errors.New("no labels found")
|
||||
}
|
||||
svc := newTestService(WithLabelSVC(fakeLabelSVC))
|
||||
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
labels := impact.Diff.Labels
|
||||
|
@ -435,7 +435,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
t.Run("notification endpoints", func(t *testing.T) {
|
||||
t.Run("mixed update and created", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/notification_endpoint.yml", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/notification_endpoint.yml", func(t *testing.T, template *Template) {
|
||||
fakeEndpointSVC := mock.NewNotificationEndpointService()
|
||||
id := influxdb.ID(1)
|
||||
existing := &endpoint.HTTP{
|
||||
|
@ -455,7 +455,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
svc := newTestService(WithNotificationEndpointSVC(fakeEndpointSVC))
|
||||
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, impact.Diff.NotificationEndpoints, 5)
|
||||
|
@ -541,13 +541,13 @@ func TestService(t *testing.T) {
|
|||
|
||||
t.Run("notification rules", func(t *testing.T) {
|
||||
t.Run("mixed update and created", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, template *Template) {
|
||||
fakeEndpointSVC := mock.NewNotificationEndpointService()
|
||||
id := influxdb.ID(1)
|
||||
existing := &endpoint.HTTP{
|
||||
Base: endpoint.Base{
|
||||
ID: &id,
|
||||
// This name here matches the endpoint identified in the pkg notification rule
|
||||
// This name here matches the endpoint identified in the template notification rule
|
||||
Name: "endpoint-0",
|
||||
Description: "old desc",
|
||||
Status: influxdb.TaskStatusInactive,
|
||||
|
@ -562,7 +562,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
svc := newTestService(WithNotificationEndpointSVC(fakeEndpointSVC))
|
||||
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, impact.Diff.NotificationRules, 1)
|
||||
|
@ -608,14 +608,14 @@ func TestService(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("secrets not returns missing secrets", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/notification_endpoint_secrets.yml", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/notification_endpoint_secrets.yml", func(t *testing.T, template *Template) {
|
||||
fakeSecretSVC := mock.NewSecretService()
|
||||
fakeSecretSVC.GetSecretKeysFn = func(ctx context.Context, orgID influxdb.ID) ([]string, error) {
|
||||
return []string{"rando-1", "rando-2"}, nil
|
||||
}
|
||||
svc := newTestService(WithSecretSVC(fakeSecretSVC))
|
||||
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, []string{"routing-key"}, impact.Summary.MissingSecrets)
|
||||
|
@ -668,7 +668,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
t.Run("variables", func(t *testing.T) {
|
||||
t.Run("mixed update and created", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/variables.json", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/variables.json", func(t *testing.T, template *Template) {
|
||||
fakeVarSVC := mock.NewVariableService()
|
||||
fakeVarSVC.FindVariablesF = func(_ context.Context, filter influxdb.VariableFilter, opts ...influxdb.FindOptions) ([]*influxdb.Variable, error) {
|
||||
return []*influxdb.Variable{
|
||||
|
@ -681,7 +681,7 @@ func TestService(t *testing.T) {
|
|||
}
|
||||
svc := newTestService(WithVariableSVC(fakeVarSVC))
|
||||
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
variables := impact.Diff.Variables
|
||||
|
@ -759,8 +759,8 @@ func TestService(t *testing.T) {
|
|||
|
||||
t.Run("Apply", func(t *testing.T) {
|
||||
t.Run("buckets", func(t *testing.T) {
|
||||
t.Run("successfully creates pkg of buckets", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, pkg *Pkg) {
|
||||
t.Run("successfully creates template of buckets", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, template *Template) {
|
||||
fakeBktSVC := mock.NewBucketService()
|
||||
fakeBktSVC.CreateBucketFn = func(_ context.Context, b *influxdb.Bucket) error {
|
||||
b.ID = influxdb.ID(b.RetentionPeriod)
|
||||
|
@ -778,7 +778,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
orgID := influxdb.ID(9000)
|
||||
|
||||
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
sum := impact.Summary
|
||||
|
@ -799,7 +799,7 @@ func TestService(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("will not apply bucket if no changes to be applied", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, template *Template) {
|
||||
orgID := influxdb.ID(9000)
|
||||
|
||||
fakeBktSVC := mock.NewBucketService()
|
||||
|
@ -813,7 +813,7 @@ func TestService(t *testing.T) {
|
|||
id = 4
|
||||
name = "rucket-22"
|
||||
}
|
||||
if bkt, ok := pkg.mBuckets[name]; ok {
|
||||
if bkt, ok := template.mBuckets[name]; ok {
|
||||
return &influxdb.Bucket{
|
||||
ID: id,
|
||||
OrgID: oid,
|
||||
|
@ -830,7 +830,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
svc := newTestService(WithBucketSVC(fakeBktSVC))
|
||||
|
||||
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
sum := impact.Summary
|
||||
|
@ -853,7 +853,7 @@ func TestService(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("rolls back all created buckets on an error", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, pkg *Pkg) {
|
||||
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, template *Template) {
|
||||
fakeBktSVC := mock.NewBucketService()
|
||||
fakeBktSVC.FindBucketByNameFn = func(_ context.Context, id influxdb.ID, s string) (*influxdb.Bucket, error) {
|
||||
// forces the bucket to be created a new
|
||||
|
@ -870,7 +870,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
orgID := influxdb.ID(9000)
|
||||
|
||||
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
|
||||
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
|
||||
require.Error(t, err)
|
||||
|
||||
assert.GreaterOrEqual(t, fakeBktSVC.DeleteBucketCalls.Count(), 1)
|
||||
|
@ -879,8 +879,8 @@ func TestService(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("checks", func(t *testing.T) {
|
||||
t.Run("successfully creates pkg of checks", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/checks.yml", func(t *testing.T, pkg *Pkg) {
|
||||
t.Run("successfully creates template of checks", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/checks.yml", func(t *testing.T, template *Template) {
|
||||
fakeCheckSVC := mock.NewCheckService()
|
||||
fakeCheckSVC.CreateCheckFn = func(ctx context.Context, c influxdb.CheckCreate, id influxdb.ID) error {
|
||||
c.SetID(influxdb.ID(fakeCheckSVC.CreateCheckCalls.Count() + 1))
|
||||
|
@ -891,7 +891,7 @@ func TestService(t *testing.T) {
|
|||
|
||||
orgID := influxdb.ID(9000)
|
||||
|
||||
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
|
||||
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
|
||||
require.NoError(t, err)
|
||||
|
||||
sum := impact.Summary
|
||||
|
@ -919,7 +919,7 @@ func TestService(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("rolls back all created checks on an error", func(t *testing.T) {
|
||||
testfileRunner(t, "testdata/checks.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/checks.yml", func(t *testing.T, template *Template) {
fakeCheckSVC := mock.NewCheckService()
fakeCheckSVC.CreateCheckFn = func(ctx context.Context, c influxdb.CheckCreate, id influxdb.ID) error {
c.SetID(influxdb.ID(fakeCheckSVC.CreateCheckCalls.Count() + 1))

@@ -933,7 +933,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.Error(t, err)

assert.GreaterOrEqual(t, fakeCheckSVC.DeleteCheckCalls.Count(), 1)

@@ -942,8 +942,8 @@ func TestService(t *testing.T) {
})

t.Run("labels", func(t *testing.T) {
t.Run("successfully creates pkg of labels", func(t *testing.T) {
testfileRunner(t, "testdata/label.json", func(t *testing.T, pkg *Pkg) {
t.Run("successfully creates template of labels", func(t *testing.T) {
testfileRunner(t, "testdata/label.json", func(t *testing.T, template *Template) {
fakeLabelSVC := mock.NewLabelService()
fakeLabelSVC.CreateLabelFn = func(_ context.Context, l *influxdb.Label) error {
i, err := strconv.Atoi(l.Name[len(l.Name)-1:])

@@ -958,7 +958,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

sum := impact.Summary

@@ -977,7 +977,7 @@ func TestService(t *testing.T) {
})

t.Run("rolls back all created labels on an error", func(t *testing.T) {
testfileRunner(t, "testdata/label", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/label", func(t *testing.T, template *Template) {
fakeLabelSVC := mock.NewLabelService()
fakeLabelSVC.CreateLabelFn = func(_ context.Context, l *influxdb.Label) error {
// 3rd/4th label will return the error here, and 2 before should be rolled back

@@ -991,7 +991,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.Error(t, err)

assert.GreaterOrEqual(t, fakeLabelSVC.DeleteLabelCalls.Count(), 1)

@@ -999,19 +999,19 @@ func TestService(t *testing.T) {
})

t.Run("will not apply label if no changes to be applied", func(t *testing.T) {
testfileRunner(t, "testdata/label.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/label.yml", func(t *testing.T, template *Template) {
orgID := influxdb.ID(9000)

stubExisting := func(name string, id influxdb.ID) *influxdb.Label {
pkgLabel := pkg.mLabels[name]
templateLabel := template.mLabels[name]
return &influxdb.Label{
// makes all pkg changes same as they are on the existing
// makes all template changes same as they are on the existing
ID: id,
OrgID: orgID,
Name: pkgLabel.Name(),
Name: templateLabel.Name(),
Properties: map[string]string{
"color": pkgLabel.Color,
"description": pkgLabel.Description,
"color": templateLabel.Color,
"description": templateLabel.Description,
},
}
}

@@ -1046,7 +1046,7 @@ func TestService(t *testing.T) {
svc := newTestService(WithLabelSVC(fakeLabelSVC))

impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

sum := impact.Summary

@@ -1069,7 +1069,7 @@ func TestService(t *testing.T) {
t.Run("dashboards", func(t *testing.T) {
t.Run("successfully creates a dashboard", func(t *testing.T) {
testfileRunner(t, "testdata/dashboard.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/dashboard.yml", func(t *testing.T, template *Template) {
fakeDashSVC := mock.NewDashboardService()
fakeDashSVC.CreateDashboardF = func(_ context.Context, d *influxdb.Dashboard) error {
d.ID = influxdb.ID(1)

@@ -1083,7 +1083,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

sum := impact.Summary

@@ -1105,7 +1105,7 @@ func TestService(t *testing.T) {
})

t.Run("rolls back created dashboard on an error", func(t *testing.T) {
testfileRunner(t, "testdata/dashboard.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/dashboard.yml", func(t *testing.T, template *Template) {
fakeDashSVC := mock.NewDashboardService()
fakeDashSVC.CreateDashboardF = func(_ context.Context, d *influxdb.Dashboard) error {
// error out on second dashboard attempted

@@ -1125,7 +1125,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.Error(t, err)

assert.True(t, deletedDashs[1])

@@ -1136,7 +1136,7 @@ func TestService(t *testing.T) {
t.Run("label mapping", func(t *testing.T) {
testLabelMappingApplyFn := func(t *testing.T, filename string, numExpected int, settersFn func() []ServiceSetterFn) {
t.Helper()
testfileRunner(t, filename, func(t *testing.T, pkg *Pkg) {
testfileRunner(t, filename, func(t *testing.T, template *Template) {
t.Helper()

fakeLabelSVC := mock.NewLabelService()

@@ -1160,7 +1160,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

assert.Equal(t, numExpected, fakeLabelSVC.CreateLabelMappingCalls.Count())

@@ -1169,7 +1169,7 @@ func TestService(t *testing.T) {
testLabelMappingRollbackFn := func(t *testing.T, filename string, killCount int, settersFn func() []ServiceSetterFn) {
t.Helper()
testfileRunner(t, filename, func(t *testing.T, pkg *Pkg) {
testfileRunner(t, filename, func(t *testing.T, template *Template) {
t.Helper()

fakeLabelSVC := mock.NewLabelService()

@@ -1196,7 +1196,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.Error(t, err)

assert.GreaterOrEqual(t, fakeLabelSVC.DeleteLabelMappingCalls.Count(), killCount)
@@ -1380,8 +1380,8 @@ func TestService(t *testing.T) {
})

t.Run("notification endpoints", func(t *testing.T) {
t.Run("successfully creates pkg of endpoints", func(t *testing.T) {
testfileRunner(t, "testdata/notification_endpoint.yml", func(t *testing.T, pkg *Pkg) {
t.Run("successfully creates template of endpoints", func(t *testing.T) {
testfileRunner(t, "testdata/notification_endpoint.yml", func(t *testing.T, template *Template) {
fakeEndpointSVC := mock.NewNotificationEndpointService()
fakeEndpointSVC.CreateNotificationEndpointF = func(ctx context.Context, nr influxdb.NotificationEndpoint, userID influxdb.ID) error {
nr.SetID(influxdb.ID(fakeEndpointSVC.CreateNotificationEndpointCalls.Count() + 1))

@@ -1392,7 +1392,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

sum := impact.Summary

@@ -1427,7 +1427,7 @@ func TestService(t *testing.T) {
})

t.Run("rolls back all created notifications on an error", func(t *testing.T) {
testfileRunner(t, "testdata/notification_endpoint.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/notification_endpoint.yml", func(t *testing.T, template *Template) {
fakeEndpointSVC := mock.NewNotificationEndpointService()
fakeEndpointSVC.CreateNotificationEndpointF = func(ctx context.Context, nr influxdb.NotificationEndpoint, userID influxdb.ID) error {
nr.SetID(influxdb.ID(fakeEndpointSVC.CreateNotificationEndpointCalls.Count() + 1))

@@ -1441,7 +1441,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.Error(t, err)

assert.GreaterOrEqual(t, fakeEndpointSVC.DeleteNotificationEndpointCalls.Count(), 3)

@@ -1451,7 +1451,7 @@ func TestService(t *testing.T) {
t.Run("notification rules", func(t *testing.T) {
t.Run("successfully creates", func(t *testing.T) {
testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, template *Template) {
fakeEndpointSVC := mock.NewNotificationEndpointService()
fakeEndpointSVC.CreateNotificationEndpointF = func(ctx context.Context, nr influxdb.NotificationEndpoint, userID influxdb.ID) error {
nr.SetID(influxdb.ID(fakeEndpointSVC.CreateNotificationEndpointCalls.Count() + 1))

@@ -1470,7 +1470,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

sum := impact.Summary

@@ -1479,13 +1479,13 @@ func TestService(t *testing.T) {
assert.Equal(t, "rule_0", sum.NotificationRules[0].Name)
assert.Equal(t, "desc_0", sum.NotificationRules[0].Description)
assert.Equal(t, SafeID(1), sum.NotificationRules[0].EndpointID)
assert.Equal(t, "endpoint-0", sum.NotificationRules[0].EndpointPkgName)
assert.Equal(t, "endpoint-0", sum.NotificationRules[0].EndpointMetaName)
assert.Equal(t, "slack", sum.NotificationRules[0].EndpointType)
})
})

t.Run("rolls back all created notification rules on an error", func(t *testing.T) {
testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, template *Template) {
fakeRuleStore := mock.NewNotificationRuleStore()
fakeRuleStore.CreateNotificationRuleF = func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID influxdb.ID) error {
nr.SetID(influxdb.ID(fakeRuleStore.CreateNotificationRuleCalls.Count() + 1))

@@ -1513,7 +1513,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.Error(t, err)

assert.Equal(t, 1, fakeRuleStore.DeleteNotificationRuleCalls.Count())

@@ -1523,7 +1523,7 @@ func TestService(t *testing.T) {
t.Run("tasks", func(t *testing.T) {
t.Run("successfuly creates", func(t *testing.T) {
testfileRunner(t, "testdata/tasks.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/tasks.yml", func(t *testing.T, template *Template) {
orgID := influxdb.ID(9000)

fakeTaskSVC := mock.NewTaskService()

@@ -1547,7 +1547,7 @@ func TestService(t *testing.T) {
svc := newTestService(WithTaskSVC(fakeTaskSVC))

impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

sum := impact.Summary

@@ -1565,7 +1565,7 @@ func TestService(t *testing.T) {
})

t.Run("rolls back all created tasks on an error", func(t *testing.T) {
testfileRunner(t, "testdata/tasks.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/tasks.yml", func(t *testing.T, template *Template) {
fakeTaskSVC := mock.NewTaskService()
fakeTaskSVC.CreateTaskFn = func(ctx context.Context, tc influxdb.TaskCreate) (*influxdb.Task, error) {
if fakeTaskSVC.CreateTaskCalls.Count() == 1 {

@@ -1580,7 +1580,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.Error(t, err)

assert.Equal(t, 1, fakeTaskSVC.DeleteTaskCalls.Count())

@@ -1590,7 +1590,7 @@ func TestService(t *testing.T) {
t.Run("telegrafs", func(t *testing.T) {
t.Run("successfuly creates", func(t *testing.T) {
testfileRunner(t, "testdata/telegraf.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/telegraf.yml", func(t *testing.T, template *Template) {
orgID := influxdb.ID(9000)

fakeTeleSVC := mock.NewTelegrafConfigStore()

@@ -1601,7 +1601,7 @@ func TestService(t *testing.T) {
svc := newTestService(WithTelegrafSVC(fakeTeleSVC))

impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

sum := impact.Summary

@@ -1613,7 +1613,7 @@ func TestService(t *testing.T) {
})

t.Run("rolls back all created telegrafs on an error", func(t *testing.T) {
testfileRunner(t, "testdata/telegraf.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/telegraf.yml", func(t *testing.T, template *Template) {
fakeTeleSVC := mock.NewTelegrafConfigStore()
fakeTeleSVC.CreateTelegrafConfigF = func(_ context.Context, tc *influxdb.TelegrafConfig, userID influxdb.ID) error {
t.Log("called")

@@ -1634,7 +1634,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.Error(t, err)

assert.Equal(t, 1, fakeTeleSVC.DeleteTelegrafConfigCalls.Count())
@@ -1643,8 +1643,8 @@ func TestService(t *testing.T) {
})

t.Run("variables", func(t *testing.T) {
t.Run("successfully creates pkg of variables", func(t *testing.T) {
testfileRunner(t, "testdata/variables.yml", func(t *testing.T, pkg *Pkg) {
t.Run("successfully creates template of variables", func(t *testing.T) {
testfileRunner(t, "testdata/variables.yml", func(t *testing.T, template *Template) {
fakeVarSVC := mock.NewVariableService()
fakeVarSVC.CreateVariableF = func(_ context.Context, v *influxdb.Variable) error {
v.ID = influxdb.ID(fakeVarSVC.CreateVariableCalls.Count() + 1)

@@ -1655,7 +1655,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

sum := impact.Summary

@@ -1679,7 +1679,7 @@ func TestService(t *testing.T) {
})

t.Run("rolls back all created variables on an error", func(t *testing.T) {
testfileRunner(t, "testdata/variables.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/variables.yml", func(t *testing.T, template *Template) {
fakeVarSVC := mock.NewVariableService()
fakeVarSVC.CreateVariableF = func(_ context.Context, l *influxdb.Variable) error {
// 4th variable will return the error here, and 3 before should be rolled back

@@ -1693,7 +1693,7 @@ func TestService(t *testing.T) {
orgID := influxdb.ID(9000)

_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
_, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.Error(t, err)

assert.GreaterOrEqual(t, fakeVarSVC.DeleteVariableCalls.Count(), 1)

@@ -1701,17 +1701,17 @@ func TestService(t *testing.T) {
})

t.Run("will not apply variable if no changes to be applied", func(t *testing.T) {
testfileRunner(t, "testdata/variables.yml", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/variables.yml", func(t *testing.T, template *Template) {
orgID := influxdb.ID(9000)

fakeVarSVC := mock.NewVariableService()
fakeVarSVC.FindVariablesF = func(ctx context.Context, f influxdb.VariableFilter, _ ...influxdb.FindOptions) ([]*influxdb.Variable, error) {
return []*influxdb.Variable{
{
// makes all pkg changes same as they are on the existing
// makes all template changes same as they are on the existing
ID: influxdb.ID(1),
OrganizationID: orgID,
Name: pkg.mVariables["var-const-3"].Name(),
Name: template.mVariables["var-const-3"].Name(),
Arguments: &influxdb.VariableArguments{
Type: "constant",
Values: influxdb.VariableConstantValues{"first val"},

@@ -1734,7 +1734,7 @@ func TestService(t *testing.T) {
svc := newTestService(WithVariableSVC(fakeVarSVC))

impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithPkg(pkg))
impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template))
require.NoError(t, err)

sum := impact.Summary

@@ -1776,16 +1776,16 @@ func TestService(t *testing.T) {
}

t.Run("with existing resources", func(t *testing.T) {
encodeAndDecode := func(t *testing.T, pkg *Pkg) *Pkg {
encodeAndDecode := func(t *testing.T, template *Template) *Template {
t.Helper()

b, err := pkg.Encode(EncodingJSON)
b, err := template.Encode(EncodingJSON)
require.NoError(t, err)

newPkg, err := Parse(EncodingJSON, FromReader(bytes.NewReader(b)))
newTemplate, err := Parse(EncodingJSON, FromReader(bytes.NewReader(b)))
require.NoError(t, err)

return newPkg
return newTemplate
}

t.Run("bucket", func(t *testing.T) {

@@ -1826,12 +1826,12 @@ func TestService(t *testing.T) {
ID: expected.ID,
Name: tt.newName,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

bkts := newPkg.Summary().Buckets
bkts := newTemplate.Summary().Buckets
require.Len(t, bkts, 1)

actual := bkts[0]

@@ -1926,12 +1926,12 @@ func TestService(t *testing.T) {
ID: tt.expected.GetID(),
Name: tt.newName,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

checks := newPkg.Summary().Checks
checks := newTemplate.Summary().Checks
require.Len(t, checks, 1)

actual := checks[0].Check

@@ -2313,12 +2313,12 @@ func TestService(t *testing.T) {
ID: expected.ID,
Name: tt.newName,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

dashs := newPkg.Summary().Dashboards
dashs := newTemplate.Summary().Dashboards
require.Len(t, dashs, 1)

actual := dashs[0]

@@ -2363,12 +2363,12 @@ func TestService(t *testing.T) {
ID: 2,
},
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

dashs := newPkg.Summary().Dashboards
dashs := newTemplate.Summary().Dashboards
require.Len(t, dashs, len(resourcesToClone))

for i := range resourcesToClone {

@@ -2419,12 +2419,12 @@ func TestService(t *testing.T) {
ID: expectedLabel.ID,
Name: tt.newName,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

newLabels := newPkg.Summary().Labels
newLabels := newTemplate.Summary().Labels
require.Len(t, newLabels, 1)

actual := newLabels[0]
@@ -2547,12 +2547,12 @@ func TestService(t *testing.T) {
ID: tt.expected.GetID(),
Name: tt.newName,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

endpoints := newPkg.Summary().NotificationEndpoints
endpoints := newTemplate.Summary().NotificationEndpoints
require.Len(t, endpoints, 1)

actual := endpoints[0].NotificationEndpoint

@@ -2674,19 +2674,19 @@ func TestService(t *testing.T) {
ID: tt.rule.GetID(),
Name: tt.newName,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

sum := newPkg.Summary()
sum := newTemplate.Summary()
require.Len(t, sum.NotificationRules, 1)

actualRule := sum.NotificationRules[0]
assert.Zero(t, actualRule.ID)
assert.Zero(t, actualRule.EndpointID)
assert.NotEmpty(t, actualRule.EndpointType)
assert.NotEmpty(t, actualRule.EndpointPkgName)
assert.NotEmpty(t, actualRule.EndpointMetaName)

baseEqual := func(t *testing.T, base rule.Base) {
t.Helper()

@@ -2727,9 +2727,9 @@ func TestService(t *testing.T) {
assert.Equal(t, p.MessageTemplate, actualRule.MessageTemplate)
}

require.Len(t, pkg.Summary().NotificationEndpoints, 1)
require.Len(t, template.Summary().NotificationEndpoints, 1)

actualEndpoint := pkg.Summary().NotificationEndpoints[0].NotificationEndpoint
actualEndpoint := template.Summary().NotificationEndpoints[0].NotificationEndpoint
assert.Equal(t, tt.endpoint.GetName(), actualEndpoint.GetName())
assert.Equal(t, tt.endpoint.GetDescription(), actualEndpoint.GetDescription())
assert.Equal(t, tt.endpoint.GetStatus(), actualEndpoint.GetStatus())

@@ -2775,15 +2775,15 @@ func TestService(t *testing.T) {
ID: 2,
},
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

sum := newPkg.Summary()
sum := newTemplate.Summary()
require.Len(t, sum.NotificationRules, len(resourcesToClone))

expectedSameEndpointName := sum.NotificationRules[0].EndpointPkgName
expectedSameEndpointName := sum.NotificationRules[0].EndpointMetaName
assert.NotZero(t, expectedSameEndpointName)
assert.NotEqual(t, "endpoint_0", expectedSameEndpointName)

@@ -2791,7 +2791,7 @@ func TestService(t *testing.T) {
actual := sum.NotificationRules[i]
assert.Equal(t, "old_name", actual.Name)
assert.Equal(t, "desc", actual.Description)
assert.Equal(t, expectedSameEndpointName, actual.EndpointPkgName)
assert.Equal(t, expectedSameEndpointName, actual.EndpointMetaName)
}

require.Len(t, sum.NotificationEndpoints, 1)

@@ -2847,12 +2847,12 @@ func TestService(t *testing.T) {
ID: tt.task.ID,
Name: tt.newName,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

sum := newPkg.Summary()
sum := newTemplate.Summary()

tasks := sum.Tasks
require.Len(t, tasks, 1)
@@ -2901,12 +2901,12 @@ func TestService(t *testing.T) {
ID: 2,
},
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

sum := newPkg.Summary()
sum := newTemplate.Summary()

tasks := sum.Tasks
require.Len(t, tasks, len(resourcesToClone))

@@ -2946,12 +2946,12 @@ func TestService(t *testing.T) {
ID: 2,
},
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

sum := newPkg.Summary()
sum := newTemplate.Summary()

teles := sum.TelegrafConfigs
sort.Slice(teles, func(i, j int) bool {

@@ -3046,12 +3046,12 @@ func TestService(t *testing.T) {
ID: tt.expectedVar.ID,
Name: tt.newName,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

newVars := newPkg.Summary().Variables
newVars := newTemplate.Summary().Variables
require.Len(t, newVars, 1)

actual := newVars[0]

@@ -3101,11 +3101,11 @@ func TestService(t *testing.T) {
Kind: KindBucket,
ID: expected.ID,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
sum := newPkg.Summary()
newTemplate := encodeAndDecode(t, template)
sum := newTemplate.Summary()

bkts := sum.Buckets
require.Len(t, bkts, 1)

@@ -3149,11 +3149,11 @@ func TestService(t *testing.T) {
ID: 20,
},
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
sum := newPkg.Summary()
newTemplate := encodeAndDecode(t, template)
sum := newTemplate.Summary()

bkts := sum.Buckets
sort.Slice(bkts, func(i, j int) bool {

@@ -3191,12 +3191,12 @@ func TestService(t *testing.T) {
Kind: KindLabel,
ID: 1,
}
pkg, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone))
require.NoError(t, err)

newPkg := encodeAndDecode(t, pkg)
newTemplate := encodeAndDecode(t, template)

labels := newPkg.Summary().Labels
labels := newTemplate.Summary().Labels
require.Len(t, labels, 1)
assert.Equal(t, "label_1", labels[0].Name)
})

@@ -3372,7 +3372,7 @@ func TestService(t *testing.T) {
WithVariableSVC(varSVC),
)

pkg, err := svc.Export(
template, err := svc.Export(
context.TODO(),
ExportWithAllOrgResources(ExportByOrgIDOpt{
OrgID: orgID,

@@ -3380,7 +3380,7 @@ func TestService(t *testing.T) {
)
require.NoError(t, err)

summary := pkg.Summary()
summary := template.Summary()
bkts := summary.Buckets
require.Len(t, bkts, 1)
assert.Equal(t, "bucket", bkts[0].Name)

@@ -3404,7 +3404,7 @@ func TestService(t *testing.T) {
rules := summary.NotificationRules
require.Len(t, rules, 1)
assert.Equal(t, expectedRule.Name, rules[0].Name)
assert.NotEmpty(t, rules[0].EndpointPkgName)
assert.NotEmpty(t, rules[0].EndpointMetaName)

require.Len(t, summary.Tasks, 1)
task1 := summary.Tasks[0]
@@ -57,7 +57,7 @@ func (s *traceMW) UpdateStack(ctx context.Context, upd StackUpdate) (Stack, erro
return s.next.UpdateStack(ctx, upd)
}

func (s *traceMW) Export(ctx context.Context, opts ...ExportOptFn) (pkg *Pkg, err error) {
func (s *traceMW) Export(ctx context.Context, opts ...ExportOptFn) (template *Template, err error) {
span, ctx := tracing.StartSpanFromContext(ctx)
defer span.Finish()
return s.next.Export(ctx, opts...)