feat(pkger): extend buckets to be able to define a bucket and rename it

This work is the first step toward making ALL resources unique by metadata.name.
The displayName is a means to rename an existing resource. This is all to support
pkger idempotency. The metadata.name field will be the unique identifier within
a pkg.
pull/17300/head
Johnny Steenbergen 2020-03-16 11:25:39 -07:00 committed by Johnny Steenbergen
parent fb300a1182
commit 952d7d7528
7 changed files with 183 additions and 53 deletions

View File

@ -155,7 +155,7 @@ func TestLauncher_Pkger(t *testing.T) {
bkts := sum.Buckets
require.Len(t, bkts, 1)
assert.Equal(t, "rucket_1", bkts[0].Name)
assert.Equal(t, "rucketeer", bkts[0].Name)
hasLabelAssociations(t, bkts[0].LabelAssociations, 2, "label_1", "label_2")
checks := sum.Checks
@ -269,7 +269,7 @@ spec:
if !exportAllSum {
assert.NotZero(t, bkts[0].ID)
}
assert.Equal(t, "rucket_1", bkts[0].Name)
assert.Equal(t, "rucketeer", bkts[0].Name)
hasLabelAssociations(t, bkts[0].LabelAssociations, 2, "label_1", "label_2")
checks := sum1.Checks
@ -617,7 +617,7 @@ spec:
bkts := newSum.Buckets
require.Len(t, bkts, 1)
assert.Zero(t, bkts[0].ID)
assert.Equal(t, "rucket_1", bkts[0].Name)
assert.Equal(t, "rucketeer", bkts[0].Name)
hasLabelAssociations(t, bkts[0].LabelAssociations, 2, "label_1", "label_2")
checks := newSum.Checks
@ -995,6 +995,7 @@ kind: Bucket
metadata:
name: rucket_1
spec:
name: rucketeer
associations:
- kind: Label
name: label_1

View File

@ -773,12 +773,14 @@ const (
fieldLevel = "level"
fieldMin = "min"
fieldMax = "max"
fieldMetadata = "metadata"
fieldName = "name"
fieldOffset = "offset"
fieldOperator = "operator"
fieldPrefix = "prefix"
fieldQuery = "query"
fieldSuffix = "suffix"
fieldSpec = "spec"
fieldStatus = "status"
fieldType = "type"
fieldValue = "value"
@ -789,11 +791,14 @@ const (
fieldBucketRetentionRules = "retentionRules"
)
const bucketNameMinLength = 2
type bucket struct {
id influxdb.ID
OrgID influxdb.ID
Description string
name *references
displayName *references
RetentionRules retentionRules
labels sortedLabels
@ -815,6 +820,9 @@ func (b *bucket) Labels() []*label {
}
// Name returns the bucket's effective name: the spec-level display name when
// one was provided, otherwise the unique metadata.name.
func (b *bucket) Name() string {
	name := b.displayName.String()
	if name == "" {
		name = b.name.String()
	}
	return name
}
@ -838,7 +846,20 @@ func (b *bucket) summarize() SummaryBucket {
}
// valid reports the bucket's validation errors, if any. All failures are
// nested under the spec field so callers can surface where in the object
// the problem lives. A nil return means the bucket is valid.
func (b *bucket) valid() []validationErr {
	var vErrs []validationErr
	// Use bucketNameMinLength for both the check and the message so the
	// two cannot drift apart (previously the check hard-coded 2).
	if len(b.Name()) < bucketNameMinLength {
		vErrs = append(vErrs, validationErr{
			Field: fieldName,
			Msg:   fmt.Sprintf("must be a string of at least %d chars in length", bucketNameMinLength),
		})
	}
	vErrs = append(vErrs, b.RetentionRules.valid()...)
	if len(vErrs) == 0 {
		return nil
	}
	return []validationErr{
		objectValidationErr(fieldSpec, vErrs...),
	}
}
func (b *bucket) shouldApply() bool {
@ -2765,6 +2786,9 @@ func (r *references) hasValue() bool {
}
func (r *references) String() string {
if r == nil {
return ""
}
if v := r.StringVal(); v != "" {
return v
}

View File

@ -218,7 +218,7 @@ type Object struct {
// Name returns the object's unique name, read from its metadata.name field.
func (k Object) Name() string {
	return k.Metadata.references(fieldName).String()
}
// Pkg is the model for a package. The resources are more generic that one might
@ -440,7 +440,7 @@ func (p *Pkg) buckets() []*bucket {
buckets = append(buckets, b)
}
sort.Slice(buckets, func(i, j int) bool { return buckets[i].name.String() < buckets[j].name.String() })
sort.Slice(buckets, func(i, j int) bool { return buckets[i].Name() < buckets[j].Name() })
return buckets
}
@ -631,17 +631,37 @@ func (p *Pkg) graphResources() error {
func (p *Pkg) graphBuckets() *parseErr {
p.mBuckets = make(map[string]*bucket)
return p.eachResource(KindBucket, 2, func(o Object) []validationErr {
uniqNames := make(map[string]bool)
return p.eachResource(KindBucket, bucketNameMinLength, func(o Object) []validationErr {
nameRef := p.getRefWithKnownEnvs(o.Metadata, fieldName)
if _, ok := p.mBuckets[nameRef.String()]; ok {
return []validationErr{{
Field: fieldName,
Msg: "duplicate name: " + nameRef.String(),
}}
return []validationErr{
objectValidationErr(fieldMetadata, validationErr{
Field: fieldName,
Msg: "duplicate name: " + nameRef.String(),
}),
}
}
displayNameRef := p.getRefWithKnownEnvs(o.Spec, fieldName)
name := nameRef.String()
if displayName := displayNameRef.String(); displayName != "" {
name = displayName
}
if uniqNames[name] {
return []validationErr{
objectValidationErr(fieldSpec, validationErr{
Field: fieldName,
Msg: "duplicate name: " + nameRef.String(),
}),
}
}
uniqNames[name] = true
bkt := &bucket{
name: nameRef,
displayName: displayNameRef,
Description: o.Spec.stringShort(fieldDescription),
}
if rules, ok := o.Spec[fieldBucketRetentionRules].(retentionRules); ok {
@ -654,7 +674,7 @@ func (p *Pkg) graphBuckets() *parseErr {
})
}
}
p.setRefs(bkt.name)
p.setRefs(bkt.name, bkt.displayName)
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
bkt.labels = append(bkt.labels, l)
@ -1040,10 +1060,10 @@ func (p *Pkg) eachResource(resourceKind Kind, minNameLen int, fn func(o Object)
Kind: k.Type.String(),
Idx: intPtr(i),
ValidationErrs: []validationErr{
{
Field: "name",
objectValidationErr(fieldMetadata, validationErr{
Field: fieldName,
Msg: fmt.Sprintf("must be a string of at least %d chars in length", minNameLen),
},
}),
},
})
continue
@ -1732,6 +1752,13 @@ func IsParseErr(err error) bool {
return IsParseErr(iErr.Err)
}
// objectValidationErr groups the given validation errors beneath the named
// parent field (e.g. "metadata" or "spec") so error output reflects the
// object's structure.
func objectValidationErr(field string, vErrs ...validationErr) validationErr {
	parent := validationErr{Field: field}
	parent.Nested = vErrs
	return parent
}
// normStr normalizes s for comparison: lowercased with surrounding
// whitespace removed.
func normStr(s string) string {
	lowered := strings.ToLower(s)
	return strings.TrimSpace(lowered)
}

View File

@ -22,10 +22,18 @@ func TestParse(t *testing.T) {
t.Run("with valid bucket pkg should be valid", func(t *testing.T) {
testfileRunner(t, "testdata/bucket", func(t *testing.T, pkg *Pkg) {
buckets := pkg.Summary().Buckets
require.Len(t, buckets, 1)
require.Len(t, buckets, 2)
actual := buckets[0]
expectedBucket := SummaryBucket{
Name: "display name",
Description: "bucket 2 description",
LabelAssociations: []SummaryLabel{},
}
assert.Equal(t, expectedBucket, actual)
actual = buckets[1]
expectedBucket = SummaryBucket{
Name: "rucket_11",
Description: "bucket 1 description",
RetentionPeriod: time.Hour,
@ -40,7 +48,7 @@ func TestParse(t *testing.T) {
{
name: "missing name",
validationErrs: 1,
valFields: []string{fieldName},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
@ -50,7 +58,7 @@ spec:
{
name: "mixed valid and missing name",
validationErrs: 1,
valFields: []string{fieldName},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
@ -66,7 +74,7 @@ spec:
name: "mixed valid and multiple bad names",
resourceErrs: 2,
validationErrs: 1,
valFields: []string{fieldName},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
@ -87,7 +95,7 @@ spec:
name: "duplicate bucket names",
resourceErrs: 1,
validationErrs: 1,
valFields: []string{fieldName},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
@ -97,6 +105,42 @@ apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
name: valid name
`,
},
{
name: "duplicate bucket meta name and display name",
resourceErrs: 1,
validationErrs: 1,
valFields: []string{fieldSpec, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
name: rucket_1
---
apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
name: valid name
spec:
name: rucket_1
`,
},
{
name: "display name too short",
resourceErrs: 1,
validationErrs: 1,
valFields: []string{fieldSpec, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
name: rucket_1
---
apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
name: invalid name
spec:
name: f
`,
},
}
@ -134,7 +178,7 @@ metadata:
{
name: "missing name",
validationErrs: 1,
valFields: []string{"name"},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Label
metadata:
@ -144,7 +188,7 @@ spec:
{
name: "mixed valid and missing name",
validationErrs: 1,
valFields: []string{"name"},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Label
metadata:
@ -162,7 +206,7 @@ spec:
name: "multiple labels with missing name",
resourceErrs: 2,
validationErrs: 1,
valFields: []string{"name"},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Label
---
@ -2873,7 +2917,7 @@ spec:
resErr: testPkgResourceError{
name: "missing name",
validationErrs: 1,
valFields: []string{fieldName},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
@ -3124,7 +3168,7 @@ spec:
resErr: testPkgResourceError{
name: "missing name",
validationErrs: 1,
valFields: []string{fieldName},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Task
metadata:
@ -3348,7 +3392,7 @@ spec:
{
name: "name missing",
validationErrs: 1,
valFields: []string{"name"},
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
kind: Variable
metadata:

View File

@ -59,6 +59,9 @@ func TestService(t *testing.T) {
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, pkg *Pkg) {
fakeBktSVC := mock.NewBucketService()
fakeBktSVC.FindBucketByNameFn = func(_ context.Context, orgID influxdb.ID, name string) (*influxdb.Bucket, error) {
if name != "rucket_11" {
return nil, errors.New("not found")
}
return &influxdb.Bucket{
ID: influxdb.ID(1),
OrgID: orgID,
@ -72,7 +75,7 @@ func TestService(t *testing.T) {
_, diff, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, pkg)
require.NoError(t, err)
require.Len(t, diff.Buckets, 1)
require.Len(t, diff.Buckets, 2)
expected := DiffBucket{
ID: SafeID(1),
@ -86,7 +89,7 @@ func TestService(t *testing.T) {
RetentionRules: retentionRules{newRetentionRule(time.Hour)},
},
}
assert.Equal(t, expected, diff.Buckets[0])
assert.Contains(t, diff.Buckets, expected)
})
})
@ -101,7 +104,7 @@ func TestService(t *testing.T) {
_, diff, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, pkg)
require.NoError(t, err)
require.Len(t, diff.Buckets, 1)
require.Len(t, diff.Buckets, 2)
expected := DiffBucket{
Name: "rucket_11",
@ -110,7 +113,7 @@ func TestService(t *testing.T) {
RetentionRules: retentionRules{newRetentionRule(time.Hour)},
},
}
assert.Equal(t, expected, diff.Buckets[0])
assert.Contains(t, diff.Buckets, expected)
})
})
})
@ -442,30 +445,38 @@ func TestService(t *testing.T) {
sum, err := svc.Apply(context.TODO(), orgID, 0, pkg)
require.NoError(t, err)
require.Len(t, sum.Buckets, 1)
buck1 := sum.Buckets[0]
assert.Equal(t, SafeID(time.Hour), buck1.ID)
assert.Equal(t, SafeID(orgID), buck1.OrgID)
assert.Equal(t, "rucket_11", buck1.Name)
assert.Equal(t, time.Hour, buck1.RetentionPeriod)
assert.Equal(t, "bucket 1 description", buck1.Description)
require.Len(t, sum.Buckets, 2)
expected := SummaryBucket{
ID: SafeID(time.Hour),
OrgID: SafeID(orgID),
Name: "rucket_11",
Description: "bucket 1 description",
RetentionPeriod: time.Hour,
LabelAssociations: []SummaryLabel{},
}
assert.Contains(t, sum.Buckets, expected)
})
})
t.Run("will not apply bucket if no changes to be applied", func(t *testing.T) {
testfileRunner(t, "testdata/bucket", func(t *testing.T, pkg *Pkg) {
testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, pkg *Pkg) {
orgID := influxdb.ID(9000)
pkg.isVerified = true
pkgBkt := pkg.mBuckets["rucket_11"]
pkgBkt.existing = &influxdb.Bucket{
// makes all pkg changes the same as they are on the existing bucket
ID: influxdb.ID(3),
OrgID: orgID,
Name: pkgBkt.Name(),
Description: pkgBkt.Description,
RetentionPeriod: pkgBkt.RetentionRules.RP(),
stubExisting := func(name string, id influxdb.ID) {
pkgBkt := pkg.mBuckets[name]
pkgBkt.existing = &influxdb.Bucket{
// makes all pkg changes the same as they are on the existing bucket
ID: id,
OrgID: orgID,
Name: pkgBkt.Name(),
Description: pkgBkt.Description,
RetentionPeriod: pkgBkt.RetentionRules.RP(),
}
}
stubExisting("rucket_11", 3)
stubExisting("rucket_222", 4)
fakeBktSVC := mock.NewBucketService()
fakeBktSVC.UpdateBucketFn = func(_ context.Context, id influxdb.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) {
@ -477,13 +488,17 @@ func TestService(t *testing.T) {
sum, err := svc.Apply(context.TODO(), orgID, 0, pkg)
require.NoError(t, err)
require.Len(t, sum.Buckets, 1)
buck1 := sum.Buckets[0]
assert.Equal(t, SafeID(3), buck1.ID)
assert.Equal(t, SafeID(orgID), buck1.OrgID)
assert.Equal(t, "rucket_11", buck1.Name)
assert.Equal(t, time.Hour, buck1.RetentionPeriod)
assert.Equal(t, "bucket 1 description", buck1.Description)
require.Len(t, sum.Buckets, 2)
expected := SummaryBucket{
ID: SafeID(3),
OrgID: SafeID(orgID),
Name: "rucket_11",
Description: "bucket 1 description",
RetentionPeriod: time.Hour,
LabelAssociations: []SummaryLabel{},
}
assert.Contains(t, sum.Buckets, expected)
assert.Zero(t, fakeBktSVC.CreateBucketCalls.Count())
assert.Zero(t, fakeBktSVC.UpdateBucketCalls.Count())
})

View File

@ -14,5 +14,16 @@
}
]
}
},
{
"apiVersion": "influxdata.com/v2alpha1",
"kind": "Bucket",
"metadata": {
"name": "rucket_22"
},
"spec": {
"name": "display name",
"description": "bucket 2 description"
}
}
]

View File

@ -7,3 +7,11 @@ spec:
retentionRules:
- type: expire
everySeconds: 3600
---
apiVersion: influxdata.com/v2alpha1
kind: Bucket
metadata:
name: rucket_222
spec:
name: display name
description: bucket 2 description