If includeClusterResources is nil/auto, pull in necessary CRDs (#1831)
Related issue: https://github.com/heptio/velero/issues/1830. This accomplishes everything that's needed, although there might be room for improvement in avoiding a GET call for matching CRDs for each resource backed up. An alternative could be a single call to get all CRDs prior to iterating over resources, passing the result into the backupResource function. Signed-off-by: Scott Seago <sseago@redhat.com> (branch: pull/1976/head)
parent
09a1c4dac7
commit
970f17c1ef
|
@ -0,0 +1 @@
|
|||
If includeClusterResources is nil/auto, pull in necessary CRDs in backupResource
|
|
@ -597,6 +597,164 @@ func TestBackupResourceFiltering(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestCRDInclusion tests whether related CRDs are included, based on
|
||||
// backed-up resources and "include cluster resources" flag, and
|
||||
// verifies that the set of items written to the backup tarball are
|
||||
// correct. Validation is done by looking at the names of the files in
|
||||
// the backup tarball; the contents of the files are not checked.
|
||||
func TestCRDInclusion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
backup *velerov1.Backup
|
||||
apiResources []*test.APIResource
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "include cluster resources=auto includes all CRDs when backing up all namespaces",
|
||||
backup: defaultBackup().
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/backups.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/test.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=false excludes all CRDs when backing up all namespaces",
|
||||
backup: defaultBackup().
|
||||
IncludeClusterResources(false).
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=true includes all CRDs when backing up all namespaces",
|
||||
backup: defaultBackup().
|
||||
IncludeClusterResources(true).
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/backups.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/test.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=auto includes CRDs with CRs when backing up selected namespaces",
|
||||
backup: defaultBackup().
|
||||
IncludedNamespaces("foo").
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=false excludes all CRDs when backing up selected namespaces",
|
||||
backup: defaultBackup().
|
||||
IncludeClusterResources(false).
|
||||
IncludedNamespaces("foo").
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "include cluster resources=true includes all CRDs when backing up selected namespaces",
|
||||
backup: defaultBackup().
|
||||
IncludeClusterResources(true).
|
||||
IncludedNamespaces("foo").
|
||||
Result(),
|
||||
apiResources: []*test.APIResource{
|
||||
test.CRDs(
|
||||
builder.ForCustomResourceDefinition("backups.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("volumesnapshotlocations.velero.io").Result(),
|
||||
builder.ForCustomResourceDefinition("test.velero.io").Result(),
|
||||
),
|
||||
test.VSLs(
|
||||
builder.ForVolumeSnapshotLocation("foo", "vsl-1").Result(),
|
||||
),
|
||||
},
|
||||
want: []string{
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/backups.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/volumesnapshotlocations.velero.io.json",
|
||||
"resources/customresourcedefinitions.apiextensions.k8s.io/cluster/test.velero.io.json",
|
||||
"resources/volumesnapshotlocations.velero.io/namespaces/foo/vsl-1.json",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var (
|
||||
h = newHarness(t)
|
||||
req = &Request{Backup: tc.backup}
|
||||
backupFile = bytes.NewBuffer([]byte{})
|
||||
)
|
||||
|
||||
for _, resource := range tc.apiResources {
|
||||
h.addItems(t, resource)
|
||||
}
|
||||
|
||||
h.backupper.Backup(h.log, req, backupFile, nil, nil)
|
||||
|
||||
assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestBackupResourceCohabitation runs backups for resources that "cohabitate",
|
||||
// meaning they exist in multiple API groups (e.g. deployments.extensions and
|
||||
// deployments.apps), and verifies that only one copy of each resource is backed
|
||||
|
|
|
@ -90,7 +90,7 @@ func (f *defaultItemBackupperFactory) newItemBackupper(
|
|||
}
|
||||
|
||||
type ItemBackupper interface {
|
||||
backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error
|
||||
backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) (bool, error)
|
||||
}
|
||||
|
||||
type defaultItemBackupper struct {
|
||||
|
@ -109,10 +109,12 @@ type defaultItemBackupper struct {
|
|||
|
||||
// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
|
||||
// namespaces IncludesExcludes list.
|
||||
func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error {
|
||||
// In addition to the error return, backupItem also returns a bool indicating whether the item
|
||||
// was actually backed up.
|
||||
func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) (bool, error) {
|
||||
metadata, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
namespace := metadata.GetNamespace()
|
||||
|
@ -124,31 +126,31 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
|
|||
|
||||
if metadata.GetLabels()["velero.io/exclude-from-backup"] == "true" {
|
||||
log.Info("Excluding item because it has label velero.io/exclude-from-backup=true")
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// NOTE: we have to re-check namespace & resource includes/excludes because it's possible that
|
||||
// backupItem can be invoked by a custom action.
|
||||
if namespace != "" && !ib.backupRequest.NamespaceIncludesExcludes.ShouldInclude(namespace) {
|
||||
log.Info("Excluding item because namespace is excluded")
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// NOTE: we specifically allow namespaces to be backed up even if IncludeClusterResources is
|
||||
// false.
|
||||
if namespace == "" && groupResource != kuberesource.Namespaces && ib.backupRequest.Spec.IncludeClusterResources != nil && !*ib.backupRequest.Spec.IncludeClusterResources {
|
||||
log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false")
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !ib.backupRequest.ResourceIncludesExcludes.ShouldInclude(groupResource.String()) {
|
||||
log.Info("Excluding item because resource is excluded")
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if metadata.GetDeletionTimestamp() != nil {
|
||||
log.Info("Skipping item because it's being deleted.")
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
key := itemKey{
|
||||
|
@ -159,7 +161,8 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
|
|||
|
||||
if _, exists := ib.backupRequest.BackedUpItems[key]; exists {
|
||||
log.Info("Skipping item because it's already been backed up.")
|
||||
return nil
|
||||
// returning true since this item *is* in the backup, even though we're not backing it up here
|
||||
return true, nil
|
||||
}
|
||||
ib.backupRequest.BackedUpItems[key] = struct{}{}
|
||||
|
||||
|
@ -167,7 +170,7 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
|
|||
|
||||
log.Debug("Executing pre hooks")
|
||||
if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hookPhasePre); err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -216,11 +219,11 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
|
|||
backupErrs = append(backupErrs, err)
|
||||
}
|
||||
|
||||
return kubeerrs.NewAggregate(backupErrs)
|
||||
return false, kubeerrs.NewAggregate(backupErrs)
|
||||
}
|
||||
obj = updatedObj
|
||||
if metadata, err = meta.Accessor(obj); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return false, errors.WithStack(err)
|
||||
}
|
||||
// update name and namespace in case they were modified in an action
|
||||
name = metadata.GetName()
|
||||
|
@ -247,7 +250,7 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
|
|||
}
|
||||
|
||||
if len(backupErrs) != 0 {
|
||||
return kubeerrs.NewAggregate(backupErrs)
|
||||
return false, kubeerrs.NewAggregate(backupErrs)
|
||||
}
|
||||
|
||||
var filePath string
|
||||
|
@ -259,7 +262,7 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
|
|||
|
||||
itemBytes, err := json.Marshal(obj.UnstructuredContent())
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return false, errors.WithStack(err)
|
||||
}
|
||||
|
||||
hdr := &tar.Header{
|
||||
|
@ -271,14 +274,14 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
|
|||
}
|
||||
|
||||
if err := ib.tarWriter.WriteHeader(hdr); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return false, errors.WithStack(err)
|
||||
}
|
||||
|
||||
if _, err := ib.tarWriter.Write(itemBytes); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return false, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a list of PodVolumeBackups
|
||||
|
@ -348,7 +351,7 @@ func (ib *defaultItemBackupper) executeActions(
|
|||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
if err = ib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource()); err != nil {
|
||||
if _, err = ib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,6 +19,8 @@ package backup
|
|||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
@ -193,7 +195,7 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList
|
|||
continue
|
||||
}
|
||||
|
||||
if err := itemBackupper.backupItem(log, unstructured, gr); err != nil {
|
||||
if _, err := itemBackupper.backupItem(log, unstructured, gr); err != nil {
|
||||
log.WithError(errors.WithStack(err)).Error("Error backing up namespace")
|
||||
}
|
||||
}
|
||||
|
@ -207,6 +209,7 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList
|
|||
namespacesToList = []string{""}
|
||||
}
|
||||
|
||||
backedUpItem := false
|
||||
for _, namespace := range namespacesToList {
|
||||
log = log.WithField("namespace", namespace)
|
||||
|
||||
|
@ -243,39 +246,87 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList
|
|||
log.Errorf("Unexpected type %T", item)
|
||||
continue
|
||||
}
|
||||
|
||||
metadata, err := meta.Accessor(unstructured)
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Error("Error getting a metadata accessor")
|
||||
continue
|
||||
}
|
||||
|
||||
if gr == kuberesource.Namespaces && !rb.backupRequest.NamespaceIncludesExcludes.ShouldInclude(metadata.GetName()) {
|
||||
log.WithField("name", metadata.GetName()).Info("Skipping namespace because it's excluded")
|
||||
continue
|
||||
}
|
||||
|
||||
err = itemBackupper.backupItem(log, unstructured, gr)
|
||||
if aggregate, ok := err.(kubeerrs.Aggregate); ok {
|
||||
log.WithField("name", metadata.GetName()).Infof("%d errors encountered backup up item", len(aggregate.Errors()))
|
||||
// log each error separately so we get error location info in the log, and an
|
||||
// accurate count of errors
|
||||
for _, err = range aggregate.Errors() {
|
||||
log.WithError(err).WithField("name", metadata.GetName()).Error("Error backing up item")
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("name", metadata.GetName()).Error("Error backing up item")
|
||||
continue
|
||||
if rb.backupItem(log, gr, itemBackupper, unstructured) {
|
||||
backedUpItem = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// back up CRD for resource if found. We should only need to do this if we've backed up at least
|
||||
// one item and IncludeClusterResources is nil. If IncludeClusterResources is false
|
||||
// we don't want to back it up, and if it's true it will already be included.
|
||||
if backedUpItem && rb.backupRequest.Spec.IncludeClusterResources == nil {
|
||||
rb.backupCRD(log, gr, itemBackupper)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rb *defaultResourceBackupper) backupItem(
|
||||
log logrus.FieldLogger,
|
||||
gr schema.GroupResource,
|
||||
itemBackupper ItemBackupper,
|
||||
unstructured runtime.Unstructured,
|
||||
) bool {
|
||||
metadata, err := meta.Accessor(unstructured)
|
||||
if err != nil {
|
||||
log.WithError(errors.WithStack(err)).Error("Error getting a metadata accessor")
|
||||
return false
|
||||
}
|
||||
|
||||
if gr == kuberesource.Namespaces && !rb.backupRequest.NamespaceIncludesExcludes.ShouldInclude(metadata.GetName()) {
|
||||
log.WithField("name", metadata.GetName()).Info("Skipping namespace because it's excluded")
|
||||
return false
|
||||
}
|
||||
|
||||
backedUpItem, err := itemBackupper.backupItem(log, unstructured, gr)
|
||||
if aggregate, ok := err.(kubeerrs.Aggregate); ok {
|
||||
log.WithField("name", metadata.GetName()).Infof("%d errors encountered backup up item", len(aggregate.Errors()))
|
||||
// log each error separately so we get error location info in the log, and an
|
||||
// accurate count of errors
|
||||
for _, err = range aggregate.Errors() {
|
||||
log.WithError(err).WithField("name", metadata.GetName()).Error("Error backing up item")
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("name", metadata.GetName()).Error("Error backing up item")
|
||||
return false
|
||||
}
|
||||
return backedUpItem
|
||||
}
|
||||
|
||||
// Adds CRD to the backup if one is found corresponding to this resource
|
||||
func (rb *defaultResourceBackupper) backupCRD(
|
||||
log logrus.FieldLogger,
|
||||
gr schema.GroupResource,
|
||||
itemBackupper ItemBackupper,
|
||||
) {
|
||||
crdGr := schema.GroupResource{Group: apiextv1beta1.GroupName, Resource: "customresourcedefinitions"}
|
||||
crdClient, err := rb.dynamicFactory.ClientForGroupVersionResource(apiextv1beta1.SchemeGroupVersion,
|
||||
metav1.APIResource{
|
||||
Name: "customresourcedefinitions",
|
||||
Namespaced: false,
|
||||
},
|
||||
"",
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error getting dynamic client for CRDs")
|
||||
return
|
||||
}
|
||||
|
||||
unstructured, err := crdClient.Get(gr.String(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
log.WithError(errors.WithStack(err)).Error("Error getting CRD")
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Infof("Found associated CRD to add to backup %d", gr.String())
|
||||
_ = rb.backupItem(log, crdGr, itemBackupper, unstructured)
|
||||
}
|
||||
|
||||
// getNamespacesToList examines ie and resolves the includes and excludes to a full list of
|
||||
// namespaces to list. If ie is nil or it includes *, the result is just "" (list across all
|
||||
// namespaces). Otherwise, the result is a list of every included namespace minus all excluded ones.
|
||||
|
|
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
Copyright 2019 the Velero contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package builder
|
||||
|
||||
import (
|
||||
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// CustomResourceDefinitionBuilder builds CustomResourceDefinition objects.
type CustomResourceDefinitionBuilder struct {
	// object is the CustomResourceDefinition being built.
	object *apiextv1beta1.CustomResourceDefinition
}
||||
|
||||
// ForCustomResourceDefinition is the constructor for a CustomResourceDefinitionBuilder.
|
||||
func ForCustomResourceDefinition(name string) *CustomResourceDefinitionBuilder {
|
||||
return &CustomResourceDefinitionBuilder{
|
||||
object: &apiextv1beta1.CustomResourceDefinition{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: apiextv1beta1.SchemeGroupVersion.String(),
|
||||
Kind: "CustomResourceDefinition",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Result returns the built CustomResourceDefinition.
func (b *CustomResourceDefinitionBuilder) Result() *apiextv1beta1.CustomResourceDefinition {
	return b.object
}
|
||||
|
||||
// ObjectMeta applies functional options to the Namespace's ObjectMeta.
|
||||
func (b *CustomResourceDefinitionBuilder) ObjectMeta(opts ...ObjectMetaOpt) *CustomResourceDefinitionBuilder {
|
||||
for _, opt := range opts {
|
||||
opt(b.object)
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
|
@ -129,3 +129,24 @@ func ServiceAccounts(items ...metav1.Object) *APIResource {
|
|||
Items: items,
|
||||
}
|
||||
}
|
||||
|
||||
func CRDs(items ...metav1.Object) *APIResource {
|
||||
return &APIResource{
|
||||
Group: "apiextensions.k8s.io",
|
||||
Version: "v1beta1",
|
||||
Name: "customresourcedefinitions",
|
||||
ShortName: "crd",
|
||||
Namespaced: false,
|
||||
Items: items,
|
||||
}
|
||||
}
|
||||
|
||||
func VSLs(items ...metav1.Object) *APIResource {
|
||||
return &APIResource{
|
||||
Group: "velero.io",
|
||||
Version: "v1",
|
||||
Name: "volumesnapshotlocations",
|
||||
Namespaced: true,
|
||||
Items: items,
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue