From db393ec19961036afcdd87316c1872439682b43d Mon Sep 17 00:00:00 2001
From: Steve Kriss
Date: Thu, 27 Jun 2019 14:48:50 -0600
Subject: [PATCH] properly filter additional items returned from restore item
 actions

Signed-off-by: Steve Kriss
---
 changelogs/unreleased/1612-skriss |   1 +
 pkg/restore/restore.go            |  52 +++++++++-
 pkg/restore/restore_new_test.go   | 151 +++++++++++++++---------------
 pkg/restore/restore_test.go       |   4 +-
 4 files changed, 125 insertions(+), 83 deletions(-)
 create mode 100644 changelogs/unreleased/1612-skriss

diff --git a/changelogs/unreleased/1612-skriss b/changelogs/unreleased/1612-skriss
new file mode 100644
index 000000000..d2896dcf1
--- /dev/null
+++ b/changelogs/unreleased/1612-skriss
@@ -0,0 +1 @@
+bug fix: only restore additional items returned from restore item actions if they match the restore's namespace/resource selectors
diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go
index e316ae1c4..70f1cb730 100644
--- a/pkg/restore/restore.go
+++ b/pkg/restore/restore.go
@@ -51,6 +51,7 @@ import (
 	"github.com/heptio/velero/pkg/label"
 	"github.com/heptio/velero/pkg/plugin/velero"
 	"github.com/heptio/velero/pkg/restic"
+	"github.com/heptio/velero/pkg/util/boolptr"
 	"github.com/heptio/velero/pkg/util/collections"
 	"github.com/heptio/velero/pkg/util/filesystem"
 	"github.com/heptio/velero/pkg/util/kube"
@@ -205,6 +206,11 @@ func (kr *kubernetesRestorer) Restore(
 		return Result{}, Result{Velero: []string{err.Error()}}
 	}
 
+	// get namespace includes-excludes
+	namespaceIncludesExcludes := collections.NewIncludesExcludes().
+		Includes(restore.Spec.IncludedNamespaces...).
+		Excludes(restore.Spec.ExcludedNamespaces...)
+
 	resolvedActions, err := resolveActions(actions, kr.discoveryHelper)
 	if err != nil {
 		return Result{}, Result{Velero: []string{err.Error()}}
 	}
@@ -245,6 +251,8 @@ func (kr *kubernetesRestorer) Restore(
 		backup:               backup,
 		backupReader:         backupReader,
 		restore:              restore,
+		resourceIncludesExcludes:  resourceIncludesExcludes,
+		namespaceIncludesExcludes: namespaceIncludesExcludes,
 		prioritizedResources: prioritizedResources,
 		selector:             selector,
 		log:                  log,
@@ -335,6 +343,8 @@ type context struct {
 	backupReader         io.Reader
 	restore              *api.Restore
 	restoreDir           string
+	resourceIncludesExcludes  *collections.IncludesExcludes
+	namespaceIncludesExcludes *collections.IncludesExcludes
 	prioritizedResources []schema.GroupResource
 	selector             labels.Selector
 	log                  logrus.FieldLogger
@@ -380,10 +390,6 @@ func (ctx *context) execute() (Result, Result) {
 func (ctx *context) restoreFromDir() (Result, Result) {
 	warnings, errs := Result{}, Result{}
 
-	namespaceFilter := collections.NewIncludesExcludes().
-		Includes(ctx.restore.Spec.IncludedNamespaces...).
-		Excludes(ctx.restore.Spec.ExcludedNamespaces...)
-
 	// Make sure the top level "resources" dir exists:
 	resourcesDir := filepath.Join(ctx.restoreDir, api.ResourcesDir)
 	rde, err := ctx.fileSystem.DirExists(resourcesDir)
@@ -460,7 +466,7 @@ func (ctx *context) restoreFromDir() (Result, Result) {
 			nsName := nsDir.Name()
 			nsPath := filepath.Join(nsSubDir, nsName)
 
-			if !namespaceFilter.ShouldInclude(nsName) {
+			if !ctx.namespaceIncludesExcludes.ShouldInclude(nsName) {
 				ctx.log.Infof("Skipping namespace %s", nsName)
 				continue
 			}
@@ -787,6 +793,42 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
 	warnings, errs := Result{}, Result{}
 	resourceID := getResourceID(groupResource, namespace, obj.GetName())
 
+	// Check if group/resource should be restored. We need to do this here since
+	// this method may be getting called for an additional item which is a group/resource
+	// that's excluded.
+	if !ctx.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
+		ctx.log.WithFields(logrus.Fields{
+			"namespace":     obj.GetNamespace(),
+			"name":          obj.GetName(),
+			"groupResource": groupResource.String(),
+		}).Info("Not restoring item because resource is excluded")
+		return warnings, errs
+	}
+
+	// Check if namespace/cluster-scoped resource should be restored. We need
+	// to do this here since this method may be getting called for an additional
+	// item which is in a namespace that's excluded, or which is cluster-scoped
+	// and should be excluded.
+	if namespace != "" {
+		if !ctx.namespaceIncludesExcludes.ShouldInclude(namespace) {
+			ctx.log.WithFields(logrus.Fields{
+				"namespace":     obj.GetNamespace(),
+				"name":          obj.GetName(),
+				"groupResource": groupResource.String(),
+			}).Info("Not restoring item because namespace is excluded")
+			return warnings, errs
+		}
+	} else {
+		if boolptr.IsSetToFalse(ctx.restore.Spec.IncludeClusterResources) {
+			ctx.log.WithFields(logrus.Fields{
+				"namespace":     obj.GetNamespace(),
+				"name":          obj.GetName(),
+				"groupResource": groupResource.String(),
+			}).Info("Not restoring item because it's cluster-scoped")
+			return warnings, errs
+		}
+	}
+
 	// make a copy of object retrieved from backup
 	// to make it available unchanged inside restore actions
 	itemFromBackup := obj.DeepCopy()
diff --git a/pkg/restore/restore_new_test.go b/pkg/restore/restore_new_test.go
index e82dd930c..9509d2436 100644
--- a/pkg/restore/restore_new_test.go
+++ b/pkg/restore/restore_new_test.go
@@ -1286,29 +1286,28 @@ func TestRestoreActionAdditionalItems(t *testing.T) {
 				test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"},
 			},
 		},
-		// TODO the below test case fails, which seems like a bug
-		// {
-		// 	name:         "when using a restore namespace filter, additional items that are in a non-included namespace are not restored",
-		// 	restore:      defaultRestore().IncludedNamespaces("ns-1").Restore(),
-		// 	backup:       defaultBackup().Backup(),
-		// 	tarball:      newTarWriter(t).addItems("pods", test.NewPod("ns-1", "pod-1"), test.NewPod("ns-2", "pod-2")).done(),
-		// 	apiResources: []*test.APIResource{test.Pods()},
-		// 	actions: []velero.RestoreItemAction{
-		// 		&pluggableAction{
-		// 			executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
-		// 				return &velero.RestoreItemActionExecuteOutput{
-		// 					UpdatedItem: input.Item,
-		// 					AdditionalItems: []velero.ResourceIdentifier{
-		// 						{GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"},
-		// 					},
-		// 				}, nil
-		// 			},
-		// 		},
-		// 	},
-		// 	want: map[*test.APIResource][]string{
-		// 		test.Pods(): {"ns-1/pod-1"},
-		// 	},
-		// },
+		{
+			name:         "when using a restore namespace filter, additional items that are in a non-included namespace are not restored",
+			restore:      defaultRestore().IncludedNamespaces("ns-1").Restore(),
+			backup:       defaultBackup().Backup(),
+			tarball:      newTarWriter(t).addItems("pods", test.NewPod("ns-1", "pod-1"), test.NewPod("ns-2", "pod-2")).done(),
+			apiResources: []*test.APIResource{test.Pods()},
+			actions: []velero.RestoreItemAction{
+				&pluggableAction{
+					executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
+						return &velero.RestoreItemActionExecuteOutput{
+							UpdatedItem: input.Item,
+							AdditionalItems: []velero.ResourceIdentifier{
+								{GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"},
+							},
+						}, nil
+					},
+				},
+			},
+			want: map[*test.APIResource][]string{
+				test.Pods(): {"ns-1/pod-1"},
+			},
+		},
 		{
 			name:         "when using a restore namespace filter, additional items that are cluster-scoped are restored",
 			restore:      defaultRestore().IncludedNamespaces("ns-1").Restore(),
 			backup:       defaultBackup().Backup(),
 			tarball: newTarWriter(t).
 				addItems("pods", test.NewPod("ns-1", "pod-1")).
 				addItems("persistentvolumes", test.NewPV("pv-1")).
 				done(),
 			apiResources: []*test.APIResource{test.Pods(), test.PVs()},
 			actions: []velero.RestoreItemAction{
 				&pluggableAction{
 					executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
 						return &velero.RestoreItemActionExecuteOutput{
 							UpdatedItem: input.Item,
 							AdditionalItems: []velero.ResourceIdentifier{
 								{GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"},
 							},
 						}, nil
 					},
 				},
 			},
 			want: map[*test.APIResource][]string{
 				test.Pods(): {"ns-1/pod-1"},
 				test.PVs():  {"/pv-1"},
 			},
 		},
@@ -1335,60 +1334,58 @@ func TestRestoreActionAdditionalItems(t *testing.T) {
 				test.PVs(): {"/pv-1"},
 			},
 		},
-		// TODO the below test case fails, which seems like a bug
-		// {
-		// 	name:         "when using a restore resource filter, additional items that are non-included resources are not restored",
-		// 	restore:      defaultRestore().IncludedResources("pods").Restore(),
-		// 	backup:       defaultBackup().Backup(),
-		// 	tarball: newTarWriter(t).
-		// 		addItems("pods", test.NewPod("ns-1", "pod-1")).
-		// 		addItems("persistentvolumes", test.NewPV("pv-1")).
-		// 		done(),
-		// 	apiResources: []*test.APIResource{test.Pods(), test.PVs()},
-		// 	actions: []velero.RestoreItemAction{
-		// 		&pluggableAction{
-		// 			executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
-		// 				return &velero.RestoreItemActionExecuteOutput{
-		// 					UpdatedItem: input.Item,
-		// 					AdditionalItems: []velero.ResourceIdentifier{
-		// 						{GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"},
-		// 					},
-		// 				}, nil
-		// 			},
-		// 		},
-		// 	},
-		// 	want: map[*test.APIResource][]string{
-		// 		test.Pods(): {"ns-1/pod-1"},
-		// 		test.PVs():  nil,
-		// 	},
-		// },
-		// TODO the below test case fails, which seems like a bug
-		// {
-		// 	name:         "when IncludeClusterResources=false, additional items that are cluster-scoped are not restored",
-		// 	restore:      defaultRestore().IncludeClusterResources(false).Restore(),
-		// 	backup:       defaultBackup().Backup(),
-		// 	tarball: newTarWriter(t).
-		// 		addItems("pods", test.NewPod("ns-1", "pod-1")).
-		// 		addItems("persistentvolumes", test.NewPV("pv-1")).
-		// 		done(),
-		// 	apiResources: []*test.APIResource{test.Pods(), test.PVs()},
-		// 	actions: []velero.RestoreItemAction{
-		// 		&pluggableAction{
-		// 			executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
-		// 				return &velero.RestoreItemActionExecuteOutput{
-		// 					UpdatedItem: input.Item,
-		// 					AdditionalItems: []velero.ResourceIdentifier{
-		// 						{GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"},
-		// 					},
-		// 				}, nil
-		// 			},
-		// 		},
-		// 	},
-		// 	want: map[*test.APIResource][]string{
-		// 		test.Pods(): {"ns-1/pod-1"},
-		// 		test.PVs():  nil,
-		// 	},
-		// },
+		{
+			name:         "when using a restore resource filter, additional items that are non-included resources are not restored",
+			restore:      defaultRestore().IncludedResources("pods").Restore(),
+			backup:       defaultBackup().Backup(),
+			tarball: newTarWriter(t).
+				addItems("pods", test.NewPod("ns-1", "pod-1")).
+				addItems("persistentvolumes", test.NewPV("pv-1")).
+				done(),
+			apiResources: []*test.APIResource{test.Pods(), test.PVs()},
+			actions: []velero.RestoreItemAction{
+				&pluggableAction{
+					executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
+						return &velero.RestoreItemActionExecuteOutput{
+							UpdatedItem: input.Item,
+							AdditionalItems: []velero.ResourceIdentifier{
+								{GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"},
+							},
+						}, nil
+					},
+				},
+			},
+			want: map[*test.APIResource][]string{
+				test.Pods(): {"ns-1/pod-1"},
+				test.PVs():  nil,
+			},
+		},
+		{
+			name:         "when IncludeClusterResources=false, additional items that are cluster-scoped are not restored",
+			restore:      defaultRestore().IncludeClusterResources(false).Restore(),
+			backup:       defaultBackup().Backup(),
+			tarball: newTarWriter(t).
+				addItems("pods", test.NewPod("ns-1", "pod-1")).
+				addItems("persistentvolumes", test.NewPV("pv-1")).
+				done(),
+			apiResources: []*test.APIResource{test.Pods(), test.PVs()},
+			actions: []velero.RestoreItemAction{
+				&pluggableAction{
+					executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
+						return &velero.RestoreItemActionExecuteOutput{
+							UpdatedItem: input.Item,
+							AdditionalItems: []velero.ResourceIdentifier{
+								{GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"},
+							},
+						}, nil
+					},
+				},
+			},
+			want: map[*test.APIResource][]string{
+				test.Pods(): {"ns-1/pod-1"},
+				test.PVs():  nil,
+			},
+		},
 	}
 
 	for _, tc := range tests {
diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go
index a2bb5dd11..a4a5e8e98 100644
--- a/pkg/restore/restore_test.go
+++ b/pkg/restore/restore_test.go
@@ -324,7 +324,9 @@ status:
 			fileSystem: velerotest.NewFakeFileSystem().
 				WithFile("foo/resources/persistentvolumes/cluster/pv.json", pvBytes).
 				WithFile("foo/resources/persistentvolumeclaims/default/pvc.json", pvcBytes),
-			selector: labels.NewSelector(),
+			selector:                  labels.NewSelector(),
+			resourceIncludesExcludes:  collections.NewIncludesExcludes(),
+			namespaceIncludesExcludes: collections.NewIncludesExcludes(),
 			prioritizedResources: []schema.GroupResource{
 				kuberesource.PersistentVolumes,
 				kuberesource.PersistentVolumeClaims,