Wait for additional items to be ready before restoring current item

Signed-off-by: Scott Seago <sseago@redhat.com>
pull/5933/head
Scott Seago 2023-02-28 18:40:32 -05:00
parent 36163c9a0e
commit a2621caa74
5 changed files with 139 additions and 71 deletions

@@ -0,0 +1 @@
Wait for additional items to be ready before restoring current item
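
For reviewers, here is a minimal plugin-side sketch of the behavior this commit wires into restoreItem: a RestoreItemAction v2 plugin returns additional items from Execute, sets the WaitForAdditionalItems and AdditionalItemsReadyTimeout output fields, and implements AreAdditionalItemsReady, which itemsAvailable (added below) polls before the current item is created. The struct name, the example ConfigMap, and the omission of the other v2 interface methods are illustrative assumptions, not part of this commit; only the output fields and the AreAdditionalItemsReady hook are taken from the diff.

package main

import (
    "time"

    "k8s.io/apimachinery/pkg/runtime/schema"

    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
    "github.com/vmware-tanzu/velero/pkg/plugin/velero"
)

// exampleAction is a hypothetical RestoreItemAction that depends on a ConfigMap
// and asks Velero to wait for it before restoring the current item.
type exampleAction struct{}

func (a *exampleAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
    out := velero.NewRestoreItemActionExecuteOutput(input.Item)
    // Hypothetical additional item this item depends on.
    out.AdditionalItems = []velero.ResourceIdentifier{
        {GroupResource: schema.GroupResource{Resource: "configmaps"}, Namespace: "ns-1", Name: "cm-1"},
    }
    // Opt in to the new wait: restoreItem will poll AreAdditionalItemsReady
    // before creating this item, using this timeout instead of resourceTimeout.
    out.WaitForAdditionalItems = true
    out.AdditionalItemsReadyTimeout = 2 * time.Minute
    return out, nil
}

// AreAdditionalItemsReady is polled once per second by itemsAvailable until it
// returns true, returns an error, or the timeout above expires.
func (a *exampleAction) AreAdditionalItemsReady(additionalItems []velero.ResourceIdentifier, restore *velerov1api.Restore) (bool, error) {
    // A real plugin would inspect the additional items' state in the cluster here.
    return true, nil
}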

@@ -54,19 +54,24 @@ type Request struct {
PodVolumeBackups []*velerov1api.PodVolumeBackup
VolumeSnapshots []*volume.Snapshot
BackupReader io.Reader
RestoredItems map[itemKey]string
RestoredItems map[itemKey]restoredItemStatus
}
type restoredItemStatus struct {
action string
itemExists bool
}
// RestoredResourceList returns the list of restored resources grouped by the API
// Version and Kind
func (r *Request) RestoredResourceList() map[string][]string {
resources := map[string][]string{}
for i, action := range r.RestoredItems {
for i, item := range r.RestoredItems {
entry := i.name
if i.namespace != "" {
entry = fmt.Sprintf("%s/%s", i.namespace, i.name)
}
entry = fmt.Sprintf("%s(%s)", entry, action)
entry = fmt.Sprintf("%s(%s)", entry, item.action)
resources[i.resource] = append(resources[i.resource], entry)
}

@@ -44,17 +44,17 @@ func TestResourceKey(t *testing.T) {
func TestRestoredResourceList(t *testing.T) {
request := &Request{
RestoredItems: map[itemKey]string{
RestoredItems: map[itemKey]restoredItemStatus{
{
resource: "v1/Namespace",
namespace: "",
name: "default",
}: "created",
}: {action: "created"},
{
resource: "v1/ConfigMap",
namespace: "default",
name: "cm",
}: "skipped",
}: {action: "skipped"},
},
}
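
For reference, with the formatting in RestoredResourceList above (name, prefixed with the namespace when set, suffixed with the action), that request would produce the following grouped list. This is a sketch of the expected assertion under the usual testify conventions, not a quote of the truncated test body:

expected := map[string][]string{
    "v1/Namespace": {"default(created)"},
    "v1/ConfigMap": {"default/cm(skipped)"},
}
assert.Equal(t, expected, request.RestoredResourceList())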

@@ -273,7 +273,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
credentialFileStore: kr.credentialFileStore,
}
req.RestoredItems = make(map[itemKey]string)
req.RestoredItems = make(map[itemKey]restoredItemStatus)
restoreCtx := &restoreContext{
backup: req.Backup,
@@ -345,7 +345,7 @@ type restoreContext struct {
resourceTerminatingTimeout time.Duration
resourceTimeout time.Duration
resourceClients map[resourceClientKey]client.Dynamic
restoredItems map[itemKey]string
restoredItems map[itemKey]restoredItemStatus
renamedPVs map[string]string
pvRenamer func(string) (string, error)
discoveryHelper discovery.Helper
@@ -632,7 +632,7 @@ func (ctx *restoreContext) processSelectedResource(
namespace: ns.Namespace,
name: ns.Name,
}
ctx.restoredItems[itemKey] = itemRestoreResultCreated
ctx.restoredItems[itemKey] = restoredItemStatus{action: itemRestoreResultCreated, itemExists: true}
}
// Keep track of namespaces that we know exist so we don't
@@ -653,7 +653,7 @@ func (ctx *restoreContext) processSelectedResource(
continue
}
w, e := ctx.restoreItem(obj, groupResource, selectedItem.targetNamespace)
w, e, _ := ctx.restoreItem(obj, groupResource, selectedItem.targetNamespace)
warnings.Merge(&w)
errs.Merge(&e)
processedItems++
@@ -874,6 +874,42 @@ func (ctx *restoreContext) crdAvailable(name string, crdClient client.Dynamic) (
return available, err
}
// itemsAvailable waits for the passed-in additional items to be available for use before letting the restore continue.
func (ctx *restoreContext) itemsAvailable(action framework.RestoreItemResolvedActionV2, restoreItemOut *velero.RestoreItemActionExecuteOutput) (bool, error) {
// If the RestoreItemAction didn't set WaitForAdditionalItems, return true immediately
if !restoreItemOut.WaitForAdditionalItems {
return true, nil
}
var available bool
timeout := ctx.resourceTimeout
if restoreItemOut.AdditionalItemsReadyTimeout != 0 {
timeout = restoreItemOut.AdditionalItemsReadyTimeout
}
err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
var err error
available, err = action.AreAdditionalItemsReady(restoreItemOut.AdditionalItems, ctx.restore)
if err != nil {
return true, err
}
if !available {
ctx.log.Debug("AdditionalItems not yet ready for use")
}
// If the AdditionalItems are not available, keep polling (false, nil)
// If the AdditionalItems are available, break the poll and return back to caller (true, nil)
return available, nil
})
if err == wait.ErrWaitTimeout {
ctx.log.Debug("timeout reached waiting for AdditionalItems to be ready")
}
return available, err
}
func (ctx *restoreContext) getResourceClient(groupResource schema.GroupResource, obj *unstructured.Unstructured, namespace string) (client.Dynamic, error) {
key := resourceClientKey{
resource: groupResource.WithVersion(obj.GroupVersionKind().Version),
@@ -910,8 +946,10 @@ func getResourceID(groupResource schema.GroupResource, namespace, name string) s
return fmt.Sprintf("%s/%s/%s", groupResource.String(), namespace, name)
}
func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (Result, Result) {
func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (Result, Result, bool) {
warnings, errs := Result{}, Result{}
// itemExists bool is used to determine whether to include this item in the "wait for additional items" list
itemExists := false
resourceID := getResourceID(groupResource, namespace, obj.GetName())
// Check if group/resource should be restored. We need to do this here since
@@ -923,7 +961,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
"name": obj.GetName(),
"groupResource": groupResource.String(),
}).Info("Not restoring item because resource is excluded")
return warnings, errs
return warnings, errs, itemExists
}
// Check if namespace/cluster-scoped resource should be restored. We need
@@ -939,7 +977,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
"name": obj.GetName(),
"groupResource": groupResource.String(),
}).Info("Not restoring item because namespace is excluded")
return warnings, errs
return warnings, errs, itemExists
}
// If the namespace scoped resource should be restored, ensure that the
@@ -948,7 +986,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
nsToEnsure := getNamespace(ctx.log, archive.GetItemFilePath(ctx.restoreDir, "namespaces", "", obj.GetNamespace()), namespace)
if _, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady(nsToEnsure, ctx.namespaceClient, ctx.resourceTerminatingTimeout); err != nil {
errs.AddVeleroError(err)
return warnings, errs
return warnings, errs, itemExists
} else {
// Add the newly created namespace to the list of restored items.
if nsCreated {
@@ -957,7 +995,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
namespace: nsToEnsure.Namespace,
name: nsToEnsure.Name,
}
ctx.restoredItems[itemKey] = itemRestoreResultCreated
ctx.restoredItems[itemKey] = restoredItemStatus{action: itemRestoreResultCreated, itemExists: true}
}
}
} else {
@@ -967,7 +1005,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
"name": obj.GetName(),
"groupResource": groupResource.String(),
}).Info("Not restoring item because it's cluster-scoped")
return warnings, errs
return warnings, errs, itemExists
}
}
@@ -978,11 +1016,11 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
complete, err := isCompleted(obj, groupResource)
if err != nil {
errs.Add(namespace, fmt.Errorf("error checking completion of %q: %v", resourceID, err))
return warnings, errs
return warnings, errs, itemExists
}
if complete {
ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj))
return warnings, errs
return warnings, errs, itemExists
}
name := obj.GetName()
@@ -993,36 +1031,40 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
namespace: namespace,
name: name,
}
if _, exists := ctx.restoredItems[itemKey]; exists {
if prevRestoredItemStatus, exists := ctx.restoredItems[itemKey]; exists {
ctx.log.Infof("Skipping %s because it's already been restored.", resourceID)
return warnings, errs
itemExists = prevRestoredItemStatus.itemExists
return warnings, errs, itemExists
}
ctx.restoredItems[itemKey] = ""
ctx.restoredItems[itemKey] = restoredItemStatus{itemExists: itemExists}
defer func() {
itemStatus := ctx.restoredItems[itemKey]
// the action field is set explicitly
if action := ctx.restoredItems[itemKey]; len(action) > 0 {
if len(itemStatus.action) > 0 {
return
}
// no action specified, and no warnings and errors
if errs.IsEmpty() && warnings.IsEmpty() {
ctx.restoredItems[itemKey] = itemRestoreResultSkipped
itemStatus.action = itemRestoreResultSkipped
ctx.restoredItems[itemKey] = itemStatus
return
}
// others are all failed
ctx.restoredItems[itemKey] = itemRestoreResultFailed
itemStatus.action = itemRestoreResultFailed
ctx.restoredItems[itemKey] = itemStatus
}()
// TODO: move to restore item action if/when we add a ShouldRestore() method
// to the interface.
if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" {
ctx.log.Infof("Not restoring pod because it's a mirror pod")
return warnings, errs
return warnings, errs, itemExists
}
resourceClient, err := ctx.getResourceClient(groupResource, obj, namespace)
if err != nil {
errs.AddVeleroError(fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
return warnings, errs
return warnings, errs, itemExists
}
if groupResource == kuberesource.PersistentVolumes {
@@ -1032,7 +1074,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
shouldRenamePV, err := shouldRenamePV(ctx, obj, resourceClient)
if err != nil {
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
// Check to see if the claimRef.namespace field needs to be remapped,
@@ -1040,7 +1082,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
_, err = remapClaimRefNS(ctx, obj)
if err != nil {
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
var shouldRestoreSnapshot bool
@@ -1050,7 +1092,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
shouldRestoreSnapshot, err = ctx.shouldRestore(name, resourceClient)
if err != nil {
errs.Add(namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name))
return warnings, errs
return warnings, errs, itemExists
}
} else {
// If we're renaming the PV, we're going to give it a new random name,
@@ -1070,7 +1112,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
updatedObj, err := ctx.pvRestorer.executePVAction(obj)
if err != nil {
errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
return warnings, errs
return warnings, errs, itemExists
}
obj = updatedObj
@@ -1087,7 +1129,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
pvName, err = ctx.pvRenamer(oldName)
if err != nil {
errs.Add(namespace, errors.Wrapf(err, "error renaming PV"))
return warnings, errs
return warnings, errs, itemExists
}
} else {
// VolumeSnapshotter could have modified the PV name through
@@ -1113,7 +1155,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
// Return early because we don't want to restore the PV itself, we
// want to dynamically re-provision it.
return warnings, errs
return warnings, errs, itemExists
case hasDeleteReclaimPolicy(obj.Object):
ctx.log.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.")
@@ -1121,7 +1163,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
// Return early because we don't want to restore the PV itself, we
// want to dynamically re-provision it.
return warnings, errs
return warnings, errs, itemExists
default:
ctx.log.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.")
@@ -1130,7 +1172,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
_, err = remapClaimRefNS(ctx, obj)
if err != nil {
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
obj = resetVolumeBindingInfo(obj)
// We call the pvRestorer here to clear out the PV's claimRef.UID,
@@ -1138,7 +1180,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
updatedObj, err := ctx.pvRestorer.executePVAction(obj)
if err != nil {
errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
return warnings, errs
return warnings, errs, itemExists
}
obj = updatedObj
}
@@ -1148,7 +1190,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
// Clear out non-core metadata fields and status.
if obj, err = resetMetadataAndStatus(obj); err != nil {
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
ctx.log.Infof("restore status includes excludes: %+v", ctx.resourceStatusIncludesExcludes)
@@ -1166,21 +1208,22 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
})
if err != nil {
errs.Add(namespace, fmt.Errorf("error preparing %s: %v", resourceID, err))
return warnings, errs
return warnings, errs, itemExists
}
if executeOutput.SkipRestore {
ctx.log.Infof("Skipping restore of %s: %v because a registered plugin discarded it", obj.GroupVersionKind().Kind, name)
return warnings, errs
return warnings, errs, itemExists
}
unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured)
if !ok {
errs.Add(namespace, fmt.Errorf("%s: unexpected type %T", resourceID, executeOutput.UpdatedItem))
return warnings, errs
return warnings, errs, itemExists
}
obj = unstructuredObj
var filteredAdditionalItems []velero.ResourceIdentifier
for _, additionalItem := range executeOutput.AdditionalItems {
itemPath := archive.GetItemFilePath(ctx.restoreDir, additionalItem.GroupResource.String(), additionalItem.Namespace, additionalItem.Name)
@@ -1208,10 +1251,22 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
}
}
w, e := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace)
w, e, additionalItemExists := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace)
if additionalItemExists {
filteredAdditionalItems = append(filteredAdditionalItems, additionalItem)
}
warnings.Merge(&w)
errs.Merge(&e)
}
executeOutput.AdditionalItems = filteredAdditionalItems
available, err := ctx.itemsAvailable(action, executeOutput)
if err != nil {
errs.Add(namespace, errors.Wrapf(err, "error verifying additional items are ready to use"))
} else if !available {
errs.Add(namespace, fmt.Errorf("Additional items for %s are not ready to use.", resourceID))
}
}
// This comes after running item actions because we have built-in actions that restore
@@ -1226,7 +1281,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
pvc := new(v1.PersistentVolumeClaim)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil {
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
if pvc.Spec.VolumeName != "" {
@@ -1245,7 +1300,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
ctx.log.Infof("Updating persistent volume claim %s/%s to reference renamed persistent volume (%s -> %s)", namespace, name, pvc.Spec.VolumeName, newName)
if err := unstructured.SetNestedField(obj.Object, newName, "spec", "volumeName"); err != nil {
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
}
}
@@ -1265,12 +1320,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name)
createdObj, restoreErr := resourceClient.Create(obj)
if restoreErr == nil {
ctx.restoredItems[itemKey] = itemRestoreResultCreated
itemExists = true
ctx.restoredItems[itemKey] = restoredItemStatus{action: itemRestoreResultCreated, itemExists: itemExists}
}
isAlreadyExistsError, err := isAlreadyExistsError(ctx, obj, restoreErr, resourceClient)
if err != nil {
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
// check if we want to treat the error as a warning, in some cases the creation call might not get executed due to object API validations
@@ -1286,17 +1342,21 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
if err != nil && isAlreadyExistsError {
ctx.log.Errorf("Error retrieving in-cluster version of %s: %v", kube.NamespaceAndName(obj), err)
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
}
if fromCluster != nil {
itemExists = true
itemStatus := ctx.restoredItems[itemKey]
itemStatus.itemExists = itemExists
ctx.restoredItems[itemKey] = itemStatus
// Remove insubstantial metadata.
fromCluster, err = resetMetadataAndStatus(fromCluster)
if err != nil {
ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
warnings.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
// We know the object from the cluster won't have the backup/restore name
@@ -1312,20 +1372,20 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
if err != nil {
ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
warnings.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
patchBytes, err := generatePatch(fromCluster, desired)
if err != nil {
ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
warnings.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
if patchBytes == nil {
// In-cluster and desired state are the same, so move on to
// the next item.
return warnings, errs
return warnings, errs, itemExists
}
_, err = resourceClient.Patch(name, patchBytes)
@@ -1341,7 +1401,8 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
errs.Merge(&errsFromUpdate)
}
} else {
ctx.restoredItems[itemKey] = itemRestoreResultUpdated
itemStatus.action = itemRestoreResultUpdated
ctx.restoredItems[itemKey] = itemStatus
ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
}
default:
@@ -1360,7 +1421,8 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
// processing update as existingResourcePolicy
warningsFromUpdateRP, errsFromUpdateRP := ctx.processUpdateResourcePolicy(fromCluster, fromClusterWithLabels, obj, namespace, resourceClient)
if warningsFromUpdateRP.IsEmpty() && errsFromUpdateRP.IsEmpty() {
ctx.restoredItems[itemKey] = itemRestoreResultUpdated
itemStatus.action = itemRestoreResultUpdated
ctx.restoredItems[itemKey] = itemStatus
}
warnings.Merge(&warningsFromUpdateRP)
errs.Merge(&errsFromUpdateRP)
@@ -1372,7 +1434,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
warnings.Add(namespace, e)
}
}
return warnings, errs
return warnings, errs, itemExists
}
//update backup/restore labels on the unchanged resources if existingResourcePolicy is set as update
@@ -1388,14 +1450,14 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
}
ctx.log.Infof("Restore of %s, %v skipped: it already exists in the cluster and is the same as the backed up version", obj.GroupVersionKind().Kind, name)
return warnings, errs
return warnings, errs, itemExists
}
// Error was something other than an AlreadyExists.
if restoreErr != nil {
ctx.log.Errorf("error restoring %s: %+v", name, restoreErr)
errs.Add(namespace, fmt.Errorf("error restoring %s: %v", resourceID, restoreErr))
return warnings, errs
return warnings, errs, itemExists
}
shouldRestoreStatus := ctx.resourceStatusIncludesExcludes != nil && ctx.resourceStatusIncludesExcludes.ShouldInclude(groupResource.String())
@@ -1403,7 +1465,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
err := fmt.Errorf("could not get status to be restored %s: %v", kube.NamespaceAndName(obj), statusFieldErr)
ctx.log.Errorf(err.Error())
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
ctx.log.Debugf("status field for %s: exists: %v, should restore: %v", groupResource, statusFieldExists, shouldRestoreStatus)
// if it should restore status, run a UpdateStatus
@@ -1411,7 +1473,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
if err := unstructured.SetNestedField(obj.Object, objStatus, "status"); err != nil {
ctx.log.Errorf("could not set status field %s: %v", kube.NamespaceAndName(obj), err)
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
obj.SetResourceVersion(createdObj.GetResourceVersion())
updated, err := resourceClient.UpdateStatus(obj, metav1.UpdateOptions{})
@@ -1430,13 +1492,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
if err != nil {
ctx.log.Errorf("error generating patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
if patchBytes != nil {
if _, err = resourceClient.Patch(name, patchBytes); err != nil {
ctx.log.Errorf("error patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
}
@@ -1445,7 +1507,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
pod := new(v1.Pod)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil {
errs.Add(namespace, err)
return warnings, errs
return warnings, errs, itemExists
}
// Do not create podvolumerestore when current restore excludes pv/pvc
@@ -1471,7 +1533,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
}
}
return warnings, errs
return warnings, errs, itemExists
}
func isAlreadyExistsError(ctx *restoreContext, obj *unstructured.Unstructured, err error, client client.Dynamic) (bool, error) {

@@ -862,7 +862,7 @@ func TestRestoreItems(t *testing.T) {
apiResources []*test.APIResource
tarball io.Reader
want []*test.APIResource
expectedRestoreItems map[itemKey]string
expectedRestoreItems map[itemKey]restoredItemStatus
}{
{
name: "metadata uid/resourceVersion/etc. gets removed",
@@ -894,9 +894,9 @@ func TestRestoreItems(t *testing.T) {
Result(),
),
},
expectedRestoreItems: map[itemKey]string{
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: "created",
{resource: "v1/Pod", namespace: "ns-1", name: "pod-1"}: "created",
expectedRestoreItems: map[itemKey]restoredItemStatus{
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true},
{resource: "v1/Pod", namespace: "ns-1", name: "pod-1"}: {action: "created", itemExists: true},
},
},
{
@@ -1004,9 +1004,9 @@ func TestRestoreItems(t *testing.T) {
want: []*test.APIResource{
test.ServiceAccounts(builder.ForServiceAccount("ns-1", "sa-1").Result()),
},
expectedRestoreItems: map[itemKey]string{
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: "created",
{resource: "v1/ServiceAccount", namespace: "ns-1", name: "sa-1"}: "skipped",
expectedRestoreItems: map[itemKey]restoredItemStatus{
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true},
{resource: "v1/ServiceAccount", namespace: "ns-1", name: "sa-1"}: {action: "skipped", itemExists: true},
},
},
{
@@ -1022,9 +1022,9 @@ func TestRestoreItems(t *testing.T) {
want: []*test.APIResource{
test.Secrets(builder.ForSecret("ns-1", "sa-1").ObjectMeta(builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1")).Data(map[string][]byte{"key-1": []byte("value-1")}).Result()),
},
expectedRestoreItems: map[itemKey]string{
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: "created",
{resource: "v1/Secret", namespace: "ns-1", name: "sa-1"}: "updated",
expectedRestoreItems: map[itemKey]restoredItemStatus{
{resource: "v1/Namespace", namespace: "", name: "ns-1"}: {action: "created", itemExists: true},
{resource: "v1/Secret", namespace: "ns-1", name: "sa-1"}: {action: "updated", itemExists: true},
},
},
{
@@ -1183,7 +1183,7 @@ func TestRestoreItems(t *testing.T) {
PodVolumeBackups: nil,
VolumeSnapshots: nil,
BackupReader: tc.tarball,
RestoredItems: map[itemKey]string{},
RestoredItems: map[itemKey]restoredItemStatus{},
}
warnings, errs := h.restorer.Restore(
data,