velero/pkg/restore/restore.go

/*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
go_context "context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
api "github.com/heptio/velero/pkg/apis/velero/v1"
"github.com/heptio/velero/pkg/client"
"github.com/heptio/velero/pkg/cloudprovider"
"github.com/heptio/velero/pkg/discovery"
listers "github.com/heptio/velero/pkg/generated/listers/velero/v1"
"github.com/heptio/velero/pkg/kuberesource"
"github.com/heptio/velero/pkg/restic"
"github.com/heptio/velero/pkg/util/collections"
"github.com/heptio/velero/pkg/util/filesystem"
"github.com/heptio/velero/pkg/util/kube"
velerosync "github.com/heptio/velero/pkg/util/sync"
"github.com/heptio/velero/pkg/volume"
)
type BlockStoreGetter interface {
GetBlockStore(name string) (cloudprovider.BlockStore, error)
}
// Restorer knows how to restore a backup.
type Restorer interface {
// Restore restores the backup data from backupReader, returning warnings and errors.
Restore(log logrus.FieldLogger,
restore *api.Restore,
backup *api.Backup,
volumeSnapshots []*volume.Snapshot,
backupReader io.Reader,
actions []ItemAction,
snapshotLocationLister listers.VolumeSnapshotLocationLister,
blockStoreGetter BlockStoreGetter,
) (api.RestoreResult, api.RestoreResult)
}
// kubernetesRestorer implements Restorer for restoring into a Kubernetes cluster.
type kubernetesRestorer struct {
discoveryHelper discovery.Helper
dynamicFactory client.DynamicFactory
namespaceClient corev1.NamespaceInterface
resticRestorerFactory restic.RestorerFactory
resticTimeout time.Duration
resourceTerminatingTimeout time.Duration
resourcePriorities []string
fileSystem filesystem.Interface
logger logrus.FieldLogger
}
// prioritizeResources returns an ordered, fully-resolved list of resources to restore based on
// the provided discovery helper, resource priorities, and included/excluded resources.
func prioritizeResources(helper discovery.Helper, priorities []string, includedResources *collections.IncludesExcludes, logger logrus.FieldLogger) ([]schema.GroupResource, error) {
var ret []schema.GroupResource
// set keeps track of resolved GroupResource names
set := sets.NewString()
// start by resolving priorities into GroupResources and adding them to ret
for _, r := range priorities {
gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(r).WithVersion(""))
if err != nil {
return nil, err
}
gr := gvr.GroupResource()
if !includedResources.ShouldInclude(gr.String()) {
logger.WithField("groupResource", gr).Info("Not including resource")
continue
}
ret = append(ret, gr)
set.Insert(gr.String())
}
// go through everything we got from discovery and add anything not in "set" to byName
var byName []schema.GroupResource
for _, resourceGroup := range helper.Resources() {
// will be something like storage.k8s.io/v1
groupVersion, err := schema.ParseGroupVersion(resourceGroup.GroupVersion)
if err != nil {
return nil, err
}
for _, resource := range resourceGroup.APIResources {
gr := groupVersion.WithResource(resource.Name).GroupResource()
if !includedResources.ShouldInclude(gr.String()) {
logger.WithField("groupResource", gr.String()).Info("Not including resource")
continue
}
if !set.Has(gr.String()) {
byName = append(byName, gr)
}
}
}
// sort byName by name
sort.Slice(byName, func(i, j int) bool {
return byName[i].String() < byName[j].String()
})
// combine prioritized with by-name
ret = append(ret, byName...)
return ret, nil
}
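// Illustrative sketch only (not called by Velero): shows the ordering contract of
// prioritizeResources using hypothetical inputs. Prioritized resources come first, in the
// order given; everything else discovered in the cluster follows alphabetically.
func examplePriorityOrdering() []schema.GroupResource {
	priorities := []schema.GroupResource{
		{Resource: "namespaces"},
		{Resource: "persistentvolumes"},
	}
	discovered := []schema.GroupResource{
		{Group: "apps", Resource: "deployments"},
		{Resource: "pods"},
		{Resource: "persistentvolumes"}, // already prioritized, so not repeated below
	}
	seen := sets.NewString()
	ret := append([]schema.GroupResource{}, priorities...)
	for _, gr := range priorities {
		seen.Insert(gr.String())
	}
	var rest []schema.GroupResource
	for _, gr := range discovered {
		if !seen.Has(gr.String()) {
			rest = append(rest, gr)
		}
	}
	sort.Slice(rest, func(i, j int) bool { return rest[i].String() < rest[j].String() })
	// Result: namespaces, persistentvolumes, deployments.apps, pods
	return append(ret, rest...)
}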
// NewKubernetesRestorer creates a new kubernetesRestorer.
func NewKubernetesRestorer(
discoveryHelper discovery.Helper,
dynamicFactory client.DynamicFactory,
resourcePriorities []string,
namespaceClient corev1.NamespaceInterface,
resticRestorerFactory restic.RestorerFactory,
resticTimeout time.Duration,
resourceTerminatingTimeout time.Duration,
logger logrus.FieldLogger,
) (Restorer, error) {
return &kubernetesRestorer{
discoveryHelper: discoveryHelper,
dynamicFactory: dynamicFactory,
namespaceClient: namespaceClient,
resticRestorerFactory: resticRestorerFactory,
resticTimeout: resticTimeout,
resourceTerminatingTimeout: resourceTerminatingTimeout,
resourcePriorities: resourcePriorities,
logger: logger,
fileSystem: filesystem.NewFileSystem(),
}, nil
}
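// Hypothetical wiring sketch (not part of Velero's server code): shows how a caller might
// construct a Restorer. The dependency parameters, the priority list, and the timeout values
// here are assumptions for illustration; the real values come from the server's configuration.
func exampleNewRestorer(
	helper discovery.Helper,
	factory client.DynamicFactory,
	nsClient corev1.NamespaceInterface,
	resticFactory restic.RestorerFactory,
	logger logrus.FieldLogger,
) (Restorer, error) {
	// Namespaces and PVs/PVCs are listed first so that the objects depending on them
	// have something to land on when they're restored later.
	priorities := []string{"namespaces", "persistentvolumes", "persistentvolumeclaims", "secrets", "configmaps"}
	return NewKubernetesRestorer(
		helper,
		factory,
		priorities,
		nsClient,
		resticFactory,
		time.Hour,      // restic (pod volume) operation timeout
		10*time.Minute, // resource terminating timeout used when waiting on namespaces/PVs
		logger,
	)
}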
// Restore executes a restore into the target Kubernetes cluster according to the restore spec
// and using data from the provided backup/backup reader. It returns two RestoreResults: the
// first summarizes warnings and the second summarizes errors encountered during the restore.
func (kr *kubernetesRestorer) Restore(
log logrus.FieldLogger,
restore *api.Restore,
backup *api.Backup,
volumeSnapshots []*volume.Snapshot,
backupReader io.Reader,
actions []ItemAction,
snapshotLocationLister listers.VolumeSnapshotLocationLister,
blockStoreGetter BlockStoreGetter,
) (api.RestoreResult, api.RestoreResult) {
// metav1.LabelSelectorAsSelector converts a nil LabelSelector to a
// Nothing Selector, i.e. a selector that matches nothing. We want
// a selector that matches everything. This can be accomplished by
// passing a non-nil empty LabelSelector.
ls := restore.Spec.LabelSelector
if ls == nil {
ls = &metav1.LabelSelector{}
}
selector, err := metav1.LabelSelectorAsSelector(ls)
if err != nil {
return api.RestoreResult{}, api.RestoreResult{Velero: []string{err.Error()}}
}
// get resource includes-excludes
resourceIncludesExcludes := getResourceIncludesExcludes(kr.discoveryHelper, restore.Spec.IncludedResources, restore.Spec.ExcludedResources)
prioritizedResources, err := prioritizeResources(kr.discoveryHelper, kr.resourcePriorities, resourceIncludesExcludes, log)
if err != nil {
return api.RestoreResult{}, api.RestoreResult{Velero: []string{err.Error()}}
}
resolvedActions, err := resolveActions(actions, kr.discoveryHelper)
if err != nil {
return api.RestoreResult{}, api.RestoreResult{Velero: []string{err.Error()}}
}
podVolumeTimeout := kr.resticTimeout
if val := restore.Annotations[api.PodVolumeOperationTimeoutAnnotation]; val != "" {
parsed, err := time.ParseDuration(val)
if err != nil {
log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val)
} else {
podVolumeTimeout = parsed
}
}
ctx, cancelFunc := go_context.WithTimeout(go_context.Background(), podVolumeTimeout)
defer cancelFunc()
var resticRestorer restic.Restorer
if kr.resticRestorerFactory != nil {
resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, restore)
if err != nil {
return api.RestoreResult{}, api.RestoreResult{Velero: []string{err.Error()}}
}
}
pvRestorer := &pvRestorer{
logger: log,
backup: backup,
snapshotVolumes: backup.Spec.SnapshotVolumes,
restorePVs: restore.Spec.RestorePVs,
volumeSnapshots: volumeSnapshots,
blockStoreGetter: blockStoreGetter,
snapshotLocationLister: snapshotLocationLister,
}
restoreCtx := &context{
backup: backup,
backupReader: backupReader,
restore: restore,
prioritizedResources: prioritizedResources,
selector: selector,
log: log,
dynamicFactory: kr.dynamicFactory,
fileSystem: kr.fileSystem,
namespaceClient: kr.namespaceClient,
actions: resolvedActions,
blockStoreGetter: blockStoreGetter,
resticRestorer: resticRestorer,
pvsToProvision: sets.NewString(),
pvRestorer: pvRestorer,
volumeSnapshots: volumeSnapshots,
resourceTerminatingTimeout: kr.resourceTerminatingTimeout,
extractor: &backupExtractor{
log: log,
fileSystem: kr.fileSystem,
},
}
return restoreCtx.execute()
}
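// Minimal sketch, assuming a caller wants to override the server-wide restic timeout for a
// single restore: Restore above reads this annotation and parses it with time.ParseDuration,
// falling back to the configured default if parsing fails.
func exampleSetPodVolumeTimeout(restore *api.Restore) {
	if restore.Annotations == nil {
		restore.Annotations = map[string]string{}
	}
	restore.Annotations[api.PodVolumeOperationTimeoutAnnotation] = (30 * time.Minute).String()
}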
// getResourceIncludesExcludes takes the lists of resources to include and exclude, uses the
// discovery helper to resolve them to fully-qualified group-resource names, and returns an
// IncludesExcludes list.
func getResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string) *collections.IncludesExcludes {
resources := collections.GenerateIncludesExcludes(
includes,
excludes,
func(item string) string {
gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion(""))
if err != nil {
return ""
}
gr := gvr.GroupResource()
return gr.String()
},
)
return resources
}
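// Minimal sketch of the include/exclude semantics relied on above, using the same collections
// helpers (the resource names here are illustrative). getResourceIncludesExcludes additionally
// resolves short or partial names to fully-qualified group-resources via the discovery helper.
func exampleIncludesExcludes() {
	ie := collections.NewIncludesExcludes().
		Includes("*").
		Excludes("events", "nodes")
	fmt.Println(ie.ShouldInclude("pods"))   // true: matched by the wildcard include
	fmt.Println(ie.ShouldInclude("events")) // false: explicitly excluded
}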
type resolvedAction struct {
ItemAction
resourceIncludesExcludes *collections.IncludesExcludes
namespaceIncludesExcludes *collections.IncludesExcludes
selector labels.Selector
}
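// resolveActions evaluates each restore ItemAction's AppliesTo() selector once up front,
// resolving it into namespace and resource includes/excludes lists plus a parsed label
// selector, so that restoreResource can cheaply check per item whether an action applies.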
func resolveActions(actions []ItemAction, helper discovery.Helper) ([]resolvedAction, error) {
var resolved []resolvedAction
for _, action := range actions {
resourceSelector, err := action.AppliesTo()
if err != nil {
return nil, err
}
resources := getResourceIncludesExcludes(helper, resourceSelector.IncludedResources, resourceSelector.ExcludedResources)
namespaces := collections.NewIncludesExcludes().Includes(resourceSelector.IncludedNamespaces...).Excludes(resourceSelector.ExcludedNamespaces...)
selector := labels.Everything()
if resourceSelector.LabelSelector != "" {
if selector, err = labels.Parse(resourceSelector.LabelSelector); err != nil {
return nil, err
}
}
res := resolvedAction{
ItemAction: action,
resourceIncludesExcludes: resources,
namespaceIncludesExcludes: namespaces,
selector: selector,
}
resolved = append(resolved, res)
}
return resolved, nil
}
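// context holds the state for a single restore operation: the backup tarball reader and specs,
// the resolved resource ordering and actions, the clients used to create objects, and shared
// accumulators such as pvsToProvision and the global wait group for restic pod volume restores.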
type context struct {
backup *api.Backup
backupReader io.Reader
restore *api.Restore
prioritizedResources []schema.GroupResource
selector labels.Selector
log logrus.FieldLogger
dynamicFactory client.DynamicFactory
fileSystem filesystem.Interface
namespaceClient corev1.NamespaceInterface
actions []resolvedAction
blockStoreGetter BlockStoreGetter
resticRestorer restic.Restorer
globalWaitGroup velerosync.ErrorGroup
pvsToProvision sets.String
pvRestorer PVRestorer
volumeSnapshots []*volume.Snapshot
resourceTerminatingTimeout time.Duration
extractor *backupExtractor
}
func (ctx *context) execute() (api.RestoreResult, api.RestoreResult) {
ctx.log.Infof("Starting restore of backup %s", kube.NamespaceAndName(ctx.backup))
dir, err := ctx.extractor.unzipAndExtractBackup(ctx.backupReader)
if err != nil {
ctx.log.Infof("error unzipping and extracting: %v", err)
return api.RestoreResult{}, api.RestoreResult{Velero: []string{err.Error()}}
}
defer ctx.fileSystem.RemoveAll(dir)
return ctx.restoreFromDir(dir)
}
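// For reference, the directory produced by unzipAndExtractBackup is laid out roughly as
// follows (names taken from api.ResourcesDir, api.ClusterScopedDir, and api.NamespaceScopedDir;
// the specific resources shown are illustrative):
//
//   resources/
//     persistentvolumes/
//       cluster/
//         pv-1.json
//     deployments.apps/
//       namespaces/
//         web/
//           nginx.json
//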
// restoreFromDir executes a restore based on backup data contained within a local
// directory.
func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreResult) {
warnings, errs := api.RestoreResult{}, api.RestoreResult{}
namespaceFilter := collections.NewIncludesExcludes().
Includes(ctx.restore.Spec.IncludedNamespaces...).
Excludes(ctx.restore.Spec.ExcludedNamespaces...)
// Make sure the top level "resources" dir exists:
resourcesDir := filepath.Join(dir, api.ResourcesDir)
rde, err := ctx.fileSystem.DirExists(resourcesDir)
if err != nil {
addVeleroError(&errs, err)
return warnings, errs
}
if !rde {
addVeleroError(&errs, errors.New("backup does not contain top level resources directory"))
return warnings, errs
}
resourceDirs, err := ctx.fileSystem.ReadDir(resourcesDir)
if err != nil {
addVeleroError(&errs, err)
return warnings, errs
}
resourceDirsMap := make(map[string]os.FileInfo)
for _, rscDir := range resourceDirs {
rscName := rscDir.Name()
resourceDirsMap[rscName] = rscDir
}
existingNamespaces := sets.NewString()
for _, resource := range ctx.prioritizedResources {
// we don't want to explicitly restore namespace API objs because we'll handle
// them as a special case prior to restoring anything into them
if resource == kuberesource.Namespaces {
continue
}
rscDir := resourceDirsMap[resource.String()]
if rscDir == nil {
continue
}
resourcePath := filepath.Join(resourcesDir, rscDir.Name())
clusterSubDir := filepath.Join(resourcePath, api.ClusterScopedDir)
clusterSubDirExists, err := ctx.fileSystem.DirExists(clusterSubDir)
if err != nil {
addVeleroError(&errs, err)
return warnings, errs
}
if clusterSubDirExists {
w, e := ctx.restoreResource(resource.String(), "", clusterSubDir)
merge(&warnings, &w)
merge(&errs, &e)
continue
}
nsSubDir := filepath.Join(resourcePath, api.NamespaceScopedDir)
nsSubDirExists, err := ctx.fileSystem.DirExists(nsSubDir)
if err != nil {
addVeleroError(&errs, err)
return warnings, errs
}
if !nsSubDirExists {
continue
}
nsDirs, err := ctx.fileSystem.ReadDir(nsSubDir)
if err != nil {
addVeleroError(&errs, err)
return warnings, errs
}
for _, nsDir := range nsDirs {
if !nsDir.IsDir() {
continue
}
nsName := nsDir.Name()
nsPath := filepath.Join(nsSubDir, nsName)
if !namespaceFilter.ShouldInclude(nsName) {
ctx.log.Infof("Skipping namespace %s", nsName)
continue
}
// fetch mapped NS name
mappedNsName := nsName
if target, ok := ctx.restore.Spec.NamespaceMapping[nsName]; ok {
mappedNsName = target
}
// if we don't know whether this namespace exists yet, attempt to create
// it in order to ensure it exists. Try to get it from the backup tarball
// (in order to get any backed-up metadata), but if we don't find it there,
// create a blank one.
if !existingNamespaces.Has(mappedNsName) {
logger := ctx.log.WithField("namespace", nsName)
ns := getNamespace(logger, filepath.Join(dir, api.ResourcesDir, "namespaces", api.ClusterScopedDir, nsName+".json"), mappedNsName)
if _, err := kube.EnsureNamespaceExistsAndIsReady(ns, ctx.namespaceClient, ctx.resourceTerminatingTimeout); err != nil {
addVeleroError(&errs, err)
continue
}
// keep track of namespaces that we know exist so we don't
// have to try to create them multiple times
existingNamespaces.Insert(mappedNsName)
}
w, e := ctx.restoreResource(resource.String(), mappedNsName, nsPath)
merge(&warnings, &w)
merge(&errs, &e)
}
}
// TODO timeout?
ctx.log.Debug("Waiting on global wait group")
waitErrs := ctx.globalWaitGroup.Wait()
ctx.log.Debug("Done waiting on global wait group")
for _, err := range waitErrs {
// TODO not ideal to be adding these to Velero-level errors
// rather than a specific namespace, but don't have a way
// to track the namespace right now.
errs.Velero = append(errs.Velero, err.Error())
}
return warnings, errs
}
// getNamespace returns a namespace API object that we should attempt to
// create before restoring anything into it. It will come from the backup
// tarball if it exists, else will be a new one. If from the tarball, it
// will retain its labels, annotations, and spec.
func getNamespace(logger logrus.FieldLogger, path, remappedName string) *v1.Namespace {
var nsBytes []byte
var err error
if nsBytes, err = ioutil.ReadFile(path); err != nil {
return &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: remappedName,
},
}
}
var backupNS v1.Namespace
if err := json.Unmarshal(nsBytes, &backupNS); err != nil {
logger.WithError(err).Warn("Error unmarshalling namespace from backup, creating a new one")
return &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: remappedName,
},
}
}
return &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: remappedName,
Labels: backupNS.Labels,
Annotations: backupNS.Annotations,
},
Spec: backupNS.Spec,
}
}
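// Illustrative sketch of how getNamespace interacts with namespace mapping: restoring the
// backed-up "web" namespace under the remapped name "web-staging" while keeping its labels,
// annotations, and spec. The file name and mapping here are hypothetical.
func exampleGetNamespaceRemap(logger logrus.FieldLogger, backupDir string) *v1.Namespace {
	nsJSON := filepath.Join(backupDir, api.ResourcesDir, "namespaces", api.ClusterScopedDir, "web.json")
	return getNamespace(logger, nsJSON, "web-staging")
}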
// merge combines two RestoreResult objects into one
// by appending the corresponding lists to one another.
func merge(a, b *api.RestoreResult) {
a.Cluster = append(a.Cluster, b.Cluster...)
a.Velero = append(a.Velero, b.Velero...)
for k, v := range b.Namespaces {
if a.Namespaces == nil {
a.Namespaces = make(map[string][]string)
}
a.Namespaces[k] = append(a.Namespaces[k], v...)
}
}
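// Tiny usage sketch for merge: namespace-keyed entries and the flat Cluster/Velero lists are
// all appended, so results from individual restoreResource calls accumulate into one summary.
func exampleMergeResults() api.RestoreResult {
	warnings := api.RestoreResult{Velero: []string{"global warning"}}
	more := api.RestoreResult{Namespaces: map[string][]string{"web": {"pod nginx already exists"}}}
	merge(&warnings, &more)
	return warnings // now carries both the Velero-level entry and the "web" namespace entry
}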
// addVeleroError appends an error to the provided RestoreResult's Velero list.
func addVeleroError(r *api.RestoreResult, err error) {
r.Velero = append(r.Velero, err.Error())
}
// addToResult appends an error to the provided RestoreResult, either within
// the cluster-scoped list (if ns == "") or within the provided namespace's
// entry.
func addToResult(r *api.RestoreResult, ns string, e error) {
if ns == "" {
r.Cluster = append(r.Cluster, e.Error())
} else {
if r.Namespaces == nil {
r.Namespaces = make(map[string][]string)
}
r.Namespaces[ns] = append(r.Namespaces[ns], e.Error())
}
}
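// shouldRestore polls the cluster, up to resourceTerminatingTimeout, to decide whether a
// PersistentVolume from the backup should be restored. It returns true once the in-cluster PV
// is gone, and false (without error) if the PV still exists and neither it, its bound PVC, nor
// their namespace is terminating. While any of those are being deleted it keeps waiting, and
// returns wait.ErrWaitTimeout if they never go away.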
func (ctx *context) shouldRestore(name string, pvClient client.Dynamic) (bool, error) {
pvLogger := ctx.log.WithField("pvName", name)
var shouldRestore bool
err := wait.PollImmediate(time.Second, ctx.resourceTerminatingTimeout, func() (bool, error) {
unstructuredPV, err := pvClient.Get(name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
pvLogger.Debug("PV not found, safe to restore")
// PV not found, can safely exit loop and proceed with restore.
shouldRestore = true
return true, nil
}
if err != nil {
return false, errors.Wrapf(err, "could not retrieve in-cluster copy of PV %s", name)
}
clusterPV := new(v1.PersistentVolume)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, clusterPV); err != nil {
return false, errors.Wrap(err, "error converting PV from unstructured")
}
if clusterPV.Status.Phase == v1.VolumeReleased || clusterPV.DeletionTimestamp != nil {
// PV was found and marked for deletion, or it was released; wait for it to go away.
pvLogger.Debugf("PV found, but marked for deletion, waiting")
return false, nil
}
// Check the namespace and PVC to see if anything referencing the PV is being deleted.
// If either the namespace or the PVC is in a deleting/terminating state, wait for them to
// finish before trying to restore the PV.
// Not doing so may result in the underlying PV disappearing but not being restored due to
// timing issues, and the PVC then being restored and showing as lost.
if clusterPV.Spec.ClaimRef == nil {
pvLogger.Debugf("PV is not marked for deletion and is not claimed by a PVC")
return true, nil
}
namespace := clusterPV.Spec.ClaimRef.Namespace
pvcName := clusterPV.Spec.ClaimRef.Name
// We have to create the PVC client here because we don't know which namespace we're using until we get to this point.
// Using a dynamic client since it's easier to mock for testing
pvcResource := metav1.APIResource{Name: "persistentvolumeclaims", Namespaced: true}
pvcClient, err := ctx.dynamicFactory.ClientForGroupVersionResource(schema.GroupVersion{Group: "", Version: "v1"}, pvcResource, namespace)
if err != nil {
return false, errors.Wrapf(err, "error getting pvc client")
}
pvc, err := pvcClient.Get(pvcName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
pvLogger.Debugf("PVC %s for PV not found, waiting", pvcName)
// PVC wasn't found, but the PV still exists, so continue to wait.
return false, nil
}
if err != nil {
return false, errors.Wrapf(err, "error getting claim %s for persistent volume", pvcName)
}
if pvc != nil && pvc.GetDeletionTimestamp() != nil {
pvLogger.Debugf("PVC for PV marked for deletion, waiting")
// PVC is still deleting, continue to wait.
return false, nil
}
// Check the namespace associated with the claimRef to see if it's deleting/terminating before proceeding
ns, err := ctx.namespaceClient.Get(namespace, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
pvLogger.Debugf("namespace %s for PV not found, waiting", namespace)
// namespace not found but the PV still exists, so continue to wait
return false, nil
}
if err != nil {
return false, errors.Wrapf(err, "error getting namespace %s associated with PV %s", namespace, name)
}
if ns != nil && (ns.GetDeletionTimestamp() != nil || ns.Status.Phase == v1.NamespaceTerminating) {
pvLogger.Debugf("namespace %s associated with PV is deleting, waiting", namespace)
// namespace is in the process of deleting, keep looping
return false, nil
}
// None of the PV, PVC, or NS are marked for deletion, break the loop.
pvLogger.Debug("PV, associated PVC and namespace are not marked for deletion")
return true, nil
})
if err == wait.ErrWaitTimeout {
pvLogger.Debug("timeout reached waiting for persistent volume to delete")
}
return shouldRestore, err
}
// restoreResource restores the specified cluster or namespace scoped resource. If namespace is
// empty we are restoring a cluster level resource, otherwise into the specified namespace.
func (ctx *context) restoreResource(resource, namespace, resourcePath string) (api.RestoreResult, api.RestoreResult) {
warnings, errs := api.RestoreResult{}, api.RestoreResult{}
if ctx.restore.Spec.IncludeClusterResources != nil && !*ctx.restore.Spec.IncludeClusterResources && namespace == "" {
ctx.log.Infof("Skipping resource %s because it's cluster-scoped", resource)
return warnings, errs
}
if namespace != "" {
ctx.log.Infof("Restoring resource '%s' into namespace '%s' from: %s", resource, namespace, resourcePath)
} else {
ctx.log.Infof("Restoring cluster level resource '%s' from: %s", resource, resourcePath)
}
files, err := ctx.fileSystem.ReadDir(resourcePath)
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error reading %q resource directory: %v", resource, err))
return warnings, errs
}
if len(files) == 0 {
return warnings, errs
}
var (
resourceClient client.Dynamic
groupResource = schema.ParseGroupResource(resource)
applicableActions []resolvedAction
)
// pre-filter the actions based on namespace & resource includes/excludes since
// these will be the same for all items being restored below
for _, action := range ctx.actions {
if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
continue
}
if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) {
continue
}
applicableActions = append(applicableActions, action)
}
for _, file := range files {
fullPath := filepath.Join(resourcePath, file.Name())
obj, err := ctx.unmarshal(fullPath)
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error decoding %q: %v", fullPath, err))
continue
}
// make a copy of object retrieved from backup
// to make it available unchanged inside restore actions
itemFromBackup := obj.DeepCopy()
if !ctx.selector.Matches(labels.Set(obj.GetLabels())) {
continue
}
complete, err := isCompleted(obj, groupResource)
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error checking completion %q: %v", fullPath, err))
continue
}
if complete {
ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj))
continue
}
if resourceClient == nil {
// initialize client for this Resource. we need
// metadata from an object to do this.
ctx.log.Infof("Getting client for %v", obj.GroupVersionKind())
resource := metav1.APIResource{
Namespaced: len(namespace) > 0,
Name: groupResource.Resource,
}
var err error
resourceClient, err = ctx.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace)
if err != nil {
addVeleroError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
return warnings, errs
}
}
name := obj.GetName()
// TODO: move to restore item action if/when we add a ShouldRestore() method to the interface
if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" {
ctx.log.Infof("Not restoring pod because it's a mirror pod")
continue
}
if groupResource == kuberesource.PersistentVolumes {
var hasSnapshot bool
if len(ctx.backup.Status.VolumeBackups) > 0 {
// pre-v0.10 backup
_, hasSnapshot = ctx.backup.Status.VolumeBackups[name]
} else {
// v0.10+ backup
for _, snapshot := range ctx.volumeSnapshots {
if snapshot.Spec.PersistentVolumeName == name {
hasSnapshot = true
break
}
}
}
if !hasSnapshot && hasDeleteReclaimPolicy(obj.Object) {
ctx.log.Infof("Not restoring PV because it doesn't have a snapshot and its reclaim policy is Delete.")
ctx.pvsToProvision.Insert(name)
continue
}
// Check if the PV exists in the cluster before attempting to create
// a volume from the snapshot, in order to avoid orphaned volumes (GH #609)
shouldRestoreSnapshot, err := ctx.shouldRestore(name, resourceClient)
if err != nil {
addToResult(&errs, namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name))
continue
}
// If the PV still exists in the cluster, skip the snapshot-based restore; the existing object
// will be picked up by the AlreadyExists handling when we try to create it below.
if shouldRestoreSnapshot {
// restore the PV from snapshot (if applicable)
updatedObj, err := ctx.pvRestorer.executePVAction(obj)
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", fullPath, err))
continue
}
obj = updatedObj
}
}
if groupResource == kuberesource.PersistentVolumeClaims {
pvc := new(v1.PersistentVolumeClaim)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil {
addToResult(&errs, namespace, err)
continue
}
if pvc.Spec.VolumeName != "" && ctx.pvsToProvision.Has(pvc.Spec.VolumeName) {
ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning because its PV %v has a reclaim policy of Delete", namespace, name, pvc.Spec.VolumeName)
pvc.Spec.VolumeName = ""
delete(pvc.Annotations, "pv.kubernetes.io/bind-completed")
delete(pvc.Annotations, "pv.kubernetes.io/bound-by-controller")
res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pvc)
if err != nil {
addToResult(&errs, namespace, err)
continue
}
obj.Object = res
}
}
// clear out non-core metadata fields & status
if obj, err = resetMetadataAndStatus(obj); err != nil {
addToResult(&errs, namespace, err)
continue
}
for _, action := range applicableActions {
if !action.selector.Matches(labels.Set(obj.GetLabels())) {
continue
}
ctx.log.Infof("Executing item action for %v", &groupResource)
executeOutput, err := action.Execute(&RestoreItemActionExecuteInput{
Item: obj,
ItemFromBackup: itemFromBackup,
Restore: ctx.restore,
})
if executeOutput.Warning != nil {
addToResult(&warnings, namespace, fmt.Errorf("warning preparing %s: %v", fullPath, executeOutput.Warning))
}
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error preparing %s: %v", fullPath, err))
continue
}
unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured)
if !ok {
addToResult(&errs, namespace, fmt.Errorf("%s: unexpected type %T", fullPath, executeOutput.UpdatedItem))
continue
}
obj = unstructuredObj
}
// We may have remapped the namespace above; only set it on the object when we're restoring
// into a namespace, so cluster-scoped objects don't get a namespace key added.
originalNamespace := obj.GetNamespace()
if namespace != "" {
obj.SetNamespace(namespace)
}
// label the resource with the restore's name and the restored backup's name
// for easy identification of all cluster resources created by this restore
// and which backup they came from
addRestoreLabels(obj, ctx.restore.Name, ctx.restore.Spec.BackupName)
ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name)
createdObj, restoreErr := resourceClient.Create(obj)
if apierrors.IsAlreadyExists(restoreErr) {
fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
if err != nil {
ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
addToResult(&warnings, namespace, err)
continue
}
// Remove insubstantial metadata
fromCluster, err = resetMetadataAndStatus(fromCluster)
if err != nil {
ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
addToResult(&warnings, namespace, err)
continue
}
// We know the object from the cluster won't have the backup/restore name labels, so
// copy them from the object we attempted to restore.
labels := obj.GetLabels()
addRestoreLabels(fromCluster, labels[api.RestoreNameLabel], labels[api.BackupNameLabel])
if equality.Semantic.DeepEqual(fromCluster, obj) {
ctx.log.Infof("Skipping restore of %s: %v because it already exists in the cluster and is unchanged from the backed up version", obj.GroupVersionKind().Kind, name)
} else {
switch groupResource {
case kuberesource.ServiceAccounts:
desired, err := mergeServiceAccounts(fromCluster, obj)
if err != nil {
ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
addToResult(&warnings, namespace, err)
break
}
patchBytes, err := generatePatch(fromCluster, desired)
if err != nil {
ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
addToResult(&warnings, namespace, err)
break
}
if patchBytes == nil {
// In-cluster and desired state are the same, so move on to the next item
break
}
_, err = resourceClient.Patch(name, patchBytes)
if err != nil {
addToResult(&warnings, namespace, err)
break
}
ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
default:
e := errors.Errorf("not restored: %s and is different from backed up version.", restoreErr)
addToResult(&warnings, namespace, e)
}
}
continue
}
// Error was something other than an AlreadyExists
if restoreErr != nil {
ctx.log.Infof("error restoring %s: %v", name, err)
addToResult(&errs, namespace, fmt.Errorf("error restoring %s: %v", fullPath, restoreErr))
continue
}
if groupResource == kuberesource.Pods && len(restic.GetPodSnapshotAnnotations(obj)) > 0 {
if ctx.resticRestorer == nil {
ctx.log.Warn("No restic restorer, not restoring pod's volumes")
} else {
ctx.globalWaitGroup.GoErrorSlice(func() []error {
pod := new(v1.Pod)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
ctx.log.WithError(err).Error("error converting unstructured pod")
return []error{err}
}
if errs := ctx.resticRestorer.RestorePodVolumes(ctx.restore, pod, originalNamespace, ctx.backup.Spec.StorageLocation, ctx.log); errs != nil {
ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes")
return errs
}
return nil
})
}
}
}
return warnings, errs
}
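// hasDeleteReclaimPolicy returns true if the unstructured PersistentVolume content has
// spec.persistentVolumeReclaimPolicy set to "Delete", meaning the underlying volume would have
// been deleted along with its claim and can't simply be re-created from the backed-up PV object.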
func hasDeleteReclaimPolicy(obj map[string]interface{}) bool {
policy, _, _ := unstructured.NestedString(obj, "spec", "persistentVolumeReclaimPolicy")
return policy == string(v1.PersistentVolumeReclaimDelete)
}
func resetMetadataAndStatus(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
res, ok := obj.Object["metadata"]
if !ok {
return nil, errors.New("metadata not found")
}
metadata, ok := res.(map[string]interface{})
if !ok {
return nil, errors.Errorf("metadata was of type %T, expected map[string]interface{}", res)
}
for k := range metadata {
switch k {
case "name", "namespace", "labels", "annotations":
default:
delete(metadata, k)
}
}
// Never restore status
delete(obj.UnstructuredContent(), "status")
return obj, nil
}
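// Minimal sketch of resetMetadataAndStatus on a hypothetical object: cluster-populated fields
// such as resourceVersion and uid are stripped, status is dropped, and only name, namespace,
// labels, and annotations survive under metadata.
func exampleResetMetadataAndStatus() (*unstructured.Unstructured, error) {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata": map[string]interface{}{
			"name":            "nginx",
			"namespace":       "default",
			"resourceVersion": "12345",
			"uid":             "1234-abcd",
		},
		"status": map[string]interface{}{"phase": "Running"},
	}}
	return resetMetadataAndStatus(obj)
}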
// addRestoreLabels labels the provided object with the restore name and
// the restored backup's name.
func addRestoreLabels(obj metav1.Object, restoreName, backupName string) {
labels := obj.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
labels[api.BackupNameLabel] = backupName
labels[api.RestoreNameLabel] = restoreName
// TODO(1.0): remove the below line, and remove the `RestoreLabelKey`
// constant from the API pkg, since it's been replaced with the
// namespaced label above.
labels[api.RestoreLabelKey] = restoreName
obj.SetLabels(labels)
}
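// Usage sketch for addRestoreLabels on a hypothetical object; after the call the object carries
// api.RestoreNameLabel and api.BackupNameLabel (plus the deprecated api.RestoreLabelKey noted above).
func exampleAddRestoreLabels() *unstructured.Unstructured {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "settings"},
	}}
	addRestoreLabels(obj, "nightly-restore-1", "nightly-backup-1")
	return obj
}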
// hasControllerOwner returns whether or not an object has a controller
// owner ref. Used to identify whether or not an object should be explicitly
// recreated during a restore.
func hasControllerOwner(refs []metav1.OwnerReference) bool {
for _, ref := range refs {
if ref.Controller != nil && *ref.Controller {
return true
}
}
return false
}
// isCompleted returns whether or not an object is considered completed and therefore should not
// be restored. Only Pods and Jobs are checked; all other resources are treated as not complete.
func isCompleted(obj *unstructured.Unstructured, groupResource schema.GroupResource) (bool, error) {
switch groupResource {
case kuberesource.Pods:
phase, _, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "phase")
if err != nil {
return false, errors.WithStack(err)
}
if phase == string(v1.PodFailed) || phase == string(v1.PodSucceeded) {
return true, nil
}
case kuberesource.Jobs:
ct, found, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "completionTime")
if err != nil {
return false, errors.WithStack(err)
}
if found && ct != "" {
return true, nil
}
}
// Assume any other resource isn't complete and can be restored
return false, nil
}
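// Minimal sketch: a pod whose status.phase is Succeeded (or Failed) is treated as complete by
// isCompleted and is therefore skipped by restoreResource above. Object contents are hypothetical.
func exampleIsCompleted() (bool, error) {
	succeeded := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata":   map[string]interface{}{"name": "job-runner"},
		"status":     map[string]interface{}{"phase": "Succeeded"},
	}}
	return isCompleted(succeeded, kuberesource.Pods)
}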
// unmarshal reads the specified file, unmarshals the JSON contained within it
// and returns an Unstructured object.
func (ctx *context) unmarshal(filePath string) (*unstructured.Unstructured, error) {
var obj unstructured.Unstructured
bytes, err := ctx.fileSystem.ReadFile(filePath)
if err != nil {
return nil, err
}
err = json.Unmarshal(bytes, &obj)
if err != nil {
return nil, err
}
return &obj, nil
}
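// Minimal standalone sketch of what unmarshal does for each backed-up file, without going
// through the context's filesystem abstraction: the JSON document is decoded straight into an
// Unstructured object. The ConfigMap content here is hypothetical.
func exampleDecodeUnstructured() (*unstructured.Unstructured, error) {
	data := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"settings"}}`)
	var obj unstructured.Unstructured
	if err := json.Unmarshal(data, &obj); err != nil {
		return nil, err
	}
	return &obj, nil
}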