/*
Copyright 2017, 2019 the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package restore

import (
	go_context "context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/pkg/errors"
	uuid "github.com/satori/go.uuid"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	kubeerrs "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/archive"
	"github.com/vmware-tanzu/velero/pkg/client"
	"github.com/vmware-tanzu/velero/pkg/discovery"
	listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/kuberesource"
	"github.com/vmware-tanzu/velero/pkg/label"
	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
	"github.com/vmware-tanzu/velero/pkg/restic"
	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
	"github.com/vmware-tanzu/velero/pkg/util/collections"
	"github.com/vmware-tanzu/velero/pkg/util/filesystem"
	"github.com/vmware-tanzu/velero/pkg/util/kube"
	velerosync "github.com/vmware-tanzu/velero/pkg/util/sync"
	"github.com/vmware-tanzu/velero/pkg/volume"
)

// VolumeSnapshotterGetter knows how to look up a velero.VolumeSnapshotter by name.
type VolumeSnapshotterGetter interface {
	GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error)
}

// Request holds the inputs for a restore: the Restore API object itself, the backup it
// came from, the backup's pod volume backups and volume snapshots, and a reader for the
// backup contents.
type Request struct {
	*velerov1api.Restore

	Log              logrus.FieldLogger
	Backup           *velerov1api.Backup
	PodVolumeBackups []*velerov1api.PodVolumeBackup
	VolumeSnapshots  []*volume.Snapshot
	BackupReader     io.Reader
}

// Restorer knows how to restore a backup.
type Restorer interface {
	// Restore restores the backup data from backupReader, returning warnings and errors.
	Restore(req Request,
		actions []velero.RestoreItemAction,
		snapshotLocationLister listers.VolumeSnapshotLocationLister,
		volumeSnapshotterGetter VolumeSnapshotterGetter,
	) (Result, Result)
}
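
// A minimal usage sketch (illustrative only, not from the original source); the
// restorer, logger, backup, restore, backupFile, actions, snapshotLocationLister,
// and volumeSnapshotterGetter identifiers are assumed to exist in the caller:
//
//	req := Request{
//		Restore:      restore,
//		Log:          logger,
//		Backup:       backup,
//		BackupReader: backupFile,
//	}
//	warnings, errs := restorer.Restore(req, actions, snapshotLocationLister, volumeSnapshotterGetter)
//	logger.Infof("restore finished with %d Velero-level errors", len(errs.Velero))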

// kubernetesRestorer implements Restorer for restoring into a Kubernetes cluster.
type kubernetesRestorer struct {
	discoveryHelper            discovery.Helper
	dynamicFactory             client.DynamicFactory
	namespaceClient            corev1.NamespaceInterface
	resticRestorerFactory      restic.RestorerFactory
	resticTimeout              time.Duration
	resourceTerminatingTimeout time.Duration
	resourcePriorities         []string
	fileSystem                 filesystem.Interface
	pvRenamer                  func(string) string
	logger                     logrus.FieldLogger
}

// prioritizeResources returns an ordered, fully-resolved list of resources to restore based on
// the provided discovery helper, resource priorities, and included/excluded resources.
func prioritizeResources(helper discovery.Helper, priorities []string, includedResources *collections.IncludesExcludes, logger logrus.FieldLogger) ([]schema.GroupResource, error) {
	var ret []schema.GroupResource

	// set keeps track of resolved GroupResource names
	set := sets.NewString()

	// start by resolving priorities into GroupResources and adding them to ret
	for _, r := range priorities {
		gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(r).WithVersion(""))
		if err != nil {
			return nil, err
		}
		gr := gvr.GroupResource()

		if !includedResources.ShouldInclude(gr.String()) {
			logger.WithField("groupResource", gr).Info("Not including resource")
			continue
		}

		ret = append(ret, gr)
		set.Insert(gr.String())
	}

	// go through everything we got from discovery and add anything not in "set" to byName
	var byName []schema.GroupResource
	for _, resourceGroup := range helper.Resources() {
		// will be something like storage.k8s.io/v1
		groupVersion, err := schema.ParseGroupVersion(resourceGroup.GroupVersion)
		if err != nil {
			return nil, err
		}

		for _, resource := range resourceGroup.APIResources {
			gr := groupVersion.WithResource(resource.Name).GroupResource()

			if !includedResources.ShouldInclude(gr.String()) {
				logger.WithField("groupResource", gr.String()).Info("Not including resource")
				continue
			}

			if !set.Has(gr.String()) {
				byName = append(byName, gr)
			}
		}
	}

	// sort byName by name
	sort.Slice(byName, func(i, j int) bool {
		return byName[i].String() < byName[j].String()
	})

	// combine prioritized with by-name
	ret = append(ret, byName...)

	return ret, nil
}
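
// Illustrative sketch (assumed identifiers: helper, includesExcludes, logger):
//
//	priorities := []string{"customresourcedefinitions", "namespaces", "persistentvolumes"}
//	resources, err := prioritizeResources(helper, priorities, includesExcludes, logger)
//	// resources starts with the prioritized group-resources, in the order given,
//	// followed by every other discovered group-resource sorted by name.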

// NewKubernetesRestorer creates a new kubernetesRestorer.
func NewKubernetesRestorer(
	discoveryHelper discovery.Helper,
	dynamicFactory client.DynamicFactory,
	resourcePriorities []string,
	namespaceClient corev1.NamespaceInterface,
	resticRestorerFactory restic.RestorerFactory,
	resticTimeout time.Duration,
	resourceTerminatingTimeout time.Duration,
	logger logrus.FieldLogger,
) (Restorer, error) {
	return &kubernetesRestorer{
		discoveryHelper:            discoveryHelper,
		dynamicFactory:             dynamicFactory,
		namespaceClient:            namespaceClient,
		resticRestorerFactory:      resticRestorerFactory,
		resticTimeout:              resticTimeout,
		resourceTerminatingTimeout: resourceTerminatingTimeout,
		resourcePriorities:         resourcePriorities,
		logger:                     logger,
		pvRenamer:                  func(string) string { return "velero-clone-" + uuid.NewV4().String() },
		fileSystem:                 filesystem.NewFileSystem(),
	}, nil
}

// Restore executes a restore into the target Kubernetes cluster according to the restore spec
// and using data from the provided backup/backup reader. It returns warnings and errors Results,
// respectively, summarizing info about the restore.
func (kr *kubernetesRestorer) Restore(
	req Request,
	actions []velero.RestoreItemAction,
	snapshotLocationLister listers.VolumeSnapshotLocationLister,
	volumeSnapshotterGetter VolumeSnapshotterGetter,
) (Result, Result) {
	// metav1.LabelSelectorAsSelector converts a nil LabelSelector to a
	// Nothing Selector, i.e. a selector that matches nothing. We want
	// a selector that matches everything. This can be accomplished by
	// passing a non-nil empty LabelSelector.
	ls := req.Restore.Spec.LabelSelector
	if ls == nil {
		ls = &metav1.LabelSelector{}
	}

	selector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		return Result{}, Result{Velero: []string{err.Error()}}
	}

	// get resource includes-excludes
	resourceIncludesExcludes := getResourceIncludesExcludes(kr.discoveryHelper, req.Restore.Spec.IncludedResources, req.Restore.Spec.ExcludedResources)

	prioritizedResources, err := prioritizeResources(kr.discoveryHelper, kr.resourcePriorities, resourceIncludesExcludes, req.Log)
	if err != nil {
		return Result{}, Result{Velero: []string{err.Error()}}
	}

	// get namespace includes-excludes
	namespaceIncludesExcludes := collections.NewIncludesExcludes().
		Includes(req.Restore.Spec.IncludedNamespaces...).
		Excludes(req.Restore.Spec.ExcludedNamespaces...)

	resolvedActions, err := resolveActions(actions, kr.discoveryHelper)
	if err != nil {
		return Result{}, Result{Velero: []string{err.Error()}}
	}

	podVolumeTimeout := kr.resticTimeout
	if val := req.Restore.Annotations[velerov1api.PodVolumeOperationTimeoutAnnotation]; val != "" {
		parsed, err := time.ParseDuration(val)
		if err != nil {
			req.Log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val)
		} else {
			podVolumeTimeout = parsed
		}
	}

	ctx, cancelFunc := go_context.WithTimeout(go_context.Background(), podVolumeTimeout)
	defer cancelFunc()

	var resticRestorer restic.Restorer
	if kr.resticRestorerFactory != nil {
		resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, req.Restore)
		if err != nil {
			return Result{}, Result{Velero: []string{err.Error()}}
		}
	}

	pvRestorer := &pvRestorer{
		logger:                  req.Log,
		backup:                  req.Backup,
		snapshotVolumes:         req.Backup.Spec.SnapshotVolumes,
		restorePVs:              req.Restore.Spec.RestorePVs,
		volumeSnapshots:         req.VolumeSnapshots,
		volumeSnapshotterGetter: volumeSnapshotterGetter,
		snapshotLocationLister:  snapshotLocationLister,
	}

	restoreCtx := &context{
		backup:                     req.Backup,
		backupReader:               req.BackupReader,
		restore:                    req.Restore,
		resourceIncludesExcludes:   resourceIncludesExcludes,
		namespaceIncludesExcludes:  namespaceIncludesExcludes,
		prioritizedResources:       prioritizedResources,
		selector:                   selector,
		log:                        req.Log,
		dynamicFactory:             kr.dynamicFactory,
		fileSystem:                 kr.fileSystem,
		namespaceClient:            kr.namespaceClient,
		actions:                    resolvedActions,
		volumeSnapshotterGetter:    volumeSnapshotterGetter,
		resticRestorer:             resticRestorer,
		pvsToProvision:             sets.NewString(),
		pvRestorer:                 pvRestorer,
		volumeSnapshots:            req.VolumeSnapshots,
		podVolumeBackups:           req.PodVolumeBackups,
		resourceTerminatingTimeout: kr.resourceTerminatingTimeout,
		resourceClients:            make(map[resourceClientKey]client.Dynamic),
		restoredItems:              make(map[velero.ResourceIdentifier]struct{}),
		renamedPVs:                 make(map[string]string),
		pvRenamer:                  kr.pvRenamer,
	}

	return restoreCtx.execute()
}

// getResourceIncludesExcludes takes the lists of resources to include and exclude, uses the
// discovery helper to resolve them to fully-qualified group-resource names, and returns an
// IncludesExcludes list.
func getResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string) *collections.IncludesExcludes {
	resources := collections.GenerateIncludesExcludes(
		includes,
		excludes,
		func(item string) string {
			gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion(""))
			if err != nil {
				return ""
			}

			gr := gvr.GroupResource()
			return gr.String()
		},
	)

	return resources
}
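
// Illustrative sketch (assumed identifier: helper): shorthand names are resolved via
// discovery before filtering, so an include of "deployments" is expected to match the
// fully-qualified group-resource string, e.g.
//
//	ie := getResourceIncludesExcludes(helper, []string{"deployments"}, nil)
//	included := ie.ShouldInclude("deployments.apps") // true, assuming discovery resolves "deployments" to the apps group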

// resolvedAction pairs a RestoreItemAction with the resolved resource and namespace
// includes-excludes and label selector from its AppliesTo() result.
type resolvedAction struct {
	velero.RestoreItemAction

	resourceIncludesExcludes  *collections.IncludesExcludes
	namespaceIncludesExcludes *collections.IncludesExcludes
	selector                  labels.Selector
}

func resolveActions(actions []velero.RestoreItemAction, helper discovery.Helper) ([]resolvedAction, error) {
	var resolved []resolvedAction

	for _, action := range actions {
		resourceSelector, err := action.AppliesTo()
		if err != nil {
			return nil, err
		}

		resources := getResourceIncludesExcludes(helper, resourceSelector.IncludedResources, resourceSelector.ExcludedResources)
		namespaces := collections.NewIncludesExcludes().Includes(resourceSelector.IncludedNamespaces...).Excludes(resourceSelector.ExcludedNamespaces...)

		selector := labels.Everything()
		if resourceSelector.LabelSelector != "" {
			if selector, err = labels.Parse(resourceSelector.LabelSelector); err != nil {
				return nil, err
			}
		}

		res := resolvedAction{
			RestoreItemAction:         action,
			resourceIncludesExcludes:  resources,
			namespaceIncludesExcludes: namespaces,
			selector:                  selector,
		}

		resolved = append(resolved, res)
	}

	return resolved, nil
}

type context struct {
	backup                     *velerov1api.Backup
	backupReader               io.Reader
	restore                    *velerov1api.Restore
	restoreDir                 string
	resourceIncludesExcludes   *collections.IncludesExcludes
	namespaceIncludesExcludes  *collections.IncludesExcludes
	prioritizedResources       []schema.GroupResource
	selector                   labels.Selector
	log                        logrus.FieldLogger
	dynamicFactory             client.DynamicFactory
	fileSystem                 filesystem.Interface
	namespaceClient            corev1.NamespaceInterface
	actions                    []resolvedAction
	volumeSnapshotterGetter    VolumeSnapshotterGetter
	resticRestorer             restic.Restorer
	globalWaitGroup            velerosync.ErrorGroup
	pvsToProvision             sets.String
	pvRestorer                 PVRestorer
	volumeSnapshots            []*volume.Snapshot
	podVolumeBackups           []*velerov1api.PodVolumeBackup
	resourceTerminatingTimeout time.Duration
	resourceClients            map[resourceClientKey]client.Dynamic
	restoredItems              map[velero.ResourceIdentifier]struct{}
	renamedPVs                 map[string]string
	pvRenamer                  func(string) string
}

type resourceClientKey struct {
	resource  schema.GroupResource
	namespace string
}

func (ctx *context) execute() (Result, Result) {
	warnings, errs := Result{}, Result{}

	ctx.log.Infof("Starting restore of backup %s", kube.NamespaceAndName(ctx.backup))

	dir, err := archive.NewExtractor(ctx.log, ctx.fileSystem).UnzipAndExtractBackup(ctx.backupReader)
	if err != nil {
		ctx.log.Infof("error unzipping and extracting: %v", err)
		addVeleroError(&errs, err)
		return warnings, errs
	}
	defer ctx.fileSystem.RemoveAll(dir)

	// need to set this for additionalItems to be restored
	ctx.restoreDir = dir

	backupResources, err := archive.NewParser(ctx.log, ctx.fileSystem).Parse(ctx.restoreDir)
	if err != nil {
		addVeleroError(&errs, errors.Wrap(err, "error parsing backup contents"))
		return warnings, errs
	}

	existingNamespaces := sets.NewString()

	for _, resource := range ctx.prioritizedResources {
		// we don't want to explicitly restore namespace API objs because we'll handle
		// them as a special case prior to restoring anything into them
		if resource == kuberesource.Namespaces {
			continue
		}

		resourceList := backupResources[resource.String()]
		if resourceList == nil {
			continue
		}

		for namespace, items := range resourceList.ItemsByNamespace {
			if namespace != "" && !ctx.namespaceIncludesExcludes.ShouldInclude(namespace) {
				ctx.log.Infof("Skipping namespace %s", namespace)
				continue
			}

			// get target namespace to restore into, if different
			// from source namespace
			targetNamespace := namespace
			if target, ok := ctx.restore.Spec.NamespaceMapping[namespace]; ok {
				targetNamespace = target
			}

			// if we don't know whether this namespace exists yet, attempt to create
			// it in order to ensure it exists. Try to get it from the backup tarball
			// (in order to get any backed-up metadata), but if we don't find it there,
			// create a blank one.
			if namespace != "" && !existingNamespaces.Has(targetNamespace) {
				logger := ctx.log.WithField("namespace", namespace)
				ns := getNamespace(logger, getItemFilePath(ctx.restoreDir, "namespaces", "", namespace), targetNamespace)
				if _, err := kube.EnsureNamespaceExistsAndIsReady(ns, ctx.namespaceClient, ctx.resourceTerminatingTimeout); err != nil {
					addVeleroError(&errs, err)
					continue
				}

				// keep track of namespaces that we know exist so we don't
				// have to try to create them multiple times
				existingNamespaces.Insert(targetNamespace)
			}

			w, e := ctx.restoreResource(resource.String(), targetNamespace, namespace, items)
			merge(&warnings, &w)
			merge(&errs, &e)
		}
	}

	// TODO timeout?
	ctx.log.Debug("Waiting on global wait group")
	waitErrs := ctx.globalWaitGroup.Wait()
	ctx.log.Debug("Done waiting on global wait group")

	for _, err := range waitErrs {
		// TODO not ideal to be adding these to Velero-level errors
		// rather than a specific namespace, but don't have a way
		// to track the namespace right now.
		errs.Velero = append(errs.Velero, err.Error())
	}

	return warnings, errs
}

func getItemFilePath(rootDir, groupResource, namespace, name string) string {
	switch namespace {
	case "":
		return filepath.Join(rootDir, velerov1api.ResourcesDir, groupResource, velerov1api.ClusterScopedDir, name+".json")
	default:
		return filepath.Join(rootDir, velerov1api.ResourcesDir, groupResource, velerov1api.NamespaceScopedDir, namespace, name+".json")
	}
}
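
// For example (a sketch, assuming the velerov1api directory constants resolve to
// "resources", "cluster", and "namespaces" respectively):
//
//	getItemFilePath("/tmp/backup123", "persistentvolumes", "", "pv-1")
//	// => "/tmp/backup123/resources/persistentvolumes/cluster/pv-1.json"
//
//	getItemFilePath("/tmp/backup123", "pods", "default", "nginx")
//	// => "/tmp/backup123/resources/pods/namespaces/default/nginx.json"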

// getNamespace returns a namespace API object that we should attempt to
// create before restoring anything into it. It will come from the backup
// tarball if it exists, else will be a new one. If from the tarball, it
// will retain its labels, annotations, and spec.
func getNamespace(logger logrus.FieldLogger, path, remappedName string) *v1.Namespace {
	var nsBytes []byte
	var err error

	if nsBytes, err = ioutil.ReadFile(path); err != nil {
		return &v1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: remappedName,
			},
		}
	}

	var backupNS v1.Namespace
	if err := json.Unmarshal(nsBytes, &backupNS); err != nil {
		logger.Warnf("Error unmarshalling namespace from backup, creating new one.")
		return &v1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: remappedName,
			},
		}
	}

	return &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:        remappedName,
			Labels:      backupNS.Labels,
			Annotations: backupNS.Annotations,
		},
		Spec: backupNS.Spec,
	}
}

// merge combines two Result objects into one
// by appending the corresponding lists to one another.
func merge(a, b *Result) {
	a.Cluster = append(a.Cluster, b.Cluster...)
	a.Velero = append(a.Velero, b.Velero...)
	for k, v := range b.Namespaces {
		if a.Namespaces == nil {
			a.Namespaces = make(map[string][]string)
		}
		a.Namespaces[k] = append(a.Namespaces[k], v...)
	}
}

// addVeleroError appends an error to the provided Result's Velero list.
func addVeleroError(r *Result, err error) {
	r.Velero = append(r.Velero, err.Error())
}

// addToResult appends an error to the provided Result, either within
// the cluster-scoped list (if ns == "") or within the provided namespace's
// entry.
func addToResult(r *Result, ns string, e error) {
	if ns == "" {
		r.Cluster = append(r.Cluster, e.Error())
	} else {
		if r.Namespaces == nil {
			r.Namespaces = make(map[string][]string)
		}
		r.Namespaces[ns] = append(r.Namespaces[ns], e.Error())
	}
}

func (ctx *context) getApplicableActions(groupResource schema.GroupResource, namespace string) []resolvedAction {
	var actions []resolvedAction
	for _, action := range ctx.actions {
		if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
			continue
		}

		if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) {
			continue
		}

		if namespace == "" && !action.namespaceIncludesExcludes.IncludeEverything() {
			continue
		}

		actions = append(actions, action)
	}

	return actions
}

// shouldRestore waits, up to resourceTerminatingTimeout, for any in-cluster copy of the named
// PV (and any PVC or namespace still referencing it) to finish deleting, and returns true only
// if the PV is no longer present in the cluster.
func (ctx *context) shouldRestore(name string, pvClient client.Dynamic) (bool, error) {
	pvLogger := ctx.log.WithField("pvName", name)

	var shouldRestore bool
	err := wait.PollImmediate(time.Second, ctx.resourceTerminatingTimeout, func() (bool, error) {
		unstructuredPV, err := pvClient.Get(name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			pvLogger.Debug("PV not found, safe to restore")
			// PV not found, can safely exit loop and proceed with restore.
			shouldRestore = true
			return true, nil
		}
		if err != nil {
			return false, errors.Wrapf(err, "could not retrieve in-cluster copy of PV %s", name)
		}

		clusterPV := new(v1.PersistentVolume)
		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, clusterPV); err != nil {
			return false, errors.Wrap(err, "error converting PV from unstructured")
		}

		if clusterPV.Status.Phase == v1.VolumeReleased || clusterPV.DeletionTimestamp != nil {
			// PV was found and marked for deletion, or it was released; wait for it to go away.
			pvLogger.Debugf("PV found, but marked for deletion, waiting")
			return false, nil
		}

		// Check for the namespace and PVC to see if anything that's referencing the PV is deleting.
		// If either the namespace or PVC is in a deleting/terminating state, wait for them to finish
		// before trying to restore the PV.
		// Not doing so may result in the underlying PV disappearing but not restoring due to timing issues,
		// then the PVC getting restored and showing as lost.
		if clusterPV.Spec.ClaimRef == nil {
			pvLogger.Debugf("PV is not marked for deletion and is not claimed by a PVC")
			return true, nil
		}

		namespace := clusterPV.Spec.ClaimRef.Namespace
		pvcName := clusterPV.Spec.ClaimRef.Name

		// Have to create the PVC client here because we don't know what namespace we're using
		// until we get to this point.
		// Using a dynamic client since it's easier to mock for testing.
		pvcResource := metav1.APIResource{Name: "persistentvolumeclaims", Namespaced: true}
		pvcClient, err := ctx.dynamicFactory.ClientForGroupVersionResource(schema.GroupVersion{Group: "", Version: "v1"}, pvcResource, namespace)
		if err != nil {
			return false, errors.Wrapf(err, "error getting pvc client")
		}

		pvc, err := pvcClient.Get(pvcName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			pvLogger.Debugf("PVC %s for PV not found, waiting", pvcName)
			// PVC wasn't found, but the PV still exists, so continue to wait.
			return false, nil
		}
		if err != nil {
			return false, errors.Wrapf(err, "error getting claim %s for persistent volume", pvcName)
		}

		if pvc != nil && pvc.GetDeletionTimestamp() != nil {
			pvLogger.Debugf("PVC for PV marked for deletion, waiting")
			// PVC is still deleting, continue to wait.
			return false, nil
		}

		// Check the namespace associated with the claimRef to see if it's deleting/terminating before proceeding.
		ns, err := ctx.namespaceClient.Get(namespace, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			pvLogger.Debugf("namespace %s for PV not found, waiting", namespace)
			// namespace not found but the PV still exists, so continue to wait
			return false, nil
		}
		if err != nil {
			return false, errors.Wrapf(err, "error getting namespace %s associated with PV %s", namespace, name)
		}

		if ns != nil && (ns.GetDeletionTimestamp() != nil || ns.Status.Phase == v1.NamespaceTerminating) {
			pvLogger.Debugf("namespace %s associated with PV is deleting, waiting", namespace)
			// namespace is in the process of deleting, keep looping
			return false, nil
		}

		// None of the PV, PVC, or NS are marked for deletion, break the loop.
		pvLogger.Debug("PV, associated PVC and namespace are not marked for deletion")
		return true, nil
	})

	if err == wait.ErrWaitTimeout {
		pvLogger.Debug("timeout reached waiting for persistent volume to delete")
	}

	return shouldRestore, err
}

// restoreResource restores the specified cluster or namespace scoped resource. If namespace is
// empty we are restoring a cluster level resource, otherwise into the specified namespace.
func (ctx *context) restoreResource(resource, targetNamespace, originalNamespace string, items []string) (Result, Result) {
	warnings, errs := Result{}, Result{}

	if targetNamespace == "" && boolptr.IsSetToFalse(ctx.restore.Spec.IncludeClusterResources) {
		ctx.log.Infof("Skipping resource %s because it's cluster-scoped", resource)
		return warnings, errs
	}

	if targetNamespace != "" {
		ctx.log.Infof("Restoring resource '%s' into namespace '%s'", resource, targetNamespace)
	} else {
		ctx.log.Infof("Restoring cluster level resource '%s'", resource)
	}

	if len(items) == 0 {
		return warnings, errs
	}

	groupResource := schema.ParseGroupResource(resource)

	for _, item := range items {
		itemPath := getItemFilePath(ctx.restoreDir, resource, originalNamespace, item)

		obj, err := ctx.unmarshal(itemPath)
		if err != nil {
			addToResult(&errs, targetNamespace, fmt.Errorf("error decoding %q: %v", strings.Replace(itemPath, ctx.restoreDir+"/", "", -1), err))
			continue
		}

		if !ctx.selector.Matches(labels.Set(obj.GetLabels())) {
			continue
		}

		w, e := ctx.restoreItem(obj, groupResource, targetNamespace)
		merge(&warnings, &w)
		merge(&errs, &e)
	}

	return warnings, errs
}

func (ctx *context) getResourceClient(groupResource schema.GroupResource, obj *unstructured.Unstructured, namespace string) (client.Dynamic, error) {
	key := resourceClientKey{
		resource:  groupResource,
		namespace: namespace,
	}

	if client, ok := ctx.resourceClients[key]; ok {
		return client, nil
	}

	// initialize client for this Resource. we need
	// metadata from an object to do this.
	ctx.log.Infof("Getting client for %v", obj.GroupVersionKind())

	resource := metav1.APIResource{
		Namespaced: len(namespace) > 0,
		Name:       groupResource.Resource,
	}

	client, err := ctx.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace)
	if err != nil {
		return nil, err
	}

	ctx.resourceClients[key] = client
	return client, nil
}

func getResourceID(groupResource schema.GroupResource, namespace, name string) string {
	if namespace == "" {
		return fmt.Sprintf("%s/%s", groupResource.String(), name)
	}

	return fmt.Sprintf("%s/%s/%s", groupResource.String(), namespace, name)
}
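
// For example:
//
//	getResourceID(schema.GroupResource{Group: "apps", Resource: "deployments"}, "default", "nginx")
//	// => "deployments.apps/default/nginx"
//
//	getResourceID(schema.GroupResource{Resource: "persistentvolumes"}, "", "pv-1")
//	// => "persistentvolumes/pv-1"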

func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (Result, Result) {
	warnings, errs := Result{}, Result{}
	resourceID := getResourceID(groupResource, namespace, obj.GetName())

	// Check if group/resource should be restored. We need to do this here since
	// this method may be getting called for an additional item which is a group/resource
	// that's excluded.
	if !ctx.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
		ctx.log.WithFields(logrus.Fields{
			"namespace":     obj.GetNamespace(),
			"name":          obj.GetName(),
			"groupResource": groupResource.String(),
		}).Info("Not restoring item because resource is excluded")
		return warnings, errs
	}

	// Check if namespace/cluster-scoped resource should be restored. We need
	// to do this here since this method may be getting called for an additional
	// item which is in a namespace that's excluded, or which is cluster-scoped
	// and should be excluded. Note that we're checking the object's namespace
	// (via obj.GetNamespace()) instead of the namespace parameter, because we want
	// to check the *original* namespace, not the remapped one if it's been remapped.
	if namespace != "" {
		if !ctx.namespaceIncludesExcludes.ShouldInclude(obj.GetNamespace()) {
			ctx.log.WithFields(logrus.Fields{
				"namespace":     obj.GetNamespace(),
				"name":          obj.GetName(),
				"groupResource": groupResource.String(),
			}).Info("Not restoring item because namespace is excluded")
			return warnings, errs
		}
	} else {
		if boolptr.IsSetToFalse(ctx.restore.Spec.IncludeClusterResources) {
			ctx.log.WithFields(logrus.Fields{
				"namespace":     obj.GetNamespace(),
				"name":          obj.GetName(),
				"groupResource": groupResource.String(),
			}).Info("Not restoring item because it's cluster-scoped")
			return warnings, errs
		}
	}

	// make a copy of object retrieved from backup
	// to make it available unchanged inside restore actions
	itemFromBackup := obj.DeepCopy()

	complete, err := isCompleted(obj, groupResource)
	if err != nil {
		addToResult(&errs, namespace, fmt.Errorf("error checking completion of %q: %v", resourceID, err))
		return warnings, errs
	}
	if complete {
		ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj))
		return warnings, errs
	}

	name := obj.GetName()

	// Check if we've already restored this
	itemKey := velero.ResourceIdentifier{
		GroupResource: groupResource,
		Namespace:     namespace,
		Name:          name,
	}
	if _, exists := ctx.restoredItems[itemKey]; exists {
		ctx.log.Infof("Skipping %s because it's already been restored.", resourceID)
		return warnings, errs
	}
	ctx.restoredItems[itemKey] = struct{}{}

	// TODO: move to restore item action if/when we add a ShouldRestore() method to the interface
	if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" {
		ctx.log.Infof("Not restoring pod because it's a mirror pod")
		return warnings, errs
	}

	resourceClient, err := ctx.getResourceClient(groupResource, obj, namespace)
	if err != nil {
		addVeleroError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
		return warnings, errs
	}

	if groupResource == kuberesource.PersistentVolumes {
		switch {
		case hasSnapshot(name, ctx.volumeSnapshots):
			shouldRenamePV, err := shouldRenamePV(ctx, obj, resourceClient)
			if err != nil {
				addToResult(&errs, namespace, err)
				return warnings, errs
			}

			var shouldRestoreSnapshot bool
			if !shouldRenamePV {
				// Check if the PV exists in the cluster before attempting to create
				// a volume from the snapshot, in order to avoid orphaned volumes (GH #609)
				shouldRestoreSnapshot, err = ctx.shouldRestore(name, resourceClient)
				if err != nil {
					addToResult(&errs, namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name))
					return warnings, errs
				}
			} else {
				// if we're renaming the PV, we're going to give it a new random name,
				// so we can assume it doesn't already exist in the cluster and therefore
				// we should proceed with restoring from snapshot.
				shouldRestoreSnapshot = true
			}

			if shouldRestoreSnapshot {
				// even if we're renaming the PV, obj still has the old name here, because the pvRestorer
				// uses the original name to look up metadata about the snapshot.
				ctx.log.Infof("Restoring persistent volume from snapshot.")
				updatedObj, err := ctx.pvRestorer.executePVAction(obj)
				if err != nil {
					addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
					return warnings, errs
				}
				obj = updatedObj
			}

			if shouldRenamePV {
				// give obj a new name, and record the mapping between the old and new names
				oldName := obj.GetName()
				newName := ctx.pvRenamer(oldName)

				ctx.renamedPVs[oldName] = newName
				obj.SetName(newName)

				// add the original PV name as an annotation
				annotations := obj.GetAnnotations()
				if annotations == nil {
					annotations = map[string]string{}
				}
				annotations["velero.io/original-pv-name"] = oldName
				obj.SetAnnotations(annotations)
			}

		case hasResticBackup(obj, ctx):
			ctx.log.Infof("Dynamically re-provisioning persistent volume because it has a restic backup to be restored.")
			ctx.pvsToProvision.Insert(name)

			// return early because we don't want to restore the PV itself, we want to dynamically re-provision it.
			return warnings, errs

		case hasDeleteReclaimPolicy(obj.Object):
			ctx.log.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.")
			ctx.pvsToProvision.Insert(name)

			// return early because we don't want to restore the PV itself, we want to dynamically re-provision it.
			return warnings, errs

		default:
			ctx.log.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.")

			// we call the pvRestorer here to clear out the PV's claimRef, so it can be re-claimed
			// when its PVC is restored.
			updatedObj, err := ctx.pvRestorer.executePVAction(obj)
			if err != nil {
				addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
				return warnings, errs
			}
			obj = updatedObj
		}
	}

	// clear out non-core metadata fields & status
	if obj, err = resetMetadataAndStatus(obj); err != nil {
		addToResult(&errs, namespace, err)
		return warnings, errs
	}

	for _, action := range ctx.getApplicableActions(groupResource, namespace) {
		if !action.selector.Matches(labels.Set(obj.GetLabels())) {
			return warnings, errs
		}

		ctx.log.Infof("Executing item action for %v", &groupResource)

		executeOutput, err := action.Execute(&velero.RestoreItemActionExecuteInput{
			Item:           obj,
			ItemFromBackup: itemFromBackup,
			Restore:        ctx.restore,
		})
		if err != nil {
			addToResult(&errs, namespace, fmt.Errorf("error preparing %s: %v", resourceID, err))
			return warnings, errs
		}

		if executeOutput.SkipRestore {
			ctx.log.Infof("Skipping restore of %s: %v because a registered plugin discarded it", obj.GroupVersionKind().Kind, name)
			return warnings, errs
		}

		unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured)
		if !ok {
			addToResult(&errs, namespace, fmt.Errorf("%s: unexpected type %T", resourceID, executeOutput.UpdatedItem))
			return warnings, errs
		}

		obj = unstructuredObj

		for _, additionalItem := range executeOutput.AdditionalItems {
			itemPath := getItemFilePath(ctx.restoreDir, additionalItem.GroupResource.String(), additionalItem.Namespace, additionalItem.Name)

			if _, err := ctx.fileSystem.Stat(itemPath); err != nil {
				ctx.log.WithError(err).WithFields(logrus.Fields{
					"additionalResource":          additionalItem.GroupResource.String(),
					"additionalResourceNamespace": additionalItem.Namespace,
					"additionalResourceName":      additionalItem.Name,
				}).Warn("unable to restore additional item")
				addToResult(&warnings, additionalItem.Namespace, err)

				continue
			}

			additionalResourceID := getResourceID(additionalItem.GroupResource, additionalItem.Namespace, additionalItem.Name)
			additionalObj, err := ctx.unmarshal(itemPath)
			if err != nil {
				addToResult(&errs, namespace, errors.Wrapf(err, "error restoring additional item %s", additionalResourceID))
			}

			additionalItemNamespace := additionalItem.Namespace
			if additionalItemNamespace != "" {
				if remapped, ok := ctx.restore.Spec.NamespaceMapping[additionalItemNamespace]; ok {
					additionalItemNamespace = remapped
				}
			}

			w, e := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace)
			merge(&warnings, &w)
			merge(&errs, &e)
		}
	}

	// This comes after running item actions because we have built-in actions that restore
	// a PVC's associated PV (if applicable). As part of the PV being restored, the 'pvsToProvision'
	// set may be inserted into, and this needs to happen *before* running the following block of logic.
	//
	// The side effect of this is that it's impossible for a user to write a restore item action that
	// adjusts this behavior (i.e. of resetting the PVC for dynamic provisioning if it claims a PV with
	// a reclaim policy of Delete and no snapshot). If/when that becomes an issue for users, we can
	// revisit. This would be easier with a multi-pass restore process.
	if groupResource == kuberesource.PersistentVolumeClaims {
		pvc := new(v1.PersistentVolumeClaim)
		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil {
			addToResult(&errs, namespace, err)
			return warnings, errs
		}

		if pvc.Spec.VolumeName != "" && ctx.pvsToProvision.Has(pvc.Spec.VolumeName) {
			ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning because its PV %v has a reclaim policy of Delete", namespace, name, pvc.Spec.VolumeName)

			// use the unstructured helpers here since we're only deleting and
			// the unstructured converter will add back (empty) fields for metadata
			// and status that we removed earlier.
			unstructured.RemoveNestedField(obj.Object, "spec", "volumeName")
			annotations := obj.GetAnnotations()
			delete(annotations, "pv.kubernetes.io/bind-completed")
			delete(annotations, "pv.kubernetes.io/bound-by-controller")
			obj.SetAnnotations(annotations)
		}

		if newName, ok := ctx.renamedPVs[pvc.Spec.VolumeName]; ok {
			ctx.log.Infof("Updating persistent volume claim %s/%s to reference renamed persistent volume (%s -> %s)", namespace, name, pvc.Spec.VolumeName, newName)
			if err := unstructured.SetNestedField(obj.Object, newName, "spec", "volumeName"); err != nil {
				addToResult(&errs, namespace, err)
				return warnings, errs
			}
		}
	}

	// necessary because we may have remapped the namespace
	// if the namespace is blank, don't create the key
	originalNamespace := obj.GetNamespace()
	if namespace != "" {
		obj.SetNamespace(namespace)
	}

	// label the resource with the restore's name and the restored backup's name
	// for easy identification of all cluster resources created by this restore
	// and which backup they came from
	addRestoreLabels(obj, ctx.restore.Name, ctx.restore.Spec.BackupName)

	ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name)
	createdObj, restoreErr := resourceClient.Create(obj)
	if apierrors.IsAlreadyExists(restoreErr) {
		fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
		if err != nil {
			ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
			addToResult(&warnings, namespace, err)
			return warnings, errs
		}
		// Remove insubstantial metadata
		fromCluster, err = resetMetadataAndStatus(fromCluster)
		if err != nil {
			ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
			addToResult(&warnings, namespace, err)
			return warnings, errs
		}

		// We know the object from the cluster won't have the backup/restore name labels, so
		// copy them from the object we attempted to restore.
		labels := obj.GetLabels()
		addRestoreLabels(fromCluster, labels[velerov1api.RestoreNameLabel], labels[velerov1api.BackupNameLabel])

		if !equality.Semantic.DeepEqual(fromCluster, obj) {
			switch groupResource {
			case kuberesource.ServiceAccounts:
				desired, err := mergeServiceAccounts(fromCluster, obj)
				if err != nil {
					ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
					addToResult(&warnings, namespace, err)
					return warnings, errs
				}

				patchBytes, err := generatePatch(fromCluster, desired)
				if err != nil {
					ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
					addToResult(&warnings, namespace, err)
					return warnings, errs
				}

				if patchBytes == nil {
					// In-cluster and desired state are the same, so move on to the next item
					return warnings, errs
				}

				_, err = resourceClient.Patch(name, patchBytes)
				if err != nil {
					addToResult(&warnings, namespace, err)
				} else {
					ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
				}
			default:
				e := errors.Errorf("could not restore, %s. Warning: the in-cluster version is different than the backed-up version.", restoreErr)
				addToResult(&warnings, namespace, e)
			}
			return warnings, errs
		}

		ctx.log.Infof("Restore of %s, %v skipped: it already exists in the cluster and is the same as the backed up version", obj.GroupVersionKind().Kind, name)
		return warnings, errs
	}

	// Error was something other than an AlreadyExists
	if restoreErr != nil {
		ctx.log.Infof("error restoring %s: %v", name, restoreErr)
		addToResult(&errs, namespace, fmt.Errorf("error restoring %s: %v", resourceID, restoreErr))
		return warnings, errs
	}

	if groupResource == kuberesource.Pods && len(restic.GetVolumeBackupsForPod(ctx.podVolumeBackups, obj)) > 0 {
		restorePodVolumeBackups(ctx, createdObj, originalNamespace)
	}

	return warnings, errs
}

// shouldRenamePV returns a boolean indicating whether a persistent volume should be given a new name
// before being restored, or an error if this cannot be determined. A persistent volume will be
// given a new name if and only if (a) a PV with the original name already exists in-cluster, and
// (b) in the backup, the PV is claimed by a PVC in a namespace that's being remapped during the
// restore.
func shouldRenamePV(ctx *context, obj *unstructured.Unstructured, client client.Dynamic) (bool, error) {
	if len(ctx.restore.Spec.NamespaceMapping) == 0 {
		ctx.log.Debugf("Persistent volume does not need to be renamed because restore is not remapping any namespaces")
		return false, nil
	}

	pv := new(v1.PersistentVolume)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, pv); err != nil {
		return false, errors.Wrapf(err, "error converting persistent volume to structured")
	}

	if pv.Spec.ClaimRef == nil {
		ctx.log.Debugf("Persistent volume does not need to be renamed because it's not claimed")
		return false, nil
	}

	if _, ok := ctx.restore.Spec.NamespaceMapping[pv.Spec.ClaimRef.Namespace]; !ok {
		ctx.log.Debugf("Persistent volume does not need to be renamed because it's not claimed by a PVC in a namespace that's being remapped")
		return false, nil
	}

	_, err := client.Get(pv.Name, metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		ctx.log.Debugf("Persistent volume does not need to be renamed because it does not exist in the cluster")
		return false, nil
	case err != nil:
		return false, errors.Wrapf(err, "error checking if persistent volume exists in the cluster")
	}

	// no error returned: the PV was found in-cluster, so we need to rename it
	return true, nil
}

// restorePodVolumeBackups restores the PodVolumeBackups for the given restored pod
func restorePodVolumeBackups(ctx *context, createdObj *unstructured.Unstructured, originalNamespace string) {
	if ctx.resticRestorer == nil {
		ctx.log.Warn("No restic restorer, not restoring pod's volumes")
	} else {
		ctx.globalWaitGroup.GoErrorSlice(func() []error {
			pod := new(v1.Pod)
			if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
				ctx.log.WithError(err).Error("error converting unstructured pod")
				return []error{err}
			}

			data := restic.RestoreData{
				Restore:          ctx.restore,
				Pod:              pod,
				PodVolumeBackups: ctx.podVolumeBackups,
				SourceNamespace:  originalNamespace,
				BackupLocation:   ctx.backup.Spec.StorageLocation,
			}
			if errs := ctx.resticRestorer.RestorePodVolumes(data); errs != nil {
				ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes")
				return errs
			}

			return nil
		})
	}
}

func hasSnapshot(pvName string, snapshots []*volume.Snapshot) bool {
	for _, snapshot := range snapshots {
		if snapshot.Spec.PersistentVolumeName == pvName {
			return true
		}
	}

	return false
}

func hasResticBackup(unstructuredPV *unstructured.Unstructured, ctx *context) bool {
	if len(ctx.podVolumeBackups) == 0 {
		return false
	}

	pv := new(v1.PersistentVolume)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, pv); err != nil {
		ctx.log.WithError(err).Warnf("Unable to convert PV from unstructured to structured")
		return false
	}

	if pv.Spec.ClaimRef == nil {
		return false
	}

	var found bool
	for _, pvb := range ctx.podVolumeBackups {
		if pvb.Spec.Pod.Namespace == pv.Spec.ClaimRef.Namespace && pvb.GetAnnotations()[restic.PVCNameAnnotation] == pv.Spec.ClaimRef.Name {
			found = true
			break
		}
	}

	return found
}

func hasDeleteReclaimPolicy(obj map[string]interface{}) bool {
	policy, _, _ := unstructured.NestedString(obj, "spec", "persistentVolumeReclaimPolicy")
	return policy == string(v1.PersistentVolumeReclaimDelete)
}

func resetMetadataAndStatus(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	res, ok := obj.Object["metadata"]
	if !ok {
		return nil, errors.New("metadata not found")
	}
	metadata, ok := res.(map[string]interface{})
	if !ok {
		return nil, errors.Errorf("metadata was of type %T, expected map[string]interface{}", res)
	}

	for k := range metadata {
		switch k {
		case "name", "namespace", "labels", "annotations":
		default:
			delete(metadata, k)
		}
	}

	// Never restore status
	delete(obj.UnstructuredContent(), "status")

	return obj, nil
}

// addRestoreLabels labels the provided object with the restore name and
// the restored backup's name.
func addRestoreLabels(obj metav1.Object, restoreName, backupName string) {
	labels := obj.GetLabels()
	if labels == nil {
		labels = make(map[string]string)
	}

	labels[velerov1api.BackupNameLabel] = label.GetValidName(backupName)
	labels[velerov1api.RestoreNameLabel] = label.GetValidName(restoreName)

	obj.SetLabels(labels)
}

// isCompleted returns whether or not an object is considered completed.
// Used to identify whether or not an object should be restored. Only Jobs or Pods are considered.
func isCompleted(obj *unstructured.Unstructured, groupResource schema.GroupResource) (bool, error) {
	switch groupResource {
	case kuberesource.Pods:
		phase, _, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "phase")
		if err != nil {
			return false, errors.WithStack(err)
		}
		if phase == string(v1.PodFailed) || phase == string(v1.PodSucceeded) {
			return true, nil
		}

	case kuberesource.Jobs:
		ct, found, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "completionTime")
		if err != nil {
			return false, errors.WithStack(err)
		}
		if found && ct != "" {
			return true, nil
		}
	}
	// Assume any other resource isn't complete and can be restored
	return false, nil
}
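
// Illustrative sketch (hypothetical podObj): a Pod whose status.phase is "Succeeded"
// is reported as completed and is therefore skipped by restoreItem:
//
//	done, err := isCompleted(podObj, kuberesource.Pods)
//	// done == true, err == nil
//
// Any group-resource other than Pods or Jobs always yields (false, nil).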

// unmarshal reads the specified file, unmarshals the JSON contained within it
// and returns an Unstructured object.
func (ctx *context) unmarshal(filePath string) (*unstructured.Unstructured, error) {
	var obj unstructured.Unstructured

	bytes, err := ctx.fileSystem.ReadFile(filePath)
	if err != nil {
		return nil, err
	}

	err = json.Unmarshal(bytes, &obj)
	if err != nil {
		return nil, err
	}

	return &obj, nil
}