/*
Copyright the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore

import (
    go_context "context"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "path/filepath"
    "sort"
    "strings"
    "sync"
    "time"

    uuid "github.com/gofrs/uuid"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/equality"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    kubeerrs "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/cache"

    "github.com/vmware-tanzu/velero/internal/hook"
    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
    "github.com/vmware-tanzu/velero/pkg/archive"
    "github.com/vmware-tanzu/velero/pkg/client"
    "github.com/vmware-tanzu/velero/pkg/discovery"
    "github.com/vmware-tanzu/velero/pkg/features"
    velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
    listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1"
    "github.com/vmware-tanzu/velero/pkg/kuberesource"
    "github.com/vmware-tanzu/velero/pkg/label"
    "github.com/vmware-tanzu/velero/pkg/plugin/velero"
    "github.com/vmware-tanzu/velero/pkg/podexec"
    "github.com/vmware-tanzu/velero/pkg/restic"
    "github.com/vmware-tanzu/velero/pkg/util/boolptr"
    "github.com/vmware-tanzu/velero/pkg/util/collections"
    "github.com/vmware-tanzu/velero/pkg/util/filesystem"
    "github.com/vmware-tanzu/velero/pkg/util/kube"
    "github.com/vmware-tanzu/velero/pkg/volume"
)
// These annotations are taken from the Kubernetes persistent volume/persistent volume claim controller.
// They cannot be directly imported because they are part of the kubernetes/kubernetes package, and importing that package is unsupported.
// Their values are well-known and slow-changing. They're duplicated here as constants to provide compile-time checking.
// Originals can be found in kubernetes/kubernetes/pkg/controller/volume/persistentvolume/util/util.go.
const (
    KubeAnnBindCompleted          = "pv.kubernetes.io/bind-completed"
    KubeAnnBoundByController      = "pv.kubernetes.io/bound-by-controller"
    KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
)
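
// An illustrative sketch of where these annotations show up: on a bound,
// dynamically provisioned PV they typically look like the following (the
// provisioner name here is hypothetical):
//
//	annotations := map[string]string{
//		KubeAnnBindCompleted:          "yes",
//		KubeAnnBoundByController:      "yes",
//		KubeAnnDynamicallyProvisioned: "example.csi.vendor.io",
//	}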

type VolumeSnapshotterGetter interface {
    GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error)
}

type Request struct {
    *velerov1api.Restore

    Log              logrus.FieldLogger
    Backup           *velerov1api.Backup
    PodVolumeBackups []*velerov1api.PodVolumeBackup
    VolumeSnapshots  []*volume.Snapshot
    BackupReader     io.Reader
}

// Restorer knows how to restore a backup.
type Restorer interface {
    // Restore restores the backup data from backupReader, returning warnings and errors.
    Restore(
        req Request,
        actions []velero.RestoreItemAction,
        snapshotLocationLister listers.VolumeSnapshotLocationLister,
        volumeSnapshotterGetter VolumeSnapshotterGetter,
    ) (Result, Result)
}

// kubernetesRestorer implements Restorer for restoring into a Kubernetes cluster.
type kubernetesRestorer struct {
    restoreClient              velerov1client.RestoresGetter
    discoveryHelper            discovery.Helper
    dynamicFactory             client.DynamicFactory
    namespaceClient            corev1.NamespaceInterface
    resticRestorerFactory      restic.RestorerFactory
    resticTimeout              time.Duration
    resourceTerminatingTimeout time.Duration
    resourcePriorities         []string
    fileSystem                 filesystem.Interface
    pvRenamer                  func(string) (string, error)
    logger                     logrus.FieldLogger
    podCommandExecutor         podexec.PodCommandExecutor
    podGetter                  cache.Getter
}

// NewKubernetesRestorer creates a new kubernetesRestorer.
func NewKubernetesRestorer(
    restoreClient velerov1client.RestoresGetter,
    discoveryHelper discovery.Helper,
    dynamicFactory client.DynamicFactory,
    resourcePriorities []string,
    namespaceClient corev1.NamespaceInterface,
    resticRestorerFactory restic.RestorerFactory,
    resticTimeout time.Duration,
    resourceTerminatingTimeout time.Duration,
    logger logrus.FieldLogger,
    podCommandExecutor podexec.PodCommandExecutor,
    podGetter cache.Getter,
) (Restorer, error) {
    return &kubernetesRestorer{
        restoreClient:              restoreClient,
        discoveryHelper:            discoveryHelper,
        dynamicFactory:             dynamicFactory,
        namespaceClient:            namespaceClient,
        resticRestorerFactory:      resticRestorerFactory,
        resticTimeout:              resticTimeout,
        resourceTerminatingTimeout: resourceTerminatingTimeout,
        resourcePriorities:         resourcePriorities,
        logger:                     logger,
        pvRenamer: func(string) (string, error) {
            veleroCloneUuid, err := uuid.NewV4()
            if err != nil {
                return "", errors.WithStack(err)
            }
            veleroCloneName := "velero-clone-" + veleroCloneUuid.String()
            return veleroCloneName, nil
        },
        fileSystem:         filesystem.NewFileSystem(),
        podCommandExecutor: podCommandExecutor,
        podGetter:          podGetter,
    }, nil
}
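
// A minimal construction sketch (assumed wiring; in Velero proper, the server
// assembles these dependencies from its own config, and the priority list and
// timeouts below are illustrative values only):
//
//	restorer, err := NewKubernetesRestorer(
//		veleroClient.VeleroV1(), discoveryHelper, dynamicFactory,
//		[]string{"customresourcedefinitions", "namespaces", "persistentvolumes", "persistentvolumeclaims"},
//		kubeClient.CoreV1().Namespaces(), resticRestorerFactory,
//		4*time.Hour, 10*time.Minute, logger, podCommandExecutor, podGetter,
//	)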

// Restore executes a restore into the target Kubernetes cluster according to the restore spec
// and using data from the provided backup/backup reader. It returns a warnings Result and an
// errors Result, respectively, summarizing info about the restore.
func (kr *kubernetesRestorer) Restore(
    req Request,
    actions []velero.RestoreItemAction,
    snapshotLocationLister listers.VolumeSnapshotLocationLister,
    volumeSnapshotterGetter VolumeSnapshotterGetter,
) (Result, Result) {
    // metav1.LabelSelectorAsSelector converts a nil LabelSelector to a
    // Nothing Selector, i.e. a selector that matches nothing. We want
    // a selector that matches everything. This can be accomplished by
    // passing a non-nil empty LabelSelector.
    ls := req.Restore.Spec.LabelSelector
    if ls == nil {
        ls = &metav1.LabelSelector{}
    }

    selector, err := metav1.LabelSelectorAsSelector(ls)
    if err != nil {
        return Result{}, Result{Velero: []string{err.Error()}}
    }
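
    // Concretely: metav1.LabelSelectorAsSelector(nil) yields labels.Nothing(),
    // while passing &metav1.LabelSelector{} yields labels.Everything(), which
    // is the match-all behavior we want here.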

    // Get resource includes-excludes.
    resourceIncludesExcludes := collections.GetResourceIncludesExcludes(
        kr.discoveryHelper,
        req.Restore.Spec.IncludedResources,
        req.Restore.Spec.ExcludedResources,
    )

    // Get namespace includes-excludes.
    namespaceIncludesExcludes := collections.NewIncludesExcludes().
        Includes(req.Restore.Spec.IncludedNamespaces...).
        Excludes(req.Restore.Spec.ExcludedNamespaces...)

    resolvedActions, err := resolveActions(actions, kr.discoveryHelper)
    if err != nil {
        return Result{}, Result{Velero: []string{err.Error()}}
    }

    podVolumeTimeout := kr.resticTimeout
    if val := req.Restore.Annotations[velerov1api.PodVolumeOperationTimeoutAnnotation]; val != "" {
        parsed, err := time.ParseDuration(val)
        if err != nil {
            req.Log.WithError(errors.WithStack(err)).Errorf(
                "Unable to parse pod volume timeout annotation %s, using server value.",
                val,
            )
        } else {
            podVolumeTimeout = parsed
        }
    }
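
    // For example, setting the velerov1api.PodVolumeOperationTimeoutAnnotation
    // annotation on a restore to "30m" overrides kr.resticTimeout for that
    // restore only; any duration string accepted by time.ParseDuration works.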

    ctx, cancelFunc := go_context.WithTimeout(go_context.Background(), podVolumeTimeout)
    defer cancelFunc()

    var resticRestorer restic.Restorer
    if kr.resticRestorerFactory != nil {
        resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, req.Restore)
        if err != nil {
            return Result{}, Result{Velero: []string{err.Error()}}
        }
    }

    resourceRestoreHooks, err := hook.GetRestoreHooksFromSpec(&req.Restore.Spec.Hooks)
    if err != nil {
        return Result{}, Result{Velero: []string{err.Error()}}
    }
    hooksCtx, hooksCancelFunc := go_context.WithCancel(go_context.Background())
    waitExecHookHandler := &hook.DefaultWaitExecHookHandler{
        PodCommandExecutor: kr.podCommandExecutor,
        ListWatchFactory: &hook.DefaultListWatchFactory{
            PodsGetter: kr.podGetter,
        },
    }

    pvRestorer := &pvRestorer{
        logger:                  req.Log,
        backup:                  req.Backup,
        snapshotVolumes:         req.Backup.Spec.SnapshotVolumes,
        restorePVs:              req.Restore.Spec.RestorePVs,
        volumeSnapshots:         req.VolumeSnapshots,
        volumeSnapshotterGetter: volumeSnapshotterGetter,
        snapshotLocationLister:  snapshotLocationLister,
    }

    restoreCtx := &restoreContext{
        backup:                     req.Backup,
        backupReader:               req.BackupReader,
        restore:                    req.Restore,
        resourceIncludesExcludes:   resourceIncludesExcludes,
        namespaceIncludesExcludes:  namespaceIncludesExcludes,
        chosenGrpVersToRestore:     make(map[string]ChosenGroupVersion),
        selector:                   selector,
        log:                        req.Log,
        dynamicFactory:             kr.dynamicFactory,
        fileSystem:                 kr.fileSystem,
        namespaceClient:            kr.namespaceClient,
        actions:                    resolvedActions,
        volumeSnapshotterGetter:    volumeSnapshotterGetter,
        resticRestorer:             resticRestorer,
        resticErrs:                 make(chan error),
        pvsToProvision:             sets.NewString(),
        pvRestorer:                 pvRestorer,
        volumeSnapshots:            req.VolumeSnapshots,
        podVolumeBackups:           req.PodVolumeBackups,
        resourceTerminatingTimeout: kr.resourceTerminatingTimeout,
        resourceClients:            make(map[resourceClientKey]client.Dynamic),
        restoredItems:              make(map[velero.ResourceIdentifier]struct{}),
        renamedPVs:                 make(map[string]string),
        pvRenamer:                  kr.pvRenamer,
        discoveryHelper:            kr.discoveryHelper,
        resourcePriorities:         kr.resourcePriorities,
        resourceRestoreHooks:       resourceRestoreHooks,
        hooksErrs:                  make(chan error),
        waitExecHookHandler:        waitExecHookHandler,
        hooksContext:               hooksCtx,
        hooksCancelFunc:            hooksCancelFunc,
        restoreClient:              kr.restoreClient,
    }

    return restoreCtx.execute()
}

type resolvedAction struct {
    velero.RestoreItemAction

    resourceIncludesExcludes  *collections.IncludesExcludes
    namespaceIncludesExcludes *collections.IncludesExcludes
    selector                  labels.Selector
}

func resolveActions(actions []velero.RestoreItemAction, helper discovery.Helper) ([]resolvedAction, error) {
    var resolved []resolvedAction

    for _, action := range actions {
        resourceSelector, err := action.AppliesTo()
        if err != nil {
            return nil, err
        }

        resources := collections.GetResourceIncludesExcludes(helper, resourceSelector.IncludedResources, resourceSelector.ExcludedResources)
        namespaces := collections.NewIncludesExcludes().Includes(resourceSelector.IncludedNamespaces...).Excludes(resourceSelector.ExcludedNamespaces...)

        selector := labels.Everything()
        if resourceSelector.LabelSelector != "" {
            if selector, err = labels.Parse(resourceSelector.LabelSelector); err != nil {
                return nil, err
            }
        }

        res := resolvedAction{
            RestoreItemAction:         action,
            resourceIncludesExcludes:  resources,
            namespaceIncludesExcludes: namespaces,
            selector:                  selector,
        }

        resolved = append(resolved, res)
    }

    return resolved, nil
}
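
// For example, an action whose AppliesTo returns
// IncludedResources: []string{"pods"} and LabelSelector: "app=nginx"
// resolves to a resolvedAction that is consulted only for pods labeled
// app=nginx.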

type restoreContext struct {
    backup                     *velerov1api.Backup
    backupReader               io.Reader
    restore                    *velerov1api.Restore
    restoreDir                 string
    restoreClient              velerov1client.RestoresGetter
    resourceIncludesExcludes   *collections.IncludesExcludes
    namespaceIncludesExcludes  *collections.IncludesExcludes
    chosenGrpVersToRestore     map[string]ChosenGroupVersion
    selector                   labels.Selector
    log                        logrus.FieldLogger
    dynamicFactory             client.DynamicFactory
    fileSystem                 filesystem.Interface
    namespaceClient            corev1.NamespaceInterface
    actions                    []resolvedAction
    volumeSnapshotterGetter    VolumeSnapshotterGetter
    resticRestorer             restic.Restorer
    resticWaitGroup            sync.WaitGroup
    resticErrs                 chan error
    pvsToProvision             sets.String
    pvRestorer                 PVRestorer
    volumeSnapshots            []*volume.Snapshot
    podVolumeBackups           []*velerov1api.PodVolumeBackup
    resourceTerminatingTimeout time.Duration
    resourceClients            map[resourceClientKey]client.Dynamic
    restoredItems              map[velero.ResourceIdentifier]struct{}
    renamedPVs                 map[string]string
    pvRenamer                  func(string) (string, error)
    discoveryHelper            discovery.Helper
    resourcePriorities         []string
    hooksWaitGroup             sync.WaitGroup
    hooksErrs                  chan error
    resourceRestoreHooks       []hook.ResourceRestoreHook
    waitExecHookHandler        hook.WaitExecHookHandler
    hooksContext               go_context.Context
    hooksCancelFunc            go_context.CancelFunc
}

type resourceClientKey struct {
    resource  schema.GroupVersionResource
    namespace string
}

// getOrderedResources returns an ordered list of resource identifiers to restore,
// based on the provided resource priorities and backup contents. The returned list
// begins with all of the prioritized resources (in order), and appends to that
// an alphabetized list of all resources in the backup.
func getOrderedResources(resourcePriorities []string, backupResources map[string]*archive.ResourceItems) []string {
    // Alphabetize resources in the backup.
    orderedBackupResources := make([]string, 0, len(backupResources))
    for resource := range backupResources {
        orderedBackupResources = append(orderedBackupResources, resource)
    }
    sort.Strings(orderedBackupResources)

    // Main list: everything in resource priorities, followed by what's in the
    // backup (alphabetized).
    return append(resourcePriorities, orderedBackupResources...)
}
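
// A worked example (hypothetical inputs): with priorities
// ["customresourcedefinitions", "namespaces"] and a backup containing
// {"pods", "namespaces", "deployments.apps"}, the result is
// ["customresourcedefinitions", "namespaces", "deployments.apps", "namespaces", "pods"].
// Duplicates are harmless: items that have already been restored are skipped
// via ctx.restoredItems.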

func (ctx *restoreContext) execute() (Result, Result) {
    warnings, errs := Result{}, Result{}

    ctx.log.Infof("Starting restore of backup %s", kube.NamespaceAndName(ctx.backup))

    dir, err := archive.NewExtractor(ctx.log, ctx.fileSystem).UnzipAndExtractBackup(ctx.backupReader)
    if err != nil {
        ctx.log.Infof("error unzipping and extracting: %v", err)
        errs.AddVeleroError(err)
        return warnings, errs
    }
    defer ctx.fileSystem.RemoveAll(dir)

    // Need to set this for additionalItems to be restored.
    ctx.restoreDir = dir

    backupResources, err := archive.NewParser(ctx.log, ctx.fileSystem).Parse(ctx.restoreDir)
    if err != nil {
        errs.AddVeleroError(errors.Wrap(err, "error parsing backup contents"))
        return warnings, errs
    }

    // TODO: Remove outer feature flag check to make this feature a default in Velero.
    if features.IsEnabled(velerov1api.APIGroupVersionsFeatureFlag) {
        if ctx.backup.Status.FormatVersion >= "1.1.0" {
            if err := ctx.chooseAPIVersionsToRestore(); err != nil {
                errs.AddVeleroError(errors.Wrap(err, "choosing API version to restore"))
                return warnings, errs
            }
        }
    }

    selectedResourceCollection, w, e := ctx.getOrderedResourceCollection(backupResources)
    warnings.Merge(&w)
    errs.Merge(&e)

    type progressUpdate struct {
        totalItems, itemsRestored int
    }

    update := make(chan progressUpdate)

    quit := make(chan struct{})

    go func() {
        ticker := time.NewTicker(1 * time.Second)
        var lastUpdate *progressUpdate
        for {
            select {
            case <-quit:
                ticker.Stop()
                return
            case val := <-update:
                lastUpdate = &val
            case <-ticker.C:
                if lastUpdate != nil {
                    patch := fmt.Sprintf(
                        `{"status":{"progress":{"totalItems":%d,"itemsRestored":%d}}}`,
                        lastUpdate.totalItems,
                        lastUpdate.itemsRestored,
                    )
                    _, err := ctx.restoreClient.Restores(ctx.restore.Namespace).Patch(
                        go_context.TODO(),
                        ctx.restore.Name,
                        types.MergePatchType,
                        []byte(patch),
                        metav1.PatchOptions{},
                    )
                    if err != nil {
                        ctx.log.WithError(errors.WithStack(err)).
                            Warn("Got error trying to update restore's status.progress")
                    }
                    lastUpdate = nil
                }
            }
        }
    }()
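
    // Design note: progress updates are coalesced. Writers block on the
    // unbuffered update channel, the ticker flushes at most one patch per
    // second, and lastUpdate is cleared after each flush so identical
    // progress isn't re-patched.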

    // totalItems: previously discovered items, i: iteration counter.
    totalItems, i, existingNamespaces := 0, 0, sets.NewString()

    for _, selectedResource := range selectedResourceCollection {
        totalItems += selectedResource.totalItems
    }

    for _, selectedResource := range selectedResourceCollection {
        groupResource := schema.ParseGroupResource(selectedResource.resource)

        for namespace, selectedItems := range selectedResource.selectedItemsByNamespace {
            for _, selectedItem := range selectedItems {
                // If we don't know whether this namespace exists yet, attempt to create
                // it in order to ensure it exists. Try to get it from the backup tarball
                // (in order to get any backed-up metadata), but if we don't find it there,
                // create a blank one.
                if namespace != "" && !existingNamespaces.Has(selectedItem.targetNamespace) {
                    logger := ctx.log.WithField("namespace", namespace)

                    ns := getNamespace(
                        logger,
                        archive.GetItemFilePath(ctx.restoreDir, "namespaces", "", namespace),
                        selectedItem.targetNamespace,
                    )
                    _, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady(
                        ns,
                        ctx.namespaceClient,
                        ctx.resourceTerminatingTimeout,
                    )
                    if err != nil {
                        errs.AddVeleroError(err)
                        continue
                    }

                    // Add the newly created namespace to the list of restored items.
                    if nsCreated {
                        itemKey := velero.ResourceIdentifier{
                            GroupResource: kuberesource.Namespaces,
                            Namespace:     ns.Namespace,
                            Name:          ns.Name,
                        }
                        ctx.restoredItems[itemKey] = struct{}{}
                    }

                    // Keep track of namespaces that we know exist so we don't
                    // have to try to create them multiple times.
                    existingNamespaces.Insert(selectedItem.targetNamespace)
                }

                obj, err := archive.Unmarshal(ctx.fileSystem, selectedItem.path)
                if err != nil {
                    errs.Add(
                        selectedItem.targetNamespace,
                        fmt.Errorf(
                            "error decoding %q: %v",
                            strings.Replace(selectedItem.path, ctx.restoreDir+"/", "", -1),
                            err,
                        ),
                    )
                    continue
                }

                w, e := ctx.restoreItem(obj, groupResource, selectedItem.targetNamespace)
                warnings.Merge(&w)
                errs.Merge(&e)
                i++

                // totalItems keeps the count of items previously known. There
                // may be additional items restored by plugins, and we want to
                // include those by looking at restoredItems; at the same time,
                // we don't want previously known items counted twice, as they
                // are present in both restoredItems and totalItems.
                actualTotalItems := len(ctx.restoredItems) + (totalItems - i)
                update <- progressUpdate{
                    totalItems:    actualTotalItems,
                    itemsRestored: len(ctx.restoredItems),
                }
            }
        }

        // If we just restored custom resource definitions (CRDs), refresh
        // discovery because the restored CRDs may have created new APIs that
        // didn't previously exist in the cluster, and we want to be able to
        // resolve & restore instances of them in subsequent loop iterations.
        if groupResource == kuberesource.CustomResourceDefinitions {
            if err := ctx.discoveryHelper.Refresh(); err != nil {
                warnings.Add("", errors.Wrap(err, "refresh discovery after restoring CRDs"))
            }
        }
    }

    // Stop the progress update goroutine.
    quit <- struct{}{}

    // Do a final progress update, as stopping the ticker might have prevented
    // the last few updates from taking place.
    patch := fmt.Sprintf(
        `{"status":{"progress":{"totalItems":%d,"itemsRestored":%d}}}`,
        len(ctx.restoredItems),
        len(ctx.restoredItems),
    )
    _, err = ctx.restoreClient.Restores(ctx.restore.Namespace).Patch(
        go_context.TODO(),
        ctx.restore.Name,
        types.MergePatchType,
        []byte(patch),
        metav1.PatchOptions{},
    )
    if err != nil {
        ctx.log.WithError(errors.WithStack(err)).Warn("Updating restore status.progress")
    }

    // Wait for all of the restic restore goroutines to be done, which is
    // only possible once all of their errors have been received by the loop
    // below, then close the resticErrs channel so the loop terminates.
    go func() {
        ctx.log.Info("Waiting for all restic restores to complete")

        // TODO timeout?
        ctx.resticWaitGroup.Wait()
        close(ctx.resticErrs)
    }()

    // This loop will only terminate when the ctx.resticErrs channel is closed
    // in the above goroutine, *after* all errors from the goroutines have been
    // received by this loop.
    for err := range ctx.resticErrs {
        // TODO: not ideal to be adding these to Velero-level errors
        // rather than a specific namespace, but we don't have a way
        // to track the namespace right now.
        errs.Velero = append(errs.Velero, err.Error())
    }
    ctx.log.Info("Done waiting for all restic restores to complete")

    // Wait for all post-restore exec hooks with the same logic as the restic
    // wait above.
    go func() {
        ctx.log.Info("Waiting for all post-restore-exec hooks to complete")
        ctx.hooksWaitGroup.Wait()
        close(ctx.hooksErrs)
    }()
    for err := range ctx.hooksErrs {
        errs.Velero = append(errs.Velero, err.Error())
    }
    ctx.log.Info("Done waiting for all post-restore exec hooks to complete")

    return warnings, errs
}

// getNamespace returns a namespace API object that we should attempt to
// create before restoring anything into it. It will come from the backup
// tarball if it exists there, else it will be a new one. If it comes from
// the tarball, it will retain its labels, annotations, and spec.
func getNamespace(logger logrus.FieldLogger, path, remappedName string) *v1.Namespace {
    var nsBytes []byte
    var err error

    if nsBytes, err = ioutil.ReadFile(path); err != nil {
        return &v1.Namespace{
            ObjectMeta: metav1.ObjectMeta{
                Name: remappedName,
            },
        }
    }

    var backupNS v1.Namespace
    if err := json.Unmarshal(nsBytes, &backupNS); err != nil {
        logger.Warnf("Error unmarshalling namespace from backup, creating new one.")
        return &v1.Namespace{
            ObjectMeta: metav1.ObjectMeta{
                Name: remappedName,
            },
        }
    }

    return &v1.Namespace{
        ObjectMeta: metav1.ObjectMeta{
            Name:        remappedName,
            Labels:      backupNS.Labels,
            Annotations: backupNS.Annotations,
        },
        Spec: backupNS.Spec,
    }
}
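
// A usage sketch (hypothetical names): restoring namespace "app-ns" into
// "app-ns-copy" reads the backed-up object, if present, from the tarball's
// cluster-scoped namespaces directory and renames it:
//
//	ns := getNamespace(logger,
//		archive.GetItemFilePath(restoreDir, "namespaces", "", "app-ns"),
//		"app-ns-copy")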

// TODO: this should be combined with DeleteItemActions at some point.
func (ctx *restoreContext) getApplicableActions(groupResource schema.GroupResource, namespace string) []resolvedAction {
    var actions []resolvedAction

    for _, action := range ctx.actions {
        if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
            continue
        }

        if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) {
            continue
        }

        if namespace == "" && !action.namespaceIncludesExcludes.IncludeEverything() {
            continue
        }

        actions = append(actions, action)
    }

    return actions
}
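
// For example, a namespaced item must pass both an action's resource and
// namespace filters, while a cluster-scoped item (namespace == "") matches
// only actions whose namespace filter includes everything.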
func (ctx *restoreContext) shouldRestore(name string, pvClient client.Dynamic) (bool, error) {
    pvLogger := ctx.log.WithField("pvName", name)

    var shouldRestore bool
    err := wait.PollImmediate(time.Second, ctx.resourceTerminatingTimeout, func() (bool, error) {
        unstructuredPV, err := pvClient.Get(name, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            pvLogger.Debug("PV not found, safe to restore")
            // PV not found, can safely exit loop and proceed with restore.
            shouldRestore = true
            return true, nil
        }
        if err != nil {
            return false, errors.Wrapf(err, "could not retrieve in-cluster copy of PV %s", name)
        }

        clusterPV := new(v1.PersistentVolume)
        if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, clusterPV); err != nil {
            return false, errors.Wrap(err, "error converting PV from unstructured")
        }

        if clusterPV.Status.Phase == v1.VolumeReleased || clusterPV.DeletionTimestamp != nil {
            // PV was found and marked for deletion, or it was released; wait for it to go away.
            pvLogger.Debugf("PV found, but marked for deletion, waiting")
            return false, nil
        }

        // Check the namespace and PVC to see if anything that's referencing the PV is deleting.
        // If either the namespace or PVC is in a deleting/terminating state, wait for them to
        // finish before trying to restore the PV.
        // Not doing so may result in the underlying PV disappearing but not restoring due to
        // timing issues, then the PVC getting restored and showing as lost.
        if clusterPV.Spec.ClaimRef == nil {
            pvLogger.Debugf("PV is not marked for deletion and is not claimed by a PVC")
            return true, nil
        }

        namespace := clusterPV.Spec.ClaimRef.Namespace
        pvcName := clusterPV.Spec.ClaimRef.Name

        // Have to create the PVC client here because we don't know what namespace we're using
        // until we get to this point. Using a dynamic client since it's easier to mock for testing.
        pvcResource := metav1.APIResource{Name: "persistentvolumeclaims", Namespaced: true}
        pvcClient, err := ctx.dynamicFactory.ClientForGroupVersionResource(schema.GroupVersion{Group: "", Version: "v1"}, pvcResource, namespace)
        if err != nil {
            return false, errors.Wrapf(err, "error getting pvc client")
        }

        pvc, err := pvcClient.Get(pvcName, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            pvLogger.Debugf("PVC %s for PV not found, waiting", pvcName)
            // PVC wasn't found, but the PV still exists, so continue to wait.
            return false, nil
        }
        if err != nil {
            return false, errors.Wrapf(err, "error getting claim %s for persistent volume", pvcName)
        }

        if pvc != nil && pvc.GetDeletionTimestamp() != nil {
            pvLogger.Debugf("PVC for PV marked for deletion, waiting")
            // PVC is still deleting, continue to wait.
            return false, nil
        }

        // Check the namespace associated with the claimRef to see if it's
        // deleting/terminating before proceeding.
        ns, err := ctx.namespaceClient.Get(go_context.TODO(), namespace, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            pvLogger.Debugf("namespace %s for PV not found, waiting", namespace)
            // Namespace not found but the PV still exists, so continue to wait.
            return false, nil
        }
        if err != nil {
            return false, errors.Wrapf(err, "error getting namespace %s associated with PV %s", namespace, name)
        }

        if ns != nil && (ns.GetDeletionTimestamp() != nil || ns.Status.Phase == v1.NamespaceTerminating) {
            pvLogger.Debugf("namespace %s associated with PV is deleting, waiting", namespace)
            // Namespace is in the process of deleting, keep looping.
            return false, nil
        }

        // None of the PV, PVC, or NS are marked for deletion, break the loop.
        pvLogger.Debug("PV, associated PVC and namespace are not marked for deletion")
        return true, nil
    })

    if err == wait.ErrWaitTimeout {
        pvLogger.Warn("timeout reached waiting for persistent volume to delete")
    }

    return shouldRestore, err
}
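
// Design note: wait.PollImmediate invokes the condition once up front, then
// once per interval until it returns true, returns an error, or the timeout
// elapses; returning (false, nil) above means "keep waiting".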

// crdAvailable waits for a CRD to be available for use before letting the
// restore continue.
func (ctx *restoreContext) crdAvailable(name string, crdClient client.Dynamic) (bool, error) {
    crdLogger := ctx.log.WithField("crdName", name)

    var available bool
    // Wait 1 minute rather than the standard resource timeout, since each CRD
    // will transition fairly quickly.
    err := wait.PollImmediate(time.Second, time.Minute*1, func() (bool, error) {
        unstructuredCRD, err := crdClient.Get(name, metav1.GetOptions{})
        if err != nil {
            return true, err
        }

        // TODO: Due to upstream conversion issues in runtime.FromUnstructured,
        // we use the unstructured object here. Once the upstream conversion
        // functions are fixed, we should convert to the CRD types and use
        // IsCRDReady.
        available, err = kube.IsUnstructuredCRDReady(unstructuredCRD)
        if err != nil {
            return true, err
        }

        if !available {
            crdLogger.Debug("CRD not yet ready for use")
        }

        // If the CRD is not available, keep polling (false, nil).
        // If the CRD is available, break the poll and return to caller (true, nil).
        return available, nil
    })

    if err == wait.ErrWaitTimeout {
        crdLogger.Debug("timeout reached waiting for custom resource definition to be ready")
    }

    return available, err
}
func (ctx *restoreContext) getResourceClient(groupResource schema.GroupResource, obj *unstructured.Unstructured, namespace string) (client.Dynamic, error) {
    key := resourceClientKey{
        resource:  groupResource.WithVersion(obj.GroupVersionKind().Version),
        namespace: namespace,
    }

    if client, ok := ctx.resourceClients[key]; ok {
        return client, nil
    }

    // Initialize client for this resource. We need metadata from an object to
    // do this.
    ctx.log.Infof("Getting client for %v", obj.GroupVersionKind())

    resource := metav1.APIResource{
        Namespaced: len(namespace) > 0,
        Name:       groupResource.Resource,
    }

    client, err := ctx.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace)
    if err != nil {
        return nil, err
    }

    ctx.resourceClients[key] = client
    return client, nil
}

func getResourceID(groupResource schema.GroupResource, namespace, name string) string {
    if namespace == "" {
        return fmt.Sprintf("%s/%s", groupResource.String(), name)
    }

    return fmt.Sprintf("%s/%s/%s", groupResource.String(), namespace, name)
}
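
// For example, getResourceID(schema.GroupResource{Group: "apps", Resource: "deployments"}, "web", "nginx")
// returns "deployments.apps/web/nginx"; cluster-scoped resources omit the
// namespace segment.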
func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (Result, Result) {
    warnings, errs := Result{}, Result{}
    resourceID := getResourceID(groupResource, namespace, obj.GetName())

    // Check if group/resource should be restored. We need to do this here since
    // this method may be getting called for an additional item which is a group/resource
    // that's excluded.
    if !ctx.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
        ctx.log.WithFields(logrus.Fields{
            "namespace":     obj.GetNamespace(),
            "name":          obj.GetName(),
            "groupResource": groupResource.String(),
        }).Info("Not restoring item because resource is excluded")
        return warnings, errs
    }

    // Check if namespace/cluster-scoped resource should be restored. We need
    // to do this here since this method may be getting called for an additional
    // item which is in a namespace that's excluded, or which is cluster-scoped
    // and should be excluded. Note that we're checking the object's namespace
    // (via obj.GetNamespace()) instead of the namespace parameter, because we
    // want to check the *original* namespace, not the remapped one if it's
    // been remapped.
    if namespace != "" {
        if !ctx.namespaceIncludesExcludes.ShouldInclude(obj.GetNamespace()) {
            ctx.log.WithFields(logrus.Fields{
                "namespace":     obj.GetNamespace(),
                "name":          obj.GetName(),
                "groupResource": groupResource.String(),
            }).Info("Not restoring item because namespace is excluded")
            return warnings, errs
        }

        // If the namespace-scoped resource should be restored, ensure that the
        // namespace it is being restored into exists.
        // This is the *remapped* namespace that we are ensuring exists.
        nsToEnsure := getNamespace(ctx.log, archive.GetItemFilePath(ctx.restoreDir, "namespaces", "", obj.GetNamespace()), namespace)
        if _, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady(nsToEnsure, ctx.namespaceClient, ctx.resourceTerminatingTimeout); err != nil {
            errs.AddVeleroError(err)
            return warnings, errs
        } else {
            // Add the newly created namespace to the list of restored items.
            if nsCreated {
                itemKey := velero.ResourceIdentifier{
                    GroupResource: kuberesource.Namespaces,
                    Namespace:     nsToEnsure.Namespace,
                    Name:          nsToEnsure.Name,
                }
                ctx.restoredItems[itemKey] = struct{}{}
            }
        }
    } else {
        if boolptr.IsSetToFalse(ctx.restore.Spec.IncludeClusterResources) {
            ctx.log.WithFields(logrus.Fields{
                "namespace":     obj.GetNamespace(),
                "name":          obj.GetName(),
                "groupResource": groupResource.String(),
            }).Info("Not restoring item because it's cluster-scoped")
            return warnings, errs
        }
    }

    // Make a copy of the object retrieved from backup so it's available
    // unchanged inside restore actions.
    itemFromBackup := obj.DeepCopy()

    complete, err := isCompleted(obj, groupResource)
    if err != nil {
        errs.Add(namespace, fmt.Errorf("error checking completion of %q: %v", resourceID, err))
        return warnings, errs
    }
    if complete {
        ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj))
        return warnings, errs
    }

    name := obj.GetName()

    // Check if we've already restored this itemKey.
    itemKey := velero.ResourceIdentifier{
        GroupResource: groupResource,
        Namespace:     namespace,
        Name:          name,
    }
    if _, exists := ctx.restoredItems[itemKey]; exists {
        ctx.log.Infof("Skipping %s because it's already been restored.", resourceID)
        return warnings, errs
    }
    ctx.restoredItems[itemKey] = struct{}{}

    // TODO: move to restore item action if/when we add a ShouldRestore() method
    // to the interface.
    if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" {
        ctx.log.Infof("Not restoring pod because it's a mirror pod")
        return warnings, errs
    }

    resourceClient, err := ctx.getResourceClient(groupResource, obj, namespace)
    if err != nil {
        errs.AddVeleroError(fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err))
        return warnings, errs
    }

    if groupResource == kuberesource.PersistentVolumes {
        switch {
        case hasSnapshot(name, ctx.volumeSnapshots):
            oldName := obj.GetName()
            shouldRenamePV, err := shouldRenamePV(ctx, obj, resourceClient)
            if err != nil {
                errs.Add(namespace, err)
                return warnings, errs
            }

            // Check to see if the claimRef.namespace field needs to be remapped,
            // and do so if necessary.
            _, err = remapClaimRefNS(ctx, obj)
            if err != nil {
                errs.Add(namespace, err)
                return warnings, errs
            }

            var shouldRestoreSnapshot bool
            if !shouldRenamePV {
                // Check if the PV exists in the cluster before attempting to create
                // a volume from the snapshot, in order to avoid orphaned volumes (GH #609).
                shouldRestoreSnapshot, err = ctx.shouldRestore(name, resourceClient)
                if err != nil {
                    errs.Add(namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name))
                    return warnings, errs
                }
            } else {
                // If we're renaming the PV, we're going to give it a new random name,
                // so we can assume it doesn't already exist in the cluster and therefore
                // we should proceed with restoring from snapshot.
                shouldRestoreSnapshot = true
            }

            if shouldRestoreSnapshot {
                // Reset the PV's binding status so that Kubernetes can properly
                // associate it with the restored PVC.
                obj = resetVolumeBindingInfo(obj)

                // Even if we're renaming the PV, obj still has the old name here,
                // because the pvRestorer uses the original name to look up metadata
                // about the snapshot.
                ctx.log.Infof("Restoring persistent volume from snapshot.")
                updatedObj, err := ctx.pvRestorer.executePVAction(obj)
                if err != nil {
                    errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
                    return warnings, errs
                }
                obj = updatedObj

                // If the VolumeSnapshotter has modified the PV name, we should
                // rename the PV.
                if oldName != obj.GetName() {
                    shouldRenamePV = true
                }
            }

            if shouldRenamePV {
                var pvName string
                if oldName == obj.GetName() {
                    // pvRestorer hasn't modified the PV name, so we need to rename the PV.
                    pvName, err = ctx.pvRenamer(oldName)
                    if err != nil {
                        errs.Add(namespace, errors.Wrapf(err, "error renaming PV"))
                        return warnings, errs
                    }
                } else {
                    // The VolumeSnapshotter could have modified the PV name through
                    // its SetVolumeID function.
                    pvName = obj.GetName()
                }

                ctx.renamedPVs[oldName] = pvName
                obj.SetName(pvName)

                // Add the original PV name as an annotation.
                annotations := obj.GetAnnotations()
                if annotations == nil {
                    annotations = map[string]string{}
                }
                annotations["velero.io/original-pv-name"] = oldName
                obj.SetAnnotations(annotations)
            }
2019-08-06 20:40:35 +00:00
case hasResticBackup ( obj , ctx ) :
ctx . log . Infof ( "Dynamically re-provisioning persistent volume because it has a restic backup to be restored." )
2019-03-28 19:21:56 +00:00
ctx . pvsToProvision . Insert ( name )
2019-08-06 20:40:35 +00:00
2021-03-15 22:51:07 +00:00
// Return early because we don't want to restore the PV itself, we
// want to dynamically re-provision it.
2019-03-28 19:21:56 +00:00
return warnings , errs
2019-08-27 23:42:38 +00:00
2019-08-06 20:40:35 +00:00
case hasDeleteReclaimPolicy ( obj . Object ) :
ctx . log . Infof ( "Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete." )
ctx . pvsToProvision . Insert ( name )
2018-11-02 08:42:58 +00:00
2021-03-15 22:51:07 +00:00
// Return early because we don't want to restore the PV itself, we
// want to dynamically re-provision it.
2019-03-28 19:21:56 +00:00
return warnings , errs
2019-08-27 23:42:38 +00:00
2019-08-06 20:40:35 +00:00
default :
ctx . log . Infof ( "Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete." )
2017-08-02 17:27:17 +00:00
2020-10-15 23:57:43 +00:00
obj = resetVolumeBindingInfo ( obj )
2021-03-15 22:51:07 +00:00
// We call the pvRestorer here to clear out the PV's claimRef.UID,
// so it can be re-claimed when its PVC is restored and gets a new UID.
2019-03-28 19:21:56 +00:00
updatedObj , err := ctx . pvRestorer . executePVAction ( obj )
2017-11-21 17:24:43 +00:00
if err != nil {
2020-02-03 18:56:57 +00:00
errs . Add ( namespace , fmt . Errorf ( "error executing PVAction for %s: %v" , resourceID , err ) )
2019-03-28 19:21:56 +00:00
return warnings , errs
2017-11-21 17:24:43 +00:00
}
2019-03-28 19:21:56 +00:00
obj = updatedObj
}
}
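
	// A sketch of the PV decision above, for orientation (not executable; the
	// real branches also carry renaming and error handling):
	//
	//	switch {
	//	case hasSnapshot(name, ctx.volumeSnapshots): // restore from snapshot, renaming if needed
	//	case hasResticBackup(obj, ctx):              // skip PV; re-provision dynamically
	//	case hasDeleteReclaimPolicy(obj.Object):     // skip PV; re-provision dynamically
	//	default:                                     // restore as-is, with binding info reset
	//	}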

	// Clear out non-core metadata fields and status.
	if obj, err = resetMetadataAndStatus(obj); err != nil {
		errs.Add(namespace, err)
		return warnings, errs
	}

	for _, action := range ctx.getApplicableActions(groupResource, namespace) {
		if !action.selector.Matches(labels.Set(obj.GetLabels())) {
			return warnings, errs
		}

		ctx.log.Infof("Executing item action for %v", &groupResource)
		executeOutput, err := action.Execute(&velero.RestoreItemActionExecuteInput{
			Item:           obj,
			ItemFromBackup: itemFromBackup,
			Restore:        ctx.restore,
		})
		if err != nil {
			errs.Add(namespace, fmt.Errorf("error preparing %s: %v", resourceID, err))
			return warnings, errs
		}

		if executeOutput.SkipRestore {
			ctx.log.Infof("Skipping restore of %s: %v because a registered plugin discarded it", obj.GroupVersionKind().Kind, name)
			return warnings, errs
		}
		unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured)
		if !ok {
			errs.Add(namespace, fmt.Errorf("%s: unexpected type %T", resourceID, executeOutput.UpdatedItem))
			return warnings, errs
		}

		obj = unstructuredObj

		for _, additionalItem := range executeOutput.AdditionalItems {
			itemPath := archive.GetItemFilePath(ctx.restoreDir, additionalItem.GroupResource.String(), additionalItem.Namespace, additionalItem.Name)

			if _, err := ctx.fileSystem.Stat(itemPath); err != nil {
				ctx.log.WithError(err).WithFields(logrus.Fields{
					"additionalResource":          additionalItem.GroupResource.String(),
					"additionalResourceNamespace": additionalItem.Namespace,
					"additionalResourceName":      additionalItem.Name,
				}).Warn("unable to restore additional item")
				warnings.Add(additionalItem.Namespace, err)

				continue
			}

			additionalResourceID := getResourceID(additionalItem.GroupResource, additionalItem.Namespace, additionalItem.Name)
			additionalObj, err := archive.Unmarshal(ctx.fileSystem, itemPath)
			if err != nil {
				errs.Add(namespace, errors.Wrapf(err, "error restoring additional item %s", additionalResourceID))
				// Skip this additional item rather than attempting to restore a nil object.
				continue
			}

			additionalItemNamespace := additionalItem.Namespace
			if additionalItemNamespace != "" {
				if remapped, ok := ctx.restore.Spec.NamespaceMapping[additionalItemNamespace]; ok {
					additionalItemNamespace = remapped
				}
			}

			w, e := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace)
			warnings.Merge(&w)
			errs.Merge(&e)
		}
	}

	// This comes after running item actions because we have built-in actions that restore
	// a PVC's associated PV (if applicable). As part of the PV being restored, items may be
	// added to the 'pvsToProvision' set, and that needs to happen *before* running the
	// following block of logic.
	//
	// The side effect of this is that it's impossible for a user to write a restore item action that
	// adjusts this behavior (i.e. of resetting the PVC for dynamic provisioning if it claims a PV with
	// a reclaim policy of Delete and no snapshot). If/when that becomes an issue for users, we can
	// revisit. This would be easier with a multi-pass restore process.
	if groupResource == kuberesource.PersistentVolumeClaims {
		pvc := new(v1.PersistentVolumeClaim)
		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil {
			errs.Add(namespace, err)
			return warnings, errs
		}

		if pvc.Spec.VolumeName != "" {
			// This used to happen only with restic volumes, but now we always
			// remove this binding metadata.
			obj = resetVolumeBindingInfo(obj)

			// This is the case for restic volumes, where we need to have an empty
			// volume created instead of restoring one. The assumption is that any
			// PV in pvsToProvision doesn't have an associated snapshot.
			if ctx.pvsToProvision.Has(pvc.Spec.VolumeName) {
				ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning", namespace, name)
				unstructured.RemoveNestedField(obj.Object, "spec", "volumeName")
			}
		}

		if newName, ok := ctx.renamedPVs[pvc.Spec.VolumeName]; ok {
			ctx.log.Infof("Updating persistent volume claim %s/%s to reference renamed persistent volume (%s -> %s)", namespace, name, pvc.Spec.VolumeName, newName)
			if err := unstructured.SetNestedField(obj.Object, newName, "spec", "volumeName"); err != nil {
				errs.Add(namespace, err)
				return warnings, errs
			}
		}
	}

	// Record the original namespace, since we may have remapped it. If the
	// target namespace is blank, the resource is cluster-scoped, so don't set it.
	originalNamespace := obj.GetNamespace()
	if namespace != "" {
		obj.SetNamespace(namespace)
	}

	// Label the resource with the restore's name and the restored backup's name
	// for easy identification of all cluster resources created by this restore
	// and which backup they came from.
	addRestoreLabels(obj, ctx.restore.Name, ctx.restore.Spec.BackupName)

	ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name)
	createdObj, restoreErr := resourceClient.Create(obj)
	if apierrors.IsAlreadyExists(restoreErr) {
		fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
		if err != nil {
			ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
			warnings.Add(namespace, err)
			return warnings, errs
		}
		// Remove insubstantial metadata.
		fromCluster, err = resetMetadataAndStatus(fromCluster)
		if err != nil {
			ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
			warnings.Add(namespace, err)
			return warnings, errs
		}

		// We know the object from the cluster won't have the backup/restore name
		// labels, so copy them from the object we attempted to restore.
		labels := obj.GetLabels()
		addRestoreLabels(fromCluster, labels[velerov1api.RestoreNameLabel], labels[velerov1api.BackupNameLabel])

		if !equality.Semantic.DeepEqual(fromCluster, obj) {
			switch groupResource {
			case kuberesource.ServiceAccounts:
				desired, err := mergeServiceAccounts(fromCluster, obj)
				if err != nil {
					ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
					warnings.Add(namespace, err)
					return warnings, errs
				}

				patchBytes, err := generatePatch(fromCluster, desired)
				if err != nil {
					ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
					warnings.Add(namespace, err)
					return warnings, errs
				}

				if patchBytes == nil {
					// In-cluster and desired state are the same, so move on to
					// the next item.
					return warnings, errs
				}

				_, err = resourceClient.Patch(name, patchBytes)
				if err != nil {
					warnings.Add(namespace, err)
				} else {
					ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
				}
			default:
				e := errors.Errorf("could not restore, %s. Warning: the in-cluster version is different than the backed-up version.", restoreErr)
				warnings.Add(namespace, e)
			}
			return warnings, errs
		}

		ctx.log.Infof("Restore of %s, %v skipped: it already exists in the cluster and is the same as the backed up version", obj.GroupVersionKind().Kind, name)
		return warnings, errs
	}

	// Error was something other than an AlreadyExists.
	if restoreErr != nil {
		ctx.log.Errorf("error restoring %s: %+v", name, restoreErr)
		errs.Add(namespace, fmt.Errorf("error restoring %s: %v", resourceID, restoreErr))
		return warnings, errs
	}

	if groupResource == kuberesource.Pods && len(restic.GetVolumeBackupsForPod(ctx.podVolumeBackups, obj, originalNamespace)) > 0 {
		restorePodVolumeBackups(ctx, createdObj, originalNamespace)
	}

	if groupResource == kuberesource.Pods {
		ctx.waitExec(createdObj)
	}

	// Wait for a CRD to be available for instantiating resources
	// before continuing.
	if groupResource == kuberesource.CustomResourceDefinitions {
		available, err := ctx.crdAvailable(name, resourceClient)
		if err != nil {
			errs.Add(namespace, errors.Wrapf(err, "error verifying custom resource definition is ready to use"))
		} else if !available {
			errs.Add(namespace, fmt.Errorf("CRD %s is not available to use for custom resources.", name))
		}
	}

	return warnings, errs
}

// shouldRenamePV returns a boolean indicating whether a persistent volume should
// be given a new name before being restored, or an error if this cannot be
// determined. A persistent volume will be given a new name if and only if (a) a
// PV with the original name already exists in-cluster, and (b) in the backup,
// the PV is claimed by a PVC in a namespace that's being remapped during the
// restore.
func shouldRenamePV(ctx *restoreContext, obj *unstructured.Unstructured, client client.Dynamic) (bool, error) {
	if len(ctx.restore.Spec.NamespaceMapping) == 0 {
		ctx.log.Debugf("Persistent volume does not need to be renamed because restore is not remapping any namespaces")
		return false, nil
	}

	pv := new(v1.PersistentVolume)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, pv); err != nil {
		return false, errors.Wrapf(err, "error converting persistent volume to structured")
	}

	if pv.Spec.ClaimRef == nil {
		ctx.log.Debugf("Persistent volume does not need to be renamed because it's not claimed")
		return false, nil
	}

	if _, ok := ctx.restore.Spec.NamespaceMapping[pv.Spec.ClaimRef.Namespace]; !ok {
		ctx.log.Debugf("Persistent volume does not need to be renamed because it's not claimed by a PVC in a namespace that's being remapped")
		return false, nil
	}

	_, err := client.Get(pv.Name, metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		ctx.log.Debugf("Persistent volume does not need to be renamed because it does not exist in the cluster")
		return false, nil
	case err != nil:
		return false, errors.Wrapf(err, "error checking if persistent volume exists in the cluster")
	}

	// No error returned: the PV was found in-cluster, so we need to rename it.
	return true, nil
}
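
// As a hypothetical usage sketch: with Spec.NamespaceMapping = {"dev": "staging"},
// a backed-up PV claimed by a PVC in "dev", whose original name still exists
// in-cluster, would be renamed before being restored:
//
//	rename, err := shouldRenamePV(ctx, pvObj, resourceClient)
//	// rename == true, err == nil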

// remapClaimRefNS remaps a PersistentVolume's claimRef.Namespace based on a
// restore's NamespaceMapping, if necessary. Returns true if the namespace was
// remapped, false if it was not required.
func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) (bool, error) {
	if len(ctx.restore.Spec.NamespaceMapping) == 0 {
		ctx.log.Debug("Persistent volume does not need to have the claimRef.namespace remapped because restore is not remapping any namespaces")
		return false, nil
	}

	// Conversion to the real type here is more readable than all the error checking
	// involved with reading each field individually.
	pv := new(v1.PersistentVolume)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, pv); err != nil {
		return false, errors.Wrapf(err, "error converting persistent volume to structured")
	}

	if pv.Spec.ClaimRef == nil {
		ctx.log.Debugf("Persistent volume does not need to have the claimRef.namespace remapped because it's not claimed")
		return false, nil
	}

	targetNS, ok := ctx.restore.Spec.NamespaceMapping[pv.Spec.ClaimRef.Namespace]
	if !ok {
		ctx.log.Debugf("Persistent volume does not need to have the claimRef.namespace remapped because it's not claimed by a PVC in a namespace that's being remapped")
		return false, nil
	}

	err := unstructured.SetNestedField(obj.Object, targetNS, "spec", "claimRef", "namespace")
	if err != nil {
		return false, err
	}

	ctx.log.Debug("Persistent volume's namespace was updated")
	return true, nil
}
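
// A minimal sketch of remapClaimRefNS, assuming the same hypothetical
// {"dev": "staging"} namespace mapping: a PV whose claimRef points at a PVC in
// "dev" is rewritten in place:
//
//	remapped, err := remapClaimRefNS(ctx, pvObj)
//	// remapped == true; pvObj now has spec.claimRef.namespace == "staging"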

// restorePodVolumeBackups restores the PodVolumeBackups for the given restored pod.
func restorePodVolumeBackups(ctx *restoreContext, createdObj *unstructured.Unstructured, originalNamespace string) {
	if ctx.resticRestorer == nil {
		ctx.log.Warn("No restic restorer, not restoring pod's volumes")
	} else {
		ctx.resticWaitGroup.Add(1)
		go func() {
			// Done() will only be called after all errors have been successfully
			// sent on the ctx.resticErrs channel.
			defer ctx.resticWaitGroup.Done()

			pod := new(v1.Pod)
			if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
				ctx.log.WithError(err).Error("error converting unstructured pod")
				ctx.resticErrs <- err
				return
			}

			data := restic.RestoreData{
				Restore:          ctx.restore,
				Pod:              pod,
				PodVolumeBackups: ctx.podVolumeBackups,
				SourceNamespace:  originalNamespace,
				BackupLocation:   ctx.backup.Spec.StorageLocation,
			}
			if errs := ctx.resticRestorer.RestorePodVolumes(data); errs != nil {
				ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes")

				for _, err := range errs {
					ctx.resticErrs <- err
				}
			}
		}()
	}
}
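
// The goroutine above follows a WaitGroup-plus-error-channel pattern. A sketch
// of the sequencing the caller is assumed to perform once all pods have been
// submitted (the actual wait/drain happens elsewhere in this package):
//
//	go func() {
//		ctx.resticWaitGroup.Wait()
//		close(ctx.resticErrs)
//	}()
//	for err := range ctx.resticErrs {
//		// aggregate err into the restore result
//	}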

// waitExec executes hooks in a restored pod's containers when they become ready.
func (ctx *restoreContext) waitExec(createdObj *unstructured.Unstructured) {
	ctx.hooksWaitGroup.Add(1)
	go func() {
		// Done() will only be called after all errors have been successfully sent
		// on the ctx.hooksErrs channel.
		defer ctx.hooksWaitGroup.Done()

		pod := new(v1.Pod)
		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
			ctx.log.WithError(err).Error("error converting unstructured pod")
			ctx.hooksErrs <- err
			return
		}

		execHooksByContainer, err := hook.GroupRestoreExecHooks(
			ctx.resourceRestoreHooks,
			pod,
			ctx.log,
		)
		if err != nil {
			ctx.log.WithError(err).Errorf("error getting exec hooks for pod %s/%s", pod.Namespace, pod.Name)
			ctx.hooksErrs <- err
			return
		}

		if errs := ctx.waitExecHookHandler.HandleHooks(ctx.hooksContext, ctx.log, pod, execHooksByContainer); len(errs) > 0 {
			ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully execute post-restore hooks")
			ctx.hooksCancelFunc()

			for _, err := range errs {
				// Errors are already logged in the HandleHooks method.
				ctx.hooksErrs <- err
			}
		}
	}()
}

func hasSnapshot(pvName string, snapshots []*volume.Snapshot) bool {
	for _, snapshot := range snapshots {
		if snapshot.Spec.PersistentVolumeName == pvName {
			return true
		}
	}

	return false
}

func hasResticBackup(unstructuredPV *unstructured.Unstructured, ctx *restoreContext) bool {
	if len(ctx.podVolumeBackups) == 0 {
		return false
	}

	pv := new(v1.PersistentVolume)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, pv); err != nil {
		ctx.log.WithError(err).Warnf("Unable to convert PV from unstructured to structured")
		return false
	}

	if pv.Spec.ClaimRef == nil {
		return false
	}

	var found bool
	for _, pvb := range ctx.podVolumeBackups {
		if pvb.Spec.Pod.Namespace == pv.Spec.ClaimRef.Namespace && pvb.GetAnnotations()[restic.PVCNameAnnotation] == pv.Spec.ClaimRef.Name {
			found = true
			break
		}
	}

	return found
}
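
// For example (hypothetical names), hasResticBackup reports true when some
// PodVolumeBackup in the restore context matches the PV's claim:
//
//	pvb.Spec.Pod.Namespace == pv.Spec.ClaimRef.Namespace                    // "app"
//	pvb.GetAnnotations()[restic.PVCNameAnnotation] == pv.Spec.ClaimRef.Name // "data"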

func hasDeleteReclaimPolicy(obj map[string]interface{}) bool {
	policy, _, _ := unstructured.NestedString(obj, "spec", "persistentVolumeReclaimPolicy")
	return policy == string(v1.PersistentVolumeReclaimDelete)
}

// resetVolumeBindingInfo clears any binding metadata from a PersistentVolume
// or PersistentVolumeClaim that would otherwise prevent it from being re-bound
// on restore.
func resetVolumeBindingInfo(obj *unstructured.Unstructured) *unstructured.Unstructured {
	// Clean out ClaimRef UID and resourceVersion, since this information is
	// highly unique.
	unstructured.RemoveNestedField(obj.Object, "spec", "claimRef", "uid")
	unstructured.RemoveNestedField(obj.Object, "spec", "claimRef", "resourceVersion")

	// Clear out any annotations used by the Kubernetes PV controllers to track
	// bindings.
	annotations := obj.GetAnnotations()

	// Upon restore, this new PV will look like a statically provisioned, manually-
	// bound volume rather than one bound by the controller, so remove the annotation
	// that signals that a controller bound it.
	delete(annotations, KubeAnnBindCompleted)

	// Remove the annotation that signals that the PV is already bound; we want
	// the PV(C) controller to take the two objects and bind them again.
	delete(annotations, KubeAnnBoundByController)

	// Remove the provisioned-by annotation which signals that the persistent
	// volume was dynamically provisioned; it is now statically provisioned.
	delete(annotations, KubeAnnDynamicallyProvisioned)

	// GetAnnotations returns a copy, so we have to set them again.
	obj.SetAnnotations(annotations)

	return obj
}
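
// As a before/after sketch (hypothetical values), a PV entering
// resetVolumeBindingInfo as
//
//	metadata.annotations:
//	  pv.kubernetes.io/bind-completed: "yes"
//	  pv.kubernetes.io/bound-by-controller: "yes"
//	spec.claimRef: {namespace: app, name: data, uid: ..., resourceVersion: ...}
//
// leaves with those annotations removed and only claimRef.namespace and
// claimRef.name intact, so the PV controller can re-bind it to the restored PVC.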

func resetMetadataAndStatus(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	res, ok := obj.Object["metadata"]
	if !ok {
		return nil, errors.New("metadata not found")
	}
	metadata, ok := res.(map[string]interface{})
	if !ok {
		return nil, errors.Errorf("metadata was of type %T, expected map[string]interface{}", res)
	}

	for k := range metadata {
		switch k {
		case "name", "namespace", "labels", "annotations":
		default:
			delete(metadata, k)
		}
	}

	// Never restore status.
	delete(obj.UnstructuredContent(), "status")

	return obj, nil
}
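
// A short usage sketch: after
//
//	obj, _ = resetMetadataAndStatus(obj)
//
// obj's metadata contains at most name, namespace, labels, and annotations
// (uid, resourceVersion, creationTimestamp, and so on are gone), and the
// status block has been removed, so the object can be created fresh in the
// target cluster.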

// addRestoreLabels labels the provided object with the restore name and the
// restored backup's name.
func addRestoreLabels(obj metav1.Object, restoreName, backupName string) {
	labels := obj.GetLabels()
	if labels == nil {
		labels = make(map[string]string)
	}

	labels[velerov1api.BackupNameLabel] = label.GetValidName(backupName)
	labels[velerov1api.RestoreNameLabel] = label.GetValidName(restoreName)

	obj.SetLabels(labels)
}
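
// For instance (hypothetical names):
//
//	addRestoreLabels(obj, "nightly-restore", "nightly-backup")
//	// obj.GetLabels()[velerov1api.RestoreNameLabel] == "nightly-restore"
//	// obj.GetLabels()[velerov1api.BackupNameLabel] == "nightly-backup"
//
// label.GetValidName is assumed to coerce names longer than the 63-character
// label limit into valid label values.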

// isCompleted returns whether or not an object is considered completed. It is
// used to identify whether or not an object should be restored. Only Jobs and
// Pods are considered.
func isCompleted(obj *unstructured.Unstructured, groupResource schema.GroupResource) (bool, error) {
	switch groupResource {
	case kuberesource.Pods:
		phase, _, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "phase")
		if err != nil {
			return false, errors.WithStack(err)
		}
		if phase == string(v1.PodFailed) || phase == string(v1.PodSucceeded) {
			return true, nil
		}

	case kuberesource.Jobs:
		ct, found, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "completionTime")
		if err != nil {
			return false, errors.WithStack(err)
		}
		if found && ct != "" {
			return true, nil
		}
	}

	// Assume any other resource isn't complete and can be restored.
	return false, nil
}
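
// Illustrative outcomes (a sketch; podObj, jobObj, and cmObj are hypothetical
// unstructured objects):
//
//	done, _ := isCompleted(podObj, kuberesource.Pods) // true if status.phase is Succeeded or Failed
//	done, _ = isCompleted(jobObj, kuberesource.Jobs)  // true if status.completionTime is set
//	done, _ = isCompleted(cmObj, schema.GroupResource{Resource: "configmaps"}) // always false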

// restoreableResource represents one resource identifier and the map of its
// individual items, grouped by their original namespaces, that have been
// selected for restore.
type restoreableResource struct {
	resource                 string
	selectedItemsByNamespace map[string][]restoreableItem
	totalItems               int
}

// restoreableItem represents a single item, carrying enough information (its
// file path, name, and target namespace) to restore it.
type restoreableItem struct {
	path            string
	targetNamespace string
	name            string
}
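
// An illustrative (hypothetical) value: pods backed up from "ns-1" and "ns-2",
// with "ns-2" remapped to "ns-2-new" by the restore spec, might be collected as
//
//	restoreableResource{
//		resource: "pods",
//		selectedItemsByNamespace: map[string][]restoreableItem{
//			"ns-1": {{path: ".../resources/pods/namespaces/ns-1/web.json", targetNamespace: "ns-1", name: "web"}},
//			"ns-2": {{path: ".../resources/pods/namespaces/ns-2/db.json", targetNamespace: "ns-2-new", name: "db"}},
//		},
//		totalItems: 2,
//	}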

// getOrderedResourceCollection iterates over the list of ordered resource
// identifiers, applying the resource include/exclude criteria and Kubernetes
// label selectors, to build the list of resources to be actually restored,
// preserving the original order.
func (ctx *restoreContext) getOrderedResourceCollection(backupResources map[string]*archive.ResourceItems) ([]restoreableResource, Result, Result) {
	var warnings, errs Result
	processedResources := sets.NewString()
	restoreResourceCollection := make([]restoreableResource, 0)

	// Iterate through an ordered list of resources to restore, checking each
	// one to see if it should be restored. Note that resources *may* be in this
	// list twice, i.e. once due to being a prioritized resource, and once due
	// to being in the backup tarball. We can't de-dupe this upfront, because
	// it's possible that items in the prioritized resources list may not be
	// fully resolved group-resource strings (e.g. may be specified as "po"
	// instead of "pods"), and we don't want to fully resolve them via discovery
	// until we reach them in the loop, because it is possible that the
	// resource/API itself is being restored via a custom resource definition,
	// meaning it's not available via discovery prior to beginning the restore.
	//
	// Since we keep track of the fully-resolved group-resources that we *have*
	// restored, we won't try to restore a resource twice even if it's in the
	// ordered list twice.
	for _, resource := range getOrderedResources(ctx.resourcePriorities, backupResources) {
		// Try to resolve the resource via discovery to a complete group/version/resource.
		gvr, _, err := ctx.discoveryHelper.ResourceFor(schema.ParseGroupResource(resource).WithVersion(""))
		if err != nil {
			ctx.log.WithField("resource", resource).Infof("Skipping restore of resource because it cannot be resolved via discovery")
			continue
		}
		groupResource := gvr.GroupResource()

		// Check if we've already restored this resource (this would happen if
		// the resource we're currently looking at was already restored because
		// it was a prioritized resource, and now we're looking at it as part of
		// the backup contents).
		if processedResources.Has(groupResource.String()) {
			ctx.log.WithField("resource", groupResource.String()).Debugf("Skipping restore of resource because it's already been processed")
			continue
		}

		// Check if the resource should be restored according to the resource
		// includes/excludes.
		if !ctx.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
			ctx.log.WithField("resource", groupResource.String()).Infof("Skipping restore of resource because the restore spec excludes it")
			continue
		}

		// We don't want to explicitly restore namespace API objects because we'll
		// handle them as a special case prior to restoring anything into them.
		if groupResource == kuberesource.Namespaces {
			continue
		}

		// Check if the resource is present in the backup.
		resourceList := backupResources[groupResource.String()]
		if resourceList == nil {
			ctx.log.WithField("resource", groupResource.String()).Debugf("Skipping restore of resource because it's not present in the backup tarball")
			continue
		}

		// Iterate through each namespace that contains instances of the
		// resource and append to the list of to-be-restored resources.
		for namespace, items := range resourceList.ItemsByNamespace {
			if namespace != "" && !ctx.namespaceIncludesExcludes.ShouldInclude(namespace) {
				ctx.log.Infof("Skipping namespace %s", namespace)
				continue
			}

			// Get the target namespace to restore into, if it's different from
			// the source namespace.
			targetNamespace := namespace
			if target, ok := ctx.restore.Spec.NamespaceMapping[namespace]; ok {
				targetNamespace = target
			}

			if targetNamespace == "" && boolptr.IsSetToFalse(ctx.restore.Spec.IncludeClusterResources) {
				ctx.log.Infof("Skipping resource %s because it's cluster-scoped", resource)
				continue
			}

			if targetNamespace == "" && !boolptr.IsSetToTrue(ctx.restore.Spec.IncludeClusterResources) && !ctx.namespaceIncludesExcludes.IncludeEverything() {
				ctx.log.Infof("Skipping resource %s because it's cluster-scoped and only specific namespaces are included in the restore", resource)
				continue
			}

			res, w, e := ctx.getSelectedRestoreableItems(groupResource.String(), targetNamespace, namespace, items)
			warnings.Merge(&w)
			errs.Merge(&e)

			restoreResourceCollection = append(restoreResourceCollection, res)
		}

		// Record that we've processed this resource so it isn't processed twice.
		processedResources.Insert(groupResource.String())
	}

	return restoreResourceCollection, warnings, errs
}
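
// For example, with resourcePriorities of ["customresourcedefinitions", "po"]
// and a backup tarball containing pods and deployments, the loop above resolves
// "po" to "pods" via discovery, collects CRDs first, then pods (only once, even
// though "pods" also appears in tarball order), then the remaining resources.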

// getSelectedRestoreableItems applies Kubernetes label selectors on the
// individual items of each resource type to build the list of items that will
// actually be restored.
func (ctx *restoreContext) getSelectedRestoreableItems(resource, targetNamespace, originalNamespace string, items []string) (restoreableResource, Result, Result) {
	warnings, errs := Result{}, Result{}

	restorable := restoreableResource{
		resource: resource,
	}

	if restorable.selectedItemsByNamespace == nil {
		restorable.selectedItemsByNamespace = make(map[string][]restoreableItem)
	}

	if targetNamespace != "" {
		ctx.log.Infof("Resource '%s' will be restored into namespace '%s'", resource, targetNamespace)
	} else {
		ctx.log.Infof("Resource '%s' will be restored at cluster scope", resource)
	}

	// If the APIGroupVersionsFeatureFlag is enabled, the item path will be
	// updated to include the API group version that was chosen for restore. For
	// example, for "horizontalpodautoscalers.autoscaling", if v2beta1 is chosen
	// to be restored, then "horizontalpodautoscalers.autoscaling/v2beta1" will
	// be part of the item path. Different versions would only have been stored
	// if the APIGroupVersionsFeatureFlag was enabled during backup. The
	// chosenGrpVersToRestore map will only be populated if the
	// APIGroupVersionsFeatureFlag was enabled for restore and the minimum
	// required backup format version has been met.
	cgv, ok := ctx.chosenGrpVersToRestore[resource]
	if ok {
		resource = filepath.Join(resource, cgv.Dir)
	}

	for _, item := range items {
		itemPath := archive.GetItemFilePath(ctx.restoreDir, resource, originalNamespace, item)

		obj, err := archive.Unmarshal(ctx.fileSystem, itemPath)
		if err != nil {
			errs.Add(
				targetNamespace,
				fmt.Errorf(
					"error decoding %q: %v",
					strings.Replace(itemPath, ctx.restoreDir+"/", "", -1),
					err,
				),
			)
			continue
		}

		if !ctx.selector.Matches(labels.Set(obj.GetLabels())) {
			continue
		}

		selectedItem := restoreableItem{
			path:            itemPath,
			name:            item,
			targetNamespace: targetNamespace,
		}
		restorable.selectedItemsByNamespace[originalNamespace] =
			append(restorable.selectedItemsByNamespace[originalNamespace], selectedItem)
		restorable.totalItems++
	}

	return restorable, warnings, errs
}