Merge pull request #5444 from Lyndon-Li/remove-restic

Pod Volume Backup/Restore Refactor: Remove Restic in code

commit d658f6564d

@@ -0,0 +1 @@
+Remove irrational "Restic" names in Velero code after the PVBR refactor
@@ -17,7 +17,7 @@ limitations under the License.
package v1

// PodVolumeOperationProgress represents the progress of a
-// PodVolumeBackup/Restore (restic) operation
+// PodVolumeBackup/Restore operation
type PodVolumeOperationProgress struct {
// +optional
TotalBytes int64 `json:"totalBytes,omitempty"`
@@ -71,15 +71,15 @@ type Backupper interface {

// kubernetesBackupper implements Backupper.
type kubernetesBackupper struct {
-backupClient velerov1client.BackupsGetter
-dynamicFactory client.DynamicFactory
-discoveryHelper discovery.Helper
-podCommandExecutor podexec.PodCommandExecutor
-resticBackupperFactory podvolume.BackupperFactory
-resticTimeout time.Duration
-defaultVolumesToFsBackup bool
-clientPageSize int
-uploaderType string
+backupClient velerov1client.BackupsGetter
+dynamicFactory client.DynamicFactory
+discoveryHelper discovery.Helper
+podCommandExecutor podexec.PodCommandExecutor
+podVolumeBackupperFactory podvolume.BackupperFactory
+podVolumeTimeout time.Duration
+defaultVolumesToFsBackup bool
+clientPageSize int
+uploaderType string
}

func (i *itemKey) String() string {

@@ -102,22 +102,22 @@ func NewKubernetesBackupper(
discoveryHelper discovery.Helper,
dynamicFactory client.DynamicFactory,
podCommandExecutor podexec.PodCommandExecutor,
-resticBackupperFactory podvolume.BackupperFactory,
-resticTimeout time.Duration,
+podVolumeBackupperFactory podvolume.BackupperFactory,
+podVolumeTimeout time.Duration,
defaultVolumesToFsBackup bool,
clientPageSize int,
uploaderType string,
) (Backupper, error) {
return &kubernetesBackupper{
-backupClient: backupClient,
-discoveryHelper: discoveryHelper,
-dynamicFactory: dynamicFactory,
-podCommandExecutor: podCommandExecutor,
-resticBackupperFactory: resticBackupperFactory,
-resticTimeout: resticTimeout,
-defaultVolumesToFsBackup: defaultVolumesToFsBackup,
-clientPageSize: clientPageSize,
-uploaderType: uploaderType,
+backupClient: backupClient,
+discoveryHelper: discoveryHelper,
+dynamicFactory: dynamicFactory,
+podCommandExecutor: podCommandExecutor,
+podVolumeBackupperFactory: podVolumeBackupperFactory,
+podVolumeTimeout: podVolumeTimeout,
+defaultVolumesToFsBackup: defaultVolumesToFsBackup,
+clientPageSize: clientPageSize,
+uploaderType: uploaderType,
}, nil
}

@@ -228,7 +228,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
backupRequest.BackedUpItems = map[itemKey]struct{}{}

-podVolumeTimeout := kb.resticTimeout
+podVolumeTimeout := kb.podVolumeTimeout
if val := backupRequest.Annotations[velerov1api.PodVolumeOperationTimeoutAnnotation]; val != "" {
parsed, err := time.ParseDuration(val)
if err != nil {

@@ -241,9 +241,9 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
ctx, cancelFunc := context.WithTimeout(context.Background(), podVolumeTimeout)
defer cancelFunc()

-var resticBackupper podvolume.Backupper
-if kb.resticBackupperFactory != nil {
-resticBackupper, err = kb.resticBackupperFactory.NewBackupper(ctx, backupRequest.Backup, kb.uploaderType)
+var podVolumeBackupper podvolume.Backupper
+if kb.podVolumeBackupperFactory != nil {
+podVolumeBackupper, err = kb.podVolumeBackupperFactory.NewBackupper(ctx, backupRequest.Backup, kb.uploaderType)
if err != nil {
log.WithError(errors.WithStack(err)).Debugf("Error from NewBackupper")
return errors.WithStack(err)

@@ -278,13 +278,13 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
}

itemBackupper := &itemBackupper{
-backupRequest: backupRequest,
-tarWriter: tw,
-dynamicFactory: kb.dynamicFactory,
-discoveryHelper: kb.discoveryHelper,
-resticBackupper: resticBackupper,
-resticSnapshotTracker: newPVCSnapshotTracker(),
-volumeSnapshotterGetter: volumeSnapshotterGetter,
+backupRequest: backupRequest,
+tarWriter: tw,
+dynamicFactory: kb.dynamicFactory,
+discoveryHelper: kb.discoveryHelper,
+podVolumeBackupper: podVolumeBackupper,
+podVolumeSnapshotTracker: newPVCSnapshotTracker(),
+volumeSnapshotterGetter: volumeSnapshotterGetter,
itemHookHandler: &hook.DefaultItemHookHandler{
PodCommandExecutor: kb.podCommandExecutor,
},
@@ -2595,17 +2595,17 @@ func TestBackupWithHooks(t *testing.T) {
}
}

-type fakeResticBackupperFactory struct{}
+type fakePodVolumeBackupperFactory struct{}

-func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup, string) (podvolume.Backupper, error) {
-return &fakeResticBackupper{}, nil
+func (f *fakePodVolumeBackupperFactory) NewBackupper(context.Context, *velerov1.Backup, string) (podvolume.Backupper, error) {
+return &fakePodVolumeBackupper{}, nil
}

-type fakeResticBackupper struct{}
+type fakePodVolumeBackupper struct{}

// BackupPodVolumes returns one pod volume backup per entry in volumes, with namespace "velero"
// and name "pvb-<pod-namespace>-<pod-name>-<volume-name>".
-func (b *fakeResticBackupper) BackupPodVolumes(backup *velerov1.Backup, pod *corev1.Pod, volumes []string, _ logrus.FieldLogger) ([]*velerov1.PodVolumeBackup, []error) {
+func (b *fakePodVolumeBackupper) BackupPodVolumes(backup *velerov1.Backup, pod *corev1.Pod, volumes []string, _ logrus.FieldLogger) ([]*velerov1.PodVolumeBackup, []error) {
var res []*velerov1.PodVolumeBackup
for _, vol := range volumes {
pvb := builder.ForPodVolumeBackup("velero", fmt.Sprintf("pvb-%s-%s-%s", pod.Namespace, pod.Name, vol)).Result()

@@ -2615,11 +2615,11 @@ func (b *fakeResticBackupper) BackupPodVolumes(backup *velerov1.Backup, pod *cor
return res, nil
}

-// TestBackupWithRestic runs backups of pods that are annotated for restic backup,
-// and ensures that the restic backupper is called, that the returned PodVolumeBackups
-// are added to the Request object, and that when PVCs are backed up with restic, the
+// TestBackupWithPodVolume runs backups of pods that are annotated for PodVolume backup,
+// and ensures that the pod volume backupper is called, that the returned PodVolumeBackups
+// are added to the Request object, and that when PVCs are backed up with PodVolume, the
// claimed PVs are not also snapshotted using a VolumeSnapshotter.
-func TestBackupWithRestic(t *testing.T) {
+func TestBackupWithPodVolume(t *testing.T) {
tests := []struct {
name string
backup *velerov1.Backup

@@ -2629,7 +2629,7 @@ func TestBackupWithRestic(t *testing.T) {
want []*velerov1.PodVolumeBackup
}{
{
-name: "a pod annotated for restic backup should result in pod volume backups being returned",
+name: "a pod annotated for pod volume backup should result in pod volume backups being returned",
backup: defaultBackup().Result(),
apiResources: []*test.APIResource{
test.Pods(

@@ -2641,7 +2641,7 @@ func TestBackupWithRestic(t *testing.T) {
},
},
{
-name: "when a PVC is used by two pods and annotated for restic backup on both, only one should be backed up",
+name: "when a PVC is used by two pods and annotated for pod volume backup on both, only one should be backed up",
backup: defaultBackup().Result(),
apiResources: []*test.APIResource{
test.Pods(

@@ -2662,7 +2662,7 @@ func TestBackupWithRestic(t *testing.T) {
},
},
{
-name: "when PVC pod volumes are backed up using restic, their claimed PVs are not also snapshotted",
+name: "when PVC pod volumes are backed up using pod volume backup, their claimed PVs are not also snapshotted",
backup: defaultBackup().Result(),
apiResources: []*test.APIResource{
test.Pods(

@@ -2707,7 +2707,7 @@ func TestBackupWithRestic(t *testing.T) {
backupFile = bytes.NewBuffer([]byte{})
)

-h.backupper.resticBackupperFactory = new(fakeResticBackupperFactory)
+h.backupper.podVolumeBackupperFactory = new(fakePodVolumeBackupperFactory)

for _, resource := range tc.apiResources {
h.addItems(t, resource)

@@ -2786,9 +2786,9 @@ func newHarness(t *testing.T) *harness {
discoveryHelper: discoveryHelper,

// unsupported
-podCommandExecutor: nil,
-resticBackupperFactory: nil,
-resticTimeout: 0,
+podCommandExecutor: nil,
+podVolumeBackupperFactory: nil,
+podVolumeTimeout: 0,
},
log: log,
}
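The two interfaces touched by this rename are fully visible in the fakes above. As a sketch only, a minimal no-op pair would look like the following; it reuses the signatures shown in the fakes and assumes the same imports this test file already uses (context, logrus, corev1, velerov1, podvolume). If podvolume.Backupper has methods beyond BackupPodVolumes, they would need stubs as well.

// Sketch: the smallest possible implementations of the renamed interfaces,
// mirroring the fakes above but backing up nothing and returning no errors.
type noopPodVolumeBackupperFactory struct{}

func (f *noopPodVolumeBackupperFactory) NewBackupper(context.Context, *velerov1.Backup, string) (podvolume.Backupper, error) {
	return &noopPodVolumeBackupper{}, nil
}

type noopPodVolumeBackupper struct{}

// BackupPodVolumes reports that no pod volumes were backed up and no errors occurred.
func (b *noopPodVolumeBackupper) BackupPodVolumes(*velerov1.Backup, *corev1.Pod, []string, logrus.FieldLogger) ([]*velerov1.PodVolumeBackup, []error) {
	return nil, nil
}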
@@ -56,13 +56,13 @@ const (

// itemBackupper can back up individual items to a tar writer.
type itemBackupper struct {
-backupRequest *Request
-tarWriter tarWriter
-dynamicFactory client.DynamicFactory
-discoveryHelper discovery.Helper
-resticBackupper podvolume.Backupper
-resticSnapshotTracker *pvcSnapshotTracker
-volumeSnapshotterGetter VolumeSnapshotterGetter
+backupRequest *Request
+tarWriter tarWriter
+dynamicFactory client.DynamicFactory
+discoveryHelper discovery.Helper
+podVolumeBackupper podvolume.Backupper
+podVolumeSnapshotTracker *pvcSnapshotTracker
+volumeSnapshotterGetter VolumeSnapshotterGetter

itemHookHandler hook.ItemHookHandler
snapshotLocationVolumeSnapshotters map[string]vsv1.VolumeSnapshotter

@@ -137,9 +137,9 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
}

var (
-backupErrs []error
-pod *corev1api.Pod
-resticVolumesToBackup []string
+backupErrs []error
+pod *corev1api.Pod
+pvbVolumes []string
)

if groupResource == kuberesource.Pods {

@@ -154,21 +154,21 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
// any volumes that use a PVC that we've already backed up (this would be in a read-write-many scenario,
// where it's been backed up from another pod), since we don't need >1 backup per PVC.
for _, volume := range podvolume.GetVolumesByPod(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToFsBackup)) {
-if found, pvcName := ib.resticSnapshotTracker.HasPVCForPodVolume(pod, volume); found {
+if found, pvcName := ib.podVolumeSnapshotTracker.HasPVCForPodVolume(pod, volume); found {
log.WithFields(map[string]interface{}{
"podVolume": volume,
"pvcName": pvcName,
-}).Info("Pod volume uses a persistent volume claim which has already been backed up with restic from another pod, skipping.")
+}).Info("Pod volume uses a persistent volume claim which has already been backed up from another pod, skipping.")
continue
}

-resticVolumesToBackup = append(resticVolumesToBackup, volume)
+pvbVolumes = append(pvbVolumes, volume)
}

// track the volumes that are PVCs using the PVC snapshot tracker, so that when we backup PVCs/PVs
// via an item action in the next step, we don't snapshot PVs that will have their data backed up
-// with restic.
-ib.resticSnapshotTracker.Track(pod, resticVolumesToBackup)
+// with pod volume backup.
+ib.podVolumeSnapshotTracker.Track(pod, pvbVolumes)
}
}

@@ -207,7 +207,7 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
if groupResource == kuberesource.Pods && pod != nil {
// this function will return partial results, so process podVolumeBackups
// even if there are errors.
-podVolumeBackups, errs := ib.backupPodVolumes(log, pod, resticVolumesToBackup)
+podVolumeBackups, errs := ib.backupPodVolumes(log, pod, pvbVolumes)

ib.backupRequest.PodVolumeBackups = append(ib.backupRequest.PodVolumeBackups, podVolumeBackups...)
backupErrs = append(backupErrs, errs...)

@@ -292,19 +292,19 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
return true, nil
}

-// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a list of PodVolumeBackups
+// backupPodVolumes triggers pod volume backups of the specified pod volumes, and returns a list of PodVolumeBackups
// for volumes that were successfully backed up, and a slice of any errors that were encountered.
func (ib *itemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) ([]*velerov1api.PodVolumeBackup, []error) {
if len(volumes) == 0 {
return nil, nil
}

-if ib.resticBackupper == nil {
-log.Warn("No restic backupper, not backing up pod's volumes")
+if ib.podVolumeBackupper == nil {
+log.Warn("No pod volume backupper, not backing up pod's volumes")
return nil, nil
}

-return ib.resticBackupper.BackupPodVolumes(ib.backupRequest.Backup, pod, volumes, log)
+return ib.podVolumeBackupper.BackupPodVolumes(ib.backupRequest.Backup, pod, volumes, log)
}

func (ib *itemBackupper) executeActions(

@@ -423,11 +423,11 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie
log = log.WithField("persistentVolume", pv.Name)

-// If this PV is claimed, see if we've already taken a (restic) snapshot of the contents
+// If this PV is claimed, see if we've already taken a (pod volume backup) snapshot of the contents
// of this PV. If so, don't take a snapshot.
if pv.Spec.ClaimRef != nil {
-if ib.resticSnapshotTracker.Has(pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) {
-log.Info("Skipping snapshot of persistent volume because volume is being backed up with restic.")
+if ib.podVolumeSnapshotTracker.Has(pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) {
+log.Info("Skipping snapshot of persistent volume because volume is being backed up with pod volume backup.")
return nil
}
}
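The renamed tracker is what prevents double protection: volumes whose PVC was already backed up from another pod are skipped, and claimed PVs covered by a pod volume backup are not also snapshotted. A condensed sketch of that flow, using only the method names visible in the hunks above (everything else, including the function wrapper, is illustrative):

// Sketch of the dedup logic around podVolumeSnapshotTracker, condensed from
// the two call sites above. The wrapper and its parameters are illustrative.
func dedupSketch(pod *corev1api.Pod, pv *corev1api.PersistentVolume, volumes []string) []string {
	tracker := newPVCSnapshotTracker()

	var pvbVolumes []string
	for _, volume := range volumes {
		if found, _ := tracker.HasPVCForPodVolume(pod, volume); found {
			continue // PVC already backed up from another pod (read-write-many case)
		}
		pvbVolumes = append(pvbVolumes, volume)
	}

	// Remember which PVCs are covered by pod volume backup...
	tracker.Track(pod, pvbVolumes)

	// ...so that takePVSnapshot can later skip the claimed PV.
	if pv.Spec.ClaimRef != nil && tracker.Has(pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) {
		// do not snapshot: the data is already protected by a PodVolumeBackup
	}
	return pvbVolumes
}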
@@ -24,7 +24,7 @@ import (
)

// pvcSnapshotTracker keeps track of persistent volume claims that have been snapshotted
-// with restic.
+// with pod volume backup.
type pvcSnapshotTracker struct {
pvcs sets.String
}
@@ -33,7 +33,7 @@ var (
GitTreeState string

// ImageRegistry is the image registry that this build of Velero should use by default to pull the
-// Velero and Restic Restore Helper images from.
+// Velero and Restore Helper images from.
ImageRegistry string
)
@@ -130,11 +130,11 @@ func Run(f client.Factory, o *cli.DeleteOptions) error {
errs = append(errs, deleteErrs...)
}

-// Delete Restic repositories associated with the deleted BSL.
-resticRepoList, err := findAssociatedResticRepos(kbClient, location.Name, f.Namespace())
+// Delete backup repositories associated with the deleted BSL.
+backupRepoList, err := findAssociatedBackupRepos(kbClient, location.Name, f.Namespace())
if err != nil {
-errs = append(errs, fmt.Errorf("find Restic repositories associated with BSL %q: %w", location.Name, err))
-} else if deleteErrs := deleteResticRepos(kbClient, resticRepoList); deleteErrs != nil {
+errs = append(errs, fmt.Errorf("find backup repositories associated with BSL %q: %w", location.Name, err))
+} else if deleteErrs := deleteBackupRepos(kbClient, backupRepoList); deleteErrs != nil {
errs = append(errs, deleteErrs...)
}
}

@@ -151,7 +151,7 @@ func findAssociatedBackups(client kbclient.Client, bslName, ns string) (velerov1
return backups, err
}

-func findAssociatedResticRepos(client kbclient.Client, bslName, ns string) (velerov1api.BackupRepositoryList, error) {
+func findAssociatedBackupRepos(client kbclient.Client, bslName, ns string) (velerov1api.BackupRepositoryList, error) {
var repos velerov1api.BackupRepositoryList
err := client.List(context.Background(), &repos, &kbclient.ListOptions{
Namespace: ns,

@@ -172,14 +172,14 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err
return errs
}

-func deleteResticRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error {
+func deleteBackupRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error {
var errs []error
for _, repo := range repos.Items {
if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil {
-errs = append(errs, errors.WithStack(fmt.Errorf("delete Restic repository %q associated with deleted BSL: %w", repo.Name, err)))
+errs = append(errs, errors.WithStack(fmt.Errorf("delete backup repository %q associated with deleted BSL: %w", repo.Name, err)))
continue
}
-fmt.Printf("Restic repository associated with deleted BSL(s) %q deleted successfully.\n", repo.Name)
+fmt.Printf("Backup repository associated with deleted BSL(s) %q deleted successfully.\n", repo.Name)
}
return errs
}
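Condensed, the renamed cleanup path for a deleted BackupStorageLocation is: find the associated BackupRepository objects, then delete them and collect per-repository errors. A minimal sketch, assuming the client, BSL name and namespace are supplied by the caller exactly as in Run() above:

// Sketch only: the BSL-deletion cleanup flow using the renamed helpers above.
func cleanupBackupReposSketch(kbClient kbclient.Client, bslName, ns string) []error {
	var errs []error

	// Find the BackupRepository objects that reference the deleted BSL...
	backupRepoList, err := findAssociatedBackupRepos(kbClient, bslName, ns)
	if err != nil {
		return append(errs, fmt.Errorf("find backup repositories associated with BSL %q: %w", bslName, err))
	}

	// ...and delete them, collecting any per-repository errors.
	if deleteErrs := deleteBackupRepos(kbClient, backupRepoList); deleteErrs != nil {
		errs = append(errs, deleteErrs...)
	}
	return errs
}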
@@ -88,10 +88,10 @@ func (o *InstallOptions) BindFlags(flags *pflag.FlagSet) {
flags.StringVar(&o.SecretFile, "secret-file", o.SecretFile, "File containing credentials for backup and volume provider. If not specified, --no-secret must be used for confirmation. Optional.")
flags.BoolVar(&o.NoSecret, "no-secret", o.NoSecret, "Flag indicating if a secret should be created. Must be used as confirmation if --secret-file is not provided. Optional.")
flags.BoolVar(&o.NoDefaultBackupLocation, "no-default-backup-location", o.NoDefaultBackupLocation, "Flag indicating if a default backup location should be created. Must be used as confirmation if --bucket or --provider are not provided. Optional.")
-flags.StringVar(&o.Image, "image", o.Image, "Image to use for the Velero and restic server pods. Optional.")
+flags.StringVar(&o.Image, "image", o.Image, "Image to use for the Velero and node agent pods. Optional.")
flags.StringVar(&o.Prefix, "prefix", o.Prefix, "Prefix under which all Velero data should be stored within the bucket. Optional.")
-flags.Var(&o.PodAnnotations, "pod-annotations", "Annotations to add to the Velero and restic pods. Optional. Format is key1=value1,key2=value2")
-flags.Var(&o.PodLabels, "pod-labels", "Labels to add to the Velero and restic pods. Optional. Format is key1=value1,key2=value2")
+flags.Var(&o.PodAnnotations, "pod-annotations", "Annotations to add to the Velero and node agent pods. Optional. Format is key1=value1,key2=value2")
+flags.Var(&o.PodLabels, "pod-labels", "Labels to add to the Velero and node agent pods. Optional. Format is key1=value1,key2=value2")
flags.Var(&o.ServiceAccountAnnotations, "sa-annotations", "Annotations to add to the Velero ServiceAccount. Add iam.gke.io/gcp-service-account=[GSA_NAME]@[PROJECT_NAME].iam.gserviceaccount.com for workload identity. Optional. Format is key1=value1,key2=value2")
flags.StringVar(&o.VeleroPodCPURequest, "velero-pod-cpu-request", o.VeleroPodCPURequest, `CPU request for Velero pod. A value of "0" is treated as unbounded. Optional.`)
flags.StringVar(&o.VeleroPodMemRequest, "velero-pod-mem-request", o.VeleroPodMemRequest, `Memory request for Velero pod. A value of "0" is treated as unbounded. Optional.`)

@@ -235,7 +235,7 @@ This is useful as a starting point for more customized installations.
# velero install --provider aws --plugins velero/velero-plugin-for-aws:v1.0.0 --bucket backups --secret-file ./aws-iam-creds --backup-location-config region=us-east-2 --snapshot-location-config region=us-east-2

-# velero install --provider aws --plugins velero/velero-plugin-for-aws:v1.0.0 --bucket backups --secret-file ./aws-iam-creds --backup-location-config region=us-east-2 --snapshot-location-config region=us-east-2 --use-restic
+# velero install --provider aws --plugins velero/velero-plugin-for-aws:v1.0.0 --bucket backups --secret-file ./aws-iam-creds --backup-location-config region=us-east-2 --snapshot-location-config region=us-east-2 --use-node-agent

# velero install --provider gcp --plugins velero/velero-plugin-for-gcp:v1.0.0 --bucket gcp-backups --secret-file ./gcp-creds.json --wait

@@ -243,7 +243,7 @@ This is useful as a starting point for more customized installations.
# velero install --provider gcp --plugins velero/velero-plugin-for-gcp:v1.0.0 --bucket gcp-backups --secret-file ./gcp-creds.json --velero-pod-cpu-request=1000m --velero-pod-cpu-limit=5000m --velero-pod-mem-request=512Mi --velero-pod-mem-limit=1024Mi

-# velero install --provider gcp --plugins velero/velero-plugin-for-gcp:v1.0.0 --bucket gcp-backups --secret-file ./gcp-creds.json --restic-pod-cpu-request=1000m --restic-pod-cpu-limit=5000m --restic-pod-mem-request=512Mi --restic-pod-mem-limit=1024Mi
+# velero install --provider gcp --plugins velero/velero-plugin-for-gcp:v1.0.0 --bucket gcp-backups --secret-file ./gcp-creds.json --node-agent-pod-cpu-request=1000m --node-agent-pod-cpu-limit=5000m --node-agent-pod-mem-request=512Mi --node-agent-pod-mem-limit=1024Mi

# velero install --provider azure --plugins velero/velero-plugin-for-microsoft-azure:v1.0.0 --bucket $BLOB_CONTAINER --secret-file ./credentials-velero --backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID[,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] --snapshot-location-config apiTimeout=<YOUR_TIMEOUT>[,resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID]`,
Run: func(c *cobra.Command, args []string) {
@@ -85,7 +85,7 @@ func NewServerCommand(f client.Factory) *cobra.Command {
logger.Infof("Starting Velero node-agent server %s (%s)", buildinfo.Version, buildinfo.FormattedGitSHA())

f.SetBasename(fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()))
-s, err := newResticServer(logger, f, defaultMetricsAddress)
+s, err := newNodeAgentServer(logger, f, defaultMetricsAddress)
cmd.CheckError(err)

s.run()

@@ -98,7 +98,7 @@ func NewServerCommand(f client.Factory) *cobra.Command {
return command
}

-type resticServer struct {
+type nodeAgentServer struct {
logger logrus.FieldLogger
ctx context.Context
cancelFunc context.CancelFunc

@@ -110,7 +110,7 @@ type resticServer struct {
nodeName string
}

-func newResticServer(logger logrus.FieldLogger, factory client.Factory, metricAddress string) (*resticServer, error) {
+func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, metricAddress string) (*nodeAgentServer, error) {
ctx, cancelFunc := context.WithCancel(context.Background())

clientConfig, err := factory.ClientConfig()

@@ -142,7 +142,7 @@ func newResticServer(logger logrus.FieldLogger, factory client.Factory, metricAd
return nil, err
}

-s := &resticServer{
+s := &nodeAgentServer{
logger: logger,
ctx: ctx,
cancelFunc: cancelFunc,

@@ -166,20 +166,20 @@ func newResticServer(logger logrus.FieldLogger, factory client.Factory, metricAd
return s, nil
}

-func (s *resticServer) run() {
+func (s *nodeAgentServer) run() {
signals.CancelOnShutdown(s.cancelFunc, s.logger)

go func() {
metricsMux := http.NewServeMux()
metricsMux.Handle("/metrics", promhttp.Handler())
-s.logger.Infof("Starting metric server for restic at address [%s]", s.metricsAddress)
+s.logger.Infof("Starting metric server for node agent at address [%s]", s.metricsAddress)
if err := http.ListenAndServe(s.metricsAddress, metricsMux); err != nil {
-s.logger.Fatalf("Failed to start metric server for restic at [%s]: %v", s.metricsAddress, err)
+s.logger.Fatalf("Failed to start metric server for node agent at [%s]: %v", s.metricsAddress, err)
}
}()
-s.metrics = metrics.NewResticServerMetrics()
+s.metrics = metrics.NewPodVolumeMetrics()
s.metrics.RegisterAllMetrics()
-s.metrics.InitResticMetricsForNode(s.nodeName)
+s.metrics.InitPodVolumeMetricsForNode(s.nodeName)

s.markInProgressCRsFailed()

@@ -228,7 +228,7 @@ func (s *resticServer) run() {
// validatePodVolumesHostPath validates that the pod volumes path contains a
// directory for each Pod running on this node
-func (s *resticServer) validatePodVolumesHostPath(client kubernetes.Interface) error {
+func (s *nodeAgentServer) validatePodVolumesHostPath(client kubernetes.Interface) error {
files, err := s.fileSystem.ReadDir("/host_pods/")
if err != nil {
return errors.Wrap(err, "could not read pod volumes host path")

@@ -275,7 +275,7 @@ func (s *resticServer) validatePodVolumesHostPath(client kubernetes.Interface) e
// if there is a restarting during the reconciling of pvbs/pvrs/etc, these CRs may be stuck in progress status
// markInProgressCRsFailed tries to mark the in progress CRs as failed when starting the server to avoid the issue
-func (s *resticServer) markInProgressCRsFailed() {
+func (s *nodeAgentServer) markInProgressCRsFailed() {
// the function is called before starting the controller manager, the embedded client isn't ready to use, so create a new one here
client, err := ctrlclient.New(s.mgr.GetConfig(), ctrlclient.Options{Scheme: s.mgr.GetScheme()})
if err != nil {

@@ -288,7 +288,7 @@ func (s *resticServer) markInProgressCRsFailed() {
s.markInProgressPVRsFailed(client)
}

-func (s *resticServer) markInProgressPVBsFailed(client ctrlclient.Client) {
+func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
pvbs := &velerov1api.PodVolumeBackupList{}
if err := client.List(s.ctx, pvbs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil {
s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups")

@@ -315,7 +315,7 @@ func (s *resticServer) markInProgressPVBsFailed(client ctrlclient.Client) {
}
}

-func (s *resticServer) markInProgressPVRsFailed(client ctrlclient.Client) {
+func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
pvrs := &velerov1api.PodVolumeRestoreList{}
if err := client.List(s.ctx, pvrs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil {
s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores")
@@ -94,7 +94,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
}
}

-s := &resticServer{
+s := &nodeAgentServer{
logger: testutil.NewLogger(),
fileSystem: fs,
}
@@ -300,7 +300,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s

// cancelFunc is not deferred here because if it was, then ctx would immediately
// be cancelled once this function exited, making it useless to any informers using later.
-// That, in turn, causes the velero server to halt when the first informer tries to use it (probably restic's).
+// That, in turn, causes the velero server to halt when the first informer tries to use it.
// Therefore, we must explicitly call it on the error paths in this function.
ctx, cancelFunc := context.WithCancel(context.Background())

@@ -395,7 +395,9 @@ func (s *server) run() error {
return err
}

-if err := s.initRestic(); err != nil {
+s.checkNodeAgent()
+
+if err := s.initRepoManager(); err != nil {
return err
}

@@ -499,7 +501,7 @@ func (s *server) veleroResourcesExist() error {
// - Service accounts go before pods or controllers so pods can use them.
// - Limit ranges go before pods or controllers so pods can use them.
// - Pods go before controllers so they can be explicitly restored and potentially
-// have restic restores run before controllers adopt the pods.
+// have pod volume restores run before controllers adopt the pods.
// - Replica sets go before deployments/other controllers so they can be explicitly
// restored and be adopted by controllers.
// - CAPI ClusterClasses go before Clusters.

@@ -530,7 +532,16 @@ var defaultRestorePriorities = []string{
"clusterresourcesets.addons.cluster.x-k8s.io",
}

-func (s *server) initRestic() error {
+func (s *server) checkNodeAgent() {
+// warn if node agent does not exist
+if err := nodeagent.IsRunning(s.ctx, s.kubeClient, s.namespace); err == nodeagent.DaemonsetNotFound {
+s.logger.Warn("Velero node agent not found; pod volume backups/restores will not work until it's created")
+} else if err != nil {
+s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero node agent")
+}
+}
+
+func (s *server) initRepoManager() error {
// warn if node agent does not exist
if err := nodeagent.IsRunning(s.ctx, s.kubeClient, s.namespace); err == nodeagent.DaemonsetNotFound {
s.logger.Warn("Velero node agent not found; pod volume backups/restores will not work until it's created")

@@ -664,7 +675,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string

// By far, PodVolumeBackup, PodVolumeRestore, BackupStorageLocation controllers
// are not included in --disable-controllers list.
-// This is because of PVB and PVR are used by Restic DaemonSet,
+// This is because of PVB and PVR are used by node agent DaemonSet,
// and BSL controller is mandatory for Velero to work.
enabledControllers := map[string]func() controllerRunInfo{
controller.Backup: backupControllerRunInfo,

@@ -675,7 +686,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
controller.ServerStatusRequest: {},
controller.DownloadRequest: {},
controller.Schedule: {},
-controller.ResticRepo: {},
+controller.BackupRepo: {},
controller.BackupDeletion: {},
controller.GarbageCollection: {},
controller.BackupSync: {},

@@ -748,9 +759,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
}

-if _, ok := enabledRuntimeControllers[controller.ResticRepo]; ok {
-if err := controller.NewResticRepoReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.config.repoMaintenanceFrequency, s.repoManager).SetupWithManager(s.mgr); err != nil {
-s.logger.Fatal(err, "unable to create controller", "controller", controller.ResticRepo)
+if _, ok := enabledRuntimeControllers[controller.BackupRepo]; ok {
+if err := controller.NewBackupRepoReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.config.repoMaintenanceFrequency, s.repoManager).SetupWithManager(s.mgr); err != nil {
+s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupRepo)
}
}
@@ -94,7 +94,7 @@ func TestRemoveControllers(t *testing.T) {
controller.BackupSync,
controller.DownloadRequest,
controller.GarbageCollection,
-controller.ResticRepo,
+controller.BackupRepo,
controller.Restore,
controller.Schedule,
controller.ServerStatusRequest,

@@ -130,7 +130,7 @@ func TestRemoveControllers(t *testing.T) {
controller.ServerStatusRequest: {},
controller.Schedule: {},
controller.BackupDeletion: {},
-controller.ResticRepo: {},
+controller.BackupRepo: {},
controller.DownloadRequest: {},
}
@@ -24,7 +24,7 @@ import (
)

var (
-resticRepoColumns = []metav1.TableColumnDefinition{
+backupRepoColumns = []metav1.TableColumnDefinition{
// name needs Type and Format defined for the decorator to identify it:
// https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204
{Name: "Name", Type: "string", Format: "name"},

@@ -33,16 +33,16 @@ var (
}
)

-func printResticRepoList(list *v1.BackupRepositoryList) []metav1.TableRow {
+func printBackupRepoList(list *v1.BackupRepositoryList) []metav1.TableRow {
rows := make([]metav1.TableRow, 0, len(list.Items))

for i := range list.Items {
-rows = append(rows, printResticRepo(&list.Items[i])...)
+rows = append(rows, printBackupRepo(&list.Items[i])...)
}
return rows
}

-func printResticRepo(repo *v1.BackupRepository) []metav1.TableRow {
+func printBackupRepo(repo *v1.BackupRepository) []metav1.TableRow {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: repo},
}

@@ -179,13 +179,13 @@ func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) {
}
case *velerov1api.BackupRepository:
table = &metav1.Table{
-ColumnDefinitions: resticRepoColumns,
-Rows: printResticRepo(obj.(*velerov1api.BackupRepository)),
+ColumnDefinitions: backupRepoColumns,
+Rows: printBackupRepo(obj.(*velerov1api.BackupRepository)),
}
case *velerov1api.BackupRepositoryList:
table = &metav1.Table{
-ColumnDefinitions: resticRepoColumns,
-Rows: printResticRepoList(obj.(*velerov1api.BackupRepositoryList)),
+ColumnDefinitions: backupRepoColumns,
+Rows: printBackupRepoList(obj.(*velerov1api.BackupRepositoryList)),
}
case *velerov1api.BackupStorageLocation:
table = &metav1.Table{
@@ -50,7 +50,7 @@ import (
)

const (
-resticTimeout = time.Minute
+snapshotDeleteTimeout = time.Minute
deleteBackupRequestMaxAge = 24 * time.Hour
)

@@ -302,8 +302,8 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
}
}
-log.Info("Removing restic snapshots")
-if deleteErrs := r.deleteResticSnapshots(ctx, backup); len(deleteErrs) > 0 {
+log.Info("Removing pod volume snapshots")
+if deleteErrs := r.deletePodVolumeSnapshots(ctx, backup); len(deleteErrs) > 0 {
for _, err := range deleteErrs {
errs = append(errs, err.Error())
}

@@ -436,7 +436,7 @@ func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Co
return errs
}

-func (r *backupDeletionReconciler) deleteResticSnapshots(ctx context.Context, backup *velerov1api.Backup) []error {
+func (r *backupDeletionReconciler) deletePodVolumeSnapshots(ctx context.Context, backup *velerov1api.Backup) []error {
if r.repoMgr == nil {
return nil
}

@@ -446,7 +446,7 @@ func (r *backupDeletionReconciler) deleteResticSnapshots(ctx context.Context, ba
return []error{err}
}

-ctx2, cancelFunc := context.WithTimeout(ctx, resticTimeout)
+ctx2, cancelFunc := context.WithTimeout(ctx, snapshotDeleteTimeout)
defer cancelFunc()

var errs []error

@@ -493,7 +493,7 @@ func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *vele
return backup, nil
}

-// getSnapshotsInBackup returns a list of all restic snapshot ids associated with
+// getSnapshotsInBackup returns a list of all pod volume snapshot ids associated with
// a given Velero backup.
func getSnapshotsInBackup(ctx context.Context, backup *velerov1api.Backup, kbClient client.Client) ([]repository.SnapshotIdentifier, error) {
podVolumeBackups := &velerov1api.PodVolumeBackupList{}
@@ -91,7 +91,7 @@ func setupBackupDeletionControllerTest(t *testing.T, req *velerov1api.DeleteBack
velerotest.NewLogger(),
fakeClient,
NewBackupTracker(),
-nil, // restic repository manager
+nil, // repository manager
metrics.NewServerMetrics(),
nil, // discovery helper
func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
@@ -39,7 +39,7 @@ const (
defaultMaintainFrequency = 7 * 24 * time.Hour
)

-type ResticRepoReconciler struct {
+type BackupRepoReconciler struct {
client.Client
namespace string
logger logrus.FieldLogger

@@ -48,9 +48,9 @@ type ResticRepoReconciler struct {
repositoryManager repository.Manager
}

-func NewResticRepoReconciler(namespace string, logger logrus.FieldLogger, client client.Client,
-maintenanceFrequency time.Duration, repositoryManager repository.Manager) *ResticRepoReconciler {
-c := &ResticRepoReconciler{
+func NewBackupRepoReconciler(namespace string, logger logrus.FieldLogger, client client.Client,
+maintenanceFrequency time.Duration, repositoryManager repository.Manager) *BackupRepoReconciler {
+c := &BackupRepoReconciler{
client,
namespace,
logger,

@@ -62,7 +62,7 @@ func NewResticRepoReconciler(namespace string, logger logrus.FieldLogger, client
return c
}

-func (r *ResticRepoReconciler) SetupWithManager(mgr ctrl.Manager) error {
+func (r *BackupRepoReconciler) SetupWithManager(mgr ctrl.Manager) error {
s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.BackupRepositoryList{}, repoSyncPeriod, kube.PeriodicalEnqueueSourceOption{})
return ctrl.NewControllerManagedBy(mgr).
For(&velerov1api.BackupRepository{}).

@@ -70,20 +70,20 @@ func (r *ResticRepoReconciler) SetupWithManager(mgr ctrl.Manager) error {
Complete(r)
}

-func (r *ResticRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-log := r.logger.WithField("resticRepo", req.String())
-resticRepo := &velerov1api.BackupRepository{}
-if err := r.Get(ctx, req.NamespacedName, resticRepo); err != nil {
+func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+log := r.logger.WithField("backupRepo", req.String())
+backupRepo := &velerov1api.BackupRepository{}
+if err := r.Get(ctx, req.NamespacedName, backupRepo); err != nil {
if apierrors.IsNotFound(err) {
-log.Warnf("restic repository %s in namespace %s is not found", req.Name, req.Namespace)
+log.Warnf("backup repository %s in namespace %s is not found", req.Name, req.Namespace)
return ctrl.Result{}, nil
}
-log.WithError(err).Error("error getting restic repository")
+log.WithError(err).Error("error getting backup repository")
return ctrl.Result{}, err
}

-if resticRepo.Status.Phase == "" || resticRepo.Status.Phase == velerov1api.BackupRepositoryPhaseNew {
-if err := r.initializeRepo(ctx, resticRepo, log); err != nil {
+if backupRepo.Status.Phase == "" || backupRepo.Status.Phase == velerov1api.BackupRepositoryPhaseNew {
+if err := r.initializeRepo(ctx, backupRepo, log); err != nil {
log.WithError(err).Error("error initialize repository")
return ctrl.Result{}, errors.WithStack(err)
}

@@ -95,22 +95,22 @@ func (r *ResticRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
// this fails for any reason, it's non-critical so we still continue on to the
// rest of the "process" logic.
log.Debug("Checking repository for stale locks")
-if err := r.repositoryManager.UnlockRepo(resticRepo); err != nil {
+if err := r.repositoryManager.UnlockRepo(backupRepo); err != nil {
log.WithError(err).Error("Error checking repository for stale locks")
}

-switch resticRepo.Status.Phase {
+switch backupRepo.Status.Phase {
case velerov1api.BackupRepositoryPhaseReady:
-return ctrl.Result{}, r.runMaintenanceIfDue(ctx, resticRepo, log)
+return ctrl.Result{}, r.runMaintenanceIfDue(ctx, backupRepo, log)
case velerov1api.BackupRepositoryPhaseNotReady:
-return ctrl.Result{}, r.checkNotReadyRepo(ctx, resticRepo, log)
+return ctrl.Result{}, r.checkNotReadyRepo(ctx, backupRepo, log)
}

return ctrl.Result{}, nil
}

-func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
-log.Info("Initializing restic repository")
+func (r *BackupRepoReconciler) initializeRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
+log.Info("Initializing backup repository")

// confirm the repo's BackupStorageLocation is valid
loc := &velerov1api.BackupStorageLocation{}

@@ -119,12 +119,12 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
Namespace: req.Namespace,
Name: req.Spec.BackupStorageLocation,
}, loc); err != nil {
-return r.patchResticRepository(ctx, req, repoNotReady(err.Error()))
+return r.patchBackupRepository(ctx, req, repoNotReady(err.Error()))
}

repoIdentifier, err := repoconfig.GetRepoIdentifier(loc, req.Spec.VolumeNamespace)
if err != nil {
-return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
+return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Status.Message = err.Error()
rr.Status.Phase = velerov1api.BackupRepositoryPhaseNotReady

@@ -135,7 +135,7 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
}

// defaulting - if the patch fails, return an error so the item is returned to the queue
-if err := r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
+if err := r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Spec.ResticIdentifier = repoIdentifier

if rr.Spec.MaintenanceFrequency.Duration <= 0 {

@@ -146,16 +146,16 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
}

if err := ensureRepo(req, r.repositoryManager); err != nil {
-return r.patchResticRepository(ctx, req, repoNotReady(err.Error()))
+return r.patchBackupRepository(ctx, req, repoNotReady(err.Error()))
}

-return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
+return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Status.Phase = velerov1api.BackupRepositoryPhaseReady
rr.Status.LastMaintenanceTime = &metav1.Time{Time: time.Now()}
})
}

-func (r *ResticRepoReconciler) getRepositoryMaintenanceFrequency(req *velerov1api.BackupRepository) time.Duration {
+func (r *BackupRepoReconciler) getRepositoryMaintenanceFrequency(req *velerov1api.BackupRepository) time.Duration {
if r.maintenanceFrequency > 0 {
r.logger.WithField("frequency", r.maintenanceFrequency).Info("Set user defined maintenance frequency")
return r.maintenanceFrequency

@@ -178,8 +178,8 @@ func ensureRepo(repo *velerov1api.BackupRepository, repoManager repository.Manag
return repoManager.PrepareRepo(repo)
}

-func (r *ResticRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
-log.Debug("resticRepositoryController.runMaintenanceIfDue")
+func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
+log.Debug("backupRepositoryController.runMaintenanceIfDue")

now := r.clock.Now()

@@ -188,19 +188,19 @@ func (r *ResticRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *vel
return nil
}

-log.Info("Running maintenance on restic repository")
+log.Info("Running maintenance on backup repository")

// prune failures should be displayed in the `.status.message` field but
// should not cause the repo to move to `NotReady`.
log.Debug("Pruning repo")
if err := r.repositoryManager.PruneRepo(req); err != nil {
log.WithError(err).Warn("error pruning repository")
-return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
+return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Status.Message = err.Error()
})
}

-return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
+return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Status.LastMaintenanceTime = &metav1.Time{Time: now}
})
}

@@ -209,20 +209,20 @@ func dueForMaintenance(req *velerov1api.BackupRepository, now time.Time) bool {
return req.Status.LastMaintenanceTime == nil || req.Status.LastMaintenanceTime.Add(req.Spec.MaintenanceFrequency.Duration).Before(now)
}

-func (r *ResticRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
+func (r *BackupRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
// no identifier: can't possibly be ready, so just return
if req.Spec.ResticIdentifier == "" {
return nil
}

-log.Info("Checking restic repository for readiness")
+log.Info("Checking backup repository for readiness")

// we need to ensure it (first check, if check fails, attempt to init)
// because we don't know if it's been successfully initialized yet.
if err := ensureRepo(req, r.repositoryManager); err != nil {
-return r.patchResticRepository(ctx, req, repoNotReady(err.Error()))
+return r.patchBackupRepository(ctx, req, repoNotReady(err.Error()))
}
-return r.patchResticRepository(ctx, req, repoReady())
+return r.patchBackupRepository(ctx, req, repoReady())
}

func repoNotReady(msg string) func(*velerov1api.BackupRepository) {

@@ -239,14 +239,14 @@ func repoReady() func(*velerov1api.BackupRepository) {
}
}

-// patchResticRepository mutates req with the provided mutate function, and patches it
+// patchBackupRepository mutates req with the provided mutate function, and patches it
// through the Kube API. After executing this function, req will be updated with both
// the mutation and the results of the Patch() API call.
-func (r *ResticRepoReconciler) patchResticRepository(ctx context.Context, req *velerov1api.BackupRepository, mutate func(*velerov1api.BackupRepository)) error {
+func (r *BackupRepoReconciler) patchBackupRepository(ctx context.Context, req *velerov1api.BackupRepository, mutate func(*velerov1api.BackupRepository)) error {
original := req.DeepCopy()
mutate(req)
if err := r.Patch(ctx, req, client.MergeFrom(original)); err != nil {
-return errors.Wrap(err, "error patching ResticRepository")
+return errors.Wrap(err, "error patching BackupRepository")
}
return nil
}
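Putting the renamed pieces together, registering this reconciler with a controller-runtime manager looks roughly like the sketch below, mirroring the call site in the velero server command earlier in this PR. The manager, logger and repository.Manager are assumed to exist already, and the maintenance frequency value is illustrative.

// Sketch: wiring the renamed reconciler into a controller-runtime manager.
func setupBackupRepoControllerSketch(mgr ctrl.Manager, namespace string, logger logrus.FieldLogger, repoManager repository.Manager) error {
	r := NewBackupRepoReconciler(
		namespace,
		logger,
		mgr.GetClient(),
		time.Hour, // repository maintenance frequency; illustrative value
		repoManager,
	)
	// SetupWithManager registers the periodic enqueue source and the
	// BackupRepository watch shown in the hunks above.
	return r.SetupWithManager(mgr)
}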
@@ -33,12 +33,12 @@ import (

const testMaintenanceFrequency = 10 * time.Minute

-func mockResticRepoReconciler(t *testing.T, rr *velerov1api.BackupRepository, mockOn string, arg interface{}, ret interface{}) *ResticRepoReconciler {
+func mockBackupRepoReconciler(t *testing.T, rr *velerov1api.BackupRepository, mockOn string, arg interface{}, ret interface{}) *BackupRepoReconciler {
mgr := &repomokes.Manager{}
if mockOn != "" {
mgr.On(mockOn, arg).Return(ret)
}
-return NewResticRepoReconciler(
+return NewBackupRepoReconciler(
velerov1api.DefaultNamespace,
velerotest.NewLogger(),
velerotest.NewFakeControllerRuntimeClient(t),

@@ -47,7 +47,7 @@ func mockResticRepoReconciler(t *testing.T, rr *velerov1api.BackupRepository, mo
)
}

-func mockResticRepositoryCR() *velerov1api.BackupRepository {
+func mockBackupRepositoryCR() *velerov1api.BackupRepository {
return &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1api.DefaultNamespace,

@@ -60,22 +60,22 @@ func mockResticRepositoryCR() *velerov1api.BackupRepository {
}

-func TestPatchResticRepository(t *testing.T) {
-rr := mockResticRepositoryCR()
-reconciler := mockResticRepoReconciler(t, rr, "", nil, nil)
+func TestPatchBackupRepository(t *testing.T) {
+rr := mockBackupRepositoryCR()
+reconciler := mockBackupRepoReconciler(t, rr, "", nil, nil)
err := reconciler.Client.Create(context.TODO(), rr)
assert.NoError(t, err)
-err = reconciler.patchResticRepository(context.Background(), rr, repoReady())
+err = reconciler.patchBackupRepository(context.Background(), rr, repoReady())
assert.NoError(t, err)
assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady)
-err = reconciler.patchResticRepository(context.Background(), rr, repoNotReady("not ready"))
+err = reconciler.patchBackupRepository(context.Background(), rr, repoNotReady("not ready"))
assert.NoError(t, err)
assert.NotEqual(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady)
}

func TestCheckNotReadyRepo(t *testing.T) {
-rr := mockResticRepositoryCR()
-reconciler := mockResticRepoReconciler(t, rr, "PrepareRepo", rr, nil)
+rr := mockBackupRepositoryCR()
+reconciler := mockBackupRepoReconciler(t, rr, "PrepareRepo", rr, nil)
err := reconciler.Client.Create(context.TODO(), rr)
assert.NoError(t, err)
err = reconciler.checkNotReadyRepo(context.TODO(), rr, reconciler.logger)

@@ -88,8 +88,8 @@ func TestCheckNotReadyRepo(t *testing.T) {
}

func TestRunMaintenanceIfDue(t *testing.T) {
-rr := mockResticRepositoryCR()
-reconciler := mockResticRepoReconciler(t, rr, "PruneRepo", rr, nil)
+rr := mockBackupRepositoryCR()
+reconciler := mockBackupRepoReconciler(t, rr, "PruneRepo", rr, nil)
err := reconciler.Client.Create(context.TODO(), rr)
assert.NoError(t, err)
lastTm := rr.Status.LastMaintenanceTime

@@ -105,9 +105,9 @@ func TestRunMaintenanceIfDue(t *testing.T) {
}

func TestInitializeRepo(t *testing.T) {
-rr := mockResticRepositoryCR()
+rr := mockBackupRepositoryCR()
rr.Spec.BackupStorageLocation = "default"
-reconciler := mockResticRepoReconciler(t, rr, "PrepareRepo", rr, nil)
+reconciler := mockBackupRepoReconciler(t, rr, "PrepareRepo", rr, nil)
err := reconciler.Client.Create(context.TODO(), rr)
assert.NoError(t, err)
locations := &velerov1api.BackupStorageLocation{

@@ -127,7 +127,7 @@ func TestInitializeRepo(t *testing.T) {
assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady)
}

-func TestResticRepoReconcile(t *testing.T) {
+func TestBackupRepoReconcile(t *testing.T) {
tests := []struct {
name string
repo *velerov1api.BackupRepository

@@ -178,7 +178,7 @@ func TestResticRepoReconcile(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-reconciler := mockResticRepoReconciler(t, test.repo, "", test.repo, nil)
+reconciler := mockBackupRepoReconciler(t, test.repo, "", test.repo, nil)
err := reconciler.Client.Create(context.TODO(), test.repo)
assert.NoError(t, err)
_, err = reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.repo.Namespace, Name: test.repo.Name}})

@@ -227,7 +227,7 @@ func TestGetRepositoryMaintenanceFrequency(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
mgr := repomokes.Manager{}
mgr.On("DefaultMaintenanceFrequency", mock.Anything).Return(test.freqReturn, test.freqError)
-reconciler := NewResticRepoReconciler(
+reconciler := NewBackupRepoReconciler(
velerov1api.DefaultNamespace,
velerotest.NewLogger(),
velerotest.NewFakeControllerRuntimeClient(t),
@@ -25,7 +25,7 @@ const (
GarbageCollection = "gc"
PodVolumeBackup = "pod-volume-backup"
PodVolumeRestore = "pod-volume-restore"
-ResticRepo = "restic-repo"
+BackupRepo = "backup-repo"
Restore = "restore"
Schedule = "schedule"
ServerStatusRequest = "server-status-request"

@@ -38,7 +38,7 @@ var DisableableControllers = []string{
BackupSync,
DownloadRequest,
GarbageCollection,
-ResticRepo,
+BackupRepo,
Restore,
Schedule,
ServerStatusRequest,
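Because the constant's value changes from "restic-repo" to "backup-repo", anything that previously passed "restic-repo" to --disable-controllers now needs to pass "backup-repo". A small illustrative helper (not part of the PR) that validates a user-supplied name against the renamed list:

// Sketch: checking a controller name against the renamed DisableableControllers list.
// "restic-repo" is no longer listed; BackupRepo ("backup-repo") is.
func isDisableableSketch(name string) bool {
	for _, c := range DisableableControllers {
		if c == name {
			return true
		}
	}
	return false
}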
@@ -205,8 +205,8 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
latencySeconds := float64(latencyDuration / time.Second)
backupName := fmt.Sprintf("%s/%s", req.Namespace, pvb.OwnerReferences[0].Name)
generateOpName := fmt.Sprintf("%s-%s-%s-%s-%s-backup", pvb.Name, backupRepo.Name, pvb.Spec.BackupStorageLocation, pvb.Namespace, pvb.Spec.UploaderType)
-r.Metrics.ObserveResticOpLatency(r.NodeName, req.Name, generateOpName, backupName, latencySeconds)
-r.Metrics.RegisterResticOpLatencyGauge(r.NodeName, req.Name, generateOpName, backupName, latencySeconds)
+r.Metrics.ObservePodVolumeOpLatency(r.NodeName, req.Name, generateOpName, backupName, latencySeconds)
+r.Metrics.RegisterPodVolumeOpLatencyGauge(r.NodeName, req.Name, generateOpName, backupName, latencySeconds)
r.Metrics.RegisterPodVolumeBackupDequeue(r.NodeName)

log.Info("PodVolumeBackup completed")
@@ -145,7 +145,7 @@ var _ = Describe("PodVolumeBackup Reconciler", func() {
r := PodVolumeBackupReconciler{
Client: fakeClient,
Clock: clock.NewFakeClock(now),
-Metrics: metrics.NewResticServerMetrics(),
+Metrics: metrics.NewPodVolumeMetrics(),
CredentialGetter: &credentials.CredentialGetter{FromFile: credentialFileStore},
NodeName: "test_node",
FileSystem: fakeFS,
@ -28,8 +28,8 @@ type ServerMetrics struct {
}

const (
metricNamespace = "velero"
resticMetricsNamespace = "restic"
metricNamespace = "velero"
podVolumeMetricsNamespace = "podVolume"
//Velero metrics
backupTarballSizeBytesGauge = "backup_tarball_size_bytes"
backupTotal = "backup_total"

@ -58,18 +58,18 @@ const (
csiSnapshotSuccessTotal = "csi_snapshot_success_total"
csiSnapshotFailureTotal = "csi_snapshot_failure_total"

// Restic metrics
podVolumeBackupEnqueueTotal = "pod_volume_backup_enqueue_count"
podVolumeBackupDequeueTotal = "pod_volume_backup_dequeue_count"
resticOperationLatencySeconds = "restic_operation_latency_seconds"
resticOperationLatencyGaugeSeconds = "restic_operation_latency_seconds_gauge"
// pod volume metrics
podVolumeBackupEnqueueTotal = "pod_volume_backup_enqueue_count"
podVolumeBackupDequeueTotal = "pod_volume_backup_dequeue_count"
podVolumeOperationLatencySeconds = "pod_volume_operation_latency_seconds"
podVolumeOperationLatencyGaugeSeconds = "pod_volume_operation_latency_seconds_gauge"

// Labels
nodeMetricLabel = "node"
resticOperationLabel = "operation"
pvbNameLabel = "pod_volume_backup"
scheduleLabel = "schedule"
backupNameLabel = "backupName"
nodeMetricLabel = "node"
podVolumeOperationLabel = "operation"
pvbNameLabel = "pod_volume_backup"
scheduleLabel = "schedule"
backupNameLabel = "backupName"
)

// NewServerMetrics returns new ServerMetrics

@ -297,12 +297,12 @@ func NewServerMetrics() *ServerMetrics {
}
}

func NewResticServerMetrics() *ServerMetrics {
func NewPodVolumeMetrics() *ServerMetrics {
return &ServerMetrics{
metrics: map[string]prometheus.Collector{
podVolumeBackupEnqueueTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: resticMetricsNamespace,
Namespace: podVolumeMetricsNamespace,
Name: podVolumeBackupEnqueueTotal,
Help: "Total number of pod_volume_backup objects enqueued",
},

@ -310,25 +310,25 @@ func NewResticServerMetrics() *ServerMetrics {
),
podVolumeBackupDequeueTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: resticMetricsNamespace,
Namespace: podVolumeMetricsNamespace,
Name: podVolumeBackupDequeueTotal,
Help: "Total number of pod_volume_backup objects dequeued",
},
[]string{nodeMetricLabel},
),
resticOperationLatencyGaugeSeconds: prometheus.NewGaugeVec(
podVolumeOperationLatencyGaugeSeconds: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: resticMetricsNamespace,
Name: resticOperationLatencyGaugeSeconds,
Help: "Gauge metric indicating time taken, in seconds, to perform restic operations",
Namespace: podVolumeMetricsNamespace,
Name: podVolumeOperationLatencyGaugeSeconds,
Help: "Gauge metric indicating time taken, in seconds, to perform pod volume operations",
},
[]string{nodeMetricLabel, resticOperationLabel, backupNameLabel, pvbNameLabel},
[]string{nodeMetricLabel, podVolumeOperationLabel, backupNameLabel, pvbNameLabel},
),
resticOperationLatencySeconds: prometheus.NewHistogramVec(
podVolumeOperationLatencySeconds: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: resticMetricsNamespace,
Name: resticOperationLatencySeconds,
Help: "Time taken to complete restic operations, in seconds",
Namespace: podVolumeMetricsNamespace,
Name: podVolumeOperationLatencySeconds,
Help: "Time taken to complete pod volume operations, in seconds",
Buckets: []float64{
toSeconds(1 * time.Minute),
toSeconds(5 * time.Minute),

@ -341,7 +341,7 @@ func NewResticServerMetrics() *ServerMetrics {
toSeconds(4 * time.Hour),
},
},
[]string{nodeMetricLabel, resticOperationLabel, backupNameLabel, pvbNameLabel},
[]string{nodeMetricLabel, podVolumeOperationLabel, backupNameLabel, pvbNameLabel},
),
},
}

@ -422,7 +422,7 @@ func (m *ServerMetrics) InitSchedule(scheduleName string) {
}

// InitSchedule initializes counter metrics for a node.
func (m *ServerMetrics) InitResticMetricsForNode(node string) {
func (m *ServerMetrics) InitPodVolumeMetricsForNode(node string) {
if c, ok := m.metrics[podVolumeBackupEnqueueTotal].(*prometheus.CounterVec); ok {
c.WithLabelValues(node).Add(0)
}

@ -445,16 +445,16 @@ func (m *ServerMetrics) RegisterPodVolumeBackupDequeue(node string) {
}
}

// ObserveResticOpLatency records the number of seconds a restic operation took.
func (m *ServerMetrics) ObserveResticOpLatency(node, pvbName, opName, backupName string, seconds float64) {
if h, ok := m.metrics[resticOperationLatencySeconds].(*prometheus.HistogramVec); ok {
// ObservePodVolumeOpLatency records the number of seconds a pod volume operation took.
func (m *ServerMetrics) ObservePodVolumeOpLatency(node, pvbName, opName, backupName string, seconds float64) {
if h, ok := m.metrics[podVolumeOperationLatencySeconds].(*prometheus.HistogramVec); ok {
h.WithLabelValues(node, opName, backupName, pvbName).Observe(seconds)
}
}

// RegisterResticOpLatencyGauge registers the restic operation latency as a gauge metric.
func (m *ServerMetrics) RegisterResticOpLatencyGauge(node, pvbName, opName, backupName string, seconds float64) {
if g, ok := m.metrics[resticOperationLatencyGaugeSeconds].(*prometheus.GaugeVec); ok {
// RegisterPodVolumeOpLatencyGauge registers the pod volume operation latency as a gauge metric.
func (m *ServerMetrics) RegisterPodVolumeOpLatencyGauge(node, pvbName, opName, backupName string, seconds float64) {
if g, ok := m.metrics[podVolumeOperationLatencyGaugeSeconds].(*prometheus.GaugeVec); ok {
g.WithLabelValues(node, opName, backupName, pvbName).Set(seconds)
}
}
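The renamed constructor and helpers above are all a caller needs to record a pod volume operation end to end. A minimal sketch follows, assuming only the exported names shown in these hunks (NewPodVolumeMetrics, InitPodVolumeMetricsForNode, ObservePodVolumeOpLatency, RegisterPodVolumeOpLatencyGauge, RegisterPodVolumeBackupDequeue) and the usual pkg/metrics import path; the node, operation, and backup names are illustrative.

```go
package main

import (
	"time"

	"github.com/vmware-tanzu/velero/pkg/metrics"
)

func main() {
	// Build the pod volume metric set renamed in this PR
	// (formerly NewResticServerMetrics).
	m := metrics.NewPodVolumeMetrics()

	// Pre-populate the per-node counters so they are exposed with a zero value.
	m.InitPodVolumeMetricsForNode("node-1")

	// Record a completed pod volume backup the same way the reconciler above does:
	// arguments are node, PVB name, operation name, backup name, seconds.
	latency := 90 * time.Second
	seconds := float64(latency / time.Second)
	m.ObservePodVolumeOpLatency("node-1", "my-pvb", "my-pvb-op", "velero/my-backup", seconds)
	m.RegisterPodVolumeOpLatencyGauge("node-1", "my-pvb", "my-pvb-op", "velero/my-backup", seconds)
	m.RegisterPodVolumeBackupDequeue("node-1")
}
```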
@ -38,7 +38,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/kube"
)

// Backupper can execute restic backups of volumes in a pod.
// Backupper can execute pod volume backups of volumes in a pod.
type Backupper interface {
// BackupPodVolumes backs up all specified volumes in a pod.
BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, volumesToBackup []string, log logrus.FieldLogger) ([]*velerov1api.PodVolumeBackup, []error)

@ -190,7 +190,7 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
continue
}
if isHostPath {
log.Warnf("Volume %s in pod %s/%s is a hostPath volume which is not supported for restic backup, skipping", volumeName, pod.Namespace, pod.Name)
log.Warnf("Volume %s in pod %s/%s is a hostPath volume which is not supported for pod volume backup, skipping", volumeName, pod.Namespace, pod.Name)
continue
}

@ -304,7 +304,7 @@ func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume c

if pvc != nil {
// this annotation is used in pkg/restore to identify if a PVC
// has a restic backup.
// has a pod volume backup.
pvb.Annotations = map[string]string{
PVCNameAnnotation: pvc.Name,
}

@ -41,7 +41,7 @@ type RestoreData struct {
SourceNamespace, BackupLocation string
}

// Restorer can execute restic restores of volumes in a pod.
// Restorer can execute pod volume restores of volumes in a pod.
type Restorer interface {
// RestorePodVolumes restores all annotated volumes in a pod.
RestorePodVolumes(RestoreData) []error
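For reference, a no-op implementation of the renamed Restorer interface looks like the sketch below. This is not part of the diff: it assumes the interface and RestoreData live in pkg/podvolume (as the test hunks further down suggest) and that RestorePodVolumes is the only method, which is all this hunk shows.

```go
// Package podvolumetest is a hypothetical test-helper package.
package podvolumetest

import (
	"github.com/vmware-tanzu/velero/pkg/podvolume"
)

// noopRestorer satisfies podvolume.Restorer but restores nothing; a real
// restorer would start pod volume restores and wait for them to finish.
type noopRestorer struct{}

func (noopRestorer) RestorePodVolumes(podvolume.RestoreData) []error {
	return nil
}

// Compile-time check; assumes RestorePodVolumes is the interface's only method.
var _ podvolume.Restorer = noopRestorer{}
```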
@ -361,11 +361,11 @@ func TestGetVolumesByPod(t *testing.T) {
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToBackupAnnotation: "resticPV1,resticPV2,resticPV3",
VolumesToBackupAnnotation: "pvbPV1,pvbPV2,pvbPV3",
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
expected: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
},
{
name: "should get all pod volumes when defaultVolumesToFsBackup is true and no PVs are excluded",

@ -373,12 +373,12 @@ func TestGetVolumesByPod(t *testing.T) {
pod: &corev1api.Pod{
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
expected: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
},
{
name: "should get all pod volumes except ones excluded when defaultVolumesToFsBackup is true",

@ -386,56 +386,56 @@ func TestGetVolumesByPod(t *testing.T) {
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
/// Excluded from restic through annotation
{Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"},
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from PVB through annotation
{Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
expected: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
},
{
name: "should exclude default service account token from restic backup",
name: "should exclude default service account token from pod volume backup",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
/// Excluded from restic because colume mounting default service account token
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from PVB because colume mounting default service account token
{Name: "default-token-5xq45"},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
expected: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
},
{
name: "should exclude host path volumes from restic backups",
name: "should exclude host path volumes from pod volume backups",
defaultVolumesToFsBackup: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
/// Excluded from restic through annotation
{Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"},
// Excluded from restic because hostpath
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from pod volume backup through annotation
{Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"},
// Excluded from pod volume backup because hostpath
{Name: "hostPath1", VolumeSource: corev1api.VolumeSource{HostPath: &corev1api.HostPathVolumeSource{Path: "/hostpathVol"}}},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
expected: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
},
{
name: "should exclude volumes mounting secrets",

@ -443,21 +443,21 @@ func TestGetVolumesByPod(t *testing.T) {
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
/// Excluded from restic through annotation
{Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"},
// Excluded from restic because hostpath
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from pod volume backup through annotation
{Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"},
// Excluded from pod volume backup because hostpath
{Name: "superSecret", VolumeSource: corev1api.VolumeSource{Secret: &corev1api.SecretVolumeSource{SecretName: "super-secret"}}},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
expected: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
},
{
name: "should exclude volumes mounting config maps",

@ -465,21 +465,21 @@ func TestGetVolumesByPod(t *testing.T) {
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
/// Excluded from restic through annotation
{Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"},
// Excluded from restic because hostpath
// PVB Volumes
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
/// Excluded from pod volume backup through annotation
{Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"},
// Excluded from pod volume backup because hostpath
{Name: "appCOnfig", VolumeSource: corev1api.VolumeSource{ConfigMap: &corev1api.ConfigMapVolumeSource{LocalObjectReference: corev1api.LocalObjectReference{Name: "app-config"}}}},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
expected: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
},
{
name: "should exclude projected volumes",

@ -487,12 +487,12 @@ func TestGetVolumesByPod(t *testing.T) {
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{

@ -514,7 +514,7 @@ func TestGetVolumesByPod(t *testing.T) {
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
expected: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
},
{
name: "should exclude DownwardAPI volumes",

@ -522,12 +522,12 @@ func TestGetVolumesByPod(t *testing.T) {
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
VolumesToExcludeAnnotation: "nonPvbPV1,nonPvbPV2,nonPvbPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
{Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"},
{
Name: "downwardAPI",
VolumeSource: corev1api.VolumeSource{

@ -547,7 +547,7 @@ func TestGetVolumesByPod(t *testing.T) {
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
expected: []string{"pvbPV1", "pvbPV2", "pvbPV3"},
},
}
@ -29,7 +29,7 @@ import (
)

const (
credentialsSecretName = "velero-restic-credentials"
credentialsSecretName = "velero-repo-credentials"
credentialsKey = "repository-password"

encryptionKey = "static-passw0rd"

@ -65,10 +65,10 @@ func EnsureCommonRepositoryKey(secretClient corev1client.SecretsGetter, namespac
}

// RepoKeySelector returns the SecretKeySelector which can be used to fetch
// the restic repository key.
// the backup repository key.
func RepoKeySelector() *corev1api.SecretKeySelector {
// For now, all restic repos share the same key so we don't need the repoName to fetch it.
// When we move to full-backup encryption, we'll likely have a separate key per restic repo
// For now, all backup repos share the same key so we don't need the repoName to fetch it.
// When we move to full-backup encryption, we'll likely have a separate key per backup repo
// (all within the Velero server's namespace) so RepoKeySelector will need to select the key
// for that repo.
return builder.ForSecretKeySelector(credentialsSecretName, credentialsKey).Result()
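A minimal sketch (not part of this diff) of how the selector returned by RepoKeySelector could be consumed: the caller passes the selector plus a client-go SecretsGetter, like the one EnsureCommonRepositoryKey receives above. With the rename, the selector points at the "velero-repo-credentials" secret and its "repository-password" key; the helper name readRepoPassword is an assumption for illustration.

```go
// Package repoexample is a hypothetical consumer of the backup repository key.
package repoexample

import (
	"context"
	"fmt"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// readRepoPassword fetches the secret named by the selector and returns the
// repository password stored under the selector's key.
func readRepoPassword(ctx context.Context, secrets corev1client.SecretsGetter, namespace string, selector *corev1api.SecretKeySelector) (string, error) {
	secret, err := secrets.Secrets(namespace).Get(ctx, selector.Name, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("getting backup repository credentials secret: %w", err)
	}
	return string(secret.Data[selector.Key]), nil
}
```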
@ -19,7 +19,7 @@ package repository
import "sync"

// RepoLocker manages exclusive/non-exclusive locks for
// operations against restic repositories. The semantics
// operations against backup repositories. The semantics
// of exclusive/non-exclusive locks are the same as for
// a sync.RWMutex, where a non-exclusive lock is equivalent
// to a read lock, and an exclusive lock is equivalent to

@ -31,18 +31,18 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
)

// SnapshotIdentifier uniquely identifies a restic snapshot
// SnapshotIdentifier uniquely identifies a snapshot
// taken by Velero.
type SnapshotIdentifier struct {
// VolumeNamespace is the namespace of the pod/volume that
// the restic snapshot is for.
// the snapshot is for.
VolumeNamespace string

// BackupStorageLocation is the backup's storage location
// name.
BackupStorageLocation string

// SnapshotID is the short ID of the restic snapshot.
// SnapshotID is the short ID of the snapshot.
SnapshotID string

// RepositoryType is the type of the repository where the
@ -107,8 +107,8 @@ type kubernetesRestorer struct {
discoveryHelper discovery.Helper
dynamicFactory client.DynamicFactory
namespaceClient corev1.NamespaceInterface
resticRestorerFactory podvolume.RestorerFactory
resticTimeout time.Duration
podVolumeRestorerFactory podvolume.RestorerFactory
podVolumeTimeout time.Duration
resourceTerminatingTimeout time.Duration
resourcePriorities []string
fileSystem filesystem.Interface

@ -126,8 +126,8 @@ func NewKubernetesRestorer(
dynamicFactory client.DynamicFactory,
resourcePriorities []string,
namespaceClient corev1.NamespaceInterface,
resticRestorerFactory podvolume.RestorerFactory,
resticTimeout time.Duration,
podVolumeRestorerFactory podvolume.RestorerFactory,
podVolumeTimeout time.Duration,
resourceTerminatingTimeout time.Duration,
logger logrus.FieldLogger,
podCommandExecutor podexec.PodCommandExecutor,

@ -139,8 +139,8 @@ func NewKubernetesRestorer(
discoveryHelper: discoveryHelper,
dynamicFactory: dynamicFactory,
namespaceClient: namespaceClient,
resticRestorerFactory: resticRestorerFactory,
resticTimeout: resticTimeout,
podVolumeRestorerFactory: podVolumeRestorerFactory,
podVolumeTimeout: podVolumeTimeout,
resourceTerminatingTimeout: resourceTerminatingTimeout,
resourcePriorities: resourcePriorities,
logger: logger,

@ -238,7 +238,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
return Result{}, Result{Velero: []string{err.Error()}}
}

podVolumeTimeout := kr.resticTimeout
podVolumeTimeout := kr.podVolumeTimeout
if val := req.Restore.Annotations[velerov1api.PodVolumeOperationTimeoutAnnotation]; val != "" {
parsed, err := time.ParseDuration(val)
if err != nil {

@ -254,9 +254,9 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
ctx, cancelFunc := go_context.WithTimeout(go_context.Background(), podVolumeTimeout)
defer cancelFunc()

var resticRestorer podvolume.Restorer
if kr.resticRestorerFactory != nil {
resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, req.Restore)
var podVolumeRestorer podvolume.Restorer
if kr.podVolumeRestorerFactory != nil {
podVolumeRestorer, err = kr.podVolumeRestorerFactory.NewRestorer(ctx, req.Restore)
if err != nil {
return Result{}, Result{Velero: []string{err.Error()}}
}

@ -302,8 +302,8 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
restoreItemActions: resolvedActions,
itemSnapshotterActions: resolvedItemSnapshotterActions,
volumeSnapshotterGetter: volumeSnapshotterGetter,
resticRestorer: resticRestorer,
resticErrs: make(chan error),
podVolumeRestorer: podVolumeRestorer,
podVolumeErrs: make(chan error),
pvsToProvision: sets.NewString(),
pvRestorer: pvRestorer,
volumeSnapshots: req.VolumeSnapshots,

@ -345,9 +345,9 @@ type restoreContext struct {
restoreItemActions []framework.RestoreItemResolvedAction
itemSnapshotterActions []framework.ItemSnapshotterResolvedAction
volumeSnapshotterGetter VolumeSnapshotterGetter
resticRestorer podvolume.Restorer
resticWaitGroup sync.WaitGroup
resticErrs chan error
podVolumeRestorer podvolume.Restorer
podVolumeWaitGroup sync.WaitGroup
podVolumeErrs chan error
pvsToProvision sets.String
pvRestorer PVRestorer
volumeSnapshots []*volume.Snapshot
@ -557,29 +557,29 @@ func (ctx *restoreContext) execute() (Result, Result) {
ctx.log.WithError(errors.WithStack((err))).Warn("Updating restore status.progress")
}

// Wait for all of the restic restore goroutines to be done, which is
// Wait for all of the pod volume restore goroutines to be done, which is
// only possible once all of their errors have been received by the loop
// below, then close the resticErrs channel so the loop terminates.
// below, then close the podVolumeErrs channel so the loop terminates.
go func() {
ctx.log.Info("Waiting for all restic restores to complete")
ctx.log.Info("Waiting for all pod volume restores to complete")

// TODO timeout?
ctx.resticWaitGroup.Wait()
close(ctx.resticErrs)
ctx.podVolumeWaitGroup.Wait()
close(ctx.podVolumeErrs)
}()

// This loop will only terminate when the ctx.resticErrs channel is closed
// This loop will only terminate when the ctx.podVolumeErrs channel is closed
// in the above goroutine, *after* all errors from the goroutines have been
// received by this loop.
for err := range ctx.resticErrs {
for err := range ctx.podVolumeErrs {
// TODO: not ideal to be adding these to Velero-level errors
// rather than a specific namespace, but don't have a way
// to track the namespace right now.
errs.Velero = append(errs.Velero, err.Error())
}
ctx.log.Info("Done waiting for all restic restores to complete")
ctx.log.Info("Done waiting for all pod volume restores to complete")

// Wait for all post-restore exec hooks with same logic as restic wait above.
// Wait for all post-restore exec hooks with same logic as pod volume wait above.
go func() {
ctx.log.Info("Waiting for all post-restore-exec hooks to complete")
|
|||
obj.SetAnnotations(annotations)
|
||||
}
|
||||
|
||||
case hasResticBackup(obj, ctx):
|
||||
ctx.log.Infof("Dynamically re-provisioning persistent volume because it has a restic backup to be restored.")
|
||||
case hasPodVolumeBackup(obj, ctx):
|
||||
ctx.log.Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.")
|
||||
ctx.pvsToProvision.Insert(name)
|
||||
|
||||
// Return early because we don't want to restore the PV itself, we
|
||||
|
@ -1223,10 +1223,10 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
|
|||
}
|
||||
|
||||
if pvc.Spec.VolumeName != "" {
|
||||
// This used to only happen with restic volumes, but now always remove this binding metadata
|
||||
// This used to only happen with PVB volumes, but now always remove this binding metadata
|
||||
obj = resetVolumeBindingInfo(obj)
|
||||
|
||||
// This is the case for restic volumes, where we need to actually have an empty volume created instead of restoring one.
|
||||
// This is the case for PVB volumes, where we need to actually have an empty volume created instead of restoring one.
|
||||
// The assumption is that any PV in pvsToProvision doesn't have an associated snapshot.
|
||||
if ctx.pvsToProvision.Has(pvc.Spec.VolumeName) {
|
||||
ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning", namespace, name)
|
||||
|
@ -1558,19 +1558,19 @@ func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) (bool,
|
|||
|
||||
// restorePodVolumeBackups restores the PodVolumeBackups for the given restored pod
|
||||
func restorePodVolumeBackups(ctx *restoreContext, createdObj *unstructured.Unstructured, originalNamespace string) {
|
||||
if ctx.resticRestorer == nil {
|
||||
ctx.log.Warn("No restic restorer, not restoring pod's volumes")
|
||||
if ctx.podVolumeRestorer == nil {
|
||||
ctx.log.Warn("No pod volume restorer, not restoring pod's volumes")
|
||||
} else {
|
||||
ctx.resticWaitGroup.Add(1)
|
||||
ctx.podVolumeWaitGroup.Add(1)
|
||||
go func() {
|
||||
// Done() will only be called after all errors have been successfully
|
||||
// sent on the ctx.resticErrs channel
|
||||
defer ctx.resticWaitGroup.Done()
|
||||
// sent on the ctx.podVolumeErrs channel
|
||||
defer ctx.podVolumeWaitGroup.Done()
|
||||
|
||||
pod := new(v1.Pod)
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
|
||||
ctx.log.WithError(err).Error("error converting unstructured pod")
|
||||
ctx.resticErrs <- err
|
||||
ctx.podVolumeErrs <- err
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1581,11 +1581,11 @@ func restorePodVolumeBackups(ctx *restoreContext, createdObj *unstructured.Unstr
|
|||
SourceNamespace: originalNamespace,
|
||||
BackupLocation: ctx.backup.Spec.StorageLocation,
|
||||
}
|
||||
if errs := ctx.resticRestorer.RestorePodVolumes(data); errs != nil {
|
||||
ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes")
|
||||
if errs := ctx.podVolumeRestorer.RestorePodVolumes(data); errs != nil {
|
||||
ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete pod volume restores of pod's volumes")
|
||||
|
||||
for _, err := range errs {
|
||||
ctx.resticErrs <- err
|
||||
ctx.podVolumeErrs <- err
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -1597,7 +1597,7 @@ func (ctx *restoreContext) waitExec(createdObj *unstructured.Unstructured) {
|
|||
ctx.hooksWaitGroup.Add(1)
|
||||
go func() {
|
||||
// Done() will only be called after all errors have been successfully sent
|
||||
// on the ctx.resticErrs channel.
|
||||
// on the ctx.podVolumeErrs channel.
|
||||
defer ctx.hooksWaitGroup.Done()
|
||||
|
||||
pod := new(v1.Pod)
|
||||
|
@ -1639,7 +1639,7 @@ func hasSnapshot(pvName string, snapshots []*volume.Snapshot) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func hasResticBackup(unstructuredPV *unstructured.Unstructured, ctx *restoreContext) bool {
|
||||
func hasPodVolumeBackup(unstructuredPV *unstructured.Unstructured, ctx *restoreContext) bool {
|
||||
if len(ctx.podVolumeBackups) == 0 {
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -2682,17 +2682,17 @@ func TestRestorePersistentVolumes(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
type fakeResticRestorerFactory struct {
|
||||
type fakePodVolumeRestorerFactory struct {
|
||||
restorer *uploadermocks.Restorer
|
||||
}
|
||||
|
||||
func (f *fakeResticRestorerFactory) NewRestorer(context.Context, *velerov1api.Restore) (podvolume.Restorer, error) {
|
||||
func (f *fakePodVolumeRestorerFactory) NewRestorer(context.Context, *velerov1api.Restore) (podvolume.Restorer, error) {
|
||||
return f.restorer, nil
|
||||
}
|
||||
|
||||
// TestRestoreWithRestic verifies that a call to RestorePodVolumes was made as and when
|
||||
// expected for the given pods by using a mock for the restic restorer.
|
||||
func TestRestoreWithRestic(t *testing.T) {
|
||||
// TestRestoreWithPodVolume verifies that a call to RestorePodVolumes was made as and when
|
||||
// expected for the given pods by using a mock for the pod volume restorer.
|
||||
func TestRestoreWithPodVolume(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
restore *velerov1api.Restore
|
||||
|
@ -2753,7 +2753,7 @@ func TestRestoreWithRestic(t *testing.T) {
|
|||
h := newHarness(t)
|
||||
restorer := new(uploadermocks.Restorer)
|
||||
defer restorer.AssertExpectations(t)
|
||||
h.restorer.resticRestorerFactory = &fakeResticRestorerFactory{
|
||||
h.restorer.podVolumeRestorerFactory = &fakePodVolumeRestorerFactory{
|
||||
restorer: restorer,
|
||||
}
|
||||
|
||||
|
@ -3121,8 +3121,8 @@ func newHarness(t *testing.T) *harness {
|
|||
fileSystem: testutil.NewFakeFileSystem(),
|
||||
|
||||
// unsupported
|
||||
resticRestorerFactory: nil,
|
||||
resticTimeout: 0,
|
||||
podVolumeRestorerFactory: nil,
|
||||
podVolumeTimeout: 0,
|
||||
},
|
||||
log: log,
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
"allowed_contexts": [
|
||||
"development"
|
||||
],
|
||||
"enable_restic": false,
|
||||
"use_node_agent": false,
|
||||
"create_backup_locations": true,
|
||||
"setup-minio": true,
|
||||
"enable_debug": false,
|
||||
|
|