Migrate backup sync controller from code-generator to kubebuilder (#5218)
* Migrate backup sync controller from code-generator to kubebuilder:
  1. Use kubebuilder's reconcile logic to replace the controller's old logic.
  2. Use Ginkgo and Gomega to replace the old tests.
  3. Modify the BSL reconciler registration method.
* Add an Option parameter for PeriodicalEnqueueSource.

Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
Co-authored-by: Xun Jiang <blackpiglet@gmail.com>
parent 8bc464aaa6
commit 6fea973c57
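For orientation before the diff: every controller migrated this way ends up with the same controller-runtime shape — a reconciler struct, a Reconcile method driven by the manager's work queue, and a SetupWithManager registration. The following is a minimal sketch of that pattern with illustrative names (exampleReconciler is not part of this PR), not the exact Velero code:

package example

import (
    "context"

    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"

    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

type exampleReconciler struct {
    client client.Client
}

// Reconcile receives one namespace/name per invocation; the manager's work
// queue replaces the hand-rolled resync loop of the old generic controller.
func (r *exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    location := &velerov1api.BackupStorageLocation{}
    if err := r.client.Get(ctx, req.NamespacedName, location); err != nil {
        // A deleted object is not an error; other errors are retried by the queue.
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }
    // ...do the actual per-object work here...
    return ctrl.Result{}, nil
}

// SetupWithManager wires the reconciler into the shared controller manager.
func (r *exampleReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&velerov1api.BackupStorageLocation{}).
        Complete(r)
}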
@@ -0,0 +1 @@
+Migrate backup sync controller from code-generator to kubebuilder.
@@ -53,6 +53,7 @@ import (
 	snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"

 	"github.com/vmware-tanzu/velero/internal/credentials"
+	"github.com/vmware-tanzu/velero/internal/storage"
 	"github.com/vmware-tanzu/velero/pkg/backup"
 	"github.com/vmware-tanzu/velero/pkg/buildinfo"
 	"github.com/vmware-tanzu/velero/pkg/client"
@@ -79,7 +80,6 @@ import (
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/manager"

-	"github.com/vmware-tanzu/velero/internal/storage"
 	"github.com/vmware-tanzu/velero/internal/util/managercontroller"
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/podvolume"
@@ -607,29 +607,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string

 	csiVSLister, csiVSCLister, csiVSClassLister := s.getCSISnapshotListers()

-	backupSyncControllerRunInfo := func() controllerRunInfo {
-		backupSyncContoller := controller.NewBackupSyncController(
-			s.veleroClient.VeleroV1(),
-			s.mgr.GetClient(),
-			s.veleroClient.VeleroV1(),
-			s.sharedInformerFactory.Velero().V1().Backups().Lister(),
-			csiVSLister,
-			s.config.backupSyncPeriod,
-			s.namespace,
-			s.csiSnapshotClient,
-			s.kubeClient,
-			s.config.defaultBackupLocation,
-			newPluginManager,
-			backupStoreGetter,
-			s.logger,
-		)
-
-		return controllerRunInfo{
-			controller: backupSyncContoller,
-			numWorkers: defaultControllerWorkers,
-		}
-	}
-
 	backupTracker := controller.NewBackupTracker()

 	backupControllerRunInfo := func() controllerRunInfo {
@@ -717,8 +694,11 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 		}
 	}

+	// So far, the PodVolumeBackup, PodVolumeRestore, and BackupStorageLocation controllers
+	// are not included in the --disable-controllers list.
+	// This is because PVB and PVR are used by the Restic DaemonSet,
+	// and the BSL controller is mandatory for Velero to work.
 	enabledControllers := map[string]func() controllerRunInfo{
-		controller.BackupSync: backupSyncControllerRunInfo,
 		controller.Backup:  backupControllerRunInfo,
 		controller.Restore: restoreControllerRunInfo,
 	}
@@ -729,6 +709,8 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 		controller.Schedule:          {},
 		controller.ResticRepo:        {},
 		controller.BackupDeletion:    {},
 		controller.GarbageCollection: {},
+		controller.BackupSync:        {},
 	}

 	if s.config.restoreOnly {
@@ -742,7 +724,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 	}

 	// Remove disabled controllers so they are not initialized. If a match is not found we want
-	// to hault the system so the user knows this operation was not possible.
+	// to halt the system so the user knows this operation was not possible.
 	if err := removeControllers(s.config.disabledControllers, enabledControllers, enabledRuntimeControllers, s.logger); err != nil {
 		log.Fatal(err, "unable to disable a controller")
 	}
@@ -776,18 +758,18 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 		s.logger.WithField("informer", informer).Info("Informer cache synced")
 	}

-	bslr := controller.BackupStorageLocationReconciler{
-		Ctx:    s.ctx,
-		Client: s.mgr.GetClient(),
-		Scheme: s.mgr.GetScheme(),
-		DefaultBackupLocationInfo: storage.DefaultBackupLocationInfo{
+	bslr := controller.NewBackupStorageLocationReconciler(
+		s.ctx,
+		s.mgr.GetClient(),
+		s.mgr.GetScheme(),
+		storage.DefaultBackupLocationInfo{
 			StorageLocation:           s.config.defaultBackupLocation,
 			ServerValidationFrequency: s.config.storeValidationFrequency,
 		},
-		NewPluginManager:  newPluginManager,
-		BackupStoreGetter: backupStoreGetter,
-		Log:               s.logger,
-	}
+		newPluginManager,
+		backupStoreGetter,
+		s.logger,
+	)
 	if err := bslr.SetupWithManager(s.mgr); err != nil {
 		s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupStorageLocation)
 	}
@@ -844,6 +826,25 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 		}
 	}

+	if _, ok := enabledRuntimeControllers[controller.BackupSync]; ok {
+		syncPeriod := s.config.backupSyncPeriod
+		if syncPeriod <= 0 {
+			syncPeriod = time.Minute
+		}
+
+		backupSyncReconciler := controller.NewBackupSyncReconciler(
+			s.mgr.GetClient(),
+			s.namespace,
+			syncPeriod,
+			newPluginManager,
+			backupStoreGetter,
+			s.logger,
+		)
+		if err := backupSyncReconciler.SetupWithManager(s.mgr); err != nil {
+			s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupSync)
+		}
+	}
+
 	if _, ok := enabledRuntimeControllers[controller.GarbageCollection]; ok {
 		r := controller.NewGCReconciler(s.logger, s.mgr.GetClient(), s.config.garbageCollectionFrequency)
 		if err := r.SetupWithManager(s.mgr); err != nil {
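The registration code above defers to removeControllers for the --disable-controllers flag. Its body is outside this diff; the following hedged sketch only illustrates what the call site implies (drop each disabled name from whichever registry holds it, and fail fast on unknown names so the log.Fatal above fires):

// Sketch inferred from the call site; not the actual Velero implementation.
func removeControllers(disabledControllers []string, enabledControllers map[string]func() controllerRunInfo, enabledRuntimeControllers map[string]struct{}, log logrus.FieldLogger) error {
    for _, name := range disabledControllers {
        if _, ok := enabledControllers[name]; ok {
            log.Infof("Disabling controller: %s", name)
            delete(enabledControllers, name)
            continue
        }
        if _, ok := enabledRuntimeControllers[name]; ok {
            log.Infof("Disabling controller: %s", name)
            delete(enabledRuntimeControllers, name)
            continue
        }
        return errors.Errorf("invalid value for --disable-controllers flag provided: %s", name)
    }
    return nil
}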
@@ -90,7 +90,7 @@ func NewBackupDeletionReconciler(

 func (r *backupDeletionReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	// Make sure the expired requests can be deleted eventually
-	s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.DeleteBackupRequestList{}, time.Hour)
+	s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.DeleteBackupRequestList{}, time.Hour, kube.PeriodicalEnqueueSourceOption{})
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&velerov1api.DeleteBackupRequest{}).
 		Watches(s, nil).
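The new trailing argument is the option type the commit message mentions. Its definition lives in pkg/util/kube and is not shown in this diff; the shape below is inferred from the two call sites that follow, so treat it as a sketch rather than the canonical definition:

// Inferred from the call sites in this PR.
type PeriodicalEnqueueSourceOption struct {
    // FilterFuncs: an object from the periodic List is enqueued only if every
    // predicate returns true.
    FilterFuncs []func(object client.Object) bool
    // OrderFunc may reorder the listed objects before they are enqueued
    // (used later in this diff to put the default BSL first).
    OrderFunc func(objList client.ObjectList) client.ObjectList
}

Passing the zero value, as the deletion controller does here, keeps the previous enqueue-everything behavior.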
@@ -44,35 +44,55 @@ const (
 )

 // BackupStorageLocationReconciler reconciles a BackupStorageLocation object
-type BackupStorageLocationReconciler struct {
-	Ctx                       context.Context
-	Client                    client.Client
-	Scheme                    *runtime.Scheme
-	DefaultBackupLocationInfo storage.DefaultBackupLocationInfo
+type backupStorageLocationReconciler struct {
+	ctx                       context.Context
+	client                    client.Client
+	scheme                    *runtime.Scheme
+	defaultBackupLocationInfo storage.DefaultBackupLocationInfo
 	// use variables to refer to these functions so they can be
 	// replaced with fakes for testing.
-	NewPluginManager  func(logrus.FieldLogger) clientmgmt.Manager
-	BackupStoreGetter persistence.ObjectBackupStoreGetter
+	newPluginManager  func(logrus.FieldLogger) clientmgmt.Manager
+	backupStoreGetter persistence.ObjectBackupStoreGetter

-	Log logrus.FieldLogger
+	log logrus.FieldLogger
 }

+// NewBackupStorageLocationReconciler initializes and returns a backupStorageLocationReconciler struct
+func NewBackupStorageLocationReconciler(
+	ctx context.Context,
+	client client.Client,
+	scheme *runtime.Scheme,
+	defaultBackupLocationInfo storage.DefaultBackupLocationInfo,
+	newPluginManager func(logrus.FieldLogger) clientmgmt.Manager,
+	backupStoreGetter persistence.ObjectBackupStoreGetter,
+	log logrus.FieldLogger) *backupStorageLocationReconciler {
+	return &backupStorageLocationReconciler{
+		ctx:                       ctx,
+		client:                    client,
+		scheme:                    scheme,
+		defaultBackupLocationInfo: defaultBackupLocationInfo,
+		newPluginManager:          newPluginManager,
+		backupStoreGetter:         backupStoreGetter,
+		log:                       log,
+	}
+}
+
 // +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations/status,verbs=get;update;patch
-func (r *BackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (r *backupStorageLocationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	var unavailableErrors []string
 	var location velerov1api.BackupStorageLocation

-	log := r.Log.WithField("controller", BackupStorageLocation).WithField(BackupStorageLocation, req.NamespacedName.String())
+	log := r.log.WithField("controller", BackupStorageLocation).WithField(BackupStorageLocation, req.NamespacedName.String())
 	log.Debug("Validating availability of BackupStorageLocation")

-	locationList, err := storage.ListBackupStorageLocations(r.Ctx, r.Client, req.Namespace)
+	locationList, err := storage.ListBackupStorageLocations(r.ctx, r.client, req.Namespace)
 	if err != nil {
 		log.WithError(err).Error("No BackupStorageLocations found, at least one is required")
 		return ctrl.Result{}, nil
 	}

-	pluginManager := r.NewPluginManager(log)
+	pluginManager := r.newPluginManager(log)
 	defer pluginManager.CleanupClients()

 	var defaultFound bool
@@ -93,7 +113,7 @@ func (r *BackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctr
 	isDefault := location.Spec.Default

 	// TODO(2.0) remove this check since the server default will be deprecated
-	if !defaultFound && location.Name == r.DefaultBackupLocationInfo.StorageLocation {
+	if !defaultFound && location.Name == r.defaultBackupLocationInfo.StorageLocation {
 		// For backward-compatible, to configure the backup storage location as the default if
 		// none of the BSLs be marked as the default and the BSL name matches against the
 		// "velero server --default-backup-storage-location".
@@ -117,12 +137,12 @@ func (r *BackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctr
 			location.Status.Phase = velerov1api.BackupStorageLocationPhaseAvailable
 			location.Status.Message = ""
 		}
-		if err := r.Client.Patch(r.Ctx, &location, client.MergeFrom(original)); err != nil {
+		if err := r.client.Patch(r.ctx, &location, client.MergeFrom(original)); err != nil {
 			log.WithError(err).Error("Error updating BackupStorageLocation phase")
 		}
 	}()

-	backupStore, err := r.BackupStoreGetter.Get(&location, pluginManager, log)
+	backupStore, err := r.backupStoreGetter.Get(&location, pluginManager, log)
 	if err != nil {
 		log.WithError(err).Error("Error getting a backup store")
 		return
@@ -144,11 +164,11 @@ func (r *BackupStorageLocationReconciler) Reconcile(ctx context.Context, req ctr
 	return ctrl.Result{}, nil
 }

-func (r *BackupStorageLocationReconciler) logReconciledPhase(defaultFound bool, locationList velerov1api.BackupStorageLocationList, errs []string) {
+func (r *backupStorageLocationReconciler) logReconciledPhase(defaultFound bool, locationList velerov1api.BackupStorageLocationList, errs []string) {
 	var availableBSLs []*velerov1api.BackupStorageLocation
 	var unAvailableBSLs []*velerov1api.BackupStorageLocation
 	var unknownBSLs []*velerov1api.BackupStorageLocation
-	log := r.Log.WithField("controller", BackupStorageLocation)
+	log := r.log.WithField("controller", BackupStorageLocation)

 	for i, location := range locationList.Items {
 		phase := location.Status.Phase
@@ -181,16 +201,19 @@ func (r *BackupStorageLocationReconciler) logReconciledPhase(defaultFound bool,
 	}
 }

-func (r *BackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) error {
+func (r *backupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	g := kube.NewPeriodicalEnqueueSource(
-		r.Log,
+		r.log,
 		mgr.GetClient(),
 		&velerov1api.BackupStorageLocationList{},
 		bslValidationEnqueuePeriod,
+		// Add filter function to enqueue BSL per ValidationFrequency setting.
+		kube.PeriodicalEnqueueSourceOption{
+			FilterFuncs: []func(object client.Object) bool{
 				func(object client.Object) bool {
 					location := object.(*velerov1api.BackupStorageLocation)
-					return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, r.DefaultBackupLocationInfo.ServerValidationFrequency, r.Log.WithField("controller", BackupStorageLocation))
+					return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, r.defaultBackupLocationInfo.ServerValidationFrequency, r.log.WithField("controller", BackupStorageLocation))
 				},
+			},
+		},
 	)
 	return ctrl.NewControllerManagedBy(mgr).
@@ -79,16 +79,16 @@ var _ = Describe("Backup Storage Location Reconciler", func() {

 		// Setup reconciler
 		Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed())
-		r := BackupStorageLocationReconciler{
-			Ctx:    ctx,
-			Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(),
-			DefaultBackupLocationInfo: storage.DefaultBackupLocationInfo{
+		r := backupStorageLocationReconciler{
+			ctx:    ctx,
+			client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(),
+			defaultBackupLocationInfo: storage.DefaultBackupLocationInfo{
 				StorageLocation:           "location-1",
 				ServerValidationFrequency: 0,
 			},
-			NewPluginManager:  func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
-			BackupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores),
-			Log:               velerotest.NewLogger(),
+			newPluginManager:  func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
+			backupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores),
+			log:               velerotest.NewLogger(),
 		}

 		// Assertions
@@ -101,7 +101,7 @@ var _ = Describe("Backup Storage Location Reconciler", func() {

 			key := client.ObjectKey{Name: location.Name, Namespace: location.Namespace}
 			instance := &velerov1api.BackupStorageLocation{}
-			err = r.Client.Get(ctx, key, instance)
+			err = r.client.Get(ctx, key, instance)
 			Expect(err).To(BeNil())
 			Expect(instance.Spec.Default).To(BeIdenticalTo(tests[i].expectedIsDefault))
 			Expect(instance.Status.Phase).To(BeIdenticalTo(tests[i].expectedPhase))
@@ -144,16 +144,16 @@ var _ = Describe("Backup Storage Location Reconciler", func() {

 		// Setup reconciler
 		Expect(velerov1api.AddToScheme(scheme.Scheme)).To(Succeed())
-		r := BackupStorageLocationReconciler{
-			Ctx:    ctx,
-			Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(),
-			DefaultBackupLocationInfo: storage.DefaultBackupLocationInfo{
+		r := backupStorageLocationReconciler{
+			ctx:    ctx,
+			client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(locations).Build(),
+			defaultBackupLocationInfo: storage.DefaultBackupLocationInfo{
 				StorageLocation:           "default",
 				ServerValidationFrequency: 0,
 			},
-			NewPluginManager:  func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
-			BackupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores),
-			Log:               velerotest.NewLogger(),
+			newPluginManager:  func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
+			backupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores),
+			log:               velerotest.NewLogger(),
 		}

 		// Assertions
@@ -166,7 +166,7 @@ var _ = Describe("Backup Storage Location Reconciler", func() {

 			key := client.ObjectKey{Name: location.Name, Namespace: location.Namespace}
 			instance := &velerov1api.BackupStorageLocation{}
-			err = r.Client.Get(ctx, key, instance)
+			err = r.client.Get(ctx, key, instance)
 			Expect(err).To(BeNil())
 			Expect(instance.Spec.Default).To(BeIdenticalTo(tests[i].expectedIsDefault))
 		}
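The Describe/It/Expect blocks above are the Ginkgo-and-Gomega replacement for the old testify-based tests that the commit message calls out. For readers new to that stack, here is a minimal, self-contained suite of the same shape (the bootstrap normally lives in a separate _suite_test.go file that this diff does not show):

package controller_test

import (
    "testing"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// TestController hands the standard `go test` entry point over to Ginkgo.
func TestController(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Controller Suite")
}

var _ = Describe("Backup Storage Location Reconciler", func() {
    It("uses Gomega matchers instead of testify assertions", func() {
        Expect([]string{"location-1"}).To(HaveLen(1))
    })
})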
@@ -1,5 +1,5 @@
 /*
-Copyright 2020 the Velero contributors.
+Copyright The Velero Contributors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -20,189 +20,116 @@ import (
 	"context"
 	"time"

+	"github.com/apex/log"
+	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
-	snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
-	snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	kuberrs "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/client-go/kubernetes"

 	"github.com/vmware-tanzu/velero/pkg/util/kube"

-	"github.com/vmware-tanzu/velero/internal/storage"
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/features"
-	velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
-	velerov1listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/label"
 	"github.com/vmware-tanzu/velero/pkg/persistence"
 	"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"

+	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
 )

-type backupSyncController struct {
-	*genericController
+const (
+	backupSyncReconcilePeriod = time.Minute
+)

-	backupClient          velerov1client.BackupsGetter
-	kbClient              client.Client
-	podVolumeBackupClient velerov1client.PodVolumeBackupsGetter
-	backupLister          velerov1listers.BackupLister
-	csiVSLister           snapshotv1listers.VolumeSnapshotLister
-	csiSnapshotClient     *snapshotterClientSet.Clientset
-	kubeClient            kubernetes.Interface
+type backupSyncReconciler struct {
+	client                  client.Client
 	namespace               string
-	defaultBackupLocation   string
 	defaultBackupSyncPeriod time.Duration
 	newPluginManager        func(logrus.FieldLogger) clientmgmt.Manager
 	backupStoreGetter       persistence.ObjectBackupStoreGetter
 	logger                  logrus.FieldLogger
 }

-func NewBackupSyncController(
-	backupClient velerov1client.BackupsGetter,
-	kbClient client.Client,
-	podVolumeBackupClient velerov1client.PodVolumeBackupsGetter,
-	backupLister velerov1listers.BackupLister,
-	csiVSLister snapshotv1listers.VolumeSnapshotLister,
-	syncPeriod time.Duration,
+// NewBackupSyncReconciler is used to generate BackupSync reconciler structure.
+func NewBackupSyncReconciler(
+	client client.Client,
 	namespace string,
-	csiSnapshotClient *snapshotterClientSet.Clientset,
-	kubeClient kubernetes.Interface,
-	defaultBackupLocation string,
+	defaultBackupSyncPeriod time.Duration,
 	newPluginManager func(logrus.FieldLogger) clientmgmt.Manager,
 	backupStoreGetter persistence.ObjectBackupStoreGetter,
-	logger logrus.FieldLogger,
-) Interface {
-	if syncPeriod <= 0 {
-		syncPeriod = time.Minute
-	}
-	logger.Infof("Backup sync period is %v", syncPeriod)
-
-	c := &backupSyncController{
-		genericController:     newGenericController(BackupSync, logger),
-		backupClient:          backupClient,
-		kbClient:              kbClient,
-		podVolumeBackupClient: podVolumeBackupClient,
+	logger logrus.FieldLogger) *backupSyncReconciler {
+	return &backupSyncReconciler{
+		client:    client,
 		namespace: namespace,
-		defaultBackupLocation:   defaultBackupLocation,
-		defaultBackupSyncPeriod: syncPeriod,
-		backupLister:            backupLister,
-		csiVSLister:             csiVSLister,
-		csiSnapshotClient:       csiSnapshotClient,
-		kubeClient:              kubeClient,
-
-		// use variables to refer to these functions so they can be
-		// replaced with fakes for testing.
+		defaultBackupSyncPeriod: defaultBackupSyncPeriod,
 		newPluginManager:  newPluginManager,
 		backupStoreGetter: backupStoreGetter,
 		logger:            logger,
 	}
-
-	c.resyncFunc = c.run
-	c.resyncPeriod = 30 * time.Second
-
-	return c
 }

-// orderedBackupLocations returns a new slice with the default backup location first (if it exists),
-// followed by the rest of the locations in no particular order.
-func orderedBackupLocations(locationList *velerov1api.BackupStorageLocationList, defaultLocationName string) []velerov1api.BackupStorageLocation {
-	var result []velerov1api.BackupStorageLocation
-
-	for i := range locationList.Items {
-		if locationList.Items[i].Name == defaultLocationName {
-			// put the default location first
-			result = append(result, locationList.Items[i])
-			// append everything before the default
-			result = append(result, locationList.Items[:i]...)
-			// append everything after the default
-			result = append(result, locationList.Items[i+1:]...)
-
-			return result
-		}
-	}
-
-	return locationList.Items
-}
-
-func (c *backupSyncController) run() {
-	c.logger.Debug("Checking for existing backup storage locations to sync into cluster")
-
-	locationList, err := storage.ListBackupStorageLocations(context.Background(), c.kbClient, c.namespace)
+// Reconcile syncs between the backups in cluster and backups metadata in object store.
+func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := b.logger.WithField("controller", BackupSync)
+	log = log.WithField("backupLocation", req.String())
+	log.Debug("Begin to sync between backups' metadata in BSL object storage and cluster's existing backups.")
+
+	location := &velerov1api.BackupStorageLocation{}
+	err := b.client.Get(ctx, req.NamespacedName, location)
 	if err != nil {
-		c.logger.WithError(err).Error("No backup storage locations found, at least one is required")
-		return
+		if apierrors.IsNotFound(err) {
+			log.Debug("BackupStorageLocation is not found")
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, errors.Wrapf(err, "error getting BackupStorageLocation %s", req.String())
 	}

-	// sync the default backup storage location first, if it exists
-	for _, location := range locationList.Items {
-		if location.Spec.Default {
-			c.defaultBackupLocation = location.Name
-			break
-		}
-	}
-	locations := orderedBackupLocations(&locationList, c.defaultBackupLocation)
-
-	pluginManager := c.newPluginManager(c.logger)
+	pluginManager := b.newPluginManager(log)
 	defer pluginManager.CleanupClients()

-	for _, location := range locations {
-		log := c.logger.WithField("backupLocation", location.Name)
-
-		syncPeriod := c.defaultBackupSyncPeriod
-		if location.Spec.BackupSyncPeriod != nil {
-			syncPeriod = location.Spec.BackupSyncPeriod.Duration
-			if syncPeriod == 0 {
-				log.Debug("Backup sync period for this location is set to 0, skipping sync")
-				continue
-			}
-
-			if syncPeriod < 0 {
-				log.Debug("Backup sync period must be non-negative")
-				syncPeriod = c.defaultBackupSyncPeriod
-			}
-		}
-
-		lastSync := location.Status.LastSyncedTime
-		if lastSync != nil {
-			log.Debug("Checking if backups need to be synced at this time for this location")
-			nextSync := lastSync.Add(syncPeriod)
-			if time.Now().UTC().Before(nextSync) {
-				continue
-			}
-		}
-
-		log.Debug("Checking backup location for backups to sync into cluster")
-
-		backupStore, err := c.backupStoreGetter.Get(&location, pluginManager, log)
+	backupStore, err := b.backupStoreGetter.Get(location, pluginManager, log)
 	if err != nil {
 		log.WithError(err).Error("Error getting backup store for this location")
-		continue
+		return ctrl.Result{}, nil
 	}

 	// get a list of all the backups that are stored in the backup storage location
 	res, err := backupStore.ListBackups()
 	if err != nil {
 		log.WithError(err).Error("Error listing backups in backup store")
-		continue
+		return ctrl.Result{}, nil
 	}
 	backupStoreBackups := sets.NewString(res...)
 	log.WithField("backupCount", len(backupStoreBackups)).Debug("Got backups from backup store")

 	// get a list of all the backups that exist as custom resources in the cluster
-	clusterBackups, err := c.backupLister.Backups(c.namespace).List(labels.Everything())
+	var clusterBackupList velerov1api.BackupList
+	listOption := client.ListOptions{
+		LabelSelector: labels.Everything(),
+		Namespace:     b.namespace,
+	}
+
+	err = b.client.List(ctx, &clusterBackupList, &listOption)
 	if err != nil {
 		log.WithError(errors.WithStack(err)).Error("Error getting backups from cluster, proceeding with sync into cluster")
 	} else {
-		log.WithField("backupCount", len(clusterBackups)).Debug("Got backups from cluster")
+		log.WithField("backupCount", len(clusterBackupList.Items)).Debug("Got backups from cluster")
 	}

 	// get a list of backups that *are* in the backup storage location and *aren't* in the cluster
 	clusterBackupsSet := sets.NewString()
-	for _, b := range clusterBackups {
+	for _, b := range clusterBackupList.Items {
 		clusterBackupsSet.Insert(b.Name)
 	}
 	backupsToSync := backupStoreBackups.Difference(clusterBackupsSet)
@@ -224,7 +151,7 @@ func (c *backupSyncController) run() {
 			continue
 		}

-		backup.Namespace = c.namespace
+		backup.Namespace = b.namespace
 		backup.ResourceVersion = ""

 		// update the StorageLocation field and label since the name of the location
@@ -237,7 +164,7 @@ func (c *backupSyncController) run() {
 		backup.Labels[velerov1api.StorageLocationLabel] = label.GetValidName(backup.Spec.StorageLocation)

 		// attempt to create backup custom resource via API
-		backup, err = c.backupClient.Backups(backup.Namespace).Create(context.TODO(), backup, metav1.CreateOptions{})
+		err = b.client.Create(ctx, backup, &client.CreateOptions{})
 		switch {
 		case err != nil && kuberrs.IsAlreadyExists(err):
 			log.Debug("Backup already exists in cluster")
@@ -274,7 +201,7 @@ func (c *backupSyncController) run() {
 			podVolumeBackup.Namespace = backup.Namespace
 			podVolumeBackup.ResourceVersion = ""

-			_, err = c.podVolumeBackupClient.PodVolumeBackups(backup.Namespace).Create(context.TODO(), podVolumeBackup, metav1.CreateOptions{})
+			err = b.client.Create(ctx, podVolumeBackup, &client.CreateOptions{})
 			switch {
 			case err != nil && kuberrs.IsAlreadyExists(err):
 				log.Debug("Pod volume backup already exists in cluster")
@@ -290,20 +217,25 @@ func (c *backupSyncController) run() {
 		if features.IsEnabled(velerov1api.CSIFeatureFlag) {
 			// we are syncing these objects only to ensure that the storage snapshots are cleaned up
 			// on backup deletion or expiry.
-			log.Info("Syncing CSI volumesnapshotclasses in backup")
+			log.Info("Syncing CSI VolumeSnapshotClasses in backup")
 			vsClasses, err := backupStore.GetCSIVolumeSnapshotClasses(backupName)
 			if err != nil {
-				log.WithError(errors.WithStack(err)).Error("Error getting CSI volumesnapclasses for this backup from backup store")
+				log.WithError(errors.WithStack(err)).Error("Error getting CSI VolumeSnapClasses for this backup from backup store")
 				continue
 			}
 			for _, vsClass := range vsClasses {
 				vsClass.ResourceVersion = ""
-				created, err := c.csiSnapshotClient.SnapshotV1().VolumeSnapshotClasses().Create(context.TODO(), vsClass, metav1.CreateOptions{})
-				if err != nil {
-					log.WithError(errors.WithStack(err)).Errorf("Error syncing volumesnapshotclass %s into cluster", vsClass.Name)
+				err := b.client.Create(ctx, vsClass, &client.CreateOptions{})
+				switch {
+				case err != nil && kuberrs.IsAlreadyExists(err):
+					log.Debugf("VolumeSnapshotClass %s already exists in cluster", vsClass.Name)
+					continue
+				case err != nil && !kuberrs.IsAlreadyExists(err):
+					log.WithError(errors.WithStack(err)).Errorf("Error syncing VolumeSnapshotClass %s into cluster", vsClass.Name)
 					continue
+				default:
+					log.Infof("Created CSI VolumeSnapshotClass %s", vsClass.Name)
 				}
-				log.Infof("Created CSI volumesnapshotclass %s", created.Name)
 			}

 			log.Info("Syncing CSI volumesnapshotcontents in backup")
@@ -317,7 +249,7 @@ func (c *backupSyncController) run() {
 			for _, snapCont := range snapConts {
 				// TODO: Reset ResourceVersion prior to persisting VolumeSnapshotContents
 				snapCont.ResourceVersion = ""
-				created, err := c.csiSnapshotClient.SnapshotV1().VolumeSnapshotContents().Create(context.TODO(), snapCont, metav1.CreateOptions{})
+				err := b.client.Create(ctx, snapCont, &client.CreateOptions{})
 				switch {
 				case err != nil && kuberrs.IsAlreadyExists(err):
 					log.Debugf("volumesnapshotcontent %s already exists in cluster", snapCont.Name)
@@ -326,73 +258,183 @@ func (c *backupSyncController) run() {
 					log.WithError(errors.WithStack(err)).Errorf("Error syncing volumesnapshotcontent %s into cluster", snapCont.Name)
 					continue
 				default:
-					log.Infof("Created CSI volumesnapshotcontent %s", created.Name)
+					log.Infof("Created CSI volumesnapshotcontent %s", snapCont.Name)
 				}
 			}
 		}
 	}

-		c.deleteOrphanedBackups(location.Name, backupStoreBackups, log)
+	b.deleteOrphanedBackups(ctx, location.Name, backupStoreBackups, log)

 	// update the location's last-synced time field
 	statusPatch := client.MergeFrom(location.DeepCopy())
 	location.Status.LastSyncedTime = &metav1.Time{Time: time.Now().UTC()}
-		if err := c.kbClient.Patch(context.Background(), &location, statusPatch); err != nil {
+	if err := b.client.Patch(ctx, location, statusPatch); err != nil {
 		log.WithError(errors.WithStack(err)).Error("Error patching backup location's last-synced time")
-			continue
+		return ctrl.Result{}, nil
 	}
-	}

+	return ctrl.Result{}, nil
 }

+// SetupWithManager is used to set up the controller and its watching sources.
+func (b *backupSyncReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	backupSyncSource := kube.NewPeriodicalEnqueueSource(
+		b.logger,
+		mgr.GetClient(),
+		&velerov1api.BackupStorageLocationList{},
+		backupSyncReconcilePeriod,
+		kube.PeriodicalEnqueueSourceOption{
+			OrderFunc: backupSyncSourceOrderFunc,
+			FilterFuncs: []func(object client.Object) bool{
+				func(object client.Object) bool {
+					location := object.(*velerov1api.BackupStorageLocation)
+					return b.locationFilterFunc(location)
+				},
+			},
+		},
+	)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&velerov1api.BackupStorageLocation{}).
+		// Filter all BSL events, because this controller is supposed to run periodically, not by event.
+		WithEventFilter(predicate.Funcs{
+			CreateFunc: func(ce event.CreateEvent) bool {
+				return false
+			},
+			UpdateFunc: func(ue event.UpdateEvent) bool {
+				return false
+			},
+			DeleteFunc: func(de event.DeleteEvent) bool {
+				return false
+			},
+			GenericFunc: func(ge event.GenericEvent) bool {
+				return false
+			},
+		}).
+		Watches(backupSyncSource, nil).
+		Complete(b)
+}
+
 // deleteOrphanedBackups deletes backup objects (CRDs) from Kubernetes that have the specified location
 // and a phase of Completed, but no corresponding backup in object storage.
-func (c *backupSyncController) deleteOrphanedBackups(locationName string, backupStoreBackups sets.String, log logrus.FieldLogger) {
-	locationSelector := labels.Set(map[string]string{
-		velerov1api.StorageLocationLabel: label.GetValidName(locationName),
-	}).AsSelector()
-
-	backups, err := c.backupLister.Backups(c.namespace).List(locationSelector)
+func (b *backupSyncReconciler) deleteOrphanedBackups(ctx context.Context, locationName string, backupStoreBackups sets.String, log logrus.FieldLogger) {
+	var backupList velerov1api.BackupList
+	listOption := client.ListOptions{
+		LabelSelector: labels.Set(map[string]string{
+			velerov1api.StorageLocationLabel: label.GetValidName(locationName),
+		}).AsSelector(),
+	}
+	err := b.client.List(ctx, &backupList, &listOption)
 	if err != nil {
 		log.WithError(errors.WithStack(err)).Error("Error listing backups from cluster")
 		return
 	}
-	if len(backups) == 0 {
+
+	if len(backupList.Items) == 0 {
 		return
 	}

-	for _, backup := range backups {
+	for _, backup := range backupList.Items {
 		log = log.WithField("backup", backup.Name)
 		if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) {
 			continue
 		}
-		if err := c.backupClient.Backups(backup.Namespace).Delete(context.TODO(), backup.Name, metav1.DeleteOptions{}); err != nil {
+
+		if err := b.client.Delete(ctx, &backup, &client.DeleteOptions{}); err != nil {
 			log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster")
 		} else {
 			log.Debug("Deleted orphaned backup from cluster")
-			c.deleteCSISnapshotsByBackup(backup.Name, log)
+			b.deleteCSISnapshotsByBackup(ctx, backup.Name, log)
 		}
 	}
 }

-func (c *backupSyncController) deleteCSISnapshotsByBackup(backupName string, log logrus.FieldLogger) {
+func (b *backupSyncReconciler) deleteCSISnapshotsByBackup(ctx context.Context, backupName string, log logrus.FieldLogger) {
 	if !features.IsEnabled(velerov1api.CSIFeatureFlag) {
 		return
 	}
 	m := client.MatchingLabels{velerov1api.BackupNameLabel: label.GetValidName(backupName)}
-	if vsList, err := c.csiVSLister.List(label.NewSelectorForBackup(label.GetValidName(backupName))); err != nil {
+	var vsList snapshotv1api.VolumeSnapshotList
+	listOptions := &client.ListOptions{
+		LabelSelector: label.NewSelectorForBackup(label.GetValidName(backupName)),
+	}
+	if err := b.client.List(ctx, &vsList, listOptions); err != nil {
 		log.WithError(err).Warnf("Failed to list volumesnapshots for backup: %s, the deletion will be skipped", backupName)
 	} else {
-		for _, vs := range vsList {
+		for _, vs := range vsList.Items {
 			name := kube.NamespaceAndName(vs.GetObjectMeta())
 			log.Debugf("Deleting volumesnapshot %s", name)
-			if err := c.kbClient.Delete(context.TODO(), vs); err != nil {
+			if err := b.client.Delete(context.TODO(), &vs); err != nil {
 				log.WithError(err).Warnf("Failed to delete volumesnapshot %s", name)
 			}
 		}
 	}
 	vsc := &snapshotv1api.VolumeSnapshotContent{}
 	log.Debugf("Deleting volumesnapshotcontents for backup: %s", backupName)
-	if err := c.kbClient.DeleteAllOf(context.TODO(), vsc, m); err != nil {
+	if err := b.client.DeleteAllOf(context.TODO(), vsc, m); err != nil {
 		log.WithError(err).Warnf("Failed to delete volumesnapshotcontents for backup: %s", backupName)
 	}
 }

+// backupSyncSourceOrderFunc returns a new slice with the default backup location first (if it exists),
+// followed by the rest of the locations in no particular order.
+func backupSyncSourceOrderFunc(objList client.ObjectList) client.ObjectList {
+	inputBSLList := objList.(*velerov1api.BackupStorageLocationList)
+	resultBSLList := &velerov1api.BackupStorageLocationList{}
+	bslArray := make([]runtime.Object, 0)
+
+	if len(inputBSLList.Items) <= 0 {
+		return objList
+	}
+
+	for i := range inputBSLList.Items {
+		location := inputBSLList.Items[i]
+
+		// sync the default backup storage location first, if it exists
+		if location.Spec.Default {
+			// put the default location first
+			bslArray = append(bslArray, &inputBSLList.Items[i])
+			// append everything before the default
+			for _, bsl := range inputBSLList.Items[:i] {
+				bslArray = append(bslArray, &bsl)
+			}
+			// append everything after the default
+			for _, bsl := range inputBSLList.Items[i+1:] {
+				bslArray = append(bslArray, &bsl)
+			}
+			meta.SetList(resultBSLList, bslArray)
+
+			return resultBSLList
+		}
+	}
+
+	// No default BSL found. Return the input.
+	return objList
+}
+
+func (b *backupSyncReconciler) locationFilterFunc(location *velerov1api.BackupStorageLocation) bool {
+	syncPeriod := b.defaultBackupSyncPeriod
+	if location.Spec.BackupSyncPeriod != nil {
+		syncPeriod = location.Spec.BackupSyncPeriod.Duration
+		if syncPeriod == 0 {
+			log.Debug("Backup sync period for this location is set to 0, skipping sync")
+			return false
+		}
+
+		if syncPeriod < 0 {
+			log.Debug("Backup sync period must be non-negative")
+			syncPeriod = b.defaultBackupSyncPeriod
+		}
+	}
+
+	lastSync := location.Status.LastSyncedTime
+	if lastSync != nil {
+		log.Debug("Checking if backups need to be synced at this time for this location")
+		nextSync := lastSync.Add(syncPeriod)
+		if time.Now().UTC().Before(nextSync) {
+			return false
+		}
+	}
+	return true
+}
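One hazard worth noting in the added backupSyncSourceOrderFunc: its two inner loops append &bsl, the address of the range variable, which Go (before 1.22) reuses on every iteration — so those slice entries all end up pointing at the last element visited. An index-based rewrite avoids the aliasing; the helper below is illustrative only, not part of the PR:

package example

import (
    "k8s.io/apimachinery/pkg/runtime"

    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// orderDefaultFirst returns pointers into items with the element at index i
// (the default BSL) first. Indexing into the slice, rather than taking the
// address of a range variable, yields a distinct pointer per element.
func orderDefaultFirst(items []velerov1api.BackupStorageLocation, i int) []runtime.Object {
    bslArray := make([]runtime.Object, 0, len(items))
    bslArray = append(bslArray, &items[i])
    for j := range items {
        if j != i {
            bslArray = append(bslArray, &items[j])
        }
    }
    return bslArray
}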
@@ -18,21 +18,27 @@ package controller

 import (
 	"context"
-	"testing"
+	"fmt"
 	"time"

+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
 	"github.com/sirupsen/logrus"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/validation"
-	core "k8s.io/client-go/testing"

+	ctrl "sigs.k8s.io/controller-runtime"
+	ctrlClient "sigs.k8s.io/controller-runtime/pkg/client"
+	ctrlfake "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/builder"
-	"github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake"
-	informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions"
 	"github.com/vmware-tanzu/velero/pkg/label"
 	persistencemocks "github.com/vmware-tanzu/velero/pkg/persistence/mocks"
 	"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
@@ -40,8 +46,40 @@ import (
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
 )

+func defaultLocation(namespace string) *velerov1api.BackupStorageLocation {
+	return &velerov1api.BackupStorageLocation{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      "location-1",
+		},
+		Spec: velerov1api.BackupStorageLocationSpec{
+			Provider: "objStoreProvider",
+			StorageType: velerov1api.StorageType{
+				ObjectStorage: &velerov1api.ObjectStorageLocation{
+					Bucket: "bucket-1",
+				},
+			},
+			Default: true,
+		},
+	}
+}
+
 func defaultLocationsList(namespace string) []*velerov1api.BackupStorageLocation {
 	return []*velerov1api.BackupStorageLocation{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: namespace,
+				Name:      "location-0",
+			},
+			Spec: velerov1api.BackupStorageLocationSpec{
+				Provider: "objStoreProvider",
+				StorageType: velerov1api.StorageType{
+					ObjectStorage: &velerov1api.ObjectStorageLocation{
+						Bucket: "bucket-1",
+					},
+				},
+			},
+		},
 		{
 			ObjectMeta: metav1.ObjectMeta{
 				Namespace: namespace,
@@ -66,7 +104,21 @@ func defaultLocationsList(namespace string) []*velerov1api.BackupStorageLocation
 				Provider: "objStoreProvider",
 				StorageType: velerov1api.StorageType{
 					ObjectStorage: &velerov1api.ObjectStorageLocation{
-						Bucket: "bucket-2",
+						Bucket: "bucket-1",
+					},
+				},
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: namespace,
+				Name:      "location-3",
+			},
+			Spec: velerov1api.BackupStorageLocationSpec{
+				Provider: "objStoreProvider",
+				StorageType: velerov1api.StorageType{
+					ObjectStorage: &velerov1api.ObjectStorageLocation{
+						Bucket: "bucket-1",
 					},
 				},
 			},
@@ -74,9 +126,8 @@ func defaultLocationsList(namespace string) []*velerov1api.BackupStorageLocation
 	}
 }

-func defaultLocationsListWithLongerLocationName(namespace string) []*velerov1api.BackupStorageLocation {
-	return []*velerov1api.BackupStorageLocation{
-		{
+func defaultLocationWithLongerLocationName(namespace string) *velerov1api.BackupStorageLocation {
+	return &velerov1api.BackupStorageLocation{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      "the-really-long-location-name-that-is-much-more-than-63-characters-1",
@@ -89,25 +140,21 @@ func defaultLocationsListWithLongerLocationName(namespace string) []*velerov1api
 				},
 			},
 		},
-		},
-		{
-			ObjectMeta: metav1.ObjectMeta{
-				Namespace: namespace,
-				Name:      "the-really-long-location-name-that-is-much-more-than-63-characters-2",
-			},
-			Spec: velerov1api.BackupStorageLocationSpec{
-				Provider: "objStoreProvider",
-				StorageType: velerov1api.StorageType{
-					ObjectStorage: &velerov1api.ObjectStorageLocation{
-						Bucket: "bucket-2",
-					},
-				},
-			},
-		},
-	}
 	}
 }

-func TestBackupSyncControllerRun(t *testing.T) {
+func numBackups(c ctrlClient.WithWatch, ns string) (int, error) {
+	var existingK8SBackups velerov1api.BackupList
+	err := c.List(context.TODO(), &existingK8SBackups, &ctrlClient.ListOptions{})
+	if err != nil {
+		return 0, err
+	}
+
+	return len(existingK8SBackups.Items), nil
+}
+
+var _ = Describe("Backup Sync Reconciler", func() {
+	It("Test Backup Sync Reconciler basic function", func() {
 		type cloudBackupData struct {
 			backup           *velerov1api.Backup
 			podVolumeBackups []*velerov1api.PodVolumeBackup
@@ -116,80 +163,55 @@ func TestBackupSyncControllerRun(t *testing.T) {
 		tests := []struct {
 			name                     string
 			namespace                string
-			locations                []*velerov1api.BackupStorageLocation
-			cloudBuckets             map[string][]*cloudBackupData
+			location                 *velerov1api.BackupStorageLocation
+			cloudBackups             []*cloudBackupData
 			existingBackups          []*velerov1api.Backup
 			existingPodVolumeBackups []*velerov1api.PodVolumeBackup
 			longLocationNameEnabled  bool
 		}{
 			{
 				name:      "no cloud backups",
 				namespace: "ns-1",
+				location:  defaultLocation("ns-1"),
 			},
 			{
 				name:      "normal case",
 				namespace: "ns-1",
-				locations: defaultLocationsList("ns-1"),
-				cloudBuckets: map[string][]*cloudBackupData{
-					"bucket-1": {
-						&cloudBackupData{
+				location:  defaultLocation("ns-1"),
+				cloudBackups: []*cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-1").Result(),
 					},
-						&cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-2").Result(),
 					},
 				},
-					"bucket-2": {
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-1", "backup-3").Result(),
-						},
-					},
-				},
 			},
 			{
 				name:      "all synced backups get created in Velero server's namespace",
 				namespace: "velero",
-				locations: defaultLocationsList("velero"),
-				cloudBuckets: map[string][]*cloudBackupData{
-					"bucket-1": {
-						&cloudBackupData{
+				location:  defaultLocation("velero"),
+				cloudBackups: []*cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-1").Result(),
 					},
-						&cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-2").Result(),
 					},
 				},
-					"bucket-2": {
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-2", "backup-3").Result(),
-						},
-						&cloudBackupData{
-							backup: builder.ForBackup("velero", "backup-4").Result(),
-						},
-					},
-				},
 			},
 			{
 				name:      "new backups get synced when some cloud backups already exist in the cluster",
 				namespace: "ns-1",
-				locations: defaultLocationsList("ns-1"),
-				cloudBuckets: map[string][]*cloudBackupData{
-					"bucket-1": {
-						&cloudBackupData{
+				location:  defaultLocation("ns-1"),
+				cloudBackups: []*cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-1").Result(),
 					},
-						&cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-2").Result(),
 					},
 				},
-					"bucket-2": {
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-1", "backup-3").Result(),
-						},
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-1", "backup-4").Result(),
-						},
-					},
-				},
 				existingBackups: []*velerov1api.Backup{
 					// add a label to each existing backup so we can differentiate it from the cloud
 					// backup during verification
@@ -200,14 +222,12 @@ func TestBackupSyncControllerRun(t *testing.T) {
 			{
 				name:      "existing backups without a StorageLocation get it filled in",
 				namespace: "ns-1",
-				locations: defaultLocationsList("ns-1"),
-				cloudBuckets: map[string][]*cloudBackupData{
-					"bucket-1": {
-						&cloudBackupData{
+				location:  defaultLocation("ns-1"),
+				cloudBackups: []*cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-1").Result(),
 					},
 				},
 				existingBackups: []*velerov1api.Backup{
 					// add a label to each existing backup so we can differentiate it from the cloud
 					// backup during verification
@@ -217,111 +237,67 @@ func TestBackupSyncControllerRun(t *testing.T) {
 			{
 				name:      "backup storage location names and labels get updated",
 				namespace: "ns-1",
-				locations: defaultLocationsList("ns-1"),
-				cloudBuckets: map[string][]*cloudBackupData{
-					"bucket-1": {
-						&cloudBackupData{
+				location:  defaultLocation("ns-1"),
+				cloudBackups: []*cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-1").StorageLocation("foo").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "foo")).Result(),
 					},
-						&cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-2").Result(),
 					},
 				},
-					"bucket-2": {
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-1", "backup-3").StorageLocation("bar").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "bar")).Result(),
-						},
-					},
-				},
 			},
 			{
 				name:                    "backup storage location names and labels get updated with location name greater than 63 chars",
 				namespace:               "ns-1",
-				locations:               defaultLocationsListWithLongerLocationName("ns-1"),
+				location:                defaultLocationWithLongerLocationName("ns-1"),
 				longLocationNameEnabled: true,
-				cloudBuckets: map[string][]*cloudBackupData{
-					"bucket-1": {
-						&cloudBackupData{
+				cloudBackups: []*cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-1").StorageLocation("foo").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "foo")).Result(),
 					},
-						&cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-2").Result(),
 					},
 				},
-					"bucket-2": {
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-1", "backup-3").StorageLocation("bar").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "bar")).Result(),
-						},
-					},
-				},
 			},
 			{
 				name:      "all synced backups and pod volume backups get created in Velero server's namespace",
 				namespace: "ns-1",
-				locations: defaultLocationsList("ns-1"),
-				cloudBuckets: map[string][]*cloudBackupData{
-					"bucket-1": {
-						&cloudBackupData{
+				location:  defaultLocation("ns-1"),
+				cloudBackups: []*cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-1").Result(),
 						podVolumeBackups: []*velerov1api.PodVolumeBackup{
 							builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(),
 						},
 					},
-						&cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-2").Result(),
 						podVolumeBackups: []*velerov1api.PodVolumeBackup{
 							builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(),
 						},
 					},
 				},
-					"bucket-2": {
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-1", "backup-3").Result(),
-						},
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-1", "backup-4").Result(),
-							podVolumeBackups: []*velerov1api.PodVolumeBackup{
-								builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(),
-								builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(),
-								builder.ForPodVolumeBackup("ns-1", "pvb-3").Result(),
-							},
-						},
-					},
-				},
 			},
 			{
 				name:      "new pod volume backups get synched when some pod volume backups already exist in the cluster",
 				namespace: "ns-1",
-				locations: defaultLocationsList("ns-1"),
-				cloudBuckets: map[string][]*cloudBackupData{
-					"bucket-1": {
-						&cloudBackupData{
+				location:  defaultLocation("ns-1"),
+				cloudBackups: []*cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-1").Result(),
 						podVolumeBackups: []*velerov1api.PodVolumeBackup{
 							builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(),
 						},
 					},
-						&cloudBackupData{
+					{
 						backup: builder.ForBackup("ns-1", "backup-2").Result(),
 						podVolumeBackups: []*velerov1api.PodVolumeBackup{
 							builder.ForPodVolumeBackup("ns-1", "pvb-3").Result(),
 						},
 					},
 				},
-					"bucket-2": {
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-1", "backup-3").Result(),
-						},
-						&cloudBackupData{
-							backup: builder.ForBackup("ns-1", "backup-4").Result(),
-							podVolumeBackups: []*velerov1api.PodVolumeBackup{
-								builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(),
-								builder.ForPodVolumeBackup("ns-1", "pvb-5").Result(),
-								builder.ForPodVolumeBackup("ns-1", "pvb-6").Result(),
-							},
-						},
-					},
-				},
 				existingPodVolumeBackups: []*velerov1api.PodVolumeBackup{
 					builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(),
 					builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(),
@@ -330,84 +306,65 @@ func TestBackupSyncControllerRun(t *testing.T) {
 		}

 		for _, test := range tests {
-			t.Run(test.name, func(t *testing.T) {
 			var (
-				client          = fake.NewSimpleClientset()
-				fakeClient      = velerotest.NewFakeControllerRuntimeClient(t)
-				sharedInformers = informers.NewSharedInformerFactory(client, 0)
+				client        = ctrlfake.NewClientBuilder().Build()
 				pluginManager = &pluginmocks.Manager{}
 				backupStores  = make(map[string]*persistencemocks.BackupStore)
 			)

-			c := NewBackupSyncController(
-				client.VeleroV1(),
-				fakeClient,
-				client.VeleroV1(),
-				sharedInformers.Velero().V1().Backups().Lister(),
-				nil, // csiVSLister
-				time.Duration(0),
-				test.namespace,
-				nil, // csiSnapshotClient
-				nil, // kubeClient
-				"",
-				func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
-				NewFakeObjectBackupStoreGetter(backupStores),
-				velerotest.NewLogger(),
-			).(*backupSyncController)
-
 			pluginManager.On("CleanupClients").Return(nil)

-			for _, location := range test.locations {
-				require.NoError(t, fakeClient.Create(context.Background(), location))
-				backupStores[location.Name] = &persistencemocks.BackupStore{}
+			r := backupSyncReconciler{
+				client:                  client,
+				namespace:               test.namespace,
+				defaultBackupSyncPeriod: time.Second * 10,
+				newPluginManager:        func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
+				backupStoreGetter:       NewFakeObjectBackupStoreGetter(backupStores),
+				logger:                  velerotest.NewLogger(),
 			}

-			for _, location := range test.locations {
-				backupStore, ok := backupStores[location.Name]
-				require.True(t, ok, "no mock backup store for location %s", location.Name)
+			if test.location != nil {
+				Expect(r.client.Create(ctx, test.location)).ShouldNot(HaveOccurred())
+				backupStores[test.location.Name] = &persistencemocks.BackupStore{}
+
+				backupStore, ok := backupStores[test.location.Name]
+				Expect(ok).To(BeTrue(), "no mock backup store for location %s", test.location.Name)

 				var backupNames []string
-				for _, bucket := range test.cloudBuckets[location.Spec.ObjectStorage.Bucket] {
-					backupNames = append(backupNames, bucket.backup.Name)
-					backupStore.On("GetBackupMetadata", bucket.backup.Name).Return(bucket.backup, nil)
-					backupStore.On("GetPodVolumeBackups", bucket.backup.Name).Return(bucket.podVolumeBackups, nil)
+				for _, backup := range test.cloudBackups {
+					backupNames = append(backupNames, backup.backup.Name)
+					backupStore.On("GetBackupMetadata", backup.backup.Name).Return(backup.backup, nil)
+					backupStore.On("GetPodVolumeBackups", backup.backup.Name).Return(backup.podVolumeBackups, nil)
 				}
 				backupStore.On("ListBackups").Return(backupNames, nil)
 			}

 			for _, existingBackup := range test.existingBackups {
-				require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(existingBackup))
-
-				_, err := client.VeleroV1().Backups(test.namespace).Create(context.TODO(), existingBackup, metav1.CreateOptions{})
-				require.NoError(t, err)
+				err := client.Create(context.TODO(), existingBackup, &ctrlClient.CreateOptions{})
+				Expect(err).ShouldNot(HaveOccurred())
 			}

 			for _, existingPodVolumeBackup := range test.existingPodVolumeBackups {
-				require.NoError(t, sharedInformers.Velero().V1().PodVolumeBackups().Informer().GetStore().Add(existingPodVolumeBackup))
-
-				_, err := client.VeleroV1().PodVolumeBackups(test.namespace).Create(context.TODO(), existingPodVolumeBackup, metav1.CreateOptions{})
-				require.NoError(t, err)
+				err := client.Create(context.TODO(), existingPodVolumeBackup, &ctrlClient.CreateOptions{})
+				Expect(err).ShouldNot(HaveOccurred())
 			}
-			client.ClearActions()

-			c.run()
+			actualResult, err := r.Reconcile(ctx, ctrl.Request{
+				NamespacedName: types.NamespacedName{Namespace: test.location.Namespace, Name: test.location.Name},
+			})

-			for bucket, backupDataSet := range test.cloudBuckets {
-				// figure out which location this bucket is for; we need this for verification
-				// purposes later
-				var location *velerov1api.BackupStorageLocation
-				for _, loc := range test.locations {
-					if loc.Spec.ObjectStorage.Bucket == bucket {
-						location = loc
-						break
-					}
-				}
-				require.NotNil(t, location)
+			Expect(actualResult).To(BeEquivalentTo(ctrl.Result{}))
+			Expect(err).To(BeNil())

 			// process the cloud backups
-				for _, cloudBackupData := range backupDataSet {
-					obj, err := client.VeleroV1().Backups(test.namespace).Get(context.TODO(), cloudBackupData.backup.Name, metav1.GetOptions{})
-					require.NoError(t, err)
+			for _, cloudBackupData := range test.cloudBackups {
+				obj := &velerov1api.Backup{}
+				err := client.Get(
+					context.TODO(),
+					types.NamespacedName{
+						Namespace: cloudBackupData.backup.Namespace,
+						Name:      cloudBackupData.backup.Name},
+					obj)
+				Expect(err).To(BeNil())

 				// did this cloud backup already exist in the cluster?
 				var existing *velerov1api.Backup
|
|||
|
||||
// verify that the in-cluster backup has its storage location populated, if it's not already.
|
||||
expected := existing.DeepCopy()
|
||||
expected.Spec.StorageLocation = location.Name
|
||||
expected.Spec.StorageLocation = test.location.Name
|
||||
|
||||
assert.Equal(t, expected, obj)
|
||||
Expect(expected).To(BeEquivalentTo(obj))
|
||||
} else {
|
||||
// verify that the storage location field and label are set properly
|
||||
assert.Equal(t, location.Name, obj.Spec.StorageLocation)
|
||||
Expect(test.location.Name).To(BeEquivalentTo(obj.Spec.StorageLocation))
|
||||
|
||||
locationName := location.Name
|
||||
locationName := test.location.Name
|
||||
if test.longLocationNameEnabled {
|
||||
locationName = label.GetValidName(locationName)
|
||||
}
|
||||
assert.Equal(t, locationName, obj.Labels[velerov1api.StorageLocationLabel])
|
||||
assert.Equal(t, true, len(obj.Labels[velerov1api.StorageLocationLabel]) <= validation.DNS1035LabelMaxLength)
|
||||
Expect(locationName).To(BeEquivalentTo(obj.Labels[velerov1api.StorageLocationLabel]))
|
||||
Expect(len(obj.Labels[velerov1api.StorageLocationLabel]) <= validation.DNS1035LabelMaxLength).To(BeTrue())
|
||||
}
|
||||
|
||||
// process the cloud pod volume backups for this backup, if any
|
||||
for _, podVolumeBackup := range cloudBackupData.podVolumeBackups {
|
||||
objPodVolumeBackup, err := client.VeleroV1().PodVolumeBackups(test.namespace).Get(context.TODO(), podVolumeBackup.Name, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
objPodVolumeBackup := &velerov1api.PodVolumeBackup{}
|
||||
err := client.Get(
|
||||
context.TODO(),
|
||||
types.NamespacedName{
|
||||
Namespace: podVolumeBackup.Namespace,
|
||||
Name: podVolumeBackup.Name,
|
||||
},
|
||||
objPodVolumeBackup)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
// did this cloud pod volume backup already exist in the cluster?
|
||||
var existingPodVolumeBackup *velerov1api.PodVolumeBackup
|
||||
|
@ -457,16 +421,16 @@ func TestBackupSyncControllerRun(t *testing.T) {
|
|||
// if this cloud pod volume backup already exists in the cluster, make sure that what we get from the
|
||||
// client is the existing backup, not the cloud one.
|
||||
expected := existingPodVolumeBackup.DeepCopy()
|
||||
assert.Equal(t, expected, objPodVolumeBackup)
|
||||
Expect(expected).To(BeEquivalentTo(objPodVolumeBackup))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteOrphanedBackups(t *testing.T) {
|
||||
It("Test deleting orphaned backups.", func() {
|
||||
longLabelName := "the-really-long-location-name-that-is-much-more-than-63-characters"
|
||||
|
||||
baseBuilder := func(name string) *builder.BackupBuilder {
|
||||
return builder.ForBackup("ns-1", name).ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "default"))
|
||||
}
|
||||
|
@ -477,6 +441,7 @@ func TestDeleteOrphanedBackups(t *testing.T) {
|
|||
k8sBackups []*velerov1api.Backup
|
||||
namespace string
|
||||
expectedDeletes sets.String
|
||||
useLongBSLName bool
|
||||
}{
|
||||
{
|
||||
name: "no overlapping backups",
|
||||
|
@ -551,75 +516,6 @@ func TestDeleteOrphanedBackups(t *testing.T) {
|
|||
},
|
||||
expectedDeletes: sets.NewString("backup-C"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
var (
|
||||
client = fake.NewSimpleClientset()
|
||||
fakeClient = velerotest.NewFakeControllerRuntimeClient(t)
|
||||
sharedInformers = informers.NewSharedInformerFactory(client, 0)
|
||||
)
|
||||
|
||||
c := NewBackupSyncController(
|
||||
client.VeleroV1(),
|
||||
fakeClient,
|
||||
client.VeleroV1(),
|
||||
sharedInformers.Velero().V1().Backups().Lister(),
|
||||
nil, // csiVSLister
|
||||
time.Duration(0),
|
||||
test.namespace,
|
||||
nil, // csiSnapshotClient
|
||||
nil, // kubeClient
|
||||
"",
|
||||
nil, // new plugin manager func
|
||||
nil, // backupStoreGetter
|
||||
velerotest.NewLogger(),
|
||||
).(*backupSyncController)
|
||||
|
||||
expectedDeleteActions := make([]core.Action, 0)
|
||||
|
||||
for _, backup := range test.k8sBackups {
|
||||
// add test backup to informer
|
||||
require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup), "Error adding backup to informer")
|
||||
|
||||
// add test backup to client
|
||||
_, err := client.VeleroV1().Backups(test.namespace).Create(context.TODO(), backup, metav1.CreateOptions{})
|
||||
require.NoError(t, err, "Error adding backup to clientset")
|
||||
|
||||
// if we expect this backup to be deleted, set up the expected DeleteAction
|
||||
if test.expectedDeletes.Has(backup.Name) {
|
||||
actionDelete := core.NewDeleteAction(
|
||||
velerov1api.SchemeGroupVersion.WithResource("backups"),
|
||||
test.namespace,
|
||||
backup.Name,
|
||||
)
|
||||
expectedDeleteActions = append(expectedDeleteActions, actionDelete)
|
||||
}
|
||||
}
|
||||
|
||||
c.deleteOrphanedBackups("default", test.cloudBackups, velerotest.NewLogger())
|
||||
|
||||
numBackups, err := numBackups(t, client, c.namespace)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expected := len(test.k8sBackups) - len(test.expectedDeletes)
|
||||
assert.Equal(t, expected, numBackups)
|
||||
|
||||
velerotest.CompareActions(t, expectedDeleteActions, getDeleteActions(client.Actions()))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) {
|
||||
longLabelName := "the-really-long-location-name-that-is-much-more-than-63-characters"
|
||||
tests := []struct {
|
||||
name string
|
||||
cloudBackups sets.String
|
||||
k8sBackups []*velerov1api.Backup
|
||||
namespace string
|
||||
expectedDeletes sets.String
|
||||
}{
|
||||
{
|
||||
name: "some overlapping backups",
|
||||
namespace: "ns-1",
|
||||
|
@ -645,42 +541,32 @@ func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) {
|
|||
Result(),
|
||||
},
|
||||
expectedDeletes: sets.NewString("backup-C"),
|
||||
useLongBSLName: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
var (
|
||||
client = fake.NewSimpleClientset()
|
||||
fakeClient = velerotest.NewFakeControllerRuntimeClient(t)
|
||||
sharedInformers = informers.NewSharedInformerFactory(client, 0)
|
||||
client = ctrlfake.NewClientBuilder().Build()
|
||||
pluginManager = &pluginmocks.Manager{}
|
||||
backupStores = make(map[string]*persistencemocks.BackupStore)
|
||||
)
|
||||
|
||||
c := NewBackupSyncController(
|
||||
client.VeleroV1(),
|
||||
fakeClient,
|
||||
client.VeleroV1(),
|
||||
sharedInformers.Velero().V1().Backups().Lister(),
|
||||
nil, // csiVSLister
|
||||
time.Duration(0),
|
||||
test.namespace,
|
||||
nil, // csiSnapshotClient
|
||||
nil, // kubeClient
|
||||
"",
|
||||
nil, // new plugin manager func
|
||||
nil, // backupStoreGetter
|
||||
velerotest.NewLogger(),
|
||||
).(*backupSyncController)
|
||||
r := backupSyncReconciler{
|
||||
client: client,
|
||||
namespace: test.namespace,
|
||||
defaultBackupSyncPeriod: time.Second * 10,
|
||||
newPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
|
||||
backupStoreGetter: NewFakeObjectBackupStoreGetter(backupStores),
|
||||
logger: velerotest.NewLogger(),
|
||||
}
|
||||
|
||||
expectedDeleteActions := make([]core.Action, 0)
|
||||
|
||||
for _, backup := range test.k8sBackups {
|
||||
// add test backup to informer
|
||||
require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup), "Error adding backup to informer")
|
||||
|
||||
// add test backup to client
|
||||
_, err := client.VeleroV1().Backups(test.namespace).Create(context.TODO(), backup, metav1.CreateOptions{})
|
||||
require.NoError(t, err, "Error adding backup to clientset")
|
||||
err := client.Create(context.TODO(), backup, &ctrlClient.CreateOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
// if we expect this backup to be deleted, set up the expected DeleteAction
|
||||
if test.expectedDeletes.Has(backup.Name) {
|
||||
|
@ -693,45 +579,45 @@ func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
c.deleteOrphanedBackups(longLabelName, test.cloudBackups, velerotest.NewLogger())
|
||||
bslName := "default"
|
||||
if test.useLongBSLName {
|
||||
bslName = longLabelName
|
||||
}
|
||||
r.deleteOrphanedBackups(ctx, bslName, test.cloudBackups, velerotest.NewLogger())
|
||||
|
||||
numBackups, err := numBackups(t, client, c.namespace)
|
||||
assert.NoError(t, err)
|
||||
numBackups, err := numBackups(client, r.namespace)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
fmt.Println("")
|
||||
|
||||
expected := len(test.k8sBackups) - len(test.expectedDeletes)
|
||||
assert.Equal(t, expected, numBackups)
|
||||
|
||||
velerotest.CompareActions(t, expectedDeleteActions, getDeleteActions(client.Actions()))
|
||||
Expect(expected).To(BeEquivalentTo(numBackups))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func getDeleteActions(actions []core.Action) []core.Action {
|
||||
var deleteActions []core.Action
|
||||
for _, action := range actions {
|
||||
if action.GetVerb() == "delete" {
|
||||
deleteActions = append(deleteActions, action)
|
||||
}
|
||||
}
|
||||
return deleteActions
|
||||
}
|
||||
It("Test moving default BSL at the head of BSL array.", func() {
|
||||
locationList := &velerov1api.BackupStorageLocationList{}
|
||||
objArray := make([]runtime.Object, 0)
|
||||
|
||||
func numBackups(t *testing.T, c *fake.Clientset, ns string) (int, error) {
|
||||
t.Helper()
|
||||
existingK8SBackups, err := c.VeleroV1().Backups(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
// Generate BSL array.
|
||||
locations := defaultLocationsList("velero")
|
||||
for _, bsl := range locations {
|
||||
objArray = append(objArray, bsl)
|
||||
}
|
||||
|
||||
return len(existingK8SBackups.Items), nil
|
||||
}
|
||||
meta.SetList(locationList, objArray)
|
||||
|
||||
func numPodVolumeBackups(t *testing.T, c *fake.Clientset, ns string) (int, error) {
|
||||
t.Helper()
|
||||
existingK8SPodvolumeBackups, err := c.VeleroV1().PodVolumeBackups(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
testObjList := backupSyncSourceOrderFunc(locationList)
|
||||
testObjArray, err := meta.ExtractList(testObjList)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
return len(existingK8SPodvolumeBackups.Items), nil
|
||||
}
|
||||
expectLocation := testObjArray[0].(*velerov1api.BackupStorageLocation)
|
||||
Expect(expectLocation.Spec.Default).To(BeEquivalentTo(true))
|
||||
|
||||
// If BSL list without default BSL is passed in, the output should be same with input.
|
||||
locationList.Items = testObjList.(*velerov1api.BackupStorageLocationList).Items[1:]
|
||||
testObjList = backupSyncSourceOrderFunc(locationList)
|
||||
Expect(testObjList).To(BeEquivalentTo(locationList))
|
||||
|
||||
})
|
||||
})
|
||||
|
|
|
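The Ginkgo block above asserts that backupSyncSourceOrderFunc moves the default BSL to the head of the list and returns a list without a default unchanged. The function's body is not part of this diff; the following is a minimal sketch consistent with those assertions, where the name orderDefaultFirst and the copy strategy are ours, not the commit's:

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// orderDefaultFirst returns the BSL list with the default location moved to
// the head; when no default exists, the input list is returned unchanged.
func orderDefaultFirst(objList client.ObjectList) client.ObjectList {
	inList, ok := objList.(*velerov1api.BackupStorageLocationList)
	if !ok {
		return objList
	}
	defaultIdx := -1
	for i := range inList.Items {
		if inList.Items[i].Spec.Default {
			defaultIdx = i
			break
		}
	}
	if defaultIdx < 0 {
		return objList // no default BSL: output equals input
	}
	ordered := &velerov1api.BackupStorageLocationList{}
	objs := make([]runtime.Object, 0, len(inList.Items))
	objs = append(objs, &inList.Items[defaultIdx])
	for i := range inList.Items {
		if i != defaultIdx {
			objs = append(objs, &inList.Items[i])
		}
	}
	_ = meta.SetList(ordered, objs) // error ignored in this sketch
	return ordered
}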
@ -73,7 +73,7 @@ func NewGCReconciler(
// Other Events will be filtered to decrease the number of reconcile calls. In particular, UpdateEvent must be filtered since we removed
// the backup status sub-resource in v1.9; every change to the status is now treated as an UpdateEvent and would trigger a reconcile call.
func (c *gcReconciler) SetupWithManager(mgr ctrl.Manager) error {
s := kube.NewPeriodicalEnqueueSource(c.logger, mgr.GetClient(), &velerov1api.BackupList{}, c.frequency)
s := kube.NewPeriodicalEnqueueSource(c.logger, mgr.GetClient(), &velerov1api.BackupList{}, c.frequency, kube.PeriodicalEnqueueSourceOption{})
return ctrl.NewControllerManagedBy(mgr).
For(&velerov1api.Backup{}).
WithEventFilter(predicate.Funcs{
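The predicate.Funcs body attached above is truncated in this hunk. As a hedged illustration only, not the commit's actual filter, a predicate that drops Update events so that status-only changes never reach the reconciler could look like this:

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// Illustrative only: drop every UpdateEvent so status churn on Backup
// objects does not trigger extra reconciles; unset funcs default to true,
// so Create, Delete, and Generic events still pass through.
var gcEventFilter = predicate.Funcs{
	UpdateFunc: func(_ event.UpdateEvent) bool { return false },
}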
@ -69,7 +69,7 @@ func NewResticRepoReconciler(namespace string, logger logrus.FieldLogger, client
}

func (r *ResticRepoReconciler) SetupWithManager(mgr ctrl.Manager) error {
s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.BackupRepositoryList{}, repoSyncPeriod)
s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.BackupRepositoryList{}, repoSyncPeriod, kube.PeriodicalEnqueueSourceOption{})
return ctrl.NewControllerManagedBy(mgr).
For(&velerov1api.BackupRepository{}).
Watches(s, nil).

@ -65,7 +65,7 @@ func NewScheduleReconciler(
}

func (c *scheduleReconciler) SetupWithManager(mgr ctrl.Manager) error {
s := kube.NewPeriodicalEnqueueSource(c.logger, mgr.GetClient(), &velerov1.ScheduleList{}, scheduleSyncPeriod)
s := kube.NewPeriodicalEnqueueSource(c.logger, mgr.GetClient(), &velerov1.ScheduleList{}, scheduleSyncPeriod, kube.PeriodicalEnqueueSourceOption{})
return ctrl.NewControllerManagedBy(mgr).
For(&velerov1.Schedule{}).
Watches(s, nil).
@ -34,13 +34,18 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
)

func NewPeriodicalEnqueueSource(logger logrus.FieldLogger, client client.Client, objList client.ObjectList, period time.Duration, filters ...func(object client.Object) bool) *PeriodicalEnqueueSource {
func NewPeriodicalEnqueueSource(
logger logrus.FieldLogger,
client client.Client,
objList client.ObjectList,
period time.Duration,
option PeriodicalEnqueueSourceOption) *PeriodicalEnqueueSource {
return &PeriodicalEnqueueSource{
logger: logger.WithField("resource", reflect.TypeOf(objList).String()),
Client: client,
objList: objList,
period: period,
filterFuncs: filters,
option: option,
}
}

@ -52,7 +57,12 @@ type PeriodicalEnqueueSource struct {
logger logrus.FieldLogger
objList client.ObjectList
period time.Duration
filterFuncs []func(object client.Object) bool
option PeriodicalEnqueueSourceOption
}

type PeriodicalEnqueueSourceOption struct {
FilterFuncs []func(object client.Object) bool
OrderFunc func(objList client.ObjectList) client.ObjectList
}

func (p *PeriodicalEnqueueSource) Start(ctx context.Context, h handler.EventHandler, q workqueue.RateLimitingInterface, pre ...predicate.Predicate) error {

@ -66,13 +76,16 @@ func (p *PeriodicalEnqueueSource) Start(ctx context.Context, h handler.EventHand
p.logger.Debug("no resources, skip")
return
}
if p.option.OrderFunc != nil {
p.objList = p.option.OrderFunc(p.objList)
}
if err := meta.EachListItem(p.objList, func(object runtime.Object) error {
obj, ok := object.(metav1.Object)
if !ok {
p.logger.Error("%s's type isn't metav1.Object", object.GetObjectKind().GroupVersionKind().String())
return nil
}
for _, filter := range p.filterFuncs {
for _, filter := range p.option.FilterFuncs {
if filter != nil {
if enqueueObj := filter(object.(client.Object)); !enqueueObj {
p.logger.Debugf("skip enqueue object %s/%s due to filter function.", obj.GetNamespace(), obj.GetName())
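With the filter functions folded into PeriodicalEnqueueSourceOption, callers construct the source as below. This is a minimal sketch based on the signature in the hunk above; the kube helper import path, the one-minute period, and the namespace filter are our assumptions for illustration:

import (
	"time"

	"github.com/sirupsen/logrus"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

// newBSLSource wires a periodically-enqueueing source for BackupStorageLocations.
// Passing an empty kube.PeriodicalEnqueueSourceOption{} reproduces the old
// no-filter, no-ordering behavior.
func newBSLSource(mgr ctrl.Manager, logger logrus.FieldLogger) *kube.PeriodicalEnqueueSource {
	return kube.NewPeriodicalEnqueueSource(
		logger,
		mgr.GetClient(),
		&velerov1api.BackupStorageLocationList{},
		time.Minute, // hypothetical enqueue period
		kube.PeriodicalEnqueueSourceOption{
			// Illustrative filter: only enqueue locations in the "velero" namespace.
			FilterFuncs: []func(object client.Object) bool{
				func(object client.Object) bool {
					return object.GetNamespace() == "velero"
				},
			},
		},
	)
}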
@ -23,11 +23,14 @@ import (
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/util/workqueue"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"github.com/vmware-tanzu/velero/internal/storage"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@ -39,7 +42,7 @@ func TestStart(t *testing.T) {
ctx, cancelFunc := context.WithCancel(context.TODO())
client := (&fake.ClientBuilder{}).Build()
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter())
source := NewPeriodicalEnqueueSource(logrus.WithContext(ctx), client, &velerov1.ScheduleList{}, 1*time.Second)
source := NewPeriodicalEnqueueSource(logrus.WithContext(ctx), client, &velerov1.ScheduleList{}, 1*time.Second, PeriodicalEnqueueSourceOption{})

require.Nil(t, source.Start(ctx, nil, queue))

@ -76,9 +79,11 @@ func TestFilter(t *testing.T) {
client,
&velerov1.BackupStorageLocationList{},
1*time.Second,
func(object crclient.Object) bool {
PeriodicalEnqueueSourceOption{
FilterFuncs: []func(object crclient.Object) bool{func(object crclient.Object) bool {
location := object.(*velerov1.BackupStorageLocation)
return storage.IsReadyToValidate(location.Spec.ValidationFrequency, location.Status.LastValidationTime, 1*time.Minute, logrus.WithContext(ctx).WithField("BackupStorageLocation", location.Name))
}},
},
)

@ -104,3 +109,72 @@ func TestFilter(t *testing.T) {

cancelFunc()
}

func TestOrder(t *testing.T) {
require.Nil(t, velerov1.AddToScheme(scheme.Scheme))

ctx, cancelFunc := context.WithCancel(context.TODO())
client := (&fake.ClientBuilder{}).Build()
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter())
source := NewPeriodicalEnqueueSource(
logrus.WithContext(ctx),
client,
&velerov1.BackupStorageLocationList{},
1*time.Second,
PeriodicalEnqueueSourceOption{
OrderFunc: func(objList crclient.ObjectList) crclient.ObjectList {
locationList := &velerov1.BackupStorageLocationList{}
objArray := make([]runtime.Object, 0)

// Generate BSL array.
locations, _ := meta.ExtractList(objList)
// Swap the two BSLs so the default one ends up at the head of the array.
objArray = append(objArray, locations[1])
objArray = append(objArray, locations[0])

meta.SetList(locationList, objArray)

return locationList
},
},
)

require.Nil(t, source.Start(ctx, nil, queue))

// Create two BSLs whose validation frequency is set to zero;
// the second one is marked as the default location.
require.Nil(t, client.Create(ctx, &velerov1.BackupStorageLocation{
ObjectMeta: metav1.ObjectMeta{
Name: "location1",
Namespace: "default",
},
Spec: velerov1.BackupStorageLocationSpec{
ValidationFrequency: &metav1.Duration{Duration: 0},
},
Status: velerov1.BackupStorageLocationStatus{
LastValidationTime: &metav1.Time{Time: time.Now()},
},
}))
require.Nil(t, client.Create(ctx, &velerov1.BackupStorageLocation{
ObjectMeta: metav1.ObjectMeta{
Name: "location2",
Namespace: "default",
},
Spec: velerov1.BackupStorageLocationSpec{
ValidationFrequency: &metav1.Duration{Duration: 0},
Default: true,
},
Status: velerov1.BackupStorageLocationStatus{
LastValidationTime: &metav1.Time{Time: time.Now()},
},
}))
time.Sleep(2 * time.Second)

first, _ := queue.Get()
bsl := &velerov1.BackupStorageLocation{}
require.Equal(t, "location2", first.(reconcile.Request).Name)
require.Nil(t, client.Get(ctx, first.(reconcile.Request).NamespacedName, bsl))
require.Equal(t, true, bsl.Spec.Default)

cancelFunc()
}