add delete backup cmd using finalizer and simplify GC process

Signed-off-by: Steve Kriss <steve@heptio.com>
pull/252/head
Steve Kriss 2017-12-14 16:40:02 -08:00
parent 7cc73fb9d2
commit f5123794e0
14 changed files with 510 additions and 266 deletions

View File

@ -30,6 +30,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.
### SEE ALSO
* [ark backup](ark_backup.md) - Work with backups
* [ark create](ark_create.md) - Create ark resources
* [ark delete](ark_delete.md) - Delete ark resources
* [ark describe](ark_describe.md) - Describe ark resources
* [ark get](ark_get.md) - Get ark resources
* [ark plugin](ark_plugin.md) - Work with plugins

View File

@ -29,6 +29,7 @@ Work with backups
### SEE ALSO
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
* [ark backup create](ark_backup_create.md) - Create a backup
* [ark backup delete](ark_backup_delete.md) - Delete a backup
* [ark backup describe](ark_backup_describe.md) - Describe backups
* [ark backup download](ark_backup_download.md) - Download a backup
* [ark backup get](ark_backup_get.md) - Get backups

View File

@ -0,0 +1,35 @@
## ark backup delete
Delete a backup
### Synopsis
Delete a backup
```
ark backup delete NAME [flags]
```
### Options
```
-h, --help help for delete
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark backup](ark_backup.md) - Work with backups

View File

@ -0,0 +1,32 @@
## ark delete
Delete ark resources
### Synopsis
Delete ark resources
### Options
```
-h, --help help for delete
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
* [ark delete backup](ark_delete_backup.md) - Delete a backup

View File

@ -0,0 +1,35 @@
## ark delete backup
Delete a backup
### Synopsis
Delete a backup
```
ark delete backup NAME [flags]
```
### Options
```
-h, --help help for backup
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark delete](ark_delete.md) - Delete ark resources

View File

@ -24,6 +24,7 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd/cli/backup"
"github.com/heptio/ark/pkg/cmd/cli/create"
"github.com/heptio/ark/pkg/cmd/cli/delete"
"github.com/heptio/ark/pkg/cmd/cli/describe"
"github.com/heptio/ark/pkg/cmd/cli/get"
"github.com/heptio/ark/pkg/cmd/cli/plugin"
@ -61,6 +62,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
create.NewCommand(f),
runplugin.NewCommand(),
plugin.NewCommand(f),
delete.NewCommand(f),
)
// add the glog flags

View File

@ -35,10 +35,7 @@ func NewCommand(f client.Factory) *cobra.Command {
NewLogsCommand(f),
NewDescribeCommand(f, "describe"),
NewDownloadCommand(f),
// If you delete a backup and it still exists in object storage, the backup sync controller will
// recreate it. Until we have a good UX around this, we're disabling the delete command.
// NewDeleteCommand(f),
NewDeleteCommand(f, "delete"),
)
return c

View File

@ -27,9 +27,9 @@ import (
"github.com/heptio/ark/pkg/cmd"
)
func NewDeleteCommand(f client.Factory) *cobra.Command {
func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
c := &cobra.Command{
Use: "delete NAME",
Use: fmt.Sprintf("%s NAME", use),
Short: "Delete a backup",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {

View File

@ -0,0 +1,41 @@
/*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package delete
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd/cli/backup"
)
// NewCommand constructs the top-level "ark delete" command, which groups the
// delete subcommands for individual Ark resource types.
func NewCommand(f client.Factory) *cobra.Command {
	deleteCmd := &cobra.Command{
		Use:   "delete",
		Short: "Delete ark resources",
		Long:  "Delete ark resources",
	}

	// "ark delete backup NAME" (alias: "backups") reuses the backup
	// package's delete command implementation.
	deleteBackupCmd := backup.NewDeleteCommand(f, "backup")
	deleteBackupCmd.Aliases = []string{"backups"}

	deleteCmd.AddCommand(deleteBackupCmd)

	return deleteCmd
}

View File

@ -234,6 +234,11 @@ func (controller *backupController) processBackup(key string) error {
// set backup version
backup.Status.Version = backupVersion
// add GC finalizer if it's not there already
if !has(backup.Finalizers, gcFinalizer) {
backup.Finalizers = append(backup.Finalizers, gcFinalizer)
}
// calculate expiration
if backup.Spec.TTL.Duration > 0 {
backup.Status.Expiration = metav1.NewTime(controller.clock.Now().Add(backup.Spec.TTL.Duration))

View File

@ -190,6 +190,7 @@ func TestProcessBackup(t *testing.T) {
backup.Status.Phase = v1.BackupPhaseInProgress
backup.Status.Expiration.Time = expiration
backup.Status.Version = 1
backup.Finalizers = []string{gcFinalizer}
backupper.On("Backup", backup, mock.Anything, mock.Anything, mock.Anything).Return(nil)
cloudBackups.On("UploadBackup", "bucket", backup.Name, mock.Anything, mock.Anything, mock.Anything).Return(nil)
@ -225,6 +226,7 @@ func TestProcessBackup(t *testing.T) {
res.Status.Version = 1
res.Status.Expiration.Time = expiration
res.Status.Phase = v1.BackupPhase(phase)
res.Finalizers = []string{gcFinalizer}
return true, res, nil
})
@ -247,14 +249,15 @@ func TestProcessBackup(t *testing.T) {
actions := client.Actions()
require.Equal(t, 2, len(actions))
// validate Patch call 1 (setting version, expiration, and phase)
// validate Patch call 1 (setting finalizer, version, expiration, and phase)
patchAction, ok := actions[0].(core.PatchAction)
require.True(t, ok, "action is not a PatchAction")
patch := make(map[string]interface{})
require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")
assert.Equal(t, 1, len(patch), "patch has wrong number of keys")
// should have metadata and status
assert.Equal(t, 2, len(patch), "patch has wrong number of keys")
expectedStatusKeys := 2
if test.backup.Spec.TTL.Duration > 0 {
@ -268,10 +271,19 @@ func TestProcessBackup(t *testing.T) {
res, _ := collections.GetMap(patch, "status")
assert.Equal(t, expectedStatusKeys, len(res), "patch's status has the wrong number of keys")
finalizers, err := collections.GetSlice(patch, "metadata.finalizers")
require.NoError(t, err, "patch does not contain metadata.finalizers")
assert.Equal(t, 1, len(finalizers))
assert.Equal(t, gcFinalizer, finalizers[0])
res, _ = collections.GetMap(patch, "metadata")
assert.Equal(t, 1, len(res), "patch's metadata has the wrong number of keys")
// validate Patch call 2 (setting phase)
patchAction, ok = actions[1].(core.PatchAction)
require.True(t, ok, "action is not a PatchAction")
patch = make(map[string]interface{})
require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")
assert.Equal(t, 1, len(patch), "patch has wrong number of keys")

View File

@ -18,6 +18,7 @@ package controller
import (
"context"
"encoding/json"
"time"
"github.com/pkg/errors"
@ -25,7 +26,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
@ -37,6 +40,8 @@ import (
"github.com/heptio/ark/pkg/util/kube"
)
const gcFinalizer = "gc.ark.heptio.com"
// gcController removes expired backup content from object storage.
type gcController struct {
backupService cloudprovider.BackupService
@ -70,7 +75,7 @@ func NewGCController(
syncPeriod = time.Minute
}
return &gcController{
c := &gcController{
backupService: backupService,
snapshotService: snapshotService,
bucket: bucket,
@ -84,9 +89,87 @@ func NewGCController(
restoreClient: restoreClient,
logger: logger,
}
backupInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
UpdateFunc: c.handleFinalizer,
},
)
return c
}
var _ Interface = &gcController{}
// handleFinalizer is the informer UpdateFunc for backups. When a backup has
// been marked for deletion (non-nil deletionTimestamp) and carries the Ark GC
// finalizer, it garbage-collects the backup's related cloud resources and
// restores, then patches the backup to remove the finalizer so the API server
// can complete the deletion.
func (c *gcController) handleFinalizer(_, newObj interface{}) {
	// informer handlers receive interface{}; guard the assertion rather than
	// panicking if an unexpected type is delivered.
	backup, ok := newObj.(*api.Backup)
	if !ok {
		c.logger.Errorf("expected *api.Backup but got %T", newObj)
		return
	}

	log := c.logger.WithField("backup", kube.NamespaceAndName(backup))

	// we're only interested in backups that have a deletionTimestamp and at
	// least one finalizer.
	if backup.DeletionTimestamp == nil || len(backup.Finalizers) == 0 {
		return
	}
	log.Debugf("Backup has finalizers %s", backup.Finalizers)

	if !has(backup.Finalizers, gcFinalizer) {
		return
	}

	log.Infof("Garbage-collecting backup")
	if err := c.garbageCollect(backup, log); err != nil {
		// if there were errors deleting related cloud resources, don't
		// delete the backup API object because we don't want to orphan
		// the cloud resources.
		log.WithError(err).Error("Error deleting backup's related objects")
		return
	}

	// Remove the GC finalizer. Including resourceVersion makes the patch
	// fail (rather than silently clobber) if the backup changed concurrently.
	patchMap := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      except(backup.Finalizers, gcFinalizer),
			"resourceVersion": backup.ResourceVersion,
		},
	}

	patchBytes, err := json.Marshal(patchMap)
	if err != nil {
		log.WithError(err).Error("Error marshaling finalizers patch")
		return
	}

	if _, err = c.backupClient.Backups(backup.Namespace).Patch(backup.Name, types.MergePatchType, patchBytes); err != nil {
		log.WithError(errors.WithStack(err)).Error("Error patching backup")
	}
}
// has reports whether val is present in items.
func has(items []string, val string) bool {
	for i := range items {
		if items[i] == val {
			return true
		}
	}
	return false
}
// except returns a new string slice containing every entry of items other
// than val. The result is nil when no entries remain (callers rely on this
// when the slice is marshaled into a patch).
func except(items []string, val string) []string {
	var filtered []string
	for i := range items {
		if items[i] == val {
			continue
		}
		filtered = append(filtered, items[i])
	}
	return filtered
}
// Run is a blocking function that runs a single worker to garbage-collect backups
// from object/block storage and the Ark API. It will return when it receives on the
@ -103,104 +186,74 @@ func (c *gcController) Run(ctx context.Context, workers int) error {
}
// run performs one garbage-collection pass by delegating to processBackups.
// NOTE(review): this appears to be the pre-finalizer version of run shown in
// the diff; the replacement body follows below — confirm against the merged file.
func (c *gcController) run() {
c.processBackups()
}
now := c.clock.Now()
c.logger.Info("Garbage-collecting expired backups")
// garbageCollectBackup removes an expired backup by deleting any associated backup files (if
// deleteBackupFiles = true), volume snapshots, restore API objects, and the backup API object
// itself.
func (c *gcController) garbageCollectBackup(backup *api.Backup, deleteBackupFiles bool) {
logContext := c.logger.WithField("backup", kube.NamespaceAndName(backup))
// if the backup includes snapshots but we don't currently have a PVProvider, we don't
// want to orphan the snapshots so skip garbage-collection entirely.
if c.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
logContext.Warning("Cannot garbage-collect backup because backup includes snapshots and server is not configured with PersistentVolumeProvider")
// Go thru API objects and delete expired ones (finalizer will GC their
// corresponding files/snapshots/restores). Note that we're ignoring backups
// in object storage that haven't been synced to Kubernetes yet; they'll
// be processed for GC (if applicable) once they've been synced.
backups, err := c.backupLister.List(labels.Everything())
if err != nil {
c.logger.WithError(errors.WithStack(err)).Error("Error getting all backups")
return
}
// The GC process is primarily intended to delete expired cloud resources (file in object
// storage, snapshots). If we fail to delete any of these, we don't delete the Backup API
// object or metadata file in object storage so that we don't orphan the cloud resources.
deletionFailure := false
for _, backup := range backups {
log := c.logger.WithField("backup", kube.NamespaceAndName(backup))
if backup.Status.Expiration.Time.After(now) {
log.Debug("Backup has not expired yet, skipping")
continue
}
// since backups have a finalizer, this will actually have the effect of setting a deletionTimestamp and calling
// an update. The update will be handled by this controller and will result in a deletion of the obj storage
// files and the API object.
if err := c.backupClient.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error deleting backup")
}
}
}
// garbageCollect prepares for deleting an expired backup by deleting any
// associated backup files, volume snapshots, or restore API objects.
func (c *gcController) garbageCollect(backup *api.Backup, log logrus.FieldLogger) error {
// if the backup includes snapshots but we don't currently have a PVProvider, we don't
// want to orphan the snapshots so skip garbage-collection entirely.
if c.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
return errors.New("cannot garbage-collect backup because it includes snapshots and Ark is not configured with a PersistentVolumeProvider")
}
var errs []error
for _, volumeBackup := range backup.Status.VolumeBackups {
logContext.WithField("snapshotID", volumeBackup.SnapshotID).Info("Removing snapshot associated with backup")
log.WithField("snapshotID", volumeBackup.SnapshotID).Info("Removing snapshot associated with backup")
if err := c.snapshotService.DeleteSnapshot(volumeBackup.SnapshotID); err != nil {
logContext.WithError(err).WithField("snapshotID", volumeBackup.SnapshotID).Error("Error deleting snapshot")
deletionFailure = true
errs = append(errs, errors.Wrapf(err, "error deleting snapshot %s", volumeBackup.SnapshotID))
}
}
// If applicable, delete everything in the backup dir in object storage *before* deleting the API object
// because otherwise the backup sync controller could re-sync the backup from object storage.
if deleteBackupFiles {
logContext.Info("Removing backup from object storage")
if err := c.backupService.DeleteBackupDir(c.bucket, backup.Name); err != nil {
logContext.WithError(err).Error("Error deleting backup")
deletionFailure = true
}
log.Info("Removing backup from object storage")
if err := c.backupService.DeleteBackupDir(c.bucket, backup.Name); err != nil {
errs = append(errs, errors.Wrap(err, "error deleting backup from object storage"))
}
logContext.Info("Getting restore API objects referencing backup")
if restores, err := c.restoreLister.Restores(backup.Namespace).List(labels.Everything()); err != nil {
logContext.WithError(errors.WithStack(err)).Error("Error getting Restore API objects")
log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
} else {
for _, restore := range restores {
if restore.Spec.BackupName == backup.Name {
logContext.WithField("restore", kube.NamespaceAndName(restore)).Info("Removing Restore API object referencing Backup")
if err := c.restoreClient.Restores(restore.Namespace).Delete(restore.Name, &metav1.DeleteOptions{}); err != nil {
logContext.WithError(errors.WithStack(err)).WithField("restore", kube.NamespaceAndName(restore)).
Error("Error deleting Restore API object")
}
if restore.Spec.BackupName != backup.Name {
continue
}
restoreLog := log.WithField("restore", kube.NamespaceAndName(restore))
restoreLog.Info("Deleting restore referencing backup")
if err := c.restoreClient.Restores(restore.Namespace).Delete(restore.Name, &metav1.DeleteOptions{}); err != nil {
restoreLog.WithError(errors.WithStack(err)).Error("Error deleting restore")
}
}
}
if deletionFailure {
logContext.Warning("Backup will not be deleted due to errors deleting related object storage files(s) and/or volume snapshots")
return
}
logContext.Info("Removing Backup API object")
if err := c.backupClient.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
logContext.WithError(errors.WithStack(err)).Error("Error deleting Backup API object")
}
}
// garbageCollectBackups checks backups for expiration and triggers garbage-collection for the expired
// ones.
func (c *gcController) garbageCollectBackups(backups []*api.Backup, expiration time.Time, deleteBackupFiles bool) {
for _, backup := range backups {
if backup.Status.Expiration.Time.After(expiration) {
c.logger.WithField("backup", kube.NamespaceAndName(backup)).Info("Backup has not expired yet, skipping")
continue
}
c.garbageCollectBackup(backup, deleteBackupFiles)
}
}
// processBackups gets backups from object storage and the API and submits
// them for garbage-collection.
func (c *gcController) processBackups() {
now := c.clock.Now()
c.logger.WithField("now", now).Info("Garbage-collecting backups that have expired as of now")
// GC backups in object storage. We do this in addition
// to GC'ing API objects to prevent orphan backup files.
backups, err := c.backupService.GetAllBackups(c.bucket)
if err != nil {
c.logger.WithError(err).Error("Error getting all backups from object storage")
return
}
c.garbageCollectBackups(backups, now, true)
// GC backups without files in object storage
apiBackups, err := c.backupLister.List(labels.Everything())
if err != nil {
c.logger.WithError(errors.WithStack(err)).Error("Error getting all Backup API objects")
return
}
c.garbageCollectBackups(apiBackups, now, false)
return kerrors.NewAggregate(errs)
}

View File

@ -17,38 +17,37 @@ limitations under the License.
package controller
import (
"errors"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
core "k8s.io/client-go/testing"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
arktest "github.com/heptio/ark/pkg/util/test"
)
type gcTest struct {
name string
backups []*api.Backup
snapshots sets.String
nilSnapshotService bool
expectedDeletions sets.String
expectedSnapshotsRemaining sets.String
}
func TestGarbageCollect(t *testing.T) {
func TestGCControllerRun(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now())
tests := []gcTest{
tests := []struct {
name string
backups []*api.Backup
snapshots sets.String
expectedDeletions sets.String
}{
{
name: "basic-expired",
name: "no backups results in no deletions",
},
{
name: "expired backup is deleted",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Second)).
@ -56,12 +55,10 @@ func TestGarbageCollect(t *testing.T) {
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
expectedDeletions: sets.NewString("backup-1"),
expectedSnapshotsRemaining: sets.NewString(),
expectedDeletions: sets.NewString("backup-1"),
},
{
name: "basic-unexpired",
name: "unexpired backup is not deleted",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
@ -69,12 +66,10 @@ func TestGarbageCollect(t *testing.T) {
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
expectedDeletions: sets.NewString(),
expectedSnapshotsRemaining: sets.NewString("snapshot-1", "snapshot-2"),
expectedDeletions: sets.NewString(),
},
{
name: "one expired, one unexpired",
name: "expired backup is deleted and unexpired backup is not deleted",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
@ -87,144 +82,91 @@ func TestGarbageCollect(t *testing.T) {
WithSnapshot("pv-4", "snapshot-4").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
expectedDeletions: sets.NewString("backup-1"),
expectedSnapshotsRemaining: sets.NewString("snapshot-3", "snapshot-4"),
},
{
name: "none expired in target bucket",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-2").
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
WithSnapshot("pv-3", "snapshot-3").
WithSnapshot("pv-4", "snapshot-4").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
expectedDeletions: sets.NewString(),
expectedSnapshotsRemaining: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
},
{
name: "orphan snapshots",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
expectedDeletions: sets.NewString("backup-1"),
expectedSnapshotsRemaining: sets.NewString("snapshot-3", "snapshot-4"),
},
{
name: "no snapshot service only GC's backups without snapshots",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Second)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
arktest.NewTestBackup().WithName("backup-2").
WithExpiration(fakeClock.Now().Add(-1 * time.Second)).
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
nilSnapshotService: true,
expectedDeletions: sets.NewString("backup-2"),
expectedDeletions: sets.NewString("backup-1"),
},
}
for _, test := range tests {
var (
backupService = &arktest.BackupService{}
snapshotService *arktest.FakeSnapshotService
)
if !test.nilSnapshotService {
snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
}
t.Run(test.name, func(t *testing.T) {
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
snapSvc cloudprovider.SnapshotService
bucket = "bucket"
logger = arktest.NewLogger()
)
if snapshotService != nil {
snapSvc = snapshotService
}
controller := NewGCController(
backupService,
snapSvc,
bucket,
nil,
nil,
"bucket",
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
logger,
arktest.NewLogger(),
).(*gcController)
controller.clock = fakeClock
backupService.On("GetAllBackups", bucket).Return(test.backups, nil)
for _, b := range test.expectedDeletions.List() {
backupService.On("DeleteBackupDir", bucket, b).Return(nil)
for _, backup := range test.backups {
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup)
}
controller.processBackups()
if !test.nilSnapshotService {
assert.Equal(t, test.expectedSnapshotsRemaining, snapshotService.SnapshotsTaken)
expectedDeletions := make([]core.Action, 0, len(test.expectedDeletions))
for backup := range test.expectedDeletions {
expectedDeletions = append(expectedDeletions, core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("backups"),
api.DefaultNamespace,
backup,
))
}
backupService.AssertExpectations(t)
controller.run()
assert.Equal(t, expectedDeletions, client.Actions())
})
}
}
func TestGarbageCollectBackup(t *testing.T) {
tests := []struct {
name string
backup *api.Backup
deleteBackupFile bool
snapshots sets.String
backupFiles sets.String
backupMetadataFiles sets.String
restores []*api.Restore
expectedRestoreDeletes []string
expectedBackupDelete string
expectedSnapshots sets.String
expectedObjectStorageDeletions sets.String
name string
backup *api.Backup
snapshots sets.String
restores []*api.Restore
nilSnapshotService bool
expectErr bool
expectBackupDirDeleted bool
}{
{
name: "deleteBackupFile=false, snapshot deletion fails, don't delete kube backup",
name: "nil snapshot service when backup has snapshots returns error",
backup: arktest.NewTestBackup().WithName("backup-1").WithSnapshot("pv-1", "snap-1").Backup,
nilSnapshotService: true,
expectErr: true,
},
{
name: "nil snapshot service when backup doesn't have snapshots correctly garbage-collects",
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
nilSnapshotService: true,
expectBackupDirDeleted: true,
},
{
name: "return error if snapshot deletion fails",
backup: arktest.NewTestBackup().WithName("backup-1").
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
deleteBackupFile: false,
snapshots: sets.NewString("snapshot-1"),
expectedSnapshots: sets.NewString(),
expectedObjectStorageDeletions: sets.NewString(),
snapshots: sets.NewString("snapshot-1"),
expectBackupDirDeleted: true,
expectErr: true,
},
{
name: "related restores should be deleted",
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
deleteBackupFile: true,
snapshots: sets.NewString(),
name: "related restores should be deleted",
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
snapshots: sets.NewString(),
restores: []*api.Restore{
arktest.NewTestRestore(api.DefaultNamespace, "restore-1", api.RestorePhaseCompleted).WithBackup("backup-1").Restore,
arktest.NewTestRestore(api.DefaultNamespace, "restore-2", api.RestorePhaseCompleted).WithBackup("backup-2").Restore,
},
expectedRestoreDeletes: []string{"restore-1"},
expectedBackupDelete: "backup-1",
expectedSnapshots: sets.NewString(),
expectedObjectStorageDeletions: sets.NewString("backup-1"),
expectBackupDirDeleted: true,
},
}
@ -235,61 +177,67 @@ func TestGarbageCollectBackup(t *testing.T) {
snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
bucket = "bucket-1"
logger = arktest.NewLogger()
controller = NewGCController(
backupService,
snapshotService,
bucket,
"bucket-1",
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
logger,
arktest.NewLogger(),
).(*gcController)
)
if test.nilSnapshotService {
controller.snapshotService = nil
}
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)
for _, restore := range test.restores {
sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore)
}
for _, b := range test.expectedObjectStorageDeletions.List() {
backupService.On("DeleteBackupDir", bucket, b).Return(nil)
if test.expectBackupDirDeleted {
backupService.On("DeleteBackupDir", controller.bucket, test.backup.Name).Return(nil)
}
// METHOD UNDER TEST
controller.garbageCollectBackup(test.backup, test.deleteBackupFile)
err := controller.garbageCollect(test.backup, controller.logger)
// VERIFY:
// remaining snapshots
assert.Equal(t, test.expectedSnapshots, snapshotService.SnapshotsTaken)
// error
assert.Equal(t, test.expectErr, err != nil)
// remaining snapshots
if !test.nilSnapshotService {
backupSnapshots := sets.NewString()
for _, snapshot := range test.backup.Status.VolumeBackups {
backupSnapshots.Insert(snapshot.SnapshotID)
}
assert.Equal(t, test.snapshots.Difference(backupSnapshots), snapshotService.SnapshotsTaken)
}
// restore client deletes
expectedActions := make([]core.Action, 0)
// Restore client deletes
for _, restore := range test.expectedRestoreDeletes {
for _, restore := range test.restores {
if restore.Spec.BackupName != test.backup.Name {
continue
}
action := core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("restores"),
api.DefaultNamespace,
restore,
restore.Name,
)
expectedActions = append(expectedActions, action)
}
// Backup client deletes
if test.expectedBackupDelete != "" {
action := core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("backups"),
api.DefaultNamespace,
test.expectedBackupDelete,
)
expectedActions = append(expectedActions, action)
}
assert.Equal(t, expectedActions, client.Actions())
// backup dir deletion
backupService.AssertExpectations(t)
})
}
@ -297,59 +245,126 @@ func TestGarbageCollectBackup(t *testing.T) {
func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
var (
backupService = &arktest.BackupService{}
snapshotService = &arktest.FakeSnapshotService{}
fakeClock = clock.NewFakeClock(time.Now())
assert = assert.New(t)
)
scenario := gcTest{
name: "basic-expired",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
backup = arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(1*time.Second)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
}
snapshotService.SnapshotsTaken = scenario.snapshots
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
logger = arktest.NewLogger()
Backup
)
controller := NewGCController(
backupService,
snapshotService,
nil,
nil,
"bucket",
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
logger,
arktest.NewLogger(),
).(*gcController)
controller.clock = fakeClock
backupService.On("GetAllBackups", "bucket").Return(scenario.backups, nil)
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup)
// PASS 1
controller.processBackups()
backupService.AssertExpectations(t)
assert.Equal(scenario.snapshots, snapshotService.SnapshotsTaken, "snapshots should not be garbage-collected yet.")
controller.run()
assert.Equal(t, 0, len(client.Actions()))
// PASS 2
expectedActions := []core.Action{
core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("backups"),
api.DefaultNamespace,
"backup-1",
),
}
fakeClock.Step(1 * time.Minute)
backupService.On("DeleteBackupDir", "bucket", "backup-1").Return(nil)
controller.processBackups()
controller.run()
assert.Equal(0, len(snapshotService.SnapshotsTaken), "snapshots should have been garbage-collected.")
backupService.AssertExpectations(t)
assert.Equal(t, expectedActions, client.Actions())
}
func TestHandleFinalizer(t *testing.T) {
tests := []struct {
name string
backup *api.Backup
deleteBackupDirError bool
expectGarbageCollect bool
expectedPatch []byte
}{
{
name: "nil deletionTimestamp exits early",
backup: arktest.NewTestBackup().Backup,
},
{
name: "no finalizers exits early",
backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).Backup,
},
{
name: "no gcFinalizer exits early",
backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers("foo").Backup,
},
{
name: "error when calling garbageCollect exits without patch",
backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(gcFinalizer).Backup,
deleteBackupDirError: true,
},
{
name: "normal case - patch includes the appropriate fields",
backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(gcFinalizer, "foo").WithResourceVersion("1").Backup,
expectGarbageCollect: true,
expectedPatch: []byte(`{"metadata":{"finalizers":["foo"],"resourceVersion":"1"}}`),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
backupService = &arktest.BackupService{}
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
controller = NewGCController(
backupService,
nil,
"bucket-1",
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
arktest.NewLogger(),
).(*gcController)
)
if test.expectGarbageCollect {
backupService.On("DeleteBackupDir", controller.bucket, test.backup.Name).Return(nil)
} else if test.deleteBackupDirError {
backupService.On("DeleteBackupDir", controller.bucket, test.backup.Name).Return(errors.New("foo"))
}
// METHOD UNDER TEST
controller.handleFinalizer(nil, test.backup)
// VERIFY
backupService.AssertExpectations(t)
actions := client.Actions()
if test.expectedPatch == nil {
assert.Equal(t, 0, len(actions))
return
}
require.Equal(t, 1, len(actions))
patchAction, ok := actions[0].(core.PatchAction)
require.True(t, ok, "action is not a PatchAction")
assert.Equal(t, test.expectedPatch, patchAction.GetPatch())
})
}
}

View File

@ -114,3 +114,18 @@ func (b *TestBackup) WithSnapshotVolumesPointer(value *bool) *TestBackup {
b.Spec.SnapshotVolumes = value
return b
}
// WithDeletionTimestamp sets the backup's deletionTimestamp to the given
// instant and returns the TestBackup for call chaining.
// The parameter is named t (not "time") so it doesn't shadow the time package.
func (b *TestBackup) WithDeletionTimestamp(t time.Time) *TestBackup {
	b.DeletionTimestamp = &metav1.Time{Time: t}
	return b
}
// WithFinalizers sets the backup's metadata.finalizers to the given values
// and returns the TestBackup for call chaining.
func (b *TestBackup) WithFinalizers(vals ...string) *TestBackup {
	b.Finalizers = vals
	return b
}
// WithResourceVersion sets the backup's metadata.resourceVersion and returns
// the TestBackup for call chaining.
func (b *TestBackup) WithResourceVersion(rv string) *TestBackup {
	b.ResourceVersion = rv
	return b
}