Merge pull request #63 from skriss/gc_restores

GC restores along with backups; de-dupe GC controller code
pull/55/merge
Andy Goldstein 2017-09-06 13:37:18 -04:00 committed by GitHub
commit b7265a59f2
6 changed files with 438 additions and 111 deletions

View File

@ -45,8 +45,11 @@ type BackupService interface {
// downloading or reading the file from the cloud API.
DownloadBackup(bucket, name string) (io.ReadCloser, error)
// DeleteBackup deletes the backup content in object storage for the given api.Backup.
DeleteBackup(bucket, backupName string) error
// DeleteBackupFile deletes the backup content in object storage for the given backup.
DeleteBackupFile(bucket, backupName string) error
// DeleteBackupMetadataFile deletes the backup metadata file in object storage for the given backup.
DeleteBackupMetadataFile(bucket, backupName string) error
// GetBackup gets the specified api.Backup from the given bucket in object storage.
GetBackup(bucket, name string) (*api.Backup, error)
@ -152,22 +155,16 @@ func (br *backupService) GetBackup(bucket, name string) (*api.Backup, error) {
return backup, nil
}
func (br *backupService) DeleteBackup(bucket, backupName string) error {
var errs []error
func (br *backupService) DeleteBackupFile(bucket, backupName string) error {
key := fmt.Sprintf(backupFileFormatString, backupName, backupName)
glog.V(4).Infof("Trying to delete bucket=%s, key=%s", bucket, key)
if err := br.objectStorage.DeleteObject(bucket, key); err != nil {
errs = append(errs, err)
}
return br.objectStorage.DeleteObject(bucket, key)
}
key = fmt.Sprintf(metadataFileFormatString, backupName)
func (br *backupService) DeleteBackupMetadataFile(bucket, backupName string) error {
key := fmt.Sprintf(metadataFileFormatString, backupName)
glog.V(4).Infof("Trying to delete bucket=%s, key=%s", bucket, key)
if err := br.objectStorage.DeleteObject(bucket, key); err != nil {
errs = append(errs, err)
}
return errors.NewAggregate(errs)
return br.objectStorage.DeleteObject(bucket, key)
}
// cachedBackupService wraps a real backup service with a cache for getting cloud backups.

View File

@ -178,8 +178,7 @@ func TestDownloadBackup(t *testing.T) {
}
}
func TestDeleteBackup(t *testing.T) {
func TestDeleteBackupFile(t *testing.T) {
tests := []struct {
name string
bucket string
@ -195,32 +194,17 @@ func TestDeleteBackup(t *testing.T) {
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"bak/bak.tar.gz": nil,
"bak/ark-backup.json": nil,
},
},
expectedErr: false,
expectedRes: make(map[string][]byte),
},
{
name: "failed delete of backup doesn't prevent metadata delete but returns error",
name: "failed delete of backup returns error",
bucket: "test-bucket",
backupName: "bak",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"bak/ark-backup.json": nil,
},
},
expectedErr: true,
expectedRes: make(map[string][]byte),
},
{
name: "failed delete of metadata returns error",
bucket: "test-bucket",
backupName: "bak",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"bak/bak.tar.gz": nil,
},
"test-bucket": map[string][]byte{},
},
expectedErr: true,
expectedRes: make(map[string][]byte),
@ -232,7 +216,54 @@ func TestDeleteBackup(t *testing.T) {
objStore := &fakeObjectStorage{storage: test.storage}
backupService := NewBackupService(objStore)
res := backupService.DeleteBackup(test.bucket, test.backupName)
res := backupService.DeleteBackupFile(test.bucket, test.backupName)
assert.Equal(t, test.expectedErr, res != nil, "got error %v", res)
assert.Equal(t, test.expectedRes, objStore.storage[test.bucket])
})
}
}
func TestDeleteBackupMetadataFile(t *testing.T) {
tests := []struct {
name string
bucket string
backupName string
storage map[string]map[string][]byte
expectedErr bool
expectedRes map[string][]byte
}{
{
name: "normal case",
bucket: "test-bucket",
backupName: "bak",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"bak/ark-backup.json": nil,
},
},
expectedErr: false,
expectedRes: make(map[string][]byte),
},
{
name: "failed delete of file returns error",
bucket: "test-bucket",
backupName: "bak",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{},
},
expectedErr: true,
expectedRes: make(map[string][]byte),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
objStore := &fakeObjectStorage{storage: test.storage}
backupService := NewBackupService(objStore)
res := backupService.DeleteBackupMetadataFile(test.bucket, test.backupName)
assert.Equal(t, test.expectedErr, res != nil, "got error %v", res)

View File

@ -428,6 +428,8 @@ func (s *server) runControllers(config *api.Config) error {
config.GCSyncPeriod.Duration,
s.sharedInformerFactory.Ark().V1().Backups(),
s.arkClient.ArkV1(),
s.sharedInformerFactory.Ark().V1().Restores(),
s.arkClient.ArkV1(),
)
wg.Add(1)
go func() {

View File

@ -29,10 +29,12 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
"github.com/heptio/ark/pkg/util/kube"
)
// gcController removes expired backup content from object storage.
@ -42,9 +44,12 @@ type gcController struct {
bucket string
syncPeriod time.Duration
clock clock.Clock
lister listers.BackupLister
listerSynced cache.InformerSynced
client arkv1client.BackupsGetter
backupLister listers.BackupLister
backupListerSynced cache.InformerSynced
backupClient arkv1client.BackupsGetter
restoreLister listers.RestoreLister
restoreListerSynced cache.InformerSynced
restoreClient arkv1client.RestoresGetter
}
// NewGCController constructs a new gcController.
@ -54,7 +59,9 @@ func NewGCController(
bucket string,
syncPeriod time.Duration,
backupInformer informers.BackupInformer,
client arkv1client.BackupsGetter,
backupClient arkv1client.BackupsGetter,
restoreInformer informers.RestoreInformer,
restoreClient arkv1client.RestoresGetter,
) Interface {
if syncPeriod < time.Minute {
glog.Infof("GC sync period %v is too short. Setting to 1 minute", syncPeriod)
@ -67,9 +74,12 @@ func NewGCController(
bucket: bucket,
syncPeriod: syncPeriod,
clock: clock.RealClock{},
lister: backupInformer.Lister(),
listerSynced: backupInformer.Informer().HasSynced,
client: client,
backupLister: backupInformer.Lister(),
backupListerSynced: backupInformer.Informer().HasSynced,
backupClient: backupClient,
restoreLister: restoreInformer.Lister(),
restoreListerSynced: restoreInformer.Informer().HasSynced,
restoreClient: restoreClient,
}
}
@ -80,7 +90,7 @@ var _ Interface = &gcController{}
// ctx.Done() channel.
func (c *gcController) Run(ctx context.Context, workers int) error {
glog.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), c.listerSynced) {
if !cache.WaitForCacheSync(ctx.Done(), c.backupListerSynced, c.restoreListerSynced) {
return errors.New("timed out waiting for caches to sync")
}
glog.Info("Caches are synced")
@ -90,70 +100,111 @@ func (c *gcController) Run(ctx context.Context, workers int) error {
}
func (c *gcController) run() {
c.cleanBackups()
c.processBackups()
}
// cleanBackups deletes expired backups.
func (c *gcController) cleanBackups() {
backups, err := c.backupService.GetAllBackups(c.bucket)
if err != nil {
glog.Errorf("error getting all backups: %v", err)
return
}
now := c.clock.Now()
glog.Infof("garbage-collecting backups that have expired as of %v", now)
// GC backup files and associated snapshots/API objects. Note that deletion from object
// storage should happen first because otherwise there's a possibility the backup sync
// controller would re-create the API object after deletion.
for _, backup := range backups {
if !backup.Status.Expiration.Time.Before(now) {
glog.Infof("Backup %s/%s has not expired yet, skipping", backup.Namespace, backup.Name)
continue
}
// garbageCollectBackup removes an expired backup by deleting any associated backup files (if
// deleteBackupFile = true), volume snapshots, restore API objects, and the backup API object
// itself.
func (c *gcController) garbageCollectBackup(backup *api.Backup, deleteBackupFile bool) {
// if the backup includes snapshots but we don't currently have a PVProvider, we don't
// want to orphan the snapshots so skip garbage-collection entirely.
if c.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
glog.Warningf("Cannot garbage-collect backup %s/%s because backup includes snapshots and server is not configured with PersistentVolumeProvider",
backup.Namespace, backup.Name)
glog.Warningf("Cannot garbage-collect backup %s because backup includes snapshots and server is not configured with PersistentVolumeProvider",
kube.NamespaceAndName(backup))
return
}
// The GC process is primarily intended to delete expired cloud resources (file in object
// storage, snapshots). If we fail to delete any of these, we don't delete the Backup API
// object or metadata file in object storage so that we don't orphan the cloud resources.
deletionFailure := false
for _, volumeBackup := range backup.Status.VolumeBackups {
glog.Infof("Removing snapshot %s associated with backup %s", volumeBackup.SnapshotID, kube.NamespaceAndName(backup))
if err := c.snapshotService.DeleteSnapshot(volumeBackup.SnapshotID); err != nil {
glog.Errorf("error deleting snapshot %v: %v", volumeBackup.SnapshotID, err)
deletionFailure = true
}
}
// If applicable, delete backup & metadata file from object storage *before* deleting the API object
// because otherwise the backup sync controller could re-sync the backup from object storage.
if deleteBackupFile {
glog.Infof("Removing backup %s", kube.NamespaceAndName(backup))
if err := c.backupService.DeleteBackupFile(c.bucket, backup.Name); err != nil {
glog.Errorf("error deleting backup %s: %v", kube.NamespaceAndName(backup), err)
deletionFailure = true
}
if deletionFailure {
glog.Warningf("Backup %s will not be deleted due to errors deleting related object storage file(s) and/or volume snapshots", kube.NamespaceAndName(backup))
} else {
if err := c.backupService.DeleteBackupMetadataFile(c.bucket, backup.Name); err != nil {
glog.Errorf("error deleting backup metadata file for %s: %v", kube.NamespaceAndName(backup), err)
deletionFailure = true
}
}
}
glog.Infof("Getting restore API objects referencing backup %s", kube.NamespaceAndName(backup))
if restores, err := c.restoreLister.Restores(backup.Namespace).List(labels.Everything()); err != nil {
glog.Errorf("error getting restore API objects: %v", err)
} else {
for _, restore := range restores {
if restore.Spec.BackupName == backup.Name {
glog.Infof("Removing restore API object %s of backup %s", kube.NamespaceAndName(restore), kube.NamespaceAndName(backup))
if err := c.restoreClient.Restores(restore.Namespace).Delete(restore.Name, &metav1.DeleteOptions{}); err != nil {
glog.Errorf("error deleting restore API object %s: %v", kube.NamespaceAndName(restore), err)
}
}
}
}
if deletionFailure {
glog.Warningf("Backup %s will not be deleted due to errors deleting related object storage file(s) and/or volume snapshots", kube.NamespaceAndName(backup))
return
}
glog.Infof("Removing backup API object %s", kube.NamespaceAndName(backup))
if err := c.backupClient.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
glog.Errorf("error deleting backup API object %s: %v", kube.NamespaceAndName(backup), err)
}
}
// garbageCollectBackups checks backups for expiration and triggers garbage-collection for the expired
// ones.
func (c *gcController) garbageCollectBackups(backups []*api.Backup, expiration time.Time, deleteBackupFiles bool) {
for _, backup := range backups {
if backup.Status.Expiration.Time.After(expiration) {
glog.Infof("Backup %s has not expired yet, skipping", kube.NamespaceAndName(backup))
continue
}
glog.Infof("Removing backup %s/%s", backup.Namespace, backup.Name)
if err := c.backupService.DeleteBackup(c.bucket, backup.Name); err != nil {
glog.Errorf("error deleting backup %s/%s: %v", backup.Namespace, backup.Name, err)
}
for _, volumeBackup := range backup.Status.VolumeBackups {
glog.Infof("Removing snapshot %s associated with backup %s/%s", volumeBackup.SnapshotID, backup.Namespace, backup.Name)
if err := c.snapshotService.DeleteSnapshot(volumeBackup.SnapshotID); err != nil {
glog.Errorf("error deleting snapshot %v: %v", volumeBackup.SnapshotID, err)
}
}
glog.Infof("Removing backup API object %s/%s", backup.Namespace, backup.Name)
if err := c.client.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
glog.Errorf("error deleting backup API object %s/%s: %v", backup.Namespace, backup.Name, err)
}
}
// also GC any Backup API objects without files in object storage
apiBackups, err := c.lister.List(labels.NewSelector())
if err != nil {
glog.Errorf("error getting all backup API objects: %v", err)
}
for _, backup := range apiBackups {
if backup.Status.Expiration.Time.Before(now) {
glog.Infof("Removing backup API object %s/%s", backup.Namespace, backup.Name)
if err := c.client.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
glog.Errorf("error deleting backup API object %s/%s: %v", backup.Namespace, backup.Name, err)
}
} else {
glog.Infof("Backup %s/%s has not expired yet, skipping", backup.Namespace, backup.Name)
}
c.garbageCollectBackup(backup, deleteBackupFiles)
}
}
// processBackups gets backups from object storage and the API and submits
// them for garbage-collection.
func (c *gcController) processBackups() {
now := c.clock.Now()
glog.Infof("garbage-collecting backups that have expired as of %v", now)
// GC backups in object storage. We do this in addition
// to GC'ing API objects to prevent orphan backup files.
backups, err := c.backupService.GetAllBackups(c.bucket)
if err != nil {
glog.Errorf("error getting all backups from object storage: %v", err)
return
}
c.garbageCollectBackups(backups, now, true)
// GC backups without files in object storage
apiBackups, err := c.backupLister.List(labels.Everything())
if err != nil {
glog.Errorf("error getting all backup API objects: %v", err)
return
}
c.garbageCollectBackups(apiBackups, now, false)
}

View File

@ -27,8 +27,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
core "k8s.io/client-go/testing"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
@ -186,6 +188,7 @@ func TestGarbageCollect(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
backupService.backupsByBucket = make(map[string][]*api.Backup)
backupService.backupMetadataByBucket = make(map[string][]*api.Backup)
for bucket, backups := range test.backups {
data := make([]*api.Backup, 0, len(backups))
@ -194,6 +197,7 @@ func TestGarbageCollect(t *testing.T) {
}
backupService.backupsByBucket[bucket] = data
backupService.backupMetadataByBucket[bucket] = data
}
var (
@ -213,10 +217,12 @@ func TestGarbageCollect(t *testing.T) {
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
).(*gcController)
controller.clock = fakeClock
controller.cleanBackups()
controller.processBackups()
// verify every bucket has the backups we expect
for bucket, backups := range backupService.backupsByBucket {
@ -241,6 +247,208 @@ func TestGarbageCollect(t *testing.T) {
}
}
func TestGarbageCollectBackup(t *testing.T) {
tests := []struct {
name string
backup *api.Backup
deleteBackupFile bool
snapshots sets.String
backupFiles sets.String
backupMetadataFiles sets.String
restores []*api.Restore
expectedRestoreDeletes []string
expectedBackupDelete string
expectedSnapshots sets.String
expectedBackupFiles sets.String
expectedMetadataFiles sets.String
}{
{
name: "failed snapshot deletion shouldn't delete backup metadata file",
backup: NewTestBackup().WithName("backup-1").
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
deleteBackupFile: true,
snapshots: sets.NewString("snapshot-1"),
backupFiles: sets.NewString("backup-1"),
backupMetadataFiles: sets.NewString("backup-1"),
restores: nil,
expectedBackupDelete: "",
expectedSnapshots: sets.NewString(),
expectedBackupFiles: sets.NewString(),
expectedMetadataFiles: sets.NewString("backup-1"),
},
{
name: "failed backup file deletion shouldn't delete backup metadata file",
backup: NewTestBackup().WithName("backup-1").
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
deleteBackupFile: true,
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
backupFiles: sets.NewString("doesn't-match-backup-name"),
backupMetadataFiles: sets.NewString("backup-1"),
restores: nil,
expectedBackupDelete: "",
expectedSnapshots: sets.NewString(),
expectedBackupFiles: sets.NewString("doesn't-match-backup-name"),
expectedMetadataFiles: sets.NewString("backup-1"),
},
{
name: "missing backup metadata file still deletes snapshots & backup file",
backup: NewTestBackup().WithName("backup-1").
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
deleteBackupFile: true,
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
backupFiles: sets.NewString("backup-1"),
backupMetadataFiles: sets.NewString("doesn't-match-backup-name"),
restores: nil,
expectedBackupDelete: "",
expectedSnapshots: sets.NewString(),
expectedBackupFiles: sets.NewString(),
expectedMetadataFiles: sets.NewString("doesn't-match-backup-name"),
},
{
name: "deleteBackupFile=false shouldn't error if no backup file exists",
backup: NewTestBackup().WithName("backup-1").
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
deleteBackupFile: false,
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
backupFiles: sets.NewString("non-matching-backup"),
backupMetadataFiles: sets.NewString("non-matching-backup"),
restores: nil,
expectedBackupDelete: "backup-1",
expectedSnapshots: sets.NewString(),
expectedBackupFiles: sets.NewString("non-matching-backup"),
expectedMetadataFiles: sets.NewString("non-matching-backup"),
},
{
name: "deleteBackupFile=false should error if snapshot delete fails",
backup: NewTestBackup().WithName("backup-1").
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
deleteBackupFile: false,
snapshots: sets.NewString("snapshot-1"),
backupFiles: sets.NewString("non-matching-backup"),
backupMetadataFiles: sets.NewString("non-matching-backup"),
restores: nil,
expectedBackupDelete: "",
expectedSnapshots: sets.NewString(),
expectedBackupFiles: sets.NewString("non-matching-backup"),
expectedMetadataFiles: sets.NewString("non-matching-backup"),
},
{
name: "related restores should be deleted",
backup: NewTestBackup().WithName("backup-1").Backup,
deleteBackupFile: false,
snapshots: sets.NewString(),
backupFiles: sets.NewString("non-matching-backup"),
backupMetadataFiles: sets.NewString("non-matching-backup"),
restores: []*api.Restore{
NewTestRestore(api.DefaultNamespace, "restore-1", api.RestorePhaseCompleted).WithBackup("backup-1").Restore,
NewTestRestore(api.DefaultNamespace, "restore-2", api.RestorePhaseCompleted).WithBackup("backup-2").Restore,
},
expectedRestoreDeletes: []string{"restore-1"},
expectedBackupDelete: "backup-1",
expectedSnapshots: sets.NewString(),
expectedBackupFiles: sets.NewString("non-matching-backup"),
expectedMetadataFiles: sets.NewString("non-matching-backup"),
},
}
for _, test := range tests {
var ()
t.Run(test.name, func(t *testing.T) {
var (
backupService = &fakeBackupService{
backupsByBucket: make(map[string][]*api.Backup),
backupMetadataByBucket: make(map[string][]*api.Backup),
}
snapshotService = &FakeSnapshotService{SnapshotsTaken: test.snapshots}
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
controller = NewGCController(
backupService,
snapshotService,
"bucket-1",
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
).(*gcController)
)
for file := range test.backupFiles {
backup := &api.Backup{ObjectMeta: metav1.ObjectMeta{Name: file}}
backupService.backupsByBucket["bucket-1"] = append(backupService.backupsByBucket["bucket-1"], backup)
}
for file := range test.backupMetadataFiles {
backup := &api.Backup{ObjectMeta: metav1.ObjectMeta{Name: file}}
backupService.backupMetadataByBucket["bucket-1"] = append(backupService.backupMetadataByBucket["bucket-1"], backup)
}
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)
for _, restore := range test.restores {
sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore)
}
// METHOD UNDER TEST
controller.garbageCollectBackup(test.backup, test.deleteBackupFile)
// VERIFY:
// remaining snapshots
assert.Equal(t, test.expectedSnapshots, snapshotService.SnapshotsTaken)
// remaining object storage backup files
expectedBackups := make([]*api.Backup, 0)
for file := range test.expectedBackupFiles {
backup := &api.Backup{ObjectMeta: metav1.ObjectMeta{Name: file}}
expectedBackups = append(expectedBackups, backup)
}
assert.Equal(t, expectedBackups, backupService.backupsByBucket["bucket-1"])
// remaining object storage backup metadata files
expectedBackups = make([]*api.Backup, 0)
for file := range test.expectedMetadataFiles {
backup := &api.Backup{ObjectMeta: metav1.ObjectMeta{Name: file}}
expectedBackups = append(expectedBackups, backup)
}
assert.Equal(t, expectedBackups, backupService.backupMetadataByBucket["bucket-1"])
expectedActions := make([]core.Action, 0)
// Restore client deletes
for _, restore := range test.expectedRestoreDeletes {
action := core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("restores"),
api.DefaultNamespace,
restore,
)
expectedActions = append(expectedActions, action)
}
// Backup client deletes
if test.expectedBackupDelete != "" {
action := core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("backups"),
api.DefaultNamespace,
test.expectedBackupDelete,
)
expectedActions = append(expectedActions, action)
}
assert.Equal(t, expectedActions, client.Actions())
})
}
}
func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
var (
backupService = &fakeBackupService{}
@ -289,24 +497,27 @@ func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
).(*gcController)
controller.clock = fakeClock
// PASS 1
controller.cleanBackups()
controller.processBackups()
assert.Equal(scenario.backups, backupService.backupsByBucket, "backups should not be garbage-collected yet.")
assert.Equal(scenario.snapshots, snapshotService.SnapshotsTaken, "snapshots should not be garbage-collected yet.")
// PASS 2
fakeClock.Step(1 * time.Minute)
controller.cleanBackups()
controller.processBackups()
assert.Equal(0, len(backupService.backupsByBucket[scenario.bucket]), "backups should have been garbage-collected.")
assert.Equal(0, len(snapshotService.SnapshotsTaken), "snapshots should have been garbage-collected.")
}
type fakeBackupService struct {
backupMetadataByBucket map[string][]*api.Backup
backupsByBucket map[string][]*api.Backup
mock.Mock
}
@ -343,7 +554,30 @@ func (s *fakeBackupService) DownloadBackup(bucket, name string) (io.ReadCloser,
return ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), nil
}
func (s *fakeBackupService) DeleteBackup(bucket, backupName string) error {
func (s *fakeBackupService) DeleteBackupMetadataFile(bucket, backupName string) error {
backups, found := s.backupMetadataByBucket[bucket]
if !found {
return errors.New("bucket not found")
}
deleteIdx := -1
for i, backup := range backups {
if backup.Name == backupName {
deleteIdx = i
break
}
}
if deleteIdx == -1 {
return errors.New("backup not found")
}
s.backupMetadataByBucket[bucket] = append(s.backupMetadataByBucket[bucket][0:deleteIdx], s.backupMetadataByBucket[bucket][deleteIdx+1:]...)
return nil
}
func (s *fakeBackupService) DeleteBackupFile(bucket, backupName string) error {
backups, err := s.GetAllBackups(bucket)
if err != nil {
return err

View File

@ -18,15 +18,27 @@ package kube
import (
"errors"
"fmt"
"regexp"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/pkg/api/v1"
"github.com/heptio/ark/pkg/util/collections"
)
// NamespaceAndName returns a string in the format <namespace>/<name>
func NamespaceAndName(metaAccessor metav1.ObjectMetaAccessor) string {
objMeta := metaAccessor.GetObjectMeta()
if objMeta == nil {
return ""
}
return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName())
}
// EnsureNamespaceExists attempts to create the provided Kubernetes namespace. It returns two values:
// a bool indicating whether or not the namespace was created, and an error if the create failed
// for a reason other than that the namespace already exists. Note that in the case where the