store backups & restores in backups/, restores/ subdirs in obj storage

Signed-off-by: Steve Kriss <steve@heptio.com>
Steve Kriss 2018-09-18 09:56:45 -06:00
parent 889b220a5a
commit 8bc7e4f6aa
10 changed files with 475 additions and 121 deletions

@@ -0,0 +1,112 @@
# Object Storage Layout Changes in v0.10
## Overview
Ark v0.10 includes breaking changes to where data is stored in your object storage bucket. You'll need to run a [one-time migration procedure](#upgrading-to-v0.10)
if you're upgrading from prior versions of Ark.
## Details
Prior to v0.10, Ark stored data in an object storage bucket using the following structure:
```
<your-bucket>/
    backup-1/
        ark-backup.json
        backup-1.tar.gz
        backup-1-logs.gz
        restore-of-backup-1-logs.gz
        restore-of-backup-1-results.gz
    backup-2/
        ark-backup.json
        backup-2.tar.gz
        backup-2-logs.gz
        restore-of-backup-2-logs.gz
        restore-of-backup-2-results.gz
    ...
```
As of v0.10, we've reorganized this layout to provide a cleaner and more extensible directory structure. The new layout looks like:
```
<your-bucket>[/<your-prefix>]/
    backups/
        backup-1/
            ark-backup.json
            backup-1.tar.gz
            backup-1-logs.gz
        backup-2/
            ark-backup.json
            backup-2.tar.gz
            backup-2-logs.gz
        ...
    restores/
        restore-of-backup-1/
            restore-of-backup-1-logs.gz
            restore-of-backup-1-results.gz
        restore-of-backup-2/
            restore-of-backup-2-logs.gz
            restore-of-backup-2-results.gz
        ...
    ...
```
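Concretely, each backup's objects keep their names and simply move under the new `backups/` top-level directory, while restore logs and results move into per-restore directories under `restores/`. The following minimal Go sketch illustrates the backup-key mapping (the `migratedBackupKey` helper is purely illustrative, not part of Ark):
```go
package main

import (
	"fmt"
	"path"
)

// migratedBackupKey shows how a pre-v0.10 backup object key maps into
// the v0.10 layout: the object keeps its name and is nested under the
// "backups/" top-level directory (below any configured bucket prefix).
func migratedBackupKey(prefix, oldKey string) string {
	return path.Join(prefix, "backups", oldKey)
}

func main() {
	fmt.Println(migratedBackupKey("", "backup-1/ark-backup.json"))
	// Output: backups/backup-1/ark-backup.json
}
```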
## Upgrading to v0.10
Before upgrading to v0.10, you'll need to run a one-time upgrade script to rearrange the contents of your existing Ark bucket(s) to be compatible with
the new layout.
Please note that the following script **will not** migrate existing restore logs/results into the new `restores/` subdirectory, so they will not be
accessible via `ark restore describe` or `ark restore logs`. They *will* remain in the relevant backup's subdirectory, where they are still manually
accessible and will eventually be garbage-collected along with the backup. We've taken this approach to keep the migration script simple
and less error-prone.
### rclone-Based Script
This script uses [rclone][1], which you can download and install following the instructions [here][2].
Please read through the script carefully before starting and execute it step-by-step.
```bash
ARK_BUCKET=<your-ark-bucket>
ARK_TEMP_MIGRATION_BUCKET=<a-temp-bucket-for-migration>
# 1. This is an interactive step that configures rclone to be
# able to access your storage provider. Follow the instructions,
# and keep track of the "remote name" for the next step:
rclone config
# 2. Store the name of the rclone remote that you just set up
# in Step #1:
RCLONE_REMOTE_NAME=<your-remote-name>
# 3. Create a temporary bucket to be used as a backup of your
# current Ark bucket's contents:
rclone mkdir ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}
# 4. Do a full copy of the contents of your Ark bucket into the
# temporary bucket:
rclone copy ${RCLONE_REMOTE_NAME}:${ARK_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}
# 5. Verify that the temporary bucket contains an exact copy of
# your Ark bucket's contents. You should see a short block
# of output stating "0 differences found":
rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}
# 6. Delete your Ark bucket's contents (this command does not
# delete the bucket itself, only the contents):
rclone delete ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}
# 7. Copy the contents of the temporary bucket into your Ark bucket,
# under the 'backups/' directory/prefix:
rclone copy ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/backups
# 8. Verify that the 'backups/' directory in your Ark bucket now
# contains an exact copy of the temporary bucket's contents:
rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/backups ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}
# 9. Once you've confirmed that Ark v0.10 works with your revised Ark
# bucket, you can delete the temporary migration bucket.
```
[1]: https://rclone.org/
[2]: https://rclone.org/downloads/

@@ -62,6 +62,7 @@ import (
clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
"github.com/heptio/ark/pkg/metrics"
"github.com/heptio/ark/pkg/persistence"
"github.com/heptio/ark/pkg/plugin"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
@@ -252,11 +253,14 @@ func (s *server) run() error {
return err
}
// check to ensure all Ark CRDs exist
if err := s.arkResourcesExist(); err != nil {
return err
}
if err := s.validateBackupStorageLocations(); err != nil {
return err
}
originalConfig, err := s.loadConfig()
if err != nil {
return err
@@ -391,6 +395,39 @@ func (s *server) arkResourcesExist() error {
return nil
}
// validateBackupStorageLocations checks to ensure all backup storage locations exist
// and have a compatible layout, and returns an error if not.
func (s *server) validateBackupStorageLocations() error {
s.logger.Info("Checking that all backup storage locations are valid")
locations, err := s.arkClient.ArkV1().BackupStorageLocations(s.namespace).List(metav1.ListOptions{})
if err != nil {
return errors.WithStack(err)
}
var invalid []string
for _, location := range locations.Items {
backupStore, err := persistence.NewObjectBackupStore(&location, s.pluginManager, s.logger)
if err != nil {
invalid = append(invalid, errors.Wrapf(err, "error getting backup store for location %q", location.Name).Error())
continue
}
if err := backupStore.IsValid(); err != nil {
invalid = append(invalid, errors.Wrapf(err,
"backup store for location %q is invalid (if upgrading from a pre-v0.10 version of Ark, please refer to https://heptio.github.io/ark/v0.10.0/storage-layout-reorg-v0.10 for instructions)",
location.Name,
).Error())
}
}
if len(invalid) > 0 {
return errors.Errorf("some backup storage locations are invalid: %s", strings.Join(invalid, "; "))
}
return nil
}
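// Illustrative example (not actual program output): if a location named
// "default" still uses the pre-v0.10 layout, the aggregated startup error
// reads roughly:
//
//	some backup storage locations are invalid: backup store for location
//	"default" is invalid (if upgrading from a pre-v0.10 version of Ark, please
//	refer to ... for instructions): Backup store contains invalid top-level
//	directories: [backup-1 backup-2]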
func (s *server) loadConfig() (*api.Config, error) {
s.logger.Info("Retrieving Ark configuration")
var (

@@ -261,7 +261,15 @@ func (c *backupDeletionController) processRequest(req *v1.DeleteBackupRequest) e
}
log.Info("Removing backup from backup storage")
if err := c.deleteBackupFromStorage(backup, log); err != nil {
pluginManager := c.newPluginManager(log)
defer pluginManager.CleanupClients()
backupStore, backupStoreErr := c.backupStoreForBackup(backup, pluginManager, log)
if backupStoreErr != nil {
errs = append(errs, backupStoreErr.Error())
} else if err := backupStore.DeleteBackup(backup.Name); err != nil {
errs = append(errs, err.Error())
}
@@ -276,6 +284,13 @@ func (c *backupDeletionController) processRequest(req *v1.DeleteBackupRequest) e
restoreLog := log.WithField("restore", kube.NamespaceAndName(restore))
restoreLog.Info("Deleting restore log/results from backup storage")
if err := backupStore.DeleteRestore(restore.Name); err != nil {
errs = append(errs, err.Error())
// if we couldn't delete the restore files, don't delete the API object
continue
}
restoreLog.Info("Deleting restore referencing backup")
if err := c.restoreClient.Restores(restore.Namespace).Delete(restore.Name, &metav1.DeleteOptions{}); err != nil {
errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(restore)).Error())
@@ -313,27 +328,19 @@ func (c *backupDeletionController) processRequest(req *v1.DeleteBackupRequest) e
return nil
}
func (c *backupDeletionController) deleteBackupFromStorage(backup *v1.Backup, log logrus.FieldLogger) error {
pluginManager := c.newPluginManager(log)
defer pluginManager.CleanupClients()
func (c *backupDeletionController) backupStoreForBackup(backup *v1.Backup, pluginManager plugin.Manager, log logrus.FieldLogger) (persistence.BackupStore, error) {
backupLocation, err := c.backupLocationLister.BackupStorageLocations(backup.Namespace).Get(backup.Spec.StorageLocation)
if err != nil {
return errors.WithStack(err)
return nil, errors.WithStack(err)
}
backupStore, err := c.newBackupStore(backupLocation, pluginManager, log)
if err != nil {
return err
return nil, err
}
if err := backupStore.DeleteBackup(backup.Name); err != nil {
return errors.Wrap(err, "error deleting backup from backup storage")
}
return nil
return backupStore, nil
}
func (c *backupDeletionController) deleteExistingDeletionRequests(req *v1.DeleteBackupRequest, log logrus.FieldLogger) []error {
log.Info("Removing existing deletion requests for backup")
selector := labels.SelectorFromSet(labels.Set(map[string]string{

@@ -410,6 +410,8 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
})
td.backupStore.On("DeleteBackup", td.req.Spec.BackupName).Return(nil)
td.backupStore.On("DeleteRestore", "restore-1").Return(nil)
td.backupStore.On("DeleteRestore", "restore-2").Return(nil)
err := td.controller.processRequest(td.req)
require.NoError(t, err)

@@ -180,7 +180,7 @@ func (c *downloadRequestController) generatePreSignedURL(downloadRequest *v1.Dow
return errors.WithStack(err)
}
if update.Status.DownloadURL, err = backupStore.GetDownloadURL(backupName, downloadRequest.Spec.Target); err != nil {
if update.Status.DownloadURL, err = backupStore.GetDownloadURL(downloadRequest.Spec.Target); err != nil {
return err
}

@@ -274,7 +274,7 @@ func TestProcessDownloadRequest(t *testing.T) {
}
if tc.expectGetsURL {
harness.backupStore.On("GetDownloadURL", tc.backup.Name, tc.downloadRequest.Spec.Target).Return("a-url", nil)
harness.backupStore.On("GetDownloadURL", tc.downloadRequest.Spec.Target).Return("a-url", nil)
}
// exercise method under test

@@ -25,6 +25,20 @@ func (_m *BackupStore) DeleteBackup(name string) error {
return r0
}
// DeleteRestore provides a mock function with given fields: name
func (_m *BackupStore) DeleteRestore(name string) error {
ret := _m.Called(name)
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(name)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetBackupContents provides a mock function with given fields: name
func (_m *BackupStore) GetBackupContents(name string) (io.ReadCloser, error) {
ret := _m.Called(name)
@@ -71,20 +85,20 @@ func (_m *BackupStore) GetBackupMetadata(name string) (*v1.Backup, error) {
return r0, r1
}
// GetDownloadURL provides a mock function with given fields: backup, target
func (_m *BackupStore) GetDownloadURL(backup string, target v1.DownloadTarget) (string, error) {
ret := _m.Called(backup, target)
// GetDownloadURL provides a mock function with given fields: target
func (_m *BackupStore) GetDownloadURL(target v1.DownloadTarget) (string, error) {
ret := _m.Called(target)
var r0 string
if rf, ok := ret.Get(0).(func(string, v1.DownloadTarget) string); ok {
r0 = rf(backup, target)
if rf, ok := ret.Get(0).(func(v1.DownloadTarget) string); ok {
r0 = rf(target)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, v1.DownloadTarget) error); ok {
r1 = rf(backup, target)
if rf, ok := ret.Get(1).(func(v1.DownloadTarget) error); ok {
r1 = rf(target)
} else {
r1 = ret.Error(1)
}
@@ -92,6 +106,20 @@ func (_m *BackupStore) GetDownloadURL(backup string, target v1.DownloadTarget) (
return r0, r1
}
// IsValid provides a mock function with given fields:
func (_m *BackupStore) IsValid() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// ListBackups provides a mock function with given fields:
func (_m *BackupStore) ListBackups() ([]*v1.Backup, error) {
ret := _m.Called()

@@ -17,7 +17,6 @@ limitations under the License.
package persistence
import (
"fmt"
"io"
"io/ioutil"
"strings"
@@ -36,6 +35,8 @@ import (
// BackupStore defines operations for creating, retrieving, and deleting
// Ark backup and restore data in/from a persistent backup store.
type BackupStore interface {
IsValid() error
ListBackups() ([]*arkv1api.Backup, error)
PutBackup(name string, metadata, contents, log io.Reader) error
@@ -45,53 +46,18 @@ type BackupStore interface {
PutRestoreLog(backup, restore string, log io.Reader) error
PutRestoreResults(backup, restore string, results io.Reader) error
DeleteRestore(name string) error
GetDownloadURL(backup string, target arkv1api.DownloadTarget) (string, error)
GetDownloadURL(target arkv1api.DownloadTarget) (string, error)
}
const (
// DownloadURLTTL is how long a download URL is valid for.
DownloadURLTTL = 10 * time.Minute
backupMetadataFileFormatString = "%s/ark-backup.json"
backupFileFormatString = "%s/%s.tar.gz"
backupLogFileFormatString = "%s/%s-logs.gz"
restoreLogFileFormatString = "%s/restore-%s-logs.gz"
restoreResultsFileFormatString = "%s/restore-%s-results.gz"
)
func getPrefix(prefix string) string {
if prefix == "" || strings.HasSuffix(prefix, "/") {
return prefix
}
return prefix + "/"
}
func getBackupMetadataKey(prefix, backup string) string {
return prefix + fmt.Sprintf(backupMetadataFileFormatString, backup)
}
func getBackupContentsKey(prefix, backup string) string {
return prefix + fmt.Sprintf(backupFileFormatString, backup, backup)
}
func getBackupLogKey(prefix, backup string) string {
return prefix + fmt.Sprintf(backupLogFileFormatString, backup, backup)
}
func getRestoreLogKey(prefix, backup, restore string) string {
return prefix + fmt.Sprintf(restoreLogFileFormatString, backup, restore)
}
func getRestoreResultsKey(prefix, backup, restore string) string {
return prefix + fmt.Sprintf(restoreResultsFileFormatString, backup, restore)
}
// DownloadURLTTL is how long a download URL is valid for.
const DownloadURLTTL = 10 * time.Minute
type objectBackupStore struct {
objectStore cloudprovider.ObjectStore
bucket string
prefix string
layout *objectStoreLayout
logger logrus.FieldLogger
}
@@ -129,23 +95,46 @@ func NewObjectBackupStore(location *arkv1api.BackupStorageLocation, objectStoreG
return nil, err
}
prefix := getPrefix(location.Spec.ObjectStorage.Prefix)
log := logger.WithFields(logrus.Fields(map[string]interface{}{
"bucket": location.Spec.ObjectStorage.Bucket,
"prefix": prefix,
"prefix": location.Spec.ObjectStorage.Prefix,
}))
return &objectBackupStore{
objectStore: objectStore,
bucket: location.Spec.ObjectStorage.Bucket,
prefix: prefix,
layout: newObjectStoreLayout(location.Spec.ObjectStorage.Prefix),
logger: log,
}, nil
}
func (s *objectBackupStore) IsValid() error {
dirs, err := s.objectStore.ListCommonPrefixes(s.bucket, s.layout.rootPrefix, "/")
if err != nil {
return errors.WithStack(err)
}
var invalid []string
for _, dir := range dirs {
subdir := strings.TrimSuffix(strings.TrimPrefix(dir, s.layout.rootPrefix), "/")
if !validRootDirs.Has(subdir) {
invalid = append(invalid, subdir)
}
}
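// For example, with prefix "cluster-1", ListCommonPrefixes returns values
// like "cluster-1/backups/" or "cluster-1/unsupported-dir/"; trimming the
// root prefix and the trailing delimiter leaves "backups" / "unsupported-dir",
// and anything outside validRootDirs ("backups", "restores") is collected as invalid.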
if len(invalid) > 0 {
// don't include more than 3 invalid dirs in the error message
if len(invalid) > 3 {
return errors.Errorf("Backup store contains %d invalid top-level directories: %v", len(invalid), append(invalid[:3], "..."))
}
return errors.Errorf("Backup store contains invalid top-level directories: %v", invalid)
}
return nil
}
func (s *objectBackupStore) ListBackups() ([]*arkv1api.Backup, error) {
prefixes, err := s.objectStore.ListCommonPrefixes(s.bucket, s.prefix, "/")
prefixes, err := s.objectStore.ListCommonPrefixes(s.bucket, s.layout.backupsDir, "/")
if err != nil {
return nil, err
}
@@ -157,10 +146,10 @@ func (s *objectBackupStore) ListBackups() ([]*arkv1api.Backup, error) {
for _, prefix := range prefixes {
// values returned from a call to cloudprovider.ObjectStore's
// ListcommonPrefixes method return the *full* prefix, inclusive
// of s.prefix, and include the delimiter ("/") as a suffix. Trim
// ListCommonPrefixes method return the *full* prefix, inclusive
// of s.layout.backupsDir, and include the delimiter ("/") as a suffix. Trim
// each of those off to get the backup name.
backupName := strings.TrimSuffix(strings.TrimPrefix(prefix, s.prefix), "/")
backupName := strings.TrimSuffix(strings.TrimPrefix(prefix, s.layout.backupsDir), "/")
backup, err := s.GetBackupMetadata(backupName)
if err != nil {
@@ -175,7 +164,7 @@ func (s *objectBackupStore) ListBackups() ([]*arkv1api.Backup, error) {
}
func (s *objectBackupStore) PutBackup(name string, metadata io.Reader, contents io.Reader, log io.Reader) error {
if err := seekAndPutObject(s.objectStore, s.bucket, getBackupLogKey(s.prefix, name), log); err != nil {
if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupLogKey(name), log); err != nil {
// Uploading the log file is best-effort; if it fails, we log the error but it doesn't impact the
// backup's status.
s.logger.WithError(err).WithField("backup", name).Error("Error uploading log file")
@@ -188,13 +177,13 @@ func (s *objectBackupStore) PutBackup(name string, metadata io.Reader, contents
return nil
}
if err := seekAndPutObject(s.objectStore, s.bucket, getBackupMetadataKey(s.prefix, name), metadata); err != nil {
if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupMetadataKey(name), metadata); err != nil {
// failure to upload metadata file is a hard-stop
return err
}
if err := seekAndPutObject(s.objectStore, s.bucket, getBackupContentsKey(s.prefix, name), contents); err != nil {
deleteErr := s.objectStore.DeleteObject(s.bucket, getBackupMetadataKey(s.prefix, name))
if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupContentsKey(name), contents); err != nil {
deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(name))
return kerrors.NewAggregate([]error{err, deleteErr})
}
@@ -202,7 +191,7 @@ func (s *objectBackupStore) PutBackup(name string, metadata io.Reader, contents
}
func (s *objectBackupStore) GetBackupMetadata(name string) (*arkv1api.Backup, error) {
key := getBackupMetadataKey(s.prefix, name)
key := s.layout.getBackupMetadataKey(name)
res, err := s.objectStore.GetObject(s.bucket, key)
if err != nil {
@@ -231,11 +220,30 @@ func (s *objectBackupStore) GetBackupMetadata(name string) (*arkv1api.Backup, er
}
func (s *objectBackupStore) GetBackupContents(name string) (io.ReadCloser, error) {
return s.objectStore.GetObject(s.bucket, getBackupContentsKey(s.prefix, name))
return s.objectStore.GetObject(s.bucket, s.layout.getBackupContentsKey(name))
}
func (s *objectBackupStore) DeleteBackup(name string) error {
objects, err := s.objectStore.ListObjects(s.bucket, s.prefix+name+"/")
objects, err := s.objectStore.ListObjects(s.bucket, s.layout.getBackupDir(name))
if err != nil {
return err
}
var errs []error
for _, key := range objects {
s.logger.WithFields(logrus.Fields{
"key": key,
}).Debug("Trying to delete object")
if err := s.objectStore.DeleteObject(s.bucket, key); err != nil {
errs = append(errs, err)
}
}
return errors.WithStack(kerrors.NewAggregate(errs))
}
func (s *objectBackupStore) DeleteRestore(name string) error {
objects, err := s.objectStore.ListObjects(s.bucket, s.layout.getRestoreDir(name))
if err != nil {
return err
}
@@ -254,23 +262,23 @@ func (s *objectBackupStore) DeleteBackup(name string) error {
}
func (s *objectBackupStore) PutRestoreLog(backup string, restore string, log io.Reader) error {
return s.objectStore.PutObject(s.bucket, getRestoreLogKey(s.prefix, backup, restore), log)
return s.objectStore.PutObject(s.bucket, s.layout.getRestoreLogKey(restore), log)
}
func (s *objectBackupStore) PutRestoreResults(backup string, restore string, results io.Reader) error {
return s.objectStore.PutObject(s.bucket, getRestoreResultsKey(s.prefix, backup, restore), results)
return s.objectStore.PutObject(s.bucket, s.layout.getRestoreResultsKey(restore), results)
}
func (s *objectBackupStore) GetDownloadURL(backup string, target arkv1api.DownloadTarget) (string, error) {
func (s *objectBackupStore) GetDownloadURL(target arkv1api.DownloadTarget) (string, error) {
switch target.Kind {
case arkv1api.DownloadTargetKindBackupContents:
return s.objectStore.CreateSignedURL(s.bucket, getBackupContentsKey(s.prefix, backup), DownloadURLTTL)
return s.objectStore.CreateSignedURL(s.bucket, s.layout.getBackupContentsKey(target.Name), DownloadURLTTL)
case arkv1api.DownloadTargetKindBackupLog:
return s.objectStore.CreateSignedURL(s.bucket, getBackupLogKey(s.prefix, backup), DownloadURLTTL)
return s.objectStore.CreateSignedURL(s.bucket, s.layout.getBackupLogKey(target.Name), DownloadURLTTL)
case arkv1api.DownloadTargetKindRestoreLog:
return s.objectStore.CreateSignedURL(s.bucket, getRestoreLogKey(s.prefix, backup, target.Name), DownloadURLTTL)
return s.objectStore.CreateSignedURL(s.bucket, s.layout.getRestoreLogKey(target.Name), DownloadURLTTL)
case arkv1api.DownloadTargetKindRestoreResults:
return s.objectStore.CreateSignedURL(s.bucket, getRestoreResultsKey(s.prefix, backup, target.Name), DownloadURLTTL)
return s.objectStore.CreateSignedURL(s.bucket, s.layout.getRestoreResultsKey(target.Name), DownloadURLTTL)
default:
return "", errors.Errorf("unsupported download target kind %q", target.Kind)
}
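// Illustrative example (not part of this commit): a DownloadTarget with
// Kind=DownloadTargetKindRestoreLog and Name="restore-1" now signs the key
// "restores/restore-1/restore-restore-1-logs.gz" (under any configured root
// prefix); previously the backup name had to be passed alongside the target.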

@@ -0,0 +1,76 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistence
import (
"fmt"
"path"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
)
var validRootDirs = sets.NewString("backups", "restores")
type objectStoreLayout struct {
rootPrefix string
backupsDir string
restoresDir string
}
func newObjectStoreLayout(prefix string) *objectStoreLayout {
if prefix != "" && !strings.HasSuffix(prefix, "/") {
prefix = prefix + "/"
}
backupsDir := path.Join(prefix, "backups") + "/"
restoresDir := path.Join(prefix, "restores") + "/"
return &objectStoreLayout{
rootPrefix: prefix,
backupsDir: backupsDir,
restoresDir: restoresDir,
}
}
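// e.g. newObjectStoreLayout("cluster-1") yields rootPrefix "cluster-1/",
// backupsDir "cluster-1/backups/" and restoresDir "cluster-1/restores/",
// while an empty prefix yields "", "backups/" and "restores/".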
func (l *objectStoreLayout) getBackupDir(backup string) string {
return path.Join(l.backupsDir, backup) + "/"
}
func (l *objectStoreLayout) getRestoreDir(restore string) string {
return path.Join(l.restoresDir, restore) + "/"
}
func (l *objectStoreLayout) getBackupMetadataKey(backup string) string {
return path.Join(l.backupsDir, backup, "ark-backup.json")
}
func (l *objectStoreLayout) getBackupContentsKey(backup string) string {
return path.Join(l.backupsDir, backup, fmt.Sprintf("%s.tar.gz", backup))
}
func (l *objectStoreLayout) getBackupLogKey(backup string) string {
return path.Join(l.backupsDir, backup, fmt.Sprintf("%s-logs.gz", backup))
}
func (l *objectStoreLayout) getRestoreLogKey(restore string) string {
return path.Join(l.restoresDir, restore, fmt.Sprintf("restore-%s-logs.gz", restore))
}
func (l *objectStoreLayout) getRestoreResultsKey(restore string) string {
return path.Join(l.restoresDir, restore, fmt.Sprintf("restore-%s-results.gz", restore))
}
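// Illustrative key examples (not part of this commit), for a layout
// created with prefix "cluster-1":
//
//	l := newObjectStoreLayout("cluster-1")
//	l.getBackupMetadataKey("backup-1") // "cluster-1/backups/backup-1/ark-backup.json"
//	l.getBackupContentsKey("backup-1") // "cluster-1/backups/backup-1/backup-1.tar.gz"
//	l.getRestoreLogKey("restore-1")    // "cluster-1/restores/restore-1/restore-restore-1-logs.gz"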

@@ -53,7 +53,7 @@ func newObjectBackupStoreTestHarness(bucket, prefix string) *objectBackupStoreTe
objectBackupStore: &objectBackupStore{
objectStore: objectStore,
bucket: bucket,
prefix: prefix,
layout: newObjectStoreLayout(prefix),
logger: arktest.NewLogger(),
},
objectStore: objectStore,
@@ -62,6 +62,99 @@ func newObjectBackupStoreTestHarness(bucket, prefix string) *objectBackupStoreTe
}
}
func TestIsValid(t *testing.T) {
tests := []struct {
name string
prefix string
storageData cloudprovider.BucketData
expectErr bool
}{
{
name: "empty backup store with no prefix is valid",
expectErr: false,
},
{
name: "empty backup store with a prefix is valid",
prefix: "bar",
expectErr: false,
},
{
name: "backup store with no prefix and only unsupported directories is invalid",
storageData: map[string][]byte{
"backup-1/ark-backup.json": {},
"backup-2/ark-backup.json": {},
},
expectErr: true,
},
{
name: "backup store with a prefix and only unsupported directories is invalid",
prefix: "backups",
storageData: map[string][]byte{
"backups/backup-1/ark-backup.json": {},
"backups/backup-2/ark-backup.json": {},
},
expectErr: true,
},
{
name: "backup store with no prefix and both supported and unsupported directories is invalid",
storageData: map[string][]byte{
"backups/backup-1/ark-backup.json": {},
"backups/backup-2/ark-backup.json": {},
"restores/restore-1/foo": {},
"unsupported-dir/foo": {},
},
expectErr: true,
},
{
name: "backup store with a prefix and both supported and unsupported directories is invalid",
prefix: "cluster-1",
storageData: map[string][]byte{
"cluster-1/backups/backup-1/ark-backup.json": {},
"cluster-1/backups/backup-2/ark-backup.json": {},
"cluster-1/restores/restore-1/foo": {},
"cluster-1/unsupported-dir/foo": {},
},
expectErr: true,
},
{
name: "backup store with no prefix and only supported directories is valid",
storageData: map[string][]byte{
"backups/backup-1/ark-backup.json": {},
"backups/backup-2/ark-backup.json": {},
"restores/restore-1/foo": {},
},
expectErr: false,
},
{
name: "backup store with a prefix and only supported directories is valid",
prefix: "cluster-1",
storageData: map[string][]byte{
"cluster-1/backups/backup-1/ark-backup.json": {},
"cluster-1/backups/backup-2/ark-backup.json": {},
"cluster-1/restores/restore-1/foo": {},
},
expectErr: false,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
harness := newObjectBackupStoreTestHarness("foo", tc.prefix)
for key, obj := range tc.storageData {
require.NoError(t, harness.objectStore.PutObject(harness.bucket, key, bytes.NewReader(obj)))
}
err := harness.IsValid()
if tc.expectErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestListBackups(t *testing.T) {
tests := []struct {
name string
@@ -73,8 +166,8 @@ func TestListBackups(t *testing.T) {
{
name: "normal case",
storageData: map[string][]byte{
"backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}),
"backup-2/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}),
"backups/backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}),
"backups/backup-2/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}),
},
expectedRes: []*api.Backup{
{
@@ -91,8 +184,8 @@ func TestListBackups(t *testing.T) {
name: "normal case with backup store prefix",
prefix: "ark-backups/",
storageData: map[string][]byte{
"ark-backups/backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}),
"ark-backups/backup-2/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}),
"ark-backups/backups/backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}),
"ark-backups/backups/backup-2/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}),
},
expectedRes: []*api.Backup{
{
@@ -108,8 +201,8 @@ func TestListBackups(t *testing.T) {
{
name: "backup that can't be decoded is ignored",
storageData: map[string][]byte{
"backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}),
"backup-2/ark-backup.json": []byte("this is not valid backup JSON"),
"backups/backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}),
"backups/backup-2/ark-backup.json": []byte("this is not valid backup JSON"),
},
expectedRes: []*api.Backup{
{
@@ -170,7 +263,7 @@ func TestPutBackup(t *testing.T) {
contents: newStringReadSeeker("contents"),
log: newStringReadSeeker("log"),
expectedErr: "",
expectedKeys: []string{"backup-1/ark-backup.json", "backup-1/backup-1.tar.gz", "backup-1/backup-1-logs.gz"},
expectedKeys: []string{"backups/backup-1/ark-backup.json", "backups/backup-1/backup-1.tar.gz", "backups/backup-1/backup-1-logs.gz"},
},
{
name: "normal case with backup store prefix",
@@ -179,7 +272,7 @@ func TestPutBackup(t *testing.T) {
contents: newStringReadSeeker("contents"),
log: newStringReadSeeker("log"),
expectedErr: "",
expectedKeys: []string{"prefix-1/backup-1/ark-backup.json", "prefix-1/backup-1/backup-1.tar.gz", "prefix-1/backup-1/backup-1-logs.gz"},
expectedKeys: []string{"prefix-1/backups/backup-1/ark-backup.json", "prefix-1/backups/backup-1/backup-1.tar.gz", "prefix-1/backups/backup-1/backup-1-logs.gz"},
},
{
name: "error on metadata upload does not upload data",
@@ -187,7 +280,7 @@ func TestPutBackup(t *testing.T) {
contents: newStringReadSeeker("contents"),
log: newStringReadSeeker("log"),
expectedErr: "error readers return errors",
expectedKeys: []string{"backup-1/backup-1-logs.gz"},
expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"},
},
{
name: "error on data upload deletes metadata",
@@ -195,7 +288,7 @@ func TestPutBackup(t *testing.T) {
contents: new(errorReader),
log: newStringReadSeeker("log"),
expectedErr: "error readers return errors",
expectedKeys: []string{"backup-1/backup-1-logs.gz"},
expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"},
},
{
name: "error on log upload is ok",
@@ -203,7 +296,7 @@ func TestPutBackup(t *testing.T) {
contents: newStringReadSeeker("bar"),
log: new(errorReader),
expectedErr: "",
expectedKeys: []string{"backup-1/ark-backup.json", "backup-1/backup-1.tar.gz"},
expectedKeys: []string{"backups/backup-1/ark-backup.json", "backups/backup-1/backup-1.tar.gz"},
},
{
name: "don't upload data when metadata is nil",
@@ -211,7 +304,7 @@ func TestPutBackup(t *testing.T) {
contents: newStringReadSeeker("contents"),
log: newStringReadSeeker("log"),
expectedErr: "",
expectedKeys: []string{"backup-1/backup-1-logs.gz"},
expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"},
},
}
@@ -231,7 +324,7 @@ func TestPutBackup(t *testing.T) {
func TestGetBackupContents(t *testing.T) {
harness := newObjectBackupStoreTestHarness("test-bucket", "")
harness.objectStore.PutObject(harness.bucket, "test-backup/test-backup.tar.gz", newStringReadSeeker("foo"))
harness.objectStore.PutObject(harness.bucket, "backups/test-backup/test-backup.tar.gz", newStringReadSeeker("foo"))
rc, err := harness.GetBackupContents("test-backup")
require.NoError(t, err)
@@ -270,14 +363,14 @@ func TestDeleteBackup(t *testing.T) {
backupStore := &objectBackupStore{
objectStore: objectStore,
bucket: "test-bucket",
prefix: test.prefix,
layout: newObjectStoreLayout(test.prefix),
logger: arktest.NewLogger(),
}
defer objectStore.AssertExpectations(t)
objects := []string{test.prefix + "bak/ark-backup.json", test.prefix + "bak/bak.tar.gz", test.prefix + "bak/bak.log.gz"}
objects := []string{test.prefix + "backups/bak/ark-backup.json", test.prefix + "backups/bak/bak.tar.gz", test.prefix + "backups/bak/bak.log.gz"}
objectStore.On("ListObjects", backupStore.bucket, test.prefix+"bak/").Return(objects, test.listObjectsError)
objectStore.On("ListObjects", backupStore.bucket, test.prefix+"backups/bak/").Return(objects, test.listObjectsError)
for i, obj := range objects {
var err error
if i < len(test.deleteErrors) {
@@ -299,7 +392,6 @@ func TestGetDownloadURL(t *testing.T) {
name string
targetKind api.DownloadTargetKind
targetName string
directory string
prefix string
expectedKey string
}{
@@ -307,58 +399,50 @@
name: "backup contents",
targetKind: api.DownloadTargetKindBackupContents,
targetName: "my-backup",
directory: "my-backup",
expectedKey: "my-backup/my-backup.tar.gz",
expectedKey: "backups/my-backup/my-backup.tar.gz",
},
{
name: "backup log",
targetKind: api.DownloadTargetKindBackupLog,
targetName: "my-backup",
directory: "my-backup",
expectedKey: "my-backup/my-backup-logs.gz",
expectedKey: "backups/my-backup/my-backup-logs.gz",
},
{
name: "scheduled backup contents",
targetKind: api.DownloadTargetKindBackupContents,
targetName: "my-backup-20170913154901",
directory: "my-backup-20170913154901",
expectedKey: "my-backup-20170913154901/my-backup-20170913154901.tar.gz",
expectedKey: "backups/my-backup-20170913154901/my-backup-20170913154901.tar.gz",
},
{
name: "scheduled backup log",
targetKind: api.DownloadTargetKindBackupLog,
targetName: "my-backup-20170913154901",
directory: "my-backup-20170913154901",
expectedKey: "my-backup-20170913154901/my-backup-20170913154901-logs.gz",
expectedKey: "backups/my-backup-20170913154901/my-backup-20170913154901-logs.gz",
},
{
name: "backup contents with backup store prefix",
targetKind: api.DownloadTargetKindBackupContents,
targetName: "my-backup",
directory: "my-backup",
prefix: "ark-backups/",
expectedKey: "ark-backups/my-backup/my-backup.tar.gz",
expectedKey: "ark-backups/backups/my-backup/my-backup.tar.gz",
},
{
name: "restore log",
targetKind: api.DownloadTargetKindRestoreLog,
targetName: "b-20170913154901",
directory: "b",
expectedKey: "b/restore-b-20170913154901-logs.gz",
expectedKey: "restores/b-20170913154901/restore-b-20170913154901-logs.gz",
},
{
name: "restore results",
targetKind: api.DownloadTargetKindRestoreResults,
targetName: "b-20170913154901",
directory: "b",
expectedKey: "b/restore-b-20170913154901-results.gz",
expectedKey: "restores/b-20170913154901/restore-b-20170913154901-results.gz",
},
{
name: "restore results - backup has multiple dashes (e.g. restore of scheduled backup)",
targetKind: api.DownloadTargetKindRestoreResults,
targetName: "b-cool-20170913154901-20170913154902",
directory: "b-cool-20170913154901",
expectedKey: "b-cool-20170913154901/restore-b-cool-20170913154901-20170913154902-results.gz",
expectedKey: "restores/b-cool-20170913154901-20170913154902/restore-b-cool-20170913154901-20170913154902-results.gz",
},
}
@@ -368,7 +452,7 @@ func TestGetDownloadURL(t *testing.T) {
require.NoError(t, harness.objectStore.PutObject("test-bucket", test.expectedKey, newStringReadSeeker("foo")))
url, err := harness.GetDownloadURL(test.directory, api.DownloadTarget{Kind: test.targetKind, Name: test.targetName})
url, err := harness.GetDownloadURL(api.DownloadTarget{Kind: test.targetKind, Name: test.targetName})
require.NoError(t, err)
assert.Equal(t, "a-url", url)
})