Merge pull request #409 from skriss/fix-398

get backup name from restore spec when handling restore DownloadRequests
Andy Goldstein 2018-04-10 13:29:07 -04:00 committed by GitHub
commit 258f3e011e
6 changed files with 76 additions and 62 deletions
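
Before this change, the backup directory for restore downloads was derived by splitting the restore's name on its last dash (see the extractBackupName helper removed below), which only works when the restore is named <backup name>-<timestamp>. A minimal, standalone sketch of that heuristic and where it breaks, using hypothetical names:

package main

import (
	"fmt"
	"strings"
)

// extractBackupName mirrors the helper removed in this PR: it assumes the
// restore name is <backup name>-<timestamp> and strips the last dash segment.
func extractBackupName(s string) string {
	i := strings.LastIndex(s, "-")
	if i < 0 {
		i = len(s)
	}
	return s[0:i]
}

func main() {
	// Fine when the restore follows the naming convention:
	fmt.Println(extractBackupName("my-backup-20170913154901")) // my-backup
	// A user-chosen restore name (hypothetical) breaks the heuristic, so the
	// derived directory no longer matches the real backup:
	fmt.Println(extractBackupName("nightly-restore")) // nightly
}

The controller change below avoids the heuristic entirely by reading Spec.BackupName from the Restore object.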

View File

@@ -21,7 +21,6 @@ import (
"fmt"
"io"
"io/ioutil"
"strings"
"time"
"github.com/pkg/errors"
@@ -55,7 +54,7 @@ type BackupService interface {
// CreateSignedURL creates a pre-signed URL that can be used to download a file from object
// storage. The URL expires after ttl.
CreateSignedURL(target api.DownloadTarget, bucket string, ttl time.Duration) (string, error)
CreateSignedURL(target api.DownloadTarget, bucket, directory string, ttl time.Duration) (string, error)
// UploadRestoreLog uploads the restore's log file to object storage.
UploadRestoreLog(bucket, backup, restore string, log io.Reader) error
@@ -78,24 +77,24 @@ const (
restoreResultsFileFormatString = "%s/restore-%s-results.gz"
)
func getMetadataKey(backup string) string {
return fmt.Sprintf(metadataFileFormatString, backup)
func getMetadataKey(directory string) string {
return fmt.Sprintf(metadataFileFormatString, directory)
}
func getBackupContentsKey(backup string) string {
return fmt.Sprintf(backupFileFormatString, backup, backup)
func getBackupContentsKey(directory, backup string) string {
return fmt.Sprintf(backupFileFormatString, directory, backup)
}
func getBackupLogKey(backup string) string {
return fmt.Sprintf(backupLogFileFormatString, backup, backup)
func getBackupLogKey(directory, backup string) string {
return fmt.Sprintf(backupLogFileFormatString, directory, backup)
}
func getRestoreLogKey(backup, restore string) string {
return fmt.Sprintf(restoreLogFileFormatString, backup, restore)
func getRestoreLogKey(directory, restore string) string {
return fmt.Sprintf(restoreLogFileFormatString, directory, restore)
}
func getRestoreResultsKey(backup, restore string) string {
return fmt.Sprintf(restoreResultsFileFormatString, backup, restore)
func getRestoreResultsKey(directory, restore string) string {
return fmt.Sprintf(restoreResultsFileFormatString, directory, restore)
}
type backupService struct {
@@ -141,7 +140,7 @@ func (br *backupService) seekAndPutObject(bucket, key string, file io.Reader) er
func (br *backupService) UploadBackup(bucket, backupName string, metadata, backup, log io.Reader) error {
// Uploading the log file is best-effort; if it fails, we log the error but it doesn't impact the
// backup's status.
logKey := getBackupLogKey(backupName)
logKey := getBackupLogKey(backupName, backupName)
if err := br.seekAndPutObject(bucket, logKey, log); err != nil {
br.logger.WithError(err).WithFields(logrus.Fields{
"bucket": bucket,
@@ -165,7 +164,7 @@ func (br *backupService) UploadBackup(bucket, backupName string, metadata, backu
if backup != nil {
// upload tar file
if err := br.seekAndPutObject(bucket, getBackupContentsKey(backupName), backup); err != nil {
if err := br.seekAndPutObject(bucket, getBackupContentsKey(backupName, backupName), backup); err != nil {
// try to delete the metadata file since the data upload failed
deleteErr := br.objectStore.DeleteObject(bucket, metadataKey)
@@ -177,7 +176,7 @@ func (br *backupService) UploadBackup(bucket, backupName string, metadata, backu
}
func (br *backupService) DownloadBackup(bucket, backupName string) (io.ReadCloser, error) {
return br.objectStore.GetObject(bucket, getBackupContentsKey(backupName))
return br.objectStore.GetObject(bucket, getBackupContentsKey(backupName, backupName))
}
func (br *backupService) GetAllBackups(bucket string) ([]*api.Backup, error) {
@@ -251,32 +250,21 @@ func (br *backupService) DeleteBackupDir(bucket, backupName string) error {
return errors.WithStack(kerrors.NewAggregate(errs))
}
func (br *backupService) CreateSignedURL(target api.DownloadTarget, bucket string, ttl time.Duration) (string, error) {
func (br *backupService) CreateSignedURL(target api.DownloadTarget, bucket, directory string, ttl time.Duration) (string, error) {
switch target.Kind {
case api.DownloadTargetKindBackupContents:
return br.objectStore.CreateSignedURL(bucket, getBackupContentsKey(target.Name), ttl)
return br.objectStore.CreateSignedURL(bucket, getBackupContentsKey(directory, target.Name), ttl)
case api.DownloadTargetKindBackupLog:
return br.objectStore.CreateSignedURL(bucket, getBackupLogKey(target.Name), ttl)
return br.objectStore.CreateSignedURL(bucket, getBackupLogKey(directory, target.Name), ttl)
case api.DownloadTargetKindRestoreLog:
backup := extractBackupName(target.Name)
return br.objectStore.CreateSignedURL(bucket, getRestoreLogKey(backup, target.Name), ttl)
return br.objectStore.CreateSignedURL(bucket, getRestoreLogKey(directory, target.Name), ttl)
case api.DownloadTargetKindRestoreResults:
backup := extractBackupName(target.Name)
return br.objectStore.CreateSignedURL(bucket, getRestoreResultsKey(backup, target.Name), ttl)
return br.objectStore.CreateSignedURL(bucket, getRestoreResultsKey(directory, target.Name), ttl)
default:
return "", errors.Errorf("unsupported download target kind %q", target.Kind)
}
}
func extractBackupName(s string) string {
// restore name is formatted as <backup name>-<timestamp>
i := strings.LastIndex(s, "-")
if i < 0 {
i = len(s)
}
return s[0:i]
}
func (br *backupService) UploadRestoreLog(bucket, backup, restore string, log io.Reader) error {
key := getRestoreLogKey(backup, restore)
return br.objectStore.PutObject(bucket, key, log)
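
With the directory passed explicitly, callers decide which backup directory an object key lives under instead of re-deriving it from a name. A standalone sketch of the resulting layout; the names are examples and the format strings are inferred from the expected keys in the test file below:

package main

import "fmt"

func main() {
	// Hypothetical backup "my-backup" and a restore "nightly-restore" created from it.
	directory := "my-backup"
	fmt.Printf("%s/%s.tar.gz\n", directory, "my-backup")                    // my-backup/my-backup.tar.gz
	fmt.Printf("%s/%s-logs.gz\n", directory, "my-backup")                   // my-backup/my-backup-logs.gz
	fmt.Printf("%s/restore-%s-logs.gz\n", directory, "nightly-restore")     // my-backup/restore-nightly-restore-logs.gz
	fmt.Printf("%s/restore-%s-results.gz\n", directory, "nightly-restore")  // my-backup/restore-nightly-restore-results.gz
}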

View File

@@ -270,66 +270,56 @@ func TestCreateSignedURL(t *testing.T) {
name string
targetKind api.DownloadTargetKind
targetName string
directory string
expectedKey string
}{
{
name: "backup contents",
targetKind: api.DownloadTargetKindBackupContents,
targetName: "my-backup",
directory: "my-backup",
expectedKey: "my-backup/my-backup.tar.gz",
},
{
name: "backup log",
targetKind: api.DownloadTargetKindBackupLog,
targetName: "my-backup",
directory: "my-backup",
expectedKey: "my-backup/my-backup-logs.gz",
},
{
name: "scheduled backup contents",
targetKind: api.DownloadTargetKindBackupContents,
targetName: "my-backup-20170913154901",
directory: "my-backup-20170913154901",
expectedKey: "my-backup-20170913154901/my-backup-20170913154901.tar.gz",
},
{
name: "scheduled backup log",
targetKind: api.DownloadTargetKindBackupLog,
targetName: "my-backup-20170913154901",
directory: "my-backup-20170913154901",
expectedKey: "my-backup-20170913154901/my-backup-20170913154901-logs.gz",
},
{
name: "restore log - backup has no dash",
name: "restore log",
targetKind: api.DownloadTargetKindRestoreLog,
targetName: "b-20170913154901",
directory: "b",
expectedKey: "b/restore-b-20170913154901-logs.gz",
},
{
name: "restore log - backup has 1 dash",
targetKind: api.DownloadTargetKindRestoreLog,
targetName: "b-cool-20170913154901",
expectedKey: "b-cool/restore-b-cool-20170913154901-logs.gz",
},
{
name: "restore log - backup has multiple dashes (e.g. restore of scheduled backup)",
targetKind: api.DownloadTargetKindRestoreLog,
targetName: "b-cool-20170913154901-20170913154902",
expectedKey: "b-cool-20170913154901/restore-b-cool-20170913154901-20170913154902-logs.gz",
},
{
name: "restore results - backup has no dash",
name: "restore results",
targetKind: api.DownloadTargetKindRestoreResults,
targetName: "b-20170913154901",
directory: "b",
expectedKey: "b/restore-b-20170913154901-results.gz",
},
{
name: "restore results - backup has 1 dash",
targetKind: api.DownloadTargetKindRestoreResults,
targetName: "b-cool-20170913154901",
expectedKey: "b-cool/restore-b-cool-20170913154901-results.gz",
},
{
name: "restore results - backup has multiple dashes (e.g. restore of scheduled backup)",
targetKind: api.DownloadTargetKindRestoreResults,
targetName: "b-cool-20170913154901-20170913154902",
directory: "b-cool-20170913154901",
expectedKey: "b-cool-20170913154901/restore-b-cool-20170913154901-20170913154902-results.gz",
},
}
@@ -347,7 +337,7 @@ func TestCreateSignedURL(t *testing.T) {
Name: test.targetName,
}
objectStorage.On("CreateSignedURL", "bucket", test.expectedKey, time.Duration(0)).Return("url", nil)
url, err := backupService.CreateSignedURL(target, "bucket", 0)
url, err := backupService.CreateSignedURL(target, "bucket", test.directory, 0)
require.NoError(t, err)
assert.Equal(t, "url", url)
objectStorage.AssertExpectations(t)

View File

@@ -577,6 +577,7 @@ func (s *server) runControllers(config *api.Config) error {
downloadRequestController := controller.NewDownloadRequestController(
s.arkClient.ArkV1(),
s.sharedInformerFactory.Ark().V1().DownloadRequests(),
s.sharedInformerFactory.Ark().V1().Restores(),
s.backupService,
config.BackupStorageProvider.Bucket,
s.logger,

View File

@@ -47,6 +47,8 @@ type downloadRequestController struct {
downloadRequestClient arkv1client.DownloadRequestsGetter
downloadRequestLister listers.DownloadRequestLister
downloadRequestListerSynced cache.InformerSynced
restoreLister listers.RestoreLister
restoreListerSynced cache.InformerSynced
backupService cloudprovider.BackupService
bucket string
syncHandler func(key string) error
@@ -59,6 +61,7 @@ type downloadRequestController struct {
func NewDownloadRequestController(
downloadRequestClient arkv1client.DownloadRequestsGetter,
downloadRequestInformer informers.DownloadRequestInformer,
restoreInformer informers.RestoreInformer,
backupService cloudprovider.BackupService,
bucket string,
logger logrus.FieldLogger,
@@ -67,6 +70,8 @@ func NewDownloadRequestController(
downloadRequestClient: downloadRequestClient,
downloadRequestLister: downloadRequestInformer.Lister(),
downloadRequestListerSynced: downloadRequestInformer.Informer().HasSynced,
restoreLister: restoreInformer.Lister(),
restoreListerSynced: restoreInformer.Informer().HasSynced,
backupService: backupService,
bucket: bucket,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "downloadrequest"),
@@ -118,7 +123,7 @@ func (c *downloadRequestController) Run(ctx context.Context, numWorkers int) err
defer c.logger.Info("Shutting down DownloadRequestController")
c.logger.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), c.downloadRequestListerSynced) {
if !cache.WaitForCacheSync(ctx.Done(), c.downloadRequestListerSynced, c.restoreListerSynced) {
return errors.New("timed out waiting for caches to sync")
}
c.logger.Info("Caches are synced")
@@ -214,8 +219,24 @@ const signedURLTTL = 10 * time.Minute
func (c *downloadRequestController) generatePreSignedURL(downloadRequest *v1.DownloadRequest) error {
update := downloadRequest.DeepCopy()
var err error
update.Status.DownloadURL, err = c.backupService.CreateSignedURL(downloadRequest.Spec.Target, c.bucket, signedURLTTL)
var (
directory string
err error
)
switch downloadRequest.Spec.Target.Kind {
case v1.DownloadTargetKindRestoreLog, v1.DownloadTargetKindRestoreResults:
restore, err := c.restoreLister.Restores(downloadRequest.Namespace).Get(downloadRequest.Spec.Target.Name)
if err != nil {
return errors.Wrap(err, "error getting Restore")
}
directory = restore.Spec.BackupName
default:
directory = downloadRequest.Spec.Target.Name
}
update.Status.DownloadURL, err = c.backupService.CreateSignedURL(downloadRequest.Spec.Target, c.bucket, directory, signedURLTTL)
if err != nil {
return err
}
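
The effect of the switch above, reduced to a standalone sketch: restore-related targets take their directory from the backup name recorded on the Restore, everything else keeps using the target name. The kind strings and names here are simplified placeholders, not the real API constants:

package main

import "fmt"

// resolveDirectory is a simplified stand-in for the switch in generatePreSignedURL:
// for restore logs/results the directory comes from the Restore's Spec.BackupName,
// otherwise it is the download target's own name.
func resolveDirectory(targetKind, targetName, restoreBackupName string) string {
	switch targetKind {
	case "RestoreLog", "RestoreResults":
		return restoreBackupName
	default:
		return targetName
	}
}

func main() {
	// Backup log: the target name is the backup, so it is also the directory.
	fmt.Println(resolveDirectory("BackupLog", "backup1", "")) // backup1
	// Restore log: the restore is named "my-restore" but was created from
	// "nightly-backup", so the directory comes from the Restore spec.
	fmt.Println(resolveDirectory("RestoreLog", "my-restore", "nightly-backup")) // nightly-backup
}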

View File

@@ -41,7 +41,9 @@ func TestProcessDownloadRequest(t *testing.T) {
phase v1.DownloadRequestPhase
targetKind v1.DownloadTargetKind
targetName string
restore *v1.Restore
expectedError string
expectedDir string
expectedPhase v1.DownloadRequestPhase
expectedURL string
}{
@@ -60,6 +62,7 @@ func TestProcessDownloadRequest(t *testing.T) {
phase: "",
targetKind: v1.DownloadTargetKindBackupLog,
targetName: "backup1",
expectedDir: "backup1",
expectedPhase: v1.DownloadRequestPhaseProcessed,
expectedURL: "signedURL",
},
@@ -69,6 +72,7 @@ func TestProcessDownloadRequest(t *testing.T) {
phase: v1.DownloadRequestPhaseNew,
targetKind: v1.DownloadTargetKindBackupLog,
targetName: "backup1",
expectedDir: "backup1",
expectedPhase: v1.DownloadRequestPhaseProcessed,
expectedURL: "signedURL",
},
@@ -78,6 +82,8 @@ func TestProcessDownloadRequest(t *testing.T) {
phase: "",
targetKind: v1.DownloadTargetKindRestoreLog,
targetName: "backup1-20170912150214",
restore: arktest.NewTestRestore(v1.DefaultNamespace, "backup1-20170912150214", v1.RestorePhaseCompleted).WithBackup("backup1").Restore,
expectedDir: "backup1",
expectedPhase: v1.DownloadRequestPhaseProcessed,
expectedURL: "signedURL",
},
@@ -87,6 +93,8 @@ func TestProcessDownloadRequest(t *testing.T) {
phase: v1.DownloadRequestPhaseNew,
targetKind: v1.DownloadTargetKindRestoreLog,
targetName: "backup1-20170912150214",
restore: arktest.NewTestRestore(v1.DefaultNamespace, "backup1-20170912150214", v1.RestorePhaseCompleted).WithBackup("backup1").Restore,
expectedDir: "backup1",
expectedPhase: v1.DownloadRequestPhaseProcessed,
expectedURL: "signedURL",
},
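
The cases above still use restores named after their backups, where the old dash-splitting happened to produce the same directory. A hypothetical extra case (not part of this PR) illustrates the situation the Restore lookup handles, since expectedDir is taken from WithBackup rather than from the restore's name:

{
	name:          "restore log - restore name unrelated to backup name",
	phase:         v1.DownloadRequestPhaseNew,
	targetKind:    v1.DownloadTargetKindRestoreLog,
	targetName:    "my-restore",
	restore:       arktest.NewTestRestore(v1.DefaultNamespace, "my-restore", v1.RestorePhaseCompleted).WithBackup("backup1").Restore,
	expectedDir:   "backup1",
	expectedPhase: v1.DownloadRequestPhaseProcessed,
	expectedURL:   "signedURL",
},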
@@ -98,6 +106,7 @@ func TestProcessDownloadRequest(t *testing.T) {
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
downloadRequestsInformer = sharedInformers.Ark().V1().DownloadRequests()
restoresInformer = sharedInformers.Ark().V1().Restores()
backupService = &arktest.BackupService{}
logger = arktest.NewLogger()
)
@@ -106,6 +115,7 @@ func TestProcessDownloadRequest(t *testing.T) {
c := NewDownloadRequestController(
client.ArkV1(),
downloadRequestsInformer,
restoresInformer,
backupService,
"bucket",
logger,
@@ -130,7 +140,11 @@ func TestProcessDownloadRequest(t *testing.T) {
}
downloadRequestsInformer.Informer().GetStore().Add(downloadRequest)
backupService.On("CreateSignedURL", target, "bucket", 10*time.Minute).Return("signedURL", nil)
if tc.restore != nil {
restoresInformer.Informer().GetStore().Add(tc.restore)
}
backupService.On("CreateSignedURL", target, "bucket", tc.expectedDir, 10*time.Minute).Return("signedURL", nil)
}
// method under test

View File

@@ -28,19 +28,19 @@ type BackupService struct {
}
// CreateSignedURL provides a mock function with given fields: target, bucket, ttl
func (_m *BackupService) CreateSignedURL(target v1.DownloadTarget, bucket string, ttl time.Duration) (string, error) {
ret := _m.Called(target, bucket, ttl)
func (_m *BackupService) CreateSignedURL(target v1.DownloadTarget, bucket, directory string, ttl time.Duration) (string, error) {
ret := _m.Called(target, bucket, directory, ttl)
var r0 string
if rf, ok := ret.Get(0).(func(v1.DownloadTarget, string, time.Duration) string); ok {
r0 = rf(target, bucket, ttl)
if rf, ok := ret.Get(0).(func(v1.DownloadTarget, string, string, time.Duration) string); ok {
r0 = rf(target, bucket, directory, ttl)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(v1.DownloadTarget, string, time.Duration) error); ok {
r1 = rf(target, bucket, ttl)
if rf, ok := ret.Get(1).(func(v1.DownloadTarget, string, string, time.Duration) error); ok {
r1 = rf(target, bucket, directory, ttl)
} else {
r1 = ret.Error(1)
}