Add per-restore logs

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
pull/79/head
Andy Goldstein 2017-09-12 15:54:08 -04:00
parent d0b7880881
commit 273b563c86
13 changed files with 481 additions and 173 deletions

View File

@ -30,6 +30,7 @@ type DownloadTargetKind string
const (
DownloadTargetKindBackupLog DownloadTargetKind = "BackupLog"
DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents"
DownloadTargetKindRestoreLog DownloadTargetKind = "RestoreLog"
)
// DownloadTarget is the specification for what kind of file to download, and the name of the

View File

@ -21,6 +21,7 @@ import (
"fmt"
"io"
"io/ioutil"
"strings"
"time"
"github.com/golang/glog"
@ -51,9 +52,12 @@ type BackupService interface {
// GetBackup gets the specified api.Backup from the given bucket in object storage.
GetBackup(bucket, name string) (*api.Backup, error)
// CreateBackupLogSignedURL creates a pre-signed URL that can be used to download a backup's log
// file from object storage. The URL expires after ttl.
CreateBackupSignedURL(backupType api.DownloadTargetKind, bucket, backupName string, ttl time.Duration) (string, error)
// CreateSignedURL creates a pre-signed URL that can be used to download a file from object
// storage. The URL expires after ttl.
CreateSignedURL(target api.DownloadTarget, bucket string, ttl time.Duration) (string, error)
// UploadRestoreLog uploads the restore's log file to object storage.
UploadRestoreLog(bucket, backup, restore string, log io.ReadSeeker) error
}
// BackupGetter knows how to list backups in object storage.
@ -63,9 +67,10 @@ type BackupGetter interface {
}
const (
	// metadataFileFormatString is the object-storage key pattern for a
	// backup's ark-backup.json metadata file.
	metadataFileFormatString = "%s/ark-backup.json"
	// backupFileFormatString is the key pattern for a backup's contents tarball.
	backupFileFormatString = "%s/%s-data.tar.gz"
	// backukpLogFileFormatString is the key pattern for a backup's log file.
	// TODO: the name is misspelled ("backukp"); rename to
	// backupLogFileFormatString together with all references.
	backukpLogFileFormatString = "%s/%s-logs.gz"
	// restoreLogFileFormatString is the key pattern for a restore's log file,
	// stored under its backup's directory.
	restoreLogFileFormatString = "%s/restore-%s-logs.gz"
)
func getMetadataKey(backup string) string {
@ -77,7 +82,11 @@ func getBackupContentsKey(backup string) string {
}
func getBackupLogKey(backup string) string {
return fmt.Sprintf(logFileFormatString, backup, backup)
return fmt.Sprintf(backukpLogFileFormatString, backup, backup)
}
// getRestoreLogKey returns the object-storage key under which the log file
// for the named restore of the named backup is stored.
func getRestoreLogKey(backup, restore string) string {
	key := fmt.Sprintf(restoreLogFileFormatString, backup, restore)
	return key
}
type backupService struct {
@ -194,17 +203,26 @@ func (br *backupService) DeleteBackupDir(bucket, backupName string) error {
return errors.NewAggregate(errs)
}
func (br *backupService) CreateBackupSignedURL(backupType api.DownloadTargetKind, bucket, backupName string, ttl time.Duration) (string, error) {
switch backupType {
func (br *backupService) CreateSignedURL(target api.DownloadTarget, bucket string, ttl time.Duration) (string, error) {
switch target.Kind {
case api.DownloadTargetKindBackupContents:
return br.objectStorage.CreateSignedURL(bucket, getBackupContentsKey(backupName), ttl)
return br.objectStorage.CreateSignedURL(bucket, getBackupContentsKey(target.Name), ttl)
case api.DownloadTargetKindBackupLog:
return br.objectStorage.CreateSignedURL(bucket, getBackupLogKey(backupName), ttl)
return br.objectStorage.CreateSignedURL(bucket, getBackupLogKey(target.Name), ttl)
case api.DownloadTargetKindRestoreLog:
// restore name is formatted as <backup name>-<timestamp>
backup := strings.Split(target.Name, "-")[0]
return br.objectStorage.CreateSignedURL(bucket, getRestoreLogKey(backup, target.Name), ttl)
default:
return "", fmt.Errorf("unsupported download target kind %q", backupType)
return "", fmt.Errorf("unsupported download target kind %q", target.Kind)
}
}
// UploadRestoreLog stores the restore's log file in object storage, keyed by
// the backup and restore names.
func (br *backupService) UploadRestoreLog(bucket, backup, restore string, log io.ReadSeeker) error {
	return br.objectStorage.PutObject(bucket, getRestoreLogKey(backup, restore), log)
}
// cachedBackupService wraps a real backup service with a cache for getting cloud backups.
type cachedBackupService struct {
BackupService

View File

@ -0,0 +1,55 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
"errors"
"os"
"time"
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/downloadrequest"
)
// NewLogsCommand returns a cobra command that streams the log file for a
// named restore to stdout via a download request.
func NewLogsCommand(f client.Factory) *cobra.Command {
	timeout := time.Minute

	command := &cobra.Command{
		Use:   "logs RESTORE",
		Short: "Get restore logs",
		Run: func(c *cobra.Command, args []string) {
			// Exactly one positional argument — the restore name — is required.
			if len(args) != 1 {
				cmd.CheckError(errors.New("restore name is required"))
			}

			arkClient, err := f.Client()
			cmd.CheckError(err)

			err = downloadrequest.Stream(arkClient.ArkV1(), args[0], v1.DownloadTargetKindRestoreLog, os.Stdout, timeout)
			cmd.CheckError(err)
		},
	}

	command.Flags().DurationVar(&timeout, "timeout", timeout, "how long to wait to receive logs")

	return command
}

View File

@ -32,6 +32,7 @@ func NewCommand(f client.Factory) *cobra.Command {
c.AddCommand(
NewCreateCommand(f),
NewGetCommand(f),
NewLogsCommand(f),
// Will implement later
// NewDescribeCommand(f),
NewDeleteCommand(f),

View File

@ -492,7 +492,6 @@ func (s *server) runControllers(config *api.Config) error {
downloadRequestController := controller.NewDownloadRequestController(
s.arkClient.ArkV1(),
s.sharedInformerFactory.Ark().V1().DownloadRequests(),
s.sharedInformerFactory.Ark().V1().Backups(),
s.backupService,
config.BackupStorageProvider.Bucket,
)

View File

@ -46,8 +46,6 @@ type downloadRequestController struct {
downloadRequestClient arkv1client.DownloadRequestsGetter
downloadRequestLister listers.DownloadRequestLister
downloadRequestListerSynced cache.InformerSynced
backupLister listers.BackupLister
backupListerSynced cache.InformerSynced
backupService cloudprovider.BackupService
bucket string
@ -62,7 +60,6 @@ type downloadRequestController struct {
func NewDownloadRequestController(
downloadRequestClient arkv1client.DownloadRequestsGetter,
downloadRequestInformer informers.DownloadRequestInformer,
backupInformer informers.BackupInformer,
backupService cloudprovider.BackupService,
bucket string,
) Interface {
@ -70,13 +67,11 @@ func NewDownloadRequestController(
downloadRequestClient: downloadRequestClient,
downloadRequestLister: downloadRequestInformer.Lister(),
downloadRequestListerSynced: downloadRequestInformer.Informer().HasSynced,
backupLister: backupInformer.Lister(),
backupListerSynced: backupInformer.Informer().HasSynced,
backupService: backupService,
bucket: bucket,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "backup"),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "downloadrequest"),
clock: &clock.RealClock{},
}
@ -122,7 +117,7 @@ func (c *downloadRequestController) Run(ctx context.Context, numWorkers int) err
defer glog.Infof("Shutting down DownloadRequestController")
glog.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), c.downloadRequestListerSynced, c.backupListerSynced) {
if !cache.WaitForCacheSync(ctx.Done(), c.downloadRequestListerSynced) {
return errors.New("timed out waiting for caches to sync")
}
glog.Info("Caches are synced")
@ -215,25 +210,21 @@ const signedURLTTL = 10 * time.Minute
// generatePreSignedURL generates a pre-signed URL for downloadRequest, changes the phase to
// Processed, and persists the changes to storage.
func (c *downloadRequestController) generatePreSignedURL(downloadRequest *v1.DownloadRequest) error {
switch downloadRequest.Spec.Target.Kind {
case v1.DownloadTargetKindBackupLog, v1.DownloadTargetKindBackupContents:
update, err := cloneDownloadRequest(downloadRequest)
if err != nil {
return err
}
update.Status.DownloadURL, err = c.backupService.CreateBackupSignedURL(downloadRequest.Spec.Target.Kind, c.bucket, update.Spec.Target.Name, signedURLTTL)
if err != nil {
return err
}
update.Status.Phase = v1.DownloadRequestPhaseProcessed
update.Status.Expiration = metav1.NewTime(c.clock.Now().Add(signedURLTTL))
_, err = c.downloadRequestClient.DownloadRequests(update.Namespace).Update(update)
update, err := cloneDownloadRequest(downloadRequest)
if err != nil {
return err
}
return fmt.Errorf("unsupported download target kind %q", downloadRequest.Spec.Target.Kind)
update.Status.DownloadURL, err = c.backupService.CreateSignedURL(downloadRequest.Spec.Target, c.bucket, signedURLTTL)
if err != nil {
return err
}
update.Status.Phase = v1.DownloadRequestPhaseProcessed
update.Status.Expiration = metav1.NewTime(c.clock.Now().Add(signedURLTTL))
_, err = c.downloadRequestClient.DownloadRequests(update.Namespace).Update(update)
return err
}

View File

@ -0,0 +1,161 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
core "k8s.io/client-go/testing"
"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/generated/clientset/fake"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
"github.com/heptio/ark/pkg/util/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestProcessDownloadRequest exercises downloadRequestController.processDownloadRequest
// with a table of workqueue keys, phases, and download targets, verifying the
// phase and signed URL recorded on the updated DownloadRequest.
func TestProcessDownloadRequest(t *testing.T) {
	tests := []struct {
		name          string
		key           string // workqueue key, "namespace/name"
		phase         v1.DownloadRequestPhase
		targetKind    v1.DownloadTargetKind
		targetName    string
		expectedError string
		expectedPhase v1.DownloadRequestPhase
		expectedURL   string
	}{
		{
			name: "empty key",
			key:  "",
		},
		{
			name:          "bad key format",
			key:           "a/b/c",
			expectedError: `unexpected key format: "a/b/c"`,
		},
		{
			name:          "backup log request with phase '' gets a url",
			key:           "heptio-ark/dr1",
			phase:         "",
			targetKind:    v1.DownloadTargetKindBackupLog,
			targetName:    "backup1",
			expectedPhase: v1.DownloadRequestPhaseProcessed,
			expectedURL:   "signedURL",
		},
		{
			name:          "backup log request with phase 'New' gets a url",
			key:           "heptio-ark/dr1",
			phase:         v1.DownloadRequestPhaseNew,
			targetKind:    v1.DownloadTargetKindBackupLog,
			targetName:    "backup1",
			expectedPhase: v1.DownloadRequestPhaseProcessed,
			expectedURL:   "signedURL",
		},
		{
			name:          "restore log request with phase '' gets a url",
			key:           "heptio-ark/dr1",
			phase:         "",
			targetKind:    v1.DownloadTargetKindRestoreLog,
			targetName:    "backup1-20170912150214",
			expectedPhase: v1.DownloadRequestPhaseProcessed,
			expectedURL:   "signedURL",
		},
		{
			name:          "restore log request with phase New gets a url",
			key:           "heptio-ark/dr1",
			phase:         v1.DownloadRequestPhaseNew,
			targetKind:    v1.DownloadTargetKindRestoreLog,
			targetName:    "backup1-20170912150214",
			expectedPhase: v1.DownloadRequestPhaseProcessed,
			expectedURL:   "signedURL",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			client := fake.NewSimpleClientset()
			sharedInformers := informers.NewSharedInformerFactory(client, 0)
			downloadRequestsInformer := sharedInformers.Ark().V1().DownloadRequests()

			// Mocked backup service; AssertExpectations fails the test if a
			// registered On(...) expectation was never called.
			backupService := &test.BackupService{}
			defer backupService.AssertExpectations(t)

			c := NewDownloadRequestController(
				client.ArkV1(),
				downloadRequestsInformer,
				backupService,
				"bucket",
			).(*downloadRequestController)

			// Only seed the informer store and the mock for cases expected to
			// reach the signed-URL path.
			if tc.expectedPhase == v1.DownloadRequestPhaseProcessed {
				target := v1.DownloadTarget{
					Kind: tc.targetKind,
					Name: tc.targetName,
				}

				downloadRequestsInformer.Informer().GetStore().Add(
					&v1.DownloadRequest{
						ObjectMeta: metav1.ObjectMeta{
							Namespace: v1.DefaultNamespace,
							Name:      "dr1",
						},
						Spec: v1.DownloadRequestSpec{
							Target: target,
						},
					},
				)

				backupService.On("CreateSignedURL", target, "bucket", 10*time.Minute).Return("signedURL", nil)
			}

			// Capture the DownloadRequest passed to Update so its status can
			// be inspected after the call.
			var updatedRequest *v1.DownloadRequest
			client.PrependReactor("update", "downloadrequests", func(action core.Action) (bool, runtime.Object, error) {
				obj := action.(core.UpdateAction).GetObject()
				r, ok := obj.(*v1.DownloadRequest)
				require.True(t, ok)

				updatedRequest = r

				return true, obj, nil
			})

			// method under test
			err := c.processDownloadRequest(tc.key)

			if tc.expectedError != "" {
				assert.EqualError(t, err, tc.expectedError)
				return
			}
			require.NoError(t, err)

			// Zero values indicate no update was issued (e.g. empty key).
			var (
				updatedPhase v1.DownloadRequestPhase
				updatedURL   string
			)
			if updatedRequest != nil {
				updatedPhase = updatedRequest.Status.Phase
				updatedURL = updatedRequest.Status.DownloadURL
			}

			assert.Equal(t, tc.expectedPhase, updatedPhase)
			assert.Equal(t, tc.expectedURL, updatedURL)
		})
	}
}

View File

@ -333,28 +333,57 @@ func (controller *restoreController) runRestore(restore *api.Restore, bucket str
backup, err := controller.fetchBackup(bucket, restore.Spec.BackupName)
if err != nil {
glog.Errorf("error getting backup: %v", err)
errors.Cluster = append(errors.Ark, err.Error())
errors.Ark = append(errors.Ark, err.Error())
return
}
tmpFile, err := downloadToTempFile(restore.Spec.BackupName, controller.backupService, bucket)
if err != nil {
glog.Errorf("error downloading backup: %v", err)
errors.Cluster = append(errors.Ark, err.Error())
errors.Ark = append(errors.Ark, err.Error())
return
}
logFile, err := ioutil.TempFile("", "")
if err != nil {
glog.Errorf("error creating log temp file: %v", err)
errors.Ark = append(errors.Ark, err.Error())
return
}
defer func() {
if err := tmpFile.Close(); err != nil {
errors.Cluster = append(errors.Ark, err.Error())
glog.Errorf("error closing %q: %v", tmpFile.Name(), err)
}
if err := os.Remove(tmpFile.Name()); err != nil {
errors.Cluster = append(errors.Ark, err.Error())
glog.Errorf("error removing %q: %v", tmpFile.Name(), err)
}
if err := logFile.Close(); err != nil {
glog.Errorf("error closing %q: %v", logFile.Name(), err)
}
if err := os.Remove(logFile.Name()); err != nil {
glog.Errorf("error removing %q: %v", logFile.Name(), err)
}
}()
return controller.restorer.Restore(restore, backup, tmpFile)
warnings, errors = controller.restorer.Restore(restore, backup, tmpFile, logFile)
// Try to upload the log file. This is best-effort. If we fail, we'll add to the ark errors.
// Reset the offset to 0 for reading
if _, err = logFile.Seek(0, 0); err != nil {
errors.Ark = append(errors.Ark, fmt.Sprintf("error resetting log file offset to 0: %v", err))
return
}
if err := controller.backupService.UploadRestoreLog(bucket, restore.Spec.BackupName, restore.Name, logFile); err != nil {
errors.Ark = append(errors.Ark, fmt.Sprintf("error uploading log file to object storage: %v", err))
}
return
}
func downloadToTempFile(backupName string, backupService cloudprovider.BackupService, bucket string) (*os.File, error) {

View File

@ -19,7 +19,6 @@ package controller
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"testing"
@ -119,6 +118,7 @@ func TestProcessRestore(t *testing.T) {
expectedRestoreUpdates []*api.Restore
expectedRestorerCall *api.Restore
backupServiceGetBackupError error
uploadLogError error
}{
{
name: "invalid key returns error",
@ -187,7 +187,7 @@ func TestProcessRestore(t *testing.T) {
NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore,
NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseCompleted).
WithErrors(api.RestoreResult{
Cluster: []string{"no backup here"},
Ark: []string{"no backup here"},
}).
Restore,
},
@ -260,13 +260,15 @@ func TestProcessRestore(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fmt.Println(test.name)
var (
client = fake.NewSimpleClientset()
restorer = &fakeRestorer{}
sharedInformers = informers.NewSharedInformerFactory(client, 0)
backupSvc = &BackupService{}
)
client := fake.NewSimpleClientset()
restorer := &fakeRestorer{}
defer restorer.AssertExpectations(t)
sharedInformers := informers.NewSharedInformerFactory(client, 0)
backupSvc := &BackupService{}
defer backupSvc.AssertExpectations(t)
c := NewRestoreController(
sharedInformers.Ark().V1().Restores(),
@ -303,10 +305,14 @@ func TestProcessRestore(t *testing.T) {
if test.restorerError != nil {
errors.Namespaces = map[string][]string{"ns-1": {test.restorerError.Error()}}
}
if test.uploadLogError != nil {
errors.Ark = append(errors.Ark, "error uploading log file to object storage: "+test.uploadLogError.Error())
}
if test.expectedRestorerCall != nil {
downloadedBackup := ioutil.NopCloser(bytes.NewReader([]byte("hello world")))
backupSvc.On("DownloadBackup", mock.Anything, mock.Anything).Return(downloadedBackup, nil)
restorer.On("Restore", mock.Anything, mock.Anything, mock.Anything).Return(warnings, errors)
restorer.On("Restore", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(warnings, errors)
backupSvc.On("UploadRestoreLog", "bucket", test.restore.Spec.BackupName, test.restore.Name, mock.Anything).Return(test.uploadLogError)
}
var (
@ -379,8 +385,8 @@ type fakeRestorer struct {
calledWithArg api.Restore
}
func (r *fakeRestorer) Restore(restore *api.Restore, backup *api.Backup, backupReader io.Reader) (api.RestoreResult, api.RestoreResult) {
res := r.Called(restore, backup, backupReader)
func (r *fakeRestorer) Restore(restore *api.Restore, backup *api.Backup, backupReader io.Reader, logger io.Writer) (api.RestoreResult, api.RestoreResult) {
res := r.Called(restore, backup, backupReader, logger)
r.calledWithArg = *restore

View File

@ -26,8 +26,7 @@ import (
"path"
"path/filepath"
"sort"
"github.com/golang/glog"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -38,6 +37,7 @@ import (
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/pkg/api/v1"
"github.com/golang/glog"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
@ -51,7 +51,7 @@ import (
// Restorer knows how to restore a backup.
type Restorer interface {
// Restore restores the backup data from backupReader, returning warnings and errors.
Restore(restore *api.Restore, backup *api.Backup, backupReader io.Reader) (api.RestoreResult, api.RestoreResult)
Restore(restore *api.Restore, backup *api.Backup, backupReader io.Reader, logFile io.Writer) (api.RestoreResult, api.RestoreResult)
}
var _ Restorer = &kubernetesRestorer{}
@ -164,7 +164,7 @@ func NewKubernetesRestorer(
// Restore executes a restore into the target Kubernetes cluster according to the restore spec
// and using data from the provided backup/backup reader. Returns a warnings and errors RestoreResult,
// respectively, summarizing info about the restore.
func (kr *kubernetesRestorer) Restore(restore *api.Restore, backup *api.Backup, backupReader io.Reader) (api.RestoreResult, api.RestoreResult) {
func (kr *kubernetesRestorer) Restore(restore *api.Restore, backup *api.Backup, backupReader io.Reader, logFile io.Writer) (api.RestoreResult, api.RestoreResult) {
// metav1.LabelSelectorAsSelector converts a nil LabelSelector to a
// Nothing Selector, i.e. a selector that matches nothing. We want
// a selector that matches everything. This can be accomplished by
@ -198,42 +198,85 @@ func (kr *kubernetesRestorer) Restore(restore *api.Restore, backup *api.Backup,
return api.RestoreResult{}, api.RestoreResult{Ark: []string{err.Error()}}
}
dir, err := kr.unzipAndExtractBackup(backupReader)
gzippedLog := gzip.NewWriter(logFile)
defer gzippedLog.Close()
ctx := &context{
backup: backup,
backupReader: backupReader,
restore: restore,
prioritizedResources: prioritizedResources,
selector: selector,
logger: &logger{w: gzippedLog},
dynamicFactory: kr.dynamicFactory,
fileSystem: kr.fileSystem,
namespaceClient: kr.namespaceClient,
restorers: kr.restorers,
}
return ctx.execute()
}
type logger struct {
w io.Writer
}
func (l *logger) log(msg string, args ...interface{}) {
// TODO use a real logger that supports writing to files
now := time.Now().Format(time.RFC3339)
fmt.Fprintf(l.w, now+" "+msg+"\n", args...)
}
// context carries the full state of a single restore operation so the restore
// helper methods can share it without long parameter lists.
type context struct {
	backup               *api.Backup   // the backup being restored
	backupReader         io.Reader     // gzipped tarball of the backup contents
	restore              *api.Restore  // the restore spec driving this operation
	prioritizedResources []schema.GroupResource // order in which resources are restored
	selector             labels.Selector // label selector filtering which objects are restored
	logger               *logger       // per-restore log sink
	dynamicFactory       client.DynamicFactory
	fileSystem           FileSystem
	namespaceClient      corev1.NamespaceInterface
	restorers            map[schema.GroupResource]restorers.ResourceRestorer
}

// log forwards a printf-style message to this restore's logger.
func (ctx *context) log(msg string, args ...interface{}) {
	ctx.logger.log(msg, args...)
}
// execute runs the restore: it extracts the backup tarball to a local temp
// directory, restores from that directory, and removes the directory when
// done. Returns warnings and errors RestoreResults, respectively.
func (ctx *context) execute() (api.RestoreResult, api.RestoreResult) {
	ctx.log("Starting restore of backup %s", kube.NamespaceAndName(ctx.backup))

	dir, err := ctx.unzipAndExtractBackup(ctx.backupReader)
	if err != nil {
		ctx.log("error unzipping and extracting: %v", err)
		return api.RestoreResult{}, api.RestoreResult{Ark: []string{err.Error()}}
	}
	// Best-effort cleanup of the extracted backup contents.
	defer ctx.fileSystem.RemoveAll(dir)

	return ctx.restoreFromDir(dir)
}
// restoreFromDir executes a restore based on backup data contained within a local
// directory.
func (kr *kubernetesRestorer) restoreFromDir(
dir string,
restore *api.Restore,
backup *api.Backup,
prioritizedResources []schema.GroupResource,
selector labels.Selector,
) (api.RestoreResult, api.RestoreResult) {
func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreResult) {
warnings, errors := api.RestoreResult{}, api.RestoreResult{}
// cluster-scoped
clusterPath := path.Join(dir, api.ClusterScopedDir)
exists, err := kr.fileSystem.DirExists(clusterPath)
exists, err := ctx.fileSystem.DirExists(clusterPath)
if err != nil {
errors.Cluster = []string{err.Error()}
}
if exists {
w, e := kr.restoreNamespace(restore, "", clusterPath, prioritizedResources, selector, backup)
w, e := ctx.restoreNamespace("", clusterPath)
merge(&warnings, &w)
merge(&errors, &e)
}
// namespace-scoped
namespacesPath := path.Join(dir, api.NamespaceScopedDir)
exists, err = kr.fileSystem.DirExists(namespacesPath)
exists, err = ctx.fileSystem.DirExists(namespacesPath)
if err != nil {
addArkError(&errors, err)
return warnings, errors
@ -242,13 +285,13 @@ func (kr *kubernetesRestorer) restoreFromDir(
return warnings, errors
}
nses, err := kr.fileSystem.ReadDir(namespacesPath)
nses, err := ctx.fileSystem.ReadDir(namespacesPath)
if err != nil {
addArkError(&errors, err)
return warnings, errors
}
namespaceFilter := collections.NewIncludesExcludes().Includes(restore.Spec.IncludedNamespaces...).Excludes(restore.Spec.ExcludedNamespaces...)
namespaceFilter := collections.NewIncludesExcludes().Includes(ctx.restore.Spec.IncludedNamespaces...).Excludes(ctx.restore.Spec.ExcludedNamespaces...)
for _, ns := range nses {
if !ns.IsDir() {
continue
@ -256,11 +299,11 @@ func (kr *kubernetesRestorer) restoreFromDir(
nsPath := path.Join(namespacesPath, ns.Name())
if !namespaceFilter.ShouldInclude(ns.Name()) {
glog.Infof("Skipping namespace %s", ns.Name())
ctx.log("Skipping namespace %s", ns.Name())
continue
}
w, e := kr.restoreNamespace(restore, ns.Name(), nsPath, prioritizedResources, selector, backup)
w, e := ctx.restoreNamespace(ns.Name(), nsPath)
merge(&warnings, &w)
merge(&errors, &e)
}
@ -302,23 +345,16 @@ func addToResult(r *api.RestoreResult, ns string, e error) {
// restoreNamespace restores the resources from a specified namespace directory in the backup,
// or from the cluster-scoped directory if no namespace is specified.
func (kr *kubernetesRestorer) restoreNamespace(
restore *api.Restore,
nsName string,
nsPath string,
prioritizedResources []schema.GroupResource,
labelSelector labels.Selector,
backup *api.Backup,
) (api.RestoreResult, api.RestoreResult) {
func (ctx *context) restoreNamespace(nsName, nsPath string) (api.RestoreResult, api.RestoreResult) {
warnings, errors := api.RestoreResult{}, api.RestoreResult{}
if nsName == "" {
glog.Info("Restoring cluster-scoped resources")
ctx.log("Restoring cluster-scoped resources")
} else {
glog.Infof("Restoring namespace %s", nsName)
ctx.log("Restoring namespace %s", nsName)
}
resourceDirs, err := kr.fileSystem.ReadDir(nsPath)
resourceDirs, err := ctx.fileSystem.ReadDir(nsPath)
if err != nil {
addToResult(&errors, nsName, err)
return warnings, errors
@ -333,7 +369,7 @@ func (kr *kubernetesRestorer) restoreNamespace(
if nsName != "" {
// fetch mapped NS name
if target, ok := restore.Spec.NamespaceMapping[nsName]; ok {
if target, ok := ctx.restore.Spec.NamespaceMapping[nsName]; ok {
nsName = target
}
@ -344,13 +380,13 @@ func (kr *kubernetesRestorer) restoreNamespace(
},
}
if _, err := kube.EnsureNamespaceExists(ns, kr.namespaceClient); err != nil {
if _, err := kube.EnsureNamespaceExists(ns, ctx.namespaceClient); err != nil {
addArkError(&errors, err)
return warnings, errors
}
}
for _, resource := range prioritizedResources {
for _, resource := range ctx.prioritizedResources {
rscDir := resourceDirsMap[resource.String()]
if rscDir == nil {
continue
@ -358,7 +394,7 @@ func (kr *kubernetesRestorer) restoreNamespace(
resourcePath := path.Join(nsPath, rscDir.Name())
w, e := kr.restoreResourceForNamespace(nsName, resourcePath, labelSelector, restore, backup)
w, e := ctx.restoreResourceForNamespace(nsName, resourcePath)
merge(&warnings, &w)
merge(&errors, &e)
}
@ -368,19 +404,13 @@ func (kr *kubernetesRestorer) restoreNamespace(
// restoreResourceForNamespace restores the specified resource type for the specified
// namespace (or blank for cluster-scoped resources).
func (kr *kubernetesRestorer) restoreResourceForNamespace(
namespace string,
resourcePath string,
labelSelector labels.Selector,
restore *api.Restore,
backup *api.Backup,
) (api.RestoreResult, api.RestoreResult) {
func (ctx *context) restoreResourceForNamespace(namespace string, resourcePath string) (api.RestoreResult, api.RestoreResult) {
warnings, errors := api.RestoreResult{}, api.RestoreResult{}
resource := path.Base(resourcePath)
glog.Infof("Restoring resource %v into namespace %v\n", resource, namespace)
ctx.log("Restoring resource %v into namespace %v", resource, namespace)
files, err := kr.fileSystem.ReadDir(resourcePath)
files, err := ctx.fileSystem.ReadDir(resourcePath)
if err != nil {
addToResult(&errors, namespace, fmt.Errorf("error reading %q resource directory: %v", resource, err))
return warnings, errors
@ -398,20 +428,20 @@ func (kr *kubernetesRestorer) restoreResourceForNamespace(
for _, file := range files {
fullPath := filepath.Join(resourcePath, file.Name())
obj, err := kr.unmarshal(fullPath)
obj, err := ctx.unmarshal(fullPath)
if err != nil {
addToResult(&errors, namespace, fmt.Errorf("error decoding %q: %v", fullPath, err))
continue
}
if !labelSelector.Matches(labels.Set(obj.GetLabels())) {
if !ctx.selector.Matches(labels.Set(obj.GetLabels())) {
continue
}
if restorer == nil {
// initialize client & restorer for this Resource. we need
// metadata from an object to do this.
glog.Infof("Getting client for %v", obj.GroupVersionKind())
ctx.log("Getting client for %v", obj.GroupVersionKind())
resource := metav1.APIResource{
Namespaced: len(namespace) > 0,
@ -419,18 +449,18 @@ func (kr *kubernetesRestorer) restoreResourceForNamespace(
}
var err error
resourceClient, err = kr.dynamicFactory.ClientForGroupVersionKind(obj.GroupVersionKind(), resource, namespace)
resourceClient, err = ctx.dynamicFactory.ClientForGroupVersionKind(obj.GroupVersionKind(), resource, namespace)
if err != nil {
addArkError(&errors, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, groupResource, err))
return warnings, errors
}
restorer = kr.restorers[groupResource]
restorer = ctx.restorers[groupResource]
if restorer == nil {
glog.Infof("Using default restorer for %v", groupResource)
ctx.log("Using default restorer for %v", groupResource)
restorer = restorers.NewBasicRestorer(true)
} else {
glog.Infof("Using custom restorer for %v", groupResource)
ctx.log("Using custom restorer for %v", groupResource)
}
if restorer.Wait() {
@ -446,16 +476,16 @@ func (kr *kubernetesRestorer) restoreResourceForNamespace(
}
}
if !restorer.Handles(obj, restore) {
if !restorer.Handles(obj, ctx.restore) {
continue
}
if hasControllerOwner(obj.GetOwnerReferences()) {
glog.V(4).Infof("%s/%s has a controller owner - skipping", obj.GetNamespace(), obj.GetName())
ctx.log("%s/%s has a controller owner - skipping", obj.GetNamespace(), obj.GetName())
continue
}
preparedObj, warning, err := restorer.Prepare(obj, restore, backup)
preparedObj, warning, err := restorer.Prepare(obj, ctx.restore, ctx.backup)
if warning != nil {
addToResult(&warnings, namespace, fmt.Errorf("warning preparing %s: %v", fullPath, warning))
}
@ -474,16 +504,16 @@ func (kr *kubernetesRestorer) restoreResourceForNamespace(
unstructuredObj.SetNamespace(namespace)
// add an ark-restore label to each resource for easy ID
addLabel(unstructuredObj, api.RestoreLabelKey, restore.Name)
addLabel(unstructuredObj, api.RestoreLabelKey, ctx.restore.Name)
glog.Infof("Restoring %s: %v", obj.GroupVersionKind().Kind, unstructuredObj.GetName())
ctx.log("Restoring %s: %v", obj.GroupVersionKind().Kind, unstructuredObj.GetName())
_, err = resourceClient.Create(unstructuredObj)
if apierrors.IsAlreadyExists(err) {
addToResult(&warnings, namespace, err)
continue
}
if err != nil {
glog.Errorf("error restoring %s: %v", unstructuredObj.GetName(), err)
ctx.log("error restoring %s: %v", unstructuredObj.GetName(), err)
addToResult(&errors, namespace, fmt.Errorf("error restoring %s: %v", fullPath, err))
continue
}
@ -529,10 +559,10 @@ func hasControllerOwner(refs []metav1.OwnerReference) bool {
// unmarshal reads the specified file, unmarshals the JSON contained within it
// and returns an Unstructured object.
func (kr *kubernetesRestorer) unmarshal(filePath string) (*unstructured.Unstructured, error) {
func (ctx *context) unmarshal(filePath string) (*unstructured.Unstructured, error) {
var obj unstructured.Unstructured
bytes, err := kr.fileSystem.ReadFile(filePath)
bytes, err := ctx.fileSystem.ReadFile(filePath)
if err != nil {
return nil, err
}
@ -546,23 +576,23 @@ func (kr *kubernetesRestorer) unmarshal(filePath string) (*unstructured.Unstruct
}
// unzipAndExtractBackup extracts a reader on a gzipped tarball to a local temp directory
func (kr *kubernetesRestorer) unzipAndExtractBackup(src io.Reader) (string, error) {
func (ctx *context) unzipAndExtractBackup(src io.Reader) (string, error) {
gzr, err := gzip.NewReader(src)
if err != nil {
glog.Errorf("error creating gzip reader: %v", err)
ctx.log("error creating gzip reader: %v", err)
return "", err
}
defer gzr.Close()
return kr.readBackup(tar.NewReader(gzr))
return ctx.readBackup(tar.NewReader(gzr))
}
// readBackup extracts a tar reader to a local directory/file tree within a
// temp directory.
func (kr *kubernetesRestorer) readBackup(tarRdr *tar.Reader) (string, error) {
dir, err := kr.fileSystem.TempDir("", "")
func (ctx *context) readBackup(tarRdr *tar.Reader) (string, error) {
dir, err := ctx.fileSystem.TempDir("", "")
if err != nil {
glog.Errorf("error creating temp dir: %v", err)
ctx.log("error creating temp dir: %v", err)
return "", err
}
@ -570,11 +600,10 @@ func (kr *kubernetesRestorer) readBackup(tarRdr *tar.Reader) (string, error) {
header, err := tarRdr.Next()
if err == io.EOF {
glog.Infof("end of tar")
break
}
if err != nil {
glog.Errorf("error reading tar: %v", err)
ctx.log("error reading tar: %v", err)
return "", err
}
@ -582,29 +611,29 @@ func (kr *kubernetesRestorer) readBackup(tarRdr *tar.Reader) (string, error) {
switch header.Typeflag {
case tar.TypeDir:
err := kr.fileSystem.MkdirAll(target, header.FileInfo().Mode())
err := ctx.fileSystem.MkdirAll(target, header.FileInfo().Mode())
if err != nil {
glog.Errorf("mkdirall error: %v", err)
ctx.log("mkdirall error: %v", err)
return "", err
}
case tar.TypeReg:
// make sure we have the directory created
err := kr.fileSystem.MkdirAll(path.Dir(target), header.FileInfo().Mode())
err := ctx.fileSystem.MkdirAll(path.Dir(target), header.FileInfo().Mode())
if err != nil {
glog.Errorf("mkdirall error: %v", err)
ctx.log("mkdirall error: %v", err)
return "", err
}
// create the file
file, err := kr.fileSystem.Create(target)
file, err := ctx.fileSystem.Create(target)
if err != nil {
return "", err
}
defer file.Close()
if _, err := io.Copy(file, tarRdr); err != nil {
glog.Errorf("error copying: %v", err)
ctx.log("error copying: %v", err)
return "", err
}
}

View File

@ -19,6 +19,7 @@ package restore
import (
"encoding/json"
"io"
"io/ioutil"
"os"
"testing"
@ -170,18 +171,14 @@ func TestRestoreMethod(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
restorer := &kubernetesRestorer{
discoveryHelper: nil,
dynamicFactory: nil,
restorers: nil,
backupService: nil,
backupClient: nil,
namespaceClient: &fakeNamespaceClient{},
resourcePriorities: nil,
fileSystem: test.fileSystem,
ctx := &context{
restore: test.restore,
namespaceClient: &fakeNamespaceClient{},
fileSystem: test.fileSystem,
logger: &logger{w: ioutil.Discard},
}
warnings, errors := restorer.restoreFromDir(test.baseDir, test.restore, nil, nil, nil)
warnings, errors := ctx.restoreFromDir(test.baseDir)
assert.Empty(t, warnings.Ark)
assert.Empty(t, warnings.Cluster)
@ -266,18 +263,15 @@ func TestRestoreNamespace(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
restorer := &kubernetesRestorer{
discoveryHelper: nil,
dynamicFactory: nil,
restorers: nil,
backupService: nil,
backupClient: nil,
namespaceClient: &fakeNamespaceClient{},
resourcePriorities: nil,
fileSystem: test.fileSystem,
ctx := &context{
restore: test.restore,
namespaceClient: &fakeNamespaceClient{},
fileSystem: test.fileSystem,
prioritizedResources: test.prioritizedResources,
logger: &logger{w: ioutil.Discard},
}
warnings, errors := restorer.restoreNamespace(test.restore, test.namespace, test.path, test.prioritizedResources, nil, nil)
warnings, errors := ctx.restoreNamespace(test.namespace, test.path)
assert.Empty(t, warnings.Ark)
assert.Empty(t, warnings.Cluster)
@ -410,28 +404,22 @@ func TestRestoreResourceForNamespace(t *testing.T) {
gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}
dynamicFactory.On("ClientForGroupVersionKind", gvk, resource, test.namespace).Return(resourceClient, nil)
restorer := &kubernetesRestorer{
discoveryHelper: nil,
dynamicFactory: dynamicFactory,
restorers: test.restorers,
backupService: nil,
backupClient: nil,
namespaceClient: nil,
resourcePriorities: nil,
fileSystem: test.fileSystem,
}
var (
restore = &api.Restore{
ctx := &context{
dynamicFactory: dynamicFactory,
restorers: test.restorers,
fileSystem: test.fileSystem,
selector: test.labelSelector,
restore: &api.Restore{
ObjectMeta: metav1.ObjectMeta{
Namespace: api.DefaultNamespace,
Name: "my-restore",
},
}
backup = &api.Backup{}
)
},
backup: &api.Backup{},
logger: &logger{w: ioutil.Discard},
}
warnings, errors := restorer.restoreResourceForNamespace(test.namespace, test.resourcePath, test.labelSelector, restore, backup)
warnings, errors := ctx.restoreResourceForNamespace(test.namespace, test.resourcePath)
assert.Empty(t, warnings.Ark)
assert.Empty(t, warnings.Cluster)

View File

@ -27,20 +27,20 @@ type BackupService struct {
mock.Mock
}
// CreateBackupLogSignedURL provides a mock function with given fields: bucket, backupName, ttl
func (_m *BackupService) CreateBackupSignedURL(backupType v1.DownloadTargetKind, bucket string, backupName string, ttl time.Duration) (string, error) {
ret := _m.Called(bucket, backupName, ttl)
// CreateSignedURL provides a mock function with given fields: target, bucket, ttl
func (_m *BackupService) CreateSignedURL(target v1.DownloadTarget, bucket string, ttl time.Duration) (string, error) {
ret := _m.Called(target, bucket, ttl)
var r0 string
if rf, ok := ret.Get(0).(func(string, string, time.Duration) string); ok {
r0 = rf(bucket, backupName, ttl)
if rf, ok := ret.Get(0).(func(v1.DownloadTarget, string, time.Duration) string); ok {
r0 = rf(target, bucket, ttl)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, string, time.Duration) error); ok {
r1 = rf(bucket, backupName, ttl)
if rf, ok := ret.Get(1).(func(v1.DownloadTarget, string, time.Duration) error); ok {
r1 = rf(target, bucket, ttl)
} else {
r1 = ret.Error(1)
}
@ -144,3 +144,17 @@ func (_m *BackupService) UploadBackup(bucket string, name string, metadata io.Re
return r0
}
// UploadRestoreLog provides a mock function with given fields: bucket, backup, restore, log
func (_m *BackupService) UploadRestoreLog(bucket string, backup string, restore string, log io.ReadSeeker) error {
ret := _m.Called(bucket, backup, restore, log)
var r0 error
if rf, ok := ret.Get(0).(func(string, string, string, io.ReadSeeker) error); ok {
r0 = rf(bucket, backup, restore, log)
} else {
r0 = ret.Error(0)
}
return r0
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (