commit 0045bb057d

@@ -283,7 +283,7 @@
 [[projects]]
   name = "github.com/sirupsen/logrus"
-  packages = [".","hooks/test"]
+  packages = ["."]
   revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
   version = "v1.0.3"

@@ -426,6 +426,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "6287197115277ba882d5bb5dc20d74a8cb8e13d90c4e783c518a4e4aed55245f"
+  inputs-digest = "c3cd1b703421685e5b2343ced6eaa6ec958b9c44d62277322f4c93de164c2d04"
   solver-name = "gps-cdcl"
   solver-version = 1

@@ -37,7 +37,7 @@ type itemHookHandler interface {
 	// to specify a hook, that is executed. Otherwise, this looks at the backup context's Backup to
 	// determine if there are any hooks relevant to the item, taking into account the hook spec's
 	// namespaces, resources, and label selector.
-	handleHooks(log *logrus.Entry, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error
+	handleHooks(log logrus.FieldLogger, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error
 }

 // defaultItemHookHandler is the default itemHookHandler.
@@ -46,7 +46,7 @@ type defaultItemHookHandler struct {
 }

 func (h *defaultItemHookHandler) handleHooks(
-	log *logrus.Entry,
+	log logrus.FieldLogger,
 	groupResource schema.GroupResource,
 	obj runtime.Unstructured,
 	resourceHooks []resourceHook,

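Aside: the signature changes above swap the concrete *logrus.Entry for the logrus.FieldLogger interface, which both *logrus.Logger and *logrus.Entry satisfy. A minimal sketch of what that buys callers (the function name here is illustrative, not from the commit):

    package main

    import "github.com/sirupsen/logrus"

    // logHook accepts any FieldLogger, so callers can pass a bare *logrus.Logger
    // or a *logrus.Entry that already carries contextual fields.
    func logHook(log logrus.FieldLogger, hookName string) {
        log.WithField("hook", hookName).Info("executing hook")
    }

    func main() {
        base := logrus.New()

        // Both compile: *logrus.Logger and *logrus.Entry implement logrus.FieldLogger.
        logHook(base, "pre-backup")
        logHook(base.WithField("backup", "backup-1"), "post-backup")
    }
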
@@ -38,7 +38,7 @@ type mockItemHookHandler struct {
 	mock.Mock
 }

-func (h *mockItemHookHandler) handleHooks(log *logrus.Entry, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error {
+func (h *mockItemHookHandler) handleHooks(log logrus.FieldLogger, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error {
 	args := h.Called(log, groupResource, obj, resourceHooks)
 	return args.Error(0)
 }

@@ -35,7 +35,7 @@ import (
 type podCommandExecutor interface {
 	// executePodCommand executes a command in a container in a pod. If the command takes longer than
 	// the specified timeout, an error is returned.
-	executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error
+	executePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error
 }

 type poster interface {
@@ -63,7 +63,7 @@ func NewPodCommandExecutor(restClientConfig *rest.Config, restClient poster) pod
 // command takes longer than the specified timeout, an error is returned (NOTE: it is not currently
 // possible to ensure the command is terminated when the timeout occurs, so it may continue to run
 // in the background).
-func (e *defaultPodCommandExecutor) executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error {
+func (e *defaultPodCommandExecutor) executePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error {
 	if item == nil {
 		return errors.New("item is required")
 	}

@@ -270,7 +270,7 @@ type mockPodCommandExecutor struct {
 	mock.Mock
 }

-func (e *mockPodCommandExecutor) executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error {
+func (e *mockPodCommandExecutor) executePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error {
 	args := e.Called(log, item, namespace, name, hookName, hook)
 	return args.Error(0)
 }

@@ -41,13 +41,13 @@ type backupCache struct {
 	// This doesn't really need to be a map right now, but if we ever move to supporting multiple
 	// buckets, this will be ready for it.
 	buckets map[string]*backupCacheBucket
-	logger  *logrus.Logger
+	logger  logrus.FieldLogger
 }

 var _ BackupGetter = &backupCache{}

 // NewBackupCache returns a new backup cache that refreshes from delegate every resyncPeriod.
-func NewBackupCache(ctx context.Context, delegate BackupGetter, resyncPeriod time.Duration, logger *logrus.Logger) BackupGetter {
+func NewBackupCache(ctx context.Context, delegate BackupGetter, resyncPeriod time.Duration, logger logrus.FieldLogger) BackupGetter {
 	c := &backupCache{
 		delegate: delegate,
 		buckets:  make(map[string]*backupCacheBucket),

@@ -22,7 +22,6 @@ import (
 	"testing"
 	"time"

-	testlogger "github.com/sirupsen/logrus/hooks/test"
 	"github.com/stretchr/testify/assert"

 	"github.com/heptio/ark/pkg/apis/ark/v1"
@@ -34,7 +33,7 @@ func TestNewBackupCache(t *testing.T) {
 	var (
 		delegate    = &test.FakeBackupService{}
 		ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
-		logger, _   = testlogger.NewNullLogger()
+		logger      = test.NewLogger()
 	)
 	defer cancel()

@@ -104,8 +103,8 @@

 func TestBackupCacheRefresh(t *testing.T) {
 	var (
-		delegate  = &test.FakeBackupService{}
-		logger, _ = testlogger.NewNullLogger()
+		delegate = &test.FakeBackupService{}
+		logger   = test.NewLogger()
 	)

 	c := &backupCache{
@@ -136,9 +135,9 @@

 func TestBackupCacheGetAllBackupsUsesCacheIfPresent(t *testing.T) {
 	var (
-		delegate  = &test.FakeBackupService{}
-		logger, _ = testlogger.NewNullLogger()
-		bucket1   = []*v1.Backup{
+		delegate = &test.FakeBackupService{}
+		logger   = test.NewLogger()
+		bucket1  = []*v1.Backup{
 			test.NewTestBackup().WithName("backup1").Backup,
 			test.NewTestBackup().WithName("backup2").Backup,
 		}

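Aside: the tests above replace logrus's NewNullLogger() pair with a single test.NewLogger() helper from pkg/util/test. Its implementation is not part of this diff; a plausible sketch, assuming it returns a logrus.FieldLogger that discards all output, is:

    package test

    import (
        "io/ioutil"

        "github.com/sirupsen/logrus"
    )

    // NewLogger returns a logger suitable for tests: it satisfies
    // logrus.FieldLogger and writes nothing. (Sketch only; the real
    // helper in pkg/util/test may differ.)
    func NewLogger() logrus.FieldLogger {
        logger := logrus.New()
        logger.Out = ioutil.Discard
        return logrus.NewEntry(logger)
    }
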
@@ -101,14 +101,14 @@ func getRestoreResultsKey(backup, restore string) string {
 type backupService struct {
 	objectStore ObjectStore
 	decoder     runtime.Decoder
-	logger      *logrus.Logger
+	logger      logrus.FieldLogger
 }

-var _ BackupService = &backupService{}
+var _ BackupGetter = &backupService{}

 // NewBackupService creates a backup service using the provided object store
-func NewBackupService(objectStore ObjectStore, logger *logrus.Logger) BackupService {
+func NewBackupService(objectStore ObjectStore, logger logrus.FieldLogger) BackupService {
 	return &backupService{
 		objectStore: objectStore,
 		decoder:     scheme.Codecs.UniversalDecoder(api.SchemeGroupVersion),
@@ -268,7 +268,7 @@ func NewBackupServiceWithCachedBackupGetter(
 	ctx context.Context,
 	delegate BackupService,
 	resyncPeriod time.Duration,
-	logger *logrus.Logger,
+	logger logrus.FieldLogger,
 ) BackupService {
 	return &cachedBackupService{
 		BackupService: delegate,

@@ -27,7 +27,6 @@ import (
 	"time"

 	testutil "github.com/heptio/ark/pkg/util/test"
-	testlogger "github.com/sirupsen/logrus/hooks/test"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

@@ -36,6 +35,7 @@ import (

 	api "github.com/heptio/ark/pkg/apis/ark/v1"
 	"github.com/heptio/ark/pkg/util/encode"
+	arktest "github.com/heptio/ark/pkg/util/test"
 )

 func TestUploadBackup(t *testing.T) {
@@ -85,7 +85,7 @@ func TestUploadBackup(t *testing.T) {
 			objStore   = &testutil.ObjectStore{}
 			bucket     = "test-bucket"
 			backupName = "test-backup"
-			logger, _  = testlogger.NewNullLogger()
+			logger     = arktest.NewLogger()
 		)

 		if test.metadata != nil {
@@ -118,10 +118,10 @@ func TestUploadBackup(t *testing.T) {

 func TestDownloadBackup(t *testing.T) {
 	var (
-		o         = &testutil.ObjectStore{}
-		bucket    = "b"
-		backup    = "bak"
-		logger, _ = testlogger.NewNullLogger()
+		o      = &testutil.ObjectStore{}
+		bucket = "b"
+		backup = "bak"
+		logger = arktest.NewLogger()
 	)
 	o.On("GetObject", bucket, backup+"/"+backup+".tar.gz").Return(ioutil.NopCloser(strings.NewReader("foo")), nil)

@@ -155,11 +155,11 @@ func TestDeleteBackup(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			var (
-				bucket    = "bucket"
-				backup    = "bak"
-				objects   = []string{"bak/ark-backup.json", "bak/bak.tar.gz", "bak/bak.log.gz"}
-				objStore  = &testutil.ObjectStore{}
-				logger, _ = testlogger.NewNullLogger()
+				bucket   = "bucket"
+				backup   = "bak"
+				objects  = []string{"bak/ark-backup.json", "bak/bak.tar.gz", "bak/bak.log.gz"}
+				objStore = &testutil.ObjectStore{}
+				logger   = arktest.NewLogger()
 			)

 			objStore.On("ListObjects", bucket, backup+"/").Return(objects, test.listObjectsError)
@@ -229,9 +229,9 @@ func TestGetAllBackups(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			var (
-				bucket    = "bucket"
-				objStore  = &testutil.ObjectStore{}
-				logger, _ = testlogger.NewNullLogger()
+				bucket   = "bucket"
+				objStore = &testutil.ObjectStore{}
+				logger   = arktest.NewLogger()
 			)

 			objStore.On("ListCommonPrefixes", bucket, "/").Return([]string{"backup-1", "backup-2"}, nil)
@@ -328,7 +328,7 @@ func TestCreateSignedURL(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			var (
 				objectStorage = &testutil.ObjectStore{}
-				logger, _     = testlogger.NewNullLogger()
+				logger        = arktest.NewLogger()
 				backupService = NewBackupService(objectStorage, logger)
 			)

@@ -141,7 +141,7 @@ type server struct {
 	sharedInformerFactory informers.SharedInformerFactory
 	ctx                   context.Context
 	cancelFunc            context.CancelFunc
-	logger                *logrus.Logger
+	logger                logrus.FieldLogger
 	pluginManager         plugin.Manager
 }

@@ -272,7 +272,7 @@ var defaultResourcePriorities = []string{
 	"configmaps",
 }

-func applyConfigDefaults(c *api.Config, logger *logrus.Logger) {
+func applyConfigDefaults(c *api.Config, logger logrus.FieldLogger) {
 	if c.GCSyncPeriod.Duration == 0 {
 		c.GCSyncPeriod.Duration = defaultGCSyncPeriod
 	}
@@ -567,7 +567,7 @@ func newRestorer(
 	resourcePriorities []string,
 	backupClient arkv1client.BackupsGetter,
 	kubeClient kubernetes.Interface,
-	logger *logrus.Logger,
+	logger logrus.FieldLogger,
 ) (restore.Restorer, error) {
 	return restore.NewKubernetesRestorer(
 		discoveryHelper,

@@ -20,16 +20,16 @@ import (
 	"testing"
 	"time"

-	"github.com/sirupsen/logrus/hooks/test"
 	"github.com/stretchr/testify/assert"

 	"github.com/heptio/ark/pkg/apis/ark/v1"
+	arktest "github.com/heptio/ark/pkg/util/test"
 )

 func TestApplyConfigDefaults(t *testing.T) {
 	var (
-		logger, _ = test.NewNullLogger()
-		c         = &v1.Config{}
+		logger = arktest.NewLogger()
+		c      = &v1.Config{}
 	)

 	// test defaulting

@@ -19,6 +19,7 @@ package controller
 import (
 	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -29,8 +30,10 @@ import (
 	"github.com/sirupsen/logrus"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	kuberrs "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
@@ -60,7 +63,7 @@ type backupController struct {
 	syncHandler   func(backupName string) error
 	queue         workqueue.RateLimitingInterface
 	clock         clock.Clock
-	logger        *logrus.Logger
+	logger        logrus.FieldLogger
 	pluginManager plugin.Manager
 }

@@ -71,7 +74,7 @@ func NewBackupController(
 	backupService cloudprovider.BackupService,
 	bucket string,
 	pvProviderExists bool,
-	logger *logrus.Logger,
+	logger logrus.FieldLogger,
 	pluginManager plugin.Manager,
 ) Interface {
 	c := &backupController{
@@ -223,6 +226,8 @@ func (controller *backupController) processBackup(key string) error {
 	}

 	logContext.Debug("Cloning backup")
+	// store ref to original for creating patch
+	original := backup
 	// don't modify items in the cache
 	backup = backup.DeepCopy()

@@ -242,11 +247,13 @@ func (controller *backupController) processBackup(key string) error {
 	}

 	// update status
-	updatedBackup, err := controller.client.Backups(ns).Update(backup)
+	updatedBackup, err := patchBackup(original, backup, controller.client)
 	if err != nil {
 		return errors.Wrapf(err, "error updating Backup status to %s", backup.Status.Phase)
 	}
-	backup = updatedBackup
+	// store ref to just-updated item for creating patch
+	original = updatedBackup
+	backup = updatedBackup.DeepCopy()

 	if backup.Status.Phase == api.BackupPhaseFailedValidation {
 		return nil
@@ -260,13 +267,37 @@ func (controller *backupController) processBackup(key string) error {
 	}

 	logContext.Debug("Updating backup's final status")
-	if _, err = controller.client.Backups(ns).Update(backup); err != nil {
+	if _, err := patchBackup(original, backup, controller.client); err != nil {
 		logContext.WithError(err).Error("error updating backup's final status")
 	}

 	return nil
 }

+func patchBackup(original, updated *api.Backup, client arkv1client.BackupsGetter) (*api.Backup, error) {
+	origBytes, err := json.Marshal(original)
+	if err != nil {
+		return nil, errors.Wrap(err, "error marshalling original backup")
+	}
+
+	updatedBytes, err := json.Marshal(updated)
+	if err != nil {
+		return nil, errors.Wrap(err, "error marshalling updated backup")
+	}
+
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(origBytes, updatedBytes, api.Backup{})
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating two-way merge patch for backup")
+	}
+
+	res, err := client.Backups(api.DefaultNamespace).Patch(original.Name, types.MergePatchType, patchBytes)
+	if err != nil {
+		return nil, errors.Wrap(err, "error patching backup")
+	}
+
+	return res, nil
+}
+
 func (controller *backupController) getValidationErrors(itm *api.Backup) []string {
 	var validationErrors []string

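Aside: patchBackup above (and the matching helpers for download requests and restores later in this diff) replaces whole-object Update() calls with a merge patch, so only the fields the controller actually changed are sent to the API server. A self-contained sketch of the same technique, using a stand-in type Obj rather than a real API type:

    package main

    import (
        "encoding/json"
        "fmt"

        "k8s.io/apimachinery/pkg/util/strategicpatch"
    )

    // Obj stands in for a typed API object such as api.Backup.
    type Obj struct {
        Name   string `json:"name,omitempty"`
        Status struct {
            Phase   string `json:"phase,omitempty"`
            Version int    `json:"version,omitempty"`
        } `json:"status,omitempty"`
    }

    func main() {
        var original Obj
        original.Name = "backup-1"
        original.Status.Phase = "New"

        // Mutate a copy, then diff the two serialized forms.
        updated := original
        updated.Status.Phase = "InProgress"
        updated.Status.Version = 1

        origBytes, _ := json.Marshal(original)
        updatedBytes, _ := json.Marshal(updated)

        // Prints only the delta: {"status":{"phase":"InProgress","version":1}}
        patchBytes, err := strategicpatch.CreateTwoWayMergePatch(origBytes, updatedBytes, Obj{})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(patchBytes))
    }

For a type with no strategic-merge struct tags, as here, the output is effectively a JSON merge patch, which is consistent with the helper submitting it as types.MergePatchType.
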
@@ -17,6 +17,7 @@ limitations under the License.
 package controller

 import (
+	"encoding/json"
 	"io"
 	"testing"
 	"time"
@@ -25,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/clock"
 	core "k8s.io/client-go/testing"

-	testlogger "github.com/sirupsen/logrus/hooks/test"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
@@ -34,10 +34,10 @@ import (
 	"github.com/heptio/ark/pkg/backup"
 	"github.com/heptio/ark/pkg/cloudprovider"
 	"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
-	"github.com/heptio/ark/pkg/generated/clientset/versioned/scheme"
 	informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
 	"github.com/heptio/ark/pkg/restore"
-	. "github.com/heptio/ark/pkg/util/test"
+	"github.com/heptio/ark/pkg/util/collections"
+	arktest "github.com/heptio/ark/pkg/util/test"
 )

 type fakeBackupper struct {
@@ -56,7 +56,7 @@ func TestProcessBackup(t *testing.T) {
 		expectError      bool
 		expectedIncludes []string
 		expectedExcludes []string
-		backup           *TestBackup
+		backup           *arktest.TestBackup
 		expectBackup     bool
 		allowSnapshots   bool
 	}{
@@ -73,49 +73,49 @@ func TestProcessBackup(t *testing.T) {
 		{
 			name:         "do not process phase FailedValidation",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseFailedValidation),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseFailedValidation),
 			expectBackup: false,
 		},
 		{
 			name:         "do not process phase InProgress",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseInProgress),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseInProgress),
 			expectBackup: false,
 		},
 		{
 			name:         "do not process phase Completed",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseCompleted),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseCompleted),
 			expectBackup: false,
 		},
 		{
 			name:         "do not process phase Failed",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseFailed),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseFailed),
 			expectBackup: false,
 		},
 		{
 			name:         "do not process phase other",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase("arg"),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase("arg"),
 			expectBackup: false,
 		},
 		{
 			name:         "invalid included/excluded resources fails validation",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedResources("foo").WithExcludedResources("foo"),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedResources("foo").WithExcludedResources("foo"),
 			expectBackup: false,
 		},
 		{
 			name:         "invalid included/excluded namespaces fails validation",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("foo").WithExcludedNamespaces("foo"),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("foo").WithExcludedNamespaces("foo"),
 			expectBackup: false,
 		},
 		{
 			name:             "make sure specified included and excluded resources are honored",
 			key:              "heptio-ark/backup1",
-			backup:           NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedResources("i", "j").WithExcludedResources("k", "l"),
+			backup:           arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedResources("i", "j").WithExcludedResources("k", "l"),
 			expectedIncludes: []string{"i", "j"},
 			expectedExcludes: []string{"k", "l"},
 			expectBackup:     true,
@@ -123,25 +123,25 @@ func TestProcessBackup(t *testing.T) {
 		{
 			name:         "if includednamespaces are specified, don't default to *",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("ns-1"),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("ns-1"),
 			expectBackup: true,
 		},
 		{
 			name:         "ttl",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithTTL(10 * time.Minute),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithTTL(10 * time.Minute),
 			expectBackup: true,
 		},
 		{
 			name:         "backup with SnapshotVolumes when allowSnapshots=false fails validation",
 			key:          "heptio-ark/backup1",
-			backup:       NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true),
+			backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true),
 			expectBackup: false,
 		},
 		{
 			name:           "backup with SnapshotVolumes when allowSnapshots=true gets executed",
 			key:            "heptio-ark/backup1",
-			backup:         NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true),
+			backup:         arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true),
 			allowSnapshots: true,
 			expectBackup:   true,
 		},
@@ -152,9 +152,9 @@ func TestProcessBackup(t *testing.T) {
 			var (
 				client          = fake.NewSimpleClientset()
 				backupper       = &fakeBackupper{}
-				cloudBackups    = &BackupService{}
+				cloudBackups    = &arktest.BackupService{}
 				sharedInformers = informers.NewSharedInformerFactory(client, 0)
-				logger, _       = testlogger.NewNullLogger()
+				logger          = arktest.NewLogger()
 				pluginManager   = &MockManager{}
 			)

@@ -182,9 +182,7 @@ func TestProcessBackup(t *testing.T) {
 			}

 			// set up a Backup object to represent what we expect to be passed to backupper.Backup()
-			copy, err := scheme.Scheme.Copy(test.backup.Backup)
-			assert.NoError(t, err, "copy error")
-			backup := copy.(*v1.Backup)
+			backup := test.backup.DeepCopy()
 			backup.Spec.IncludedResources = test.expectedIncludes
 			backup.Spec.ExcludedResources = test.expectedExcludes
 			backup.Spec.IncludedNamespaces = test.backup.Spec.IncludedNamespaces
@@ -200,16 +198,35 @@ func TestProcessBackup(t *testing.T) {
 				pluginManager.On("CloseBackupItemActions", backup.Name).Return(nil)
 			}

-			// this is necessary so the Update() call returns the appropriate object
-			client.PrependReactor("update", "backups", func(action core.Action) (bool, runtime.Object, error) {
-				obj := action.(core.UpdateAction).GetObject()
-				// need to deep copy so we can test the backup state for each call to update
-				copy, err := scheme.Scheme.DeepCopy(obj)
-				if err != nil {
+			// this is necessary so the Patch() call returns the appropriate object
+			client.PrependReactor("patch", "backups", func(action core.Action) (bool, runtime.Object, error) {
+				if test.backup == nil {
+					return true, nil, nil
+				}
+
+				patch := action.(core.PatchAction).GetPatch()
+				patchMap := make(map[string]interface{})
+
+				if err := json.Unmarshal(patch, &patchMap); err != nil {
+					t.Logf("error unmarshalling patch: %s\n", err)
 					return false, nil, err
 				}
-				ret := copy.(runtime.Object)
-				return true, ret, nil
+
+				phase, err := collections.GetString(patchMap, "status.phase")
+				if err != nil {
+					t.Logf("error getting status.phase: %s\n", err)
+					return false, nil, err
+				}
+
+				res := test.backup.DeepCopy()
+
+				// these are the fields that we expect to be set by
+				// the controller
+				res.Status.Version = 1
+				res.Status.Expiration.Time = expiration
+				res.Status.Phase = v1.BackupPhase(phase)
+
+				return true, res, nil
 			})

 			// method under test
@@ -227,41 +244,41 @@ func TestProcessBackup(t *testing.T) {
 				return
 			}

-			expectedActions := []core.Action{
-				core.NewUpdateAction(
-					v1.SchemeGroupVersion.WithResource("backups"),
-					v1.DefaultNamespace,
-					NewTestBackup().
-						WithName(test.backup.Name).
-						WithPhase(v1.BackupPhaseInProgress).
-						WithIncludedResources(test.expectedIncludes...).
-						WithExcludedResources(test.expectedExcludes...).
-						WithIncludedNamespaces(test.backup.Spec.IncludedNamespaces...).
-						WithTTL(test.backup.Spec.TTL.Duration).
-						WithSnapshotVolumesPointer(test.backup.Spec.SnapshotVolumes).
-						WithExpiration(expiration).
-						WithVersion(1).
-						Backup,
-				),
+			actions := client.Actions()
+			require.Equal(t, 2, len(actions))

-				core.NewUpdateAction(
-					v1.SchemeGroupVersion.WithResource("backups"),
-					v1.DefaultNamespace,
-					NewTestBackup().
-						WithName(test.backup.Name).
-						WithPhase(v1.BackupPhaseCompleted).
-						WithIncludedResources(test.expectedIncludes...).
-						WithExcludedResources(test.expectedExcludes...).
-						WithIncludedNamespaces(test.backup.Spec.IncludedNamespaces...).
-						WithTTL(test.backup.Spec.TTL.Duration).
-						WithSnapshotVolumesPointer(test.backup.Spec.SnapshotVolumes).
-						WithExpiration(expiration).
-						WithVersion(1).
-						Backup,
-				),
-			}
+			// validate Patch call 1 (setting version, expiration, and phase)
+			patchAction, ok := actions[0].(core.PatchAction)
+			require.True(t, ok, "action is not a PatchAction")

-			assert.Equal(t, expectedActions, client.Actions())
+			patch := make(map[string]interface{})
+			require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")
+
+			assert.Equal(t, 1, len(patch), "patch has wrong number of keys")
+
+			expectedStatusKeys := 2
+			if test.backup.Spec.TTL.Duration > 0 {
+				assert.True(t, collections.HasKeyAndVal(patch, "status.expiration", expiration.UTC().Format(time.RFC3339)), "patch's status.expiration does not match")
+				expectedStatusKeys = 3
+			}
+
+			assert.True(t, collections.HasKeyAndVal(patch, "status.version", float64(1)))
+			assert.True(t, collections.HasKeyAndVal(patch, "status.phase", string(v1.BackupPhaseInProgress)), "patch's status.phase does not match")
+
+			res, _ := collections.GetMap(patch, "status")
+			assert.Equal(t, expectedStatusKeys, len(res), "patch's status has the wrong number of keys")
+
+			// validate Patch call 2 (setting phase)
+			patchAction, ok = actions[1].(core.PatchAction)
+			require.True(t, ok, "action is not a PatchAction")
+
+			require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")
+
+			assert.Equal(t, 1, len(patch), "patch has wrong number of keys")
+
+			res, _ = collections.GetMap(patch, "status")
+			assert.Equal(t, 1, len(res), "patch's status has the wrong number of keys")
+			assert.True(t, collections.HasKeyAndVal(patch, "status.phase", string(v1.BackupPhaseCompleted)), "patch's status.phase does not match")
 		})
 	}
 }

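Aside: the tests now intercept the fake clientset's patch verb rather than update. A trimmed sketch of this fake-clientset technique, separate from the full test above (the test name is illustrative):

    package controller

    import (
        "encoding/json"
        "testing"

        "k8s.io/apimachinery/pkg/runtime"
        core "k8s.io/client-go/testing"

        "github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
    )

    // TestPatchReactor (illustrative name) shows how a fake clientset can
    // capture the merge patches a controller sends.
    func TestPatchReactor(t *testing.T) {
        client := fake.NewSimpleClientset()

        var seen map[string]interface{}
        client.PrependReactor("patch", "backups", func(action core.Action) (bool, runtime.Object, error) {
            if err := json.Unmarshal(action.(core.PatchAction).GetPatch(), &seen); err != nil {
                return false, nil, err
            }
            // Returning true short-circuits the remaining reactors.
            return true, nil, nil
        })

        // ... drive the controller with client here, then assert on seen ...
    }
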
@@ -36,7 +36,7 @@ type backupSyncController struct {
 	backupService cloudprovider.BackupService
 	bucket        string
 	syncPeriod    time.Duration
-	logger        *logrus.Logger
+	logger        logrus.FieldLogger
 }

 func NewBackupSyncController(
@@ -44,7 +44,7 @@ func NewBackupSyncController(
 	backupService cloudprovider.BackupService,
 	bucket string,
 	syncPeriod time.Duration,
-	logger *logrus.Logger,
+	logger logrus.FieldLogger,
 ) Interface {
 	if syncPeriod < time.Minute {
 		logger.Infof("Provided backup sync period %v is too short. Setting to 1 minute", syncPeriod)

@@ -21,14 +21,13 @@ import (
 	"testing"
 	"time"

-	testlogger "github.com/sirupsen/logrus/hooks/test"
 	"github.com/stretchr/testify/assert"

 	core "k8s.io/client-go/testing"

 	"github.com/heptio/ark/pkg/apis/ark/v1"
 	"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
-	. "github.com/heptio/ark/pkg/util/test"
+	arktest "github.com/heptio/ark/pkg/util/test"
 )

 func TestBackupSyncControllerRun(t *testing.T) {
@@ -47,9 +46,9 @@ func TestBackupSyncControllerRun(t *testing.T) {
 		{
 			name: "normal case",
 			cloudBackups: []*v1.Backup{
-				NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
-				NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
-				NewTestBackup().WithNamespace("ns-2").WithName("backup-3").Backup,
+				arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
+				arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
+				arktest.NewTestBackup().WithNamespace("ns-2").WithName("backup-3").Backup,
 			},
 		},
 	}
@@ -57,9 +56,9 @@ func TestBackupSyncControllerRun(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			var (
-				bs        = &BackupService{}
-				client    = fake.NewSimpleClientset()
-				logger, _ = testlogger.NewNullLogger()
+				bs     = &arktest.BackupService{}
+				client = fake.NewSimpleClientset()
+				logger = arktest.NewLogger()
 			)

 			c := NewBackupSyncController(

@@ -18,6 +18,7 @@ package controller

 import (
 	"context"
+	"encoding/json"
 	"sync"
 	"time"

@@ -27,7 +28,9 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
@@ -49,7 +52,7 @@ type downloadRequestController struct {
 	syncHandler func(key string) error
 	queue       workqueue.RateLimitingInterface
 	clock       clock.Clock
-	logger      *logrus.Logger
+	logger      logrus.FieldLogger
 }

 // NewDownloadRequestController creates a new DownloadRequestController.
@@ -58,7 +61,7 @@ func NewDownloadRequestController(
 	downloadRequestInformer informers.DownloadRequestInformer,
 	backupService cloudprovider.BackupService,
 	bucket string,
-	logger *logrus.Logger,
+	logger logrus.FieldLogger,
 ) Interface {
 	c := &downloadRequestController{
 		downloadRequestClient: downloadRequestClient,
@@ -220,7 +223,7 @@ func (c *downloadRequestController) generatePreSignedURL(downloadRequest *v1.Dow
 	update.Status.Phase = v1.DownloadRequestPhaseProcessed
 	update.Status.Expiration = metav1.NewTime(c.clock.Now().Add(signedURLTTL))

-	_, err = c.downloadRequestClient.DownloadRequests(update.Namespace).Update(update)
+	_, err = patchDownloadRequest(downloadRequest, update, c.downloadRequestClient)
 	return errors.WithStack(err)
 }

@@ -256,3 +259,27 @@ func (c *downloadRequestController) resync() {
 		c.queue.Add(key)
 	}
 }
+
+func patchDownloadRequest(original, updated *v1.DownloadRequest, client arkv1client.DownloadRequestsGetter) (*v1.DownloadRequest, error) {
+	origBytes, err := json.Marshal(original)
+	if err != nil {
+		return nil, errors.Wrap(err, "error marshalling original download request")
+	}
+
+	updatedBytes, err := json.Marshal(updated)
+	if err != nil {
+		return nil, errors.Wrap(err, "error marshalling updated download request")
+	}
+
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(origBytes, updatedBytes, v1.DownloadRequest{})
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating two-way merge patch for download request")
+	}
+
+	res, err := client.DownloadRequests(v1.DefaultNamespace).Patch(original.Name, types.MergePatchType, patchBytes)
+	if err != nil {
+		return nil, errors.Wrap(err, "error patching download request")
+	}
+
+	return res, nil
+}

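Aside: the controller-side pattern that pairs with patchDownloadRequest is to keep a reference to the object as it came from the informer cache, mutate only a deep copy, and let the helper diff the two. A condensed, hypothetical sketch (the method name example is not in the commit):

    // Condensed flow; mirrors generatePreSignedURL above.
    func (c *downloadRequestController) example(req *v1.DownloadRequest) error {
        original := req          // cache object: never mutated
        update := req.DeepCopy() // all changes go to the copy

        update.Status.Phase = v1.DownloadRequestPhaseProcessed

        // patchDownloadRequest marshals both, computes the two-way merge
        // patch, and sends only the delta to the API server.
        _, err := patchDownloadRequest(original, update, c.downloadRequestClient)
        return err
    }
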
@@ -17,21 +17,21 @@ limitations under the License.
 package controller

 import (
+	"encoding/json"
 	"testing"
 	"time"

-	testlogger "github.com/sirupsen/logrus/hooks/test"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	core "k8s.io/client-go/testing"

 	"github.com/heptio/ark/pkg/apis/ark/v1"
 	"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
 	informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
-	"github.com/heptio/ark/pkg/util/test"
+	"github.com/heptio/ark/pkg/util/collections"
+	arktest "github.com/heptio/ark/pkg/util/test"
 )

 func TestProcessDownloadRequest(t *testing.T) {
@@ -98,8 +98,8 @@ func TestProcessDownloadRequest(t *testing.T) {
 				client                   = fake.NewSimpleClientset()
 				sharedInformers          = informers.NewSharedInformerFactory(client, 0)
 				downloadRequestsInformer = sharedInformers.Ark().V1().DownloadRequests()
-				backupService            = &test.BackupService{}
-				logger, _                = testlogger.NewNullLogger()
+				backupService            = &arktest.BackupService{}
+				logger                   = arktest.NewLogger()
 			)
 			defer backupService.AssertExpectations(t)

@@ -111,37 +111,28 @@ func TestProcessDownloadRequest(t *testing.T) {
 				logger,
 			).(*downloadRequestController)

+			var downloadRequest *v1.DownloadRequest
+
 			if tc.expectedPhase == v1.DownloadRequestPhaseProcessed {
 				target := v1.DownloadTarget{
 					Kind: tc.targetKind,
 					Name: tc.targetName,
 				}

-				downloadRequestsInformer.Informer().GetStore().Add(
-					&v1.DownloadRequest{
-						ObjectMeta: metav1.ObjectMeta{
-							Namespace: v1.DefaultNamespace,
-							Name:      "dr1",
-						},
-						Spec: v1.DownloadRequestSpec{
-							Target: target,
-						},
-					},
-				)
+				downloadRequest = &v1.DownloadRequest{
+					ObjectMeta: metav1.ObjectMeta{
+						Namespace: v1.DefaultNamespace,
+						Name:      "dr1",
+					},
+					Spec: v1.DownloadRequestSpec{
+						Target: target,
+					},
+				}
+				downloadRequestsInformer.Informer().GetStore().Add(downloadRequest)

 				backupService.On("CreateSignedURL", target, "bucket", 10*time.Minute).Return("signedURL", nil)
 			}

-			var updatedRequest *v1.DownloadRequest
-
-			client.PrependReactor("update", "downloadrequests", func(action core.Action) (bool, runtime.Object, error) {
-				obj := action.(core.UpdateAction).GetObject()
-				r, ok := obj.(*v1.DownloadRequest)
-				require.True(t, ok)
-				updatedRequest = r
-				return true, obj, nil
-			})
-
 			// method under test
 			err := c.processDownloadRequest(tc.key)

@@ -152,16 +143,37 @@ func TestProcessDownloadRequest(t *testing.T) {

 			require.NoError(t, err)

-			var (
-				updatedPhase v1.DownloadRequestPhase
-				updatedURL   string
-			)
-			if updatedRequest != nil {
-				updatedPhase = updatedRequest.Status.Phase
-				updatedURL = updatedRequest.Status.DownloadURL
+			actions := client.Actions()
+
+			// if we don't expect a phase update, this means
+			// we don't expect any actions to take place
+			if tc.expectedPhase == "" {
+				require.Equal(t, 0, len(actions))
+				return
 			}
-			assert.Equal(t, tc.expectedPhase, updatedPhase)
-			assert.Equal(t, tc.expectedURL, updatedURL)
+
+			// otherwise, we should get exactly 1 patch
+			require.Equal(t, 1, len(actions))
+			patchAction, ok := actions[0].(core.PatchAction)
+			require.True(t, ok, "action is not a PatchAction")
+
+			patch := make(map[string]interface{})
+			require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")
+
+			// check the URL
+			assert.True(t, collections.HasKeyAndVal(patch, "status.downloadURL", tc.expectedURL), "patch's status.downloadURL does not match")
+
+			// check the Phase
+			assert.True(t, collections.HasKeyAndVal(patch, "status.phase", string(tc.expectedPhase)), "patch's status.phase does not match")
+
+			// check that Expiration exists
+			// TODO pass a fake clock to the controller and verify
+			// the expiration value
+			assert.True(t, collections.Exists(patch, "status.expiration"), "patch's status.expiration does not exist")
+
+			// we expect 3 total updates.
+			res, _ := collections.GetMap(patch, "status")
+			assert.Equal(t, 3, len(res), "patch's status has the wrong number of keys")
 		})
 	}
 }

@@ -50,7 +50,7 @@ type gcController struct {
 	restoreLister       listers.RestoreLister
 	restoreListerSynced cache.InformerSynced
 	restoreClient       arkv1client.RestoresGetter
-	logger              *logrus.Logger
+	logger              logrus.FieldLogger
 }

 // NewGCController constructs a new gcController.
@@ -63,7 +63,7 @@ func NewGCController(
 	backupClient arkv1client.BackupsGetter,
 	restoreInformer informers.RestoreInformer,
 	restoreClient arkv1client.RestoresGetter,
-	logger *logrus.Logger,
+	logger logrus.FieldLogger,
 ) Interface {
 	if syncPeriod < time.Minute {
 		logger.WithField("syncPeriod", syncPeriod).Info("Provided GC sync period is too short. Setting to 1 minute")

@@ -20,7 +20,6 @@ import (
 	"testing"
 	"time"

-	testlogger "github.com/sirupsen/logrus/hooks/test"
 	"github.com/stretchr/testify/assert"

 	"k8s.io/apimachinery/pkg/util/clock"
@@ -31,7 +30,7 @@ import (
 	"github.com/heptio/ark/pkg/cloudprovider"
 	"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
 	informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
-	. "github.com/heptio/ark/pkg/util/test"
+	arktest "github.com/heptio/ark/pkg/util/test"
 )

 type gcTest struct {
@@ -51,7 +50,7 @@ func TestGarbageCollect(t *testing.T) {
 		{
 			name: "basic-expired",
 			backups: []*api.Backup{
-				NewTestBackup().WithName("backup-1").
+				arktest.NewTestBackup().WithName("backup-1").
 					WithExpiration(fakeClock.Now().Add(-1*time.Second)).
 					WithSnapshot("pv-1", "snapshot-1").
 					WithSnapshot("pv-2", "snapshot-2").
@@ -64,7 +63,7 @@ func TestGarbageCollect(t *testing.T) {
 		{
 			name: "basic-unexpired",
 			backups: []*api.Backup{
-				NewTestBackup().WithName("backup-1").
+				arktest.NewTestBackup().WithName("backup-1").
 					WithExpiration(fakeClock.Now().Add(1*time.Minute)).
 					WithSnapshot("pv-1", "snapshot-1").
 					WithSnapshot("pv-2", "snapshot-2").
@@ -77,12 +76,12 @@ func TestGarbageCollect(t *testing.T) {
 		{
 			name: "one expired, one unexpired",
 			backups: []*api.Backup{
-				NewTestBackup().WithName("backup-1").
+				arktest.NewTestBackup().WithName("backup-1").
 					WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
 					WithSnapshot("pv-1", "snapshot-1").
 					WithSnapshot("pv-2", "snapshot-2").
 					Backup,
-				NewTestBackup().WithName("backup-2").
+				arktest.NewTestBackup().WithName("backup-2").
 					WithExpiration(fakeClock.Now().Add(1*time.Minute)).
 					WithSnapshot("pv-3", "snapshot-3").
 					WithSnapshot("pv-4", "snapshot-4").
@@ -95,7 +94,7 @@ func TestGarbageCollect(t *testing.T) {
 		{
 			name: "none expired in target bucket",
 			backups: []*api.Backup{
-				NewTestBackup().WithName("backup-2").
+				arktest.NewTestBackup().WithName("backup-2").
 					WithExpiration(fakeClock.Now().Add(1*time.Minute)).
 					WithSnapshot("pv-3", "snapshot-3").
 					WithSnapshot("pv-4", "snapshot-4").
@@ -108,7 +107,7 @@ func TestGarbageCollect(t *testing.T) {
 		{
 			name: "orphan snapshots",
 			backups: []*api.Backup{
-				NewTestBackup().WithName("backup-1").
+				arktest.NewTestBackup().WithName("backup-1").
 					WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
 					WithSnapshot("pv-1", "snapshot-1").
 					WithSnapshot("pv-2", "snapshot-2").
@@ -121,12 +120,12 @@ func TestGarbageCollect(t *testing.T) {
 		{
 			name: "no snapshot service only GC's backups without snapshots",
 			backups: []*api.Backup{
-				NewTestBackup().WithName("backup-1").
+				arktest.NewTestBackup().WithName("backup-1").
 					WithExpiration(fakeClock.Now().Add(-1*time.Second)).
 					WithSnapshot("pv-1", "snapshot-1").
 					WithSnapshot("pv-2", "snapshot-2").
 					Backup,
-				NewTestBackup().WithName("backup-2").
+				arktest.NewTestBackup().WithName("backup-2").
 					WithExpiration(fakeClock.Now().Add(-1 * time.Second)).
 					Backup,
 			},
@@ -138,12 +137,12 @@ func TestGarbageCollect(t *testing.T) {

 	for _, test := range tests {
 		var (
-			backupService   = &BackupService{}
-			snapshotService *FakeSnapshotService
+			backupService   = &arktest.BackupService{}
+			snapshotService *arktest.FakeSnapshotService
 		)

 		if !test.nilSnapshotService {
-			snapshotService = &FakeSnapshotService{SnapshotsTaken: test.snapshots}
+			snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
 		}

 		t.Run(test.name, func(t *testing.T) {
@@ -152,7 +151,7 @@ func TestGarbageCollect(t *testing.T) {
 				sharedInformers = informers.NewSharedInformerFactory(client, 0)
 				snapSvc         cloudprovider.SnapshotService
 				bucket          = "bucket"
-				logger, _       = testlogger.NewNullLogger()
+				logger          = arktest.NewLogger()
 			)

 			if snapshotService != nil {
@@ -204,7 +203,7 @@ func TestGarbageCollectBackup(t *testing.T) {
 	}{
 		{
 			name: "deleteBackupFile=false, snapshot deletion fails, don't delete kube backup",
-			backup: NewTestBackup().WithName("backup-1").
+			backup: arktest.NewTestBackup().WithName("backup-1").
 				WithSnapshot("pv-1", "snapshot-1").
 				WithSnapshot("pv-2", "snapshot-2").
 				Backup,
@@ -215,12 +214,12 @@ func TestGarbageCollectBackup(t *testing.T) {
 		},
 		{
 			name:             "related restores should be deleted",
-			backup:           NewTestBackup().WithName("backup-1").Backup,
+			backup:           arktest.NewTestBackup().WithName("backup-1").Backup,
 			deleteBackupFile: true,
 			snapshots:        sets.NewString(),
 			restores: []*api.Restore{
-				NewTestRestore(api.DefaultNamespace, "restore-1", api.RestorePhaseCompleted).WithBackup("backup-1").Restore,
-				NewTestRestore(api.DefaultNamespace, "restore-2", api.RestorePhaseCompleted).WithBackup("backup-2").Restore,
+				arktest.NewTestRestore(api.DefaultNamespace, "restore-1", api.RestorePhaseCompleted).WithBackup("backup-1").Restore,
+				arktest.NewTestRestore(api.DefaultNamespace, "restore-2", api.RestorePhaseCompleted).WithBackup("backup-2").Restore,
 			},
 			expectedRestoreDeletes: []string{"restore-1"},
 			expectedBackupDelete:   "backup-1",
@@ -232,12 +231,12 @@ func TestGarbageCollectBackup(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			var (
-				backupService   = &BackupService{}
-				snapshotService = &FakeSnapshotService{SnapshotsTaken: test.snapshots}
+				backupService   = &arktest.BackupService{}
+				snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
 				client          = fake.NewSimpleClientset()
 				sharedInformers = informers.NewSharedInformerFactory(client, 0)
 				bucket          = "bucket-1"
-				logger, _       = testlogger.NewNullLogger()
+				logger          = arktest.NewLogger()
 				controller      = NewGCController(
 					backupService,
 					snapshotService,
@@ -298,8 +297,8 @@ func TestGarbageCollectBackup(t *testing.T) {

 func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
 	var (
-		backupService   = &BackupService{}
-		snapshotService = &FakeSnapshotService{}
+		backupService   = &arktest.BackupService{}
+		snapshotService = &arktest.FakeSnapshotService{}
 		fakeClock       = clock.NewFakeClock(time.Now())
 		assert          = assert.New(t)
 	)
@@ -307,7 +306,7 @@ func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
 	scenario := gcTest{
 		name: "basic-expired",
 		backups: []*api.Backup{
-			NewTestBackup().WithName("backup-1").
+			arktest.NewTestBackup().WithName("backup-1").
 				WithExpiration(fakeClock.Now().Add(1*time.Second)).
 				WithSnapshot("pv-1", "snapshot-1").
 				WithSnapshot("pv-2", "snapshot-2").
@@ -321,7 +320,7 @@ func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
 	var (
 		client          = fake.NewSimpleClientset()
 		sharedInformers = informers.NewSharedInformerFactory(client, 0)
-		logger, _       = testlogger.NewNullLogger()
+		logger          = arktest.NewLogger()
 	)

 	controller := NewGCController(

@@ -31,7 +31,9 @@ import (
 	"github.com/sirupsen/logrus"

 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
@@ -230,6 +232,8 @@ func (controller *restoreController) processRestore(key string) error {
 	}

 	logContext.Debug("Cloning Restore")
+	// store ref to original for creating patch
+	original := restore
 	// don't modify items in the cache
 	restore = restore.DeepCopy()

@@ -248,11 +252,13 @@ func (controller *restoreController) processRestore(key string) error {
 	}

 	// update status
-	updatedRestore, err := controller.restoreClient.Restores(ns).Update(restore)
+	updatedRestore, err := patchRestore(original, restore, controller.restoreClient)
 	if err != nil {
 		return errors.Wrapf(err, "error updating Restore phase to %s", restore.Status.Phase)
 	}
-	restore = updatedRestore
+	// store ref to just-updated item for creating patch
+	original = updatedRestore
+	restore = updatedRestore.DeepCopy()

 	if restore.Status.Phase == api.RestorePhaseFailedValidation {
 		return nil
@@ -276,7 +282,7 @@ func (controller *restoreController) processRestore(key string) error {
 	restore.Status.Phase = api.RestorePhaseCompleted

 	logContext.Debug("Updating Restore final status")
-	if _, err = controller.restoreClient.Restores(ns).Update(restore); err != nil {
+	if _, err = patchRestore(original, restore, controller.restoreClient); err != nil {
 		logContext.WithError(errors.WithStack(err)).Info("Error updating Restore final status")
 	}

@@ -472,3 +478,27 @@ func downloadToTempFile(backupName string, backupService cloudprovider.BackupSer

 	return file, nil
 }
+
+func patchRestore(original, updated *api.Restore, client arkv1client.RestoresGetter) (*api.Restore, error) {
+	origBytes, err := json.Marshal(original)
+	if err != nil {
+		return nil, errors.Wrap(err, "error marshalling original restore")
+	}
+
+	updatedBytes, err := json.Marshal(updated)
+	if err != nil {
+		return nil, errors.Wrap(err, "error marshalling updated restore")
+	}
+
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(origBytes, updatedBytes, api.Restore{})
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating two-way merge patch for restore")
+	}
+
+	res, err := client.Restores(api.DefaultNamespace).Patch(original.Name, types.MergePatchType, patchBytes)
+	if err != nil {
+		return nil, errors.Wrap(err, "error patching restore")
+	}
+
+	return res, nil
+}

@ -18,6 +18,7 @@ package controller
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -25,9 +26,9 @@ import (
|
|||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
|
@ -35,6 +36,7 @@ import (
|
|||
"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
|
||||
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
|
||||
"github.com/heptio/ark/pkg/restore"
|
||||
"github.com/heptio/ark/pkg/util/collections"
|
||||
arktest "github.com/heptio/ark/pkg/util/test"
|
||||
)
|
||||
|
||||
|
@ -120,7 +122,9 @@ func TestProcessRestore(t *testing.T) {
|
|||
restorerError error
|
||||
allowRestoreSnapshots bool
|
||||
expectedErr bool
|
||||
expectedRestoreUpdates []*api.Restore
|
||||
expectedPhase string
|
||||
expectedValidationErrors []string
|
||||
expectedRestoreErrors int
|
||||
expectedRestorerCall *api.Restore
|
||||
backupServiceGetBackupError error
|
||||
uploadLogError error
|
||||
|
@ -151,73 +155,53 @@ func TestProcessRestore(t *testing.T) {
|
|||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
name: "restore with both namespace in both includedNamespaces and excludedNamespaces fails validation",
|
||||
restore: NewRestore("foo", "bar", "backup-1", "another-1", "*", api.RestorePhaseNew).WithExcludedNamespace("another-1").Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
expectedErr: false,
|
||||
expectedRestoreUpdates: []*api.Restore{
|
||||
NewRestore("foo", "bar", "backup-1", "another-1", "*", api.RestorePhaseFailedValidation).WithExcludedNamespace("another-1").
|
||||
WithValidationError("Invalid included/excluded namespace lists: excludes list cannot contain an item in the includes list: another-1").
|
||||
Restore,
|
||||
},
|
||||
name: "restore with both namespace in both includedNamespaces and excludedNamespaces fails validation",
|
||||
restore: NewRestore("foo", "bar", "backup-1", "another-1", "*", api.RestorePhaseNew).WithExcludedNamespace("another-1").Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{"Invalid included/excluded namespace lists: excludes list cannot contain an item in the includes list: another-1"},
|
||||
},
|
||||
{
|
||||
name: "restore with resource in both includedResources and excludedResources fails validation",
|
||||
restore: NewRestore("foo", "bar", "backup-1", "*", "a-resource", api.RestorePhaseNew).WithExcludedResource("a-resource").Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
expectedErr: false,
|
||||
expectedRestoreUpdates: []*api.Restore{
|
||||
NewRestore("foo", "bar", "backup-1", "*", "a-resource", api.RestorePhaseFailedValidation).WithExcludedResource("a-resource").
|
||||
WithValidationError("Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: a-resource").
|
||||
Restore,
|
||||
},
|
||||
name: "restore with resource in both includedResources and excludedResources fails validation",
|
||||
restore: NewRestore("foo", "bar", "backup-1", "*", "a-resource", api.RestorePhaseNew).WithExcludedResource("a-resource").Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{"Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: a-resource"},
|
||||
},
|
||||
{
|
||||
name: "new restore with empty backup name fails validation",
|
||||
restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
expectedErr: false,
|
||||
expectedRestoreUpdates: []*api.Restore{
|
||||
NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseFailedValidation).
|
||||
WithValidationError("BackupName must be non-empty and correspond to the name of a backup in object storage.").
|
||||
Restore,
|
||||
},
|
||||
name: "new restore with empty backup name fails validation",
|
||||
restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{"BackupName must be non-empty and correspond to the name of a backup in object storage."},
|
||||
},
|
||||
|
||||
{
|
||||
name: "restore with non-existent backup name fails",
|
||||
restore: arktest.NewTestRestore("foo", "bar", api.RestorePhaseNew).WithBackup("backup-1").WithIncludedNamespace("ns-1").Restore,
|
||||
expectedErr: false,
|
||||
expectedRestoreUpdates: []*api.Restore{
|
||||
NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
|
||||
NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).
|
||||
WithErrors(1).
|
||||
Restore,
|
||||
},
|
||||
name: "restore with non-existent backup name fails",
|
||||
restore: arktest.NewTestRestore("foo", "bar", api.RestorePhaseNew).WithBackup("backup-1").WithIncludedNamespace("ns-1").Restore,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseInProgress),
|
||||
expectedRestoreErrors: 1,
|
||||
backupServiceGetBackupError: errors.New("no backup here"),
|
||||
},
|
||||
{
|
||||
name: "restorer throwing an error causes the restore to fail",
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
restorerError: errors.New("blarg"),
|
||||
expectedErr: false,
|
||||
expectedRestoreUpdates: []*api.Restore{
|
||||
NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
|
||||
NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).
|
||||
WithErrors(1).
|
||||
Restore,
|
||||
},
|
||||
expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
|
||||
name: "restorer throwing an error causes the restore to fail",
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
restorerError: errors.New("blarg"),
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseInProgress),
|
||||
expectedRestoreErrors: 1,
|
||||
expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
|
||||
},
|
||||
{
|
||||
name: "valid restore gets executed",
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
expectedErr: false,
|
||||
expectedRestoreUpdates: []*api.Restore{
|
||||
NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
|
||||
NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).Restore,
|
||||
},
|
||||
name: "valid restore gets executed",
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseInProgress),
|
||||
expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
|
||||
},
|
||||
{
|
||||
|
@ -226,34 +210,26 @@ func TestProcessRestore(t *testing.T) {
|
|||
			backup:                arktest.NewTestBackup().WithName("backup-1").Backup,
			allowRestoreSnapshots: true,
			expectedErr:           false,
			expectedRestoreUpdates: []*api.Restore{
				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).WithRestorePVs(true).Restore,
				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).WithRestorePVs(true).Restore,
			},
			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).WithRestorePVs(true).Restore,
			expectedPhase:        string(api.RestorePhaseInProgress),
			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).WithRestorePVs(true).Restore,
		},
		{
			name:        "restore with RestorePVs=true fails validation when allowRestoreSnapshots=false",
			restore:     NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).WithRestorePVs(true).Restore,
			backup:      arktest.NewTestBackup().WithName("backup-1").Backup,
			expectedErr: false,
			expectedRestoreUpdates: []*api.Restore{
				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseFailedValidation).
					WithRestorePVs(true).
					WithValidationError("Server is not configured for PV snapshot restores").
					Restore,
			},
			name:                     "restore with RestorePVs=true fails validation when allowRestoreSnapshots=false",
			restore:                  NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).WithRestorePVs(true).Restore,
			backup:                   arktest.NewTestBackup().WithName("backup-1").Backup,
			expectedErr:              false,
			expectedPhase:            string(api.RestorePhaseFailedValidation),
			expectedValidationErrors: []string{"Server is not configured for PV snapshot restores"},
		},
		{
			name:        "restoration of nodes is not supported",
			restore:     NewRestore("foo", "bar", "backup-1", "ns-1", "nodes", api.RestorePhaseNew).Restore,
			backup:      arktest.NewTestBackup().WithName("backup-1").Backup,
			expectedErr: false,
			expectedRestoreUpdates: []*api.Restore{
				NewRestore("foo", "bar", "backup-1", "ns-1", "nodes", api.RestorePhaseFailedValidation).
					WithValidationError("nodes are a non-restorable resource").
					WithValidationError("Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: nodes").
					Restore,
			name:          "restoration of nodes is not supported",
			restore:       NewRestore("foo", "bar", "backup-1", "ns-1", "nodes", api.RestorePhaseNew).Restore,
			backup:        arktest.NewTestBackup().WithName("backup-1").Backup,
			expectedErr:   false,
			expectedPhase: string(api.RestorePhaseFailedValidation),
			expectedValidationErrors: []string{
				"nodes are a non-restorable resource",
				"Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: nodes",
			},
		},
	}
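Note: the rewritten expectations above assert on the contents of JSON merge patches rather than on whole updated Restore objects. As a minimal sketch (not part of the commit) of the shape being asserted, the first patch for a failed-validation case would carry only the status subtree; the field names mirror the Ark v1 API used above:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Only the status subtree is patched; the key-counting assertions later
	// in the test verify that nothing else is included.
	patch := map[string]interface{}{
		"status": map[string]interface{}{
			"phase":            "FailedValidation",
			"validationErrors": []string{"Server is not configured for PV snapshot restores"},
		},
	}
	b, _ := json.Marshal(patch)
	fmt.Println(string(b))
	// {"status":{"phase":"FailedValidation","validationErrors":["Server is not configured for PV snapshot restores"]}}
}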
@ -288,16 +264,34 @@ func TestProcessRestore(t *testing.T) {
			if test.restore != nil {
				sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(test.restore)

				// this is necessary so the Update() call returns the appropriate object
				client.PrependReactor("update", "restores", func(action core.Action) (bool, runtime.Object, error) {
					obj := action.(core.UpdateAction).GetObject()
					// need to deep copy so we can test the backup state for each call to update
					copy, err := scheme.Scheme.DeepCopy(obj)
					if err != nil {
				// this is necessary so the Patch() call returns the appropriate object
				client.PrependReactor("patch", "restores", func(action core.Action) (bool, runtime.Object, error) {
					if test.restore == nil {
						return true, nil, nil
					}

					patch := action.(core.PatchAction).GetPatch()
					patchMap := make(map[string]interface{})

					if err := json.Unmarshal(patch, &patchMap); err != nil {
						t.Logf("error unmarshalling patch: %s\n", err)
						return false, nil, err
					}
					ret := copy.(runtime.Object)
					return true, ret, nil

					phase, err := collections.GetString(patchMap, "status.phase")
					if err != nil {
						t.Logf("error getting status.phase: %s\n", err)
						return false, nil, err
					}

					res := test.restore.DeepCopy()

					// these are the fields that we expect to be set by
					// the controller

					res.Status.Phase = api.RestorePhase(phase)

					return true, res, nil
				})
			}
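The reactor above exists because the controller uses the object returned from Patch() as its updated copy, so the fake clientset must answer with a Restore whose fields reflect the patch. Reactors registered via PrependReactor run ahead of the fake's defaults, and returning true as the first result marks the action handled. A minimal sketch of that contract (client and someRestore are placeholder names, not from the commit):

client.PrependReactor("patch", "restores",
	func(action core.Action) (bool, runtime.Object, error) {
		// handled=true: the fake clientset returns (someRestore, nil) to the
		// caller instead of falling through to later reactions.
		return true, someRestore, nil
	})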
@ -346,32 +340,75 @@ func TestProcessRestore(t *testing.T) {

			assert.Equal(t, test.expectedErr, err != nil, "got error %v", err)

			if test.expectedRestoreUpdates != nil {
				var expectedActions []core.Action
			actions := client.Actions()

				for _, upd := range test.expectedRestoreUpdates {
					action := core.NewUpdateAction(
						api.SchemeGroupVersion.WithResource("restores"),
						upd.Namespace,
						upd)

					expectedActions = append(expectedActions, action)
				}

				assert.Equal(t, expectedActions, client.Actions())
			if test.expectedPhase == "" {
				require.Equal(t, 0, len(actions), "len(actions) should be zero")
				return
			}

			// validate Patch call 1 (setting phase, validation errs)
			require.True(t, len(actions) > 0, "len(actions) is too small")

			patchAction, ok := actions[0].(core.PatchAction)
			require.True(t, ok, "action is not a PatchAction")

			patch := make(map[string]interface{})
			require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")

			expectedStatusKeys := 1

			assert.True(t, collections.HasKeyAndVal(patch, "status.phase", test.expectedPhase), "patch's status.phase does not match")

			if len(test.expectedValidationErrors) > 0 {
				errs, err := collections.GetSlice(patch, "status.validationErrors")
				require.NoError(t, err, "error getting patch's status.validationErrors")

				var errStrings []string
				for _, err := range errs {
					errStrings = append(errStrings, err.(string))
				}

				assert.Equal(t, test.expectedValidationErrors, errStrings, "patch's status.validationErrors does not match")

				expectedStatusKeys++
			}

			res, _ := collections.GetMap(patch, "status")
			assert.Equal(t, expectedStatusKeys, len(res), "patch's status has the wrong number of keys")

			// if we don't expect a restore, validate it wasn't called and exit the test
			if test.expectedRestorerCall == nil {
				assert.Empty(t, restorer.Calls)
				assert.Zero(t, restorer.calledWithArg)
			} else {
				assert.Equal(t, 1, len(restorer.Calls))

				// explicitly capturing the argument passed to Restore myself because
				// I want to validate the called arg as of the time of calling, but
				// the mock stores the pointer, which gets modified after
				assert.Equal(t, *test.expectedRestorerCall, restorer.calledWithArg)
				return
			}
			assert.Equal(t, 1, len(restorer.Calls))

			// validate Patch call 2 (setting phase)
			patchAction, ok = actions[1].(core.PatchAction)
			require.True(t, ok, "action is not a PatchAction")

			require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")

			assert.Equal(t, 1, len(patch), "patch has wrong number of keys")

			res, _ = collections.GetMap(patch, "status")
			expectedStatusKeys = 1

			assert.True(t, collections.HasKeyAndVal(patch, "status.phase", string(api.RestorePhaseCompleted)), "patch's status.phase does not match")

			if test.expectedRestoreErrors != 0 {
				assert.True(t, collections.HasKeyAndVal(patch, "status.errors", float64(test.expectedRestoreErrors)), "patch's status.errors does not match")
				expectedStatusKeys++
			}

			assert.Equal(t, expectedStatusKeys, len(res), "patch's status has wrong number of keys")

			// explicitly capturing the argument passed to Restore myself because
			// I want to validate the called arg as of the time of calling, but
			// the mock stores the pointer, which gets modified after
			assert.Equal(t, *test.expectedRestorerCall, restorer.calledWithArg)
		})
	}
}
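One subtlety in the assertions above: json.Unmarshal into a map[string]interface{} decodes every JSON number as float64, which is why status.errors is compared against float64(test.expectedRestoreErrors) rather than an int. A quick illustration:

var m map[string]interface{}
_ = json.Unmarshal([]byte(`{"status":{"errors":1}}`), &m)
status := m["status"].(map[string]interface{})
fmt.Printf("%T\n", status["errors"]) // prints float64, not int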
@ -18,6 +18,7 @@ package controller

import (
	"context"
	"encoding/json"
	"fmt"
	"sync"
	"time"

@ -29,7 +30,9 @@ import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
@ -50,7 +53,7 @@ type scheduleController struct {
	queue      workqueue.RateLimitingInterface
	syncPeriod time.Duration
	clock      clock.Clock
	logger     *logrus.Logger
	logger     logrus.FieldLogger
}

func NewScheduleController(

@ -58,7 +61,7 @@ func NewScheduleController(
	backupsClient arkv1client.BackupsGetter,
	schedulesInformer informers.ScheduleInformer,
	syncPeriod time.Duration,
	logger *logrus.Logger,
	logger logrus.FieldLogger,
) *scheduleController {
	if syncPeriod < time.Minute {
		logger.WithField("syncPeriod", syncPeriod).Info("Provided schedule sync period is too short. Setting to 1 minute")
@ -230,6 +233,8 @@ func (controller *scheduleController) processSchedule(key string) error {
	}

	logContext.Debug("Cloning schedule")
	// store ref to original for creating patch
	original := schedule
	// don't modify items in the cache
	schedule = schedule.DeepCopy()

@ -247,7 +252,7 @@ func (controller *scheduleController) processSchedule(key string) error {

	// update status if it's changed
	if currentPhase != schedule.Status.Phase {
		updatedSchedule, err := controller.schedulesClient.Schedules(ns).Update(schedule)
		updatedSchedule, err := patchSchedule(original, schedule, controller.schedulesClient)
		if err != nil {
			return errors.Wrapf(err, "error updating Schedule phase to %s", schedule.Status.Phase)
		}
@ -266,7 +271,7 @@ func (controller *scheduleController) processSchedule(key string) error {
	return nil
}

func parseCronSchedule(itm *api.Schedule, logger *logrus.Logger) (cron.Schedule, []string) {
func parseCronSchedule(itm *api.Schedule, logger logrus.FieldLogger) (cron.Schedule, []string) {
	var validationErrors []string
	var schedule cron.Schedule

@ -330,11 +335,12 @@ func (controller *scheduleController) submitBackupIfDue(item *api.Schedule, cron
		return errors.Wrap(err, "error creating Backup")
	}

	original := item
	schedule := item.DeepCopy()

	schedule.Status.LastBackup = metav1.NewTime(now)

	if _, err := controller.schedulesClient.Schedules(schedule.Namespace).Update(schedule); err != nil {
	if _, err := patchSchedule(original, schedule, controller.schedulesClient); err != nil {
		return errors.Wrapf(err, "error updating Schedule's LastBackup time to %v", schedule.Status.LastBackup)
	}

@ -365,3 +371,27 @@ func getBackup(item *api.Schedule, timestamp time.Time) *api.Backup {

	return backup
}

func patchSchedule(original, updated *api.Schedule, client arkv1client.SchedulesGetter) (*api.Schedule, error) {
	origBytes, err := json.Marshal(original)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling original schedule")
	}

	updatedBytes, err := json.Marshal(updated)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling updated schedule")
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(origBytes, updatedBytes, api.Schedule{})
	if err != nil {
		return nil, errors.Wrap(err, "error creating two-way merge patch for schedule")
	}

	res, err := client.Schedules(api.DefaultNamespace).Patch(original.Name, types.MergePatchType, patchBytes)
	if err != nil {
		return nil, errors.Wrap(err, "error patching schedule")
	}

	return res, nil
}
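The new patchSchedule helper diffs the marshalled before/after documents with strategicpatch.CreateTwoWayMergePatch, so the patch contains only the fields that changed; for status-only changes on a type with no list-merge directives, the result is effectively a plain JSON merge patch, which is consistent with submitting it as types.MergePatchType. A standalone sketch of the diffing behavior, using a simplified stand-in type rather than api.Schedule:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

// schedule is a hypothetical stand-in for api.Schedule, for illustration only.
type schedule struct {
	Spec   map[string]string `json:"spec,omitempty"`
	Status map[string]string `json:"status,omitempty"`
}

func main() {
	orig, _ := json.Marshal(schedule{Spec: map[string]string{"schedule": "@every 5m"}})
	upd, _ := json.Marshal(schedule{
		Spec:   map[string]string{"schedule": "@every 5m"},
		Status: map[string]string{"lastBackup": "2017-01-01T12:00:00Z"},
	})

	// Only the fields that differ between orig and upd end up in the patch.
	patch, err := strategicpatch.CreateTwoWayMergePatch(orig, upd, schedule{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"status":{"lastBackup":"2017-01-01T12:00:00Z"}}
}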
@ -17,37 +17,39 @@ limitations under the License.
package controller

import (
	"encoding/json"
	"fmt"
	"testing"
	"time"

	"github.com/robfig/cron"
	testlogger "github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/client-go/kubernetes/scheme"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
	informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
	. "github.com/heptio/ark/pkg/util/test"
	"github.com/heptio/ark/pkg/util/collections"
	arktest "github.com/heptio/ark/pkg/util/test"
)

func TestProcessSchedule(t *testing.T) {
	tests := []struct {
		name                             string
		scheduleKey                      string
		schedule                         *api.Schedule
		fakeClockTime                    string
		expectedErr                      bool
		expectedSchedulePhaseUpdate      *api.Schedule
		expectedScheduleLastBackupUpdate *api.Schedule
		expectedBackupCreate             *api.Backup
		name                    string
		scheduleKey             string
		schedule                *api.Schedule
		fakeClockTime           string
		expectedErr             bool
		expectedPhase           string
		expectedValidationError string
		expectedBackupCreate    *api.Backup
		expectedLastBackup      string
	}{
		{
			name: "invalid key returns error",
@ -61,70 +63,64 @@ func TestProcessSchedule(t *testing.T) {
		},
		{
			name:        "schedule with phase FailedValidation does not get processed",
			schedule:    NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).Schedule,
			schedule:    arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).Schedule,
			expectedErr: false,
		},
		{
			name:        "schedule with phase New gets validated and failed if invalid",
			schedule:    NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).Schedule,
			expectedErr: false,
			expectedSchedulePhaseUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).
				WithValidationError("Schedule must be a non-empty valid Cron expression").Schedule,
			name:                    "schedule with phase New gets validated and failed if invalid",
			schedule:                arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).Schedule,
			expectedErr:             false,
			expectedPhase:           string(api.SchedulePhaseFailedValidation),
			expectedValidationError: "Schedule must be a non-empty valid Cron expression",
		},
		{
			name:        "schedule with phase <blank> gets validated and failed if invalid",
			schedule:    NewTestSchedule("ns", "name").Schedule,
			expectedErr: false,
			expectedSchedulePhaseUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).
				WithValidationError("Schedule must be a non-empty valid Cron expression").Schedule,
			name:                    "schedule with phase <blank> gets validated and failed if invalid",
			schedule:                arktest.NewTestSchedule("ns", "name").Schedule,
			expectedErr:             false,
			expectedPhase:           string(api.SchedulePhaseFailedValidation),
			expectedValidationError: "Schedule must be a non-empty valid Cron expression",
		},
		{
			name:        "schedule with phase Enabled gets re-validated and failed if invalid",
			schedule:    NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).Schedule,
			expectedErr: false,
			expectedSchedulePhaseUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).
				WithValidationError("Schedule must be a non-empty valid Cron expression").Schedule,
			name:                    "schedule with phase Enabled gets re-validated and failed if invalid",
			schedule:                arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).Schedule,
			expectedErr:             false,
			expectedPhase:           string(api.SchedulePhaseFailedValidation),
			expectedValidationError: "Schedule must be a non-empty valid Cron expression",
		},
		{
			name:          "schedule with phase New gets validated and triggers a backup",
			schedule:      NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).WithCronSchedule("@every 5m").Schedule,
			fakeClockTime: "2017-01-01 12:00:00",
			expectedErr:   false,
			expectedSchedulePhaseUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).WithCronSchedule("@every 5m").Schedule,
			expectedBackupCreate: NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup,
			expectedScheduleLastBackupUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).
				WithCronSchedule("@every 5m").WithLastBackupTime("2017-01-01 12:00:00").Schedule,
			name:                 "schedule with phase New gets validated and triggers a backup",
			schedule:             arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).WithCronSchedule("@every 5m").Schedule,
			fakeClockTime:        "2017-01-01 12:00:00",
			expectedErr:          false,
			expectedPhase:        string(api.SchedulePhaseEnabled),
			expectedBackupCreate: arktest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup,
			expectedLastBackup:   "2017-01-01 12:00:00",
		},
		{
			name:          "schedule with phase Enabled gets re-validated and triggers a backup if valid",
			schedule:      NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).WithCronSchedule("@every 5m").Schedule,
			schedule:      arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).WithCronSchedule("@every 5m").Schedule,
			fakeClockTime: "2017-01-01 12:00:00",
			expectedErr:   false,
			expectedBackupCreate: NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup,
			expectedScheduleLastBackupUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).
				WithCronSchedule("@every 5m").WithLastBackupTime("2017-01-01 12:00:00").Schedule,
			expectedBackupCreate: arktest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup,
			expectedLastBackup:   "2017-01-01 12:00:00",
		},
		{
			name: "schedule that's already run gets LastBackup updated",
			schedule: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).
			schedule: arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).
				WithCronSchedule("@every 5m").WithLastBackupTime("2000-01-01 00:00:00").Schedule,
			fakeClockTime: "2017-01-01 12:00:00",
			expectedErr:   false,
			expectedBackupCreate: NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup,
			expectedScheduleLastBackupUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).
				WithCronSchedule("@every 5m").WithLastBackupTime("2017-01-01 12:00:00").Schedule,
			expectedBackupCreate: arktest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup,
			expectedLastBackup:   "2017-01-01 12:00:00",
		},
	}

	// flag.Set("logtostderr", "true")
	// flag.Set("v", "4")

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var (
				client          = fake.NewSimpleClientset()
				sharedInformers = informers.NewSharedInformerFactory(client, 0)
				logger, _       = testlogger.NewNullLogger()
				logger          = arktest.NewLogger()
			)

			c := NewScheduleController(
@ -148,16 +144,36 @@ func TestProcessSchedule(t *testing.T) {
			if test.schedule != nil {
				sharedInformers.Ark().V1().Schedules().Informer().GetStore().Add(test.schedule)

				// this is necessary so the Update() call returns the appropriate object
				client.PrependReactor("update", "schedules", func(action core.Action) (bool, runtime.Object, error) {
					obj := action.(core.UpdateAction).GetObject()
					// need to deep copy so we can test the schedule state for each call to update
					copy, err := scheme.Scheme.DeepCopy(obj)
					if err != nil {
				// this is necessary so the Patch() call returns the appropriate object
				client.PrependReactor("patch", "schedules", func(action core.Action) (bool, runtime.Object, error) {
					var (
						patch    = action.(core.PatchAction).GetPatch()
						patchMap = make(map[string]interface{})
						res      = test.schedule.DeepCopy()
					)

					if err := json.Unmarshal(patch, &patchMap); err != nil {
						t.Logf("error unmarshalling patch: %s\n", err)
						return false, nil, err
					}
					ret := copy.(runtime.Object)
					return true, ret, nil

					// these are the fields that may be updated by the controller
					phase, err := collections.GetString(patchMap, "status.phase")
					if err == nil {
						res.Status.Phase = api.SchedulePhase(phase)
					}

					lastBackupStr, err := collections.GetString(patchMap, "status.lastBackup")
					if err == nil {
						parsed, err := time.Parse(time.RFC3339, lastBackupStr)
						if err != nil {
							t.Logf("error parsing status.lastBackup: %s\n", err)
							return false, nil, err
						}
						res.Status.LastBackup = metav1.Time{Time: parsed}
					}

					return true, res, nil
				})
			}
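The reactor parses status.lastBackup with time.RFC3339 because metav1.Time marshals to an RFC3339 string in JSON. A small sketch of the round trip:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ts := metav1.NewTime(time.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC))
	b, _ := json.Marshal(ts)
	fmt.Println(string(b)) // "2017-01-01T12:00:00Z"

	parsed, _ := time.Parse(time.RFC3339, "2017-01-01T12:00:00Z")
	fmt.Println(parsed.Equal(ts.Time)) // true
}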
@ -171,37 +187,88 @@ func TestProcessSchedule(t *testing.T) {

			assert.Equal(t, test.expectedErr, err != nil, "got error %v", err)

			expectedActions := make([]core.Action, 0)
			actions := client.Actions()
			index := 0

			if upd := test.expectedSchedulePhaseUpdate; upd != nil {
				action := core.NewUpdateAction(
					api.SchemeGroupVersion.WithResource("schedules"),
					upd.Namespace,
					upd)
				expectedActions = append(expectedActions, action)
			if test.expectedPhase != "" {
				require.True(t, len(actions) > index, "len(actions) is too small")

				patchAction, ok := actions[index].(core.PatchAction)
				require.True(t, ok, "action is not a PatchAction")

				patch := make(map[string]interface{})
				require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")

				assert.Equal(t, 1, len(patch), "patch has wrong number of keys")

				expectedStatusKeys := 1

				assert.True(t, collections.HasKeyAndVal(patch, "status.phase", test.expectedPhase), "patch's status.phase does not match")

				if test.expectedValidationError != "" {
					errs, err := collections.GetSlice(patch, "status.validationErrors")
					require.NoError(t, err, "error getting patch's status.validationErrors")

					require.Equal(t, 1, len(errs))

					assert.Equal(t, test.expectedValidationError, errs[0].(string), "patch's status.validationErrors does not match")

					expectedStatusKeys++
				}

				res, _ := collections.GetMap(patch, "status")
				assert.Equal(t, expectedStatusKeys, len(res), "patch's status has the wrong number of keys")

				index++
			}

			if created := test.expectedBackupCreate; created != nil {
				require.True(t, len(actions) > index, "len(actions) is too small")

				action := core.NewCreateAction(
					api.SchemeGroupVersion.WithResource("backups"),
					created.Namespace,
					created)
				expectedActions = append(expectedActions, action)

				assert.Equal(t, action, actions[index])

				index++
			}

			if upd := test.expectedScheduleLastBackupUpdate; upd != nil {
				action := core.NewUpdateAction(
					api.SchemeGroupVersion.WithResource("schedules"),
					upd.Namespace,
					upd)
				expectedActions = append(expectedActions, action)
			}
			if test.expectedLastBackup != "" {
				require.True(t, len(actions) > index, "len(actions) is too small")

			assert.Equal(t, expectedActions, client.Actions())
				patchAction, ok := actions[index].(core.PatchAction)
				require.True(t, ok, "action is not a PatchAction")

				patch := make(map[string]interface{})
				require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")

				assert.Equal(t, 1, len(patch), "patch has wrong number of keys")

				lastBackup, _ := collections.GetValue(patch, "status.lastBackup")
				fmt.Println(lastBackup)

				assert.True(
					t,
					collections.HasKeyAndVal(patch, "status.lastBackup", parseTime(test.expectedLastBackup).UTC().Format(time.RFC3339)),
					"patch's status.lastBackup does not match",
				)

				res, _ := collections.GetMap(patch, "status")
				assert.Equal(t, 1, len(res), "patch's status has the wrong number of keys")

				index++
			}
		})
	}
}

func parseTime(timeString string) time.Time {
	res, _ := time.Parse("2006-01-02 15:04:05", timeString)
	return res
}
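parseTime deliberately ignores the parse error, which is safe here because the inputs are fixed test literals. For readers less familiar with Go's time package: layouts are written in terms of the reference time Mon Jan 2 15:04:05 MST 2006, so the "2006-01-02 15:04:05" layout above matches strings like the fakeClockTime values in the test table:

t, err := time.Parse("2006-01-02 15:04:05", "2017-01-01 12:00:00")
// t is 2017-01-01 12:00:00 +0000 UTC; err is nil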
func TestGetNextRunTime(t *testing.T) {
	tests := []struct {
		name string
@ -294,7 +361,7 @@ func TestParseCronSchedule(t *testing.T) {
		},
	}

	logger, _ := testlogger.NewNullLogger()
	logger := arktest.NewLogger()

	c, errs := parseCronSchedule(s, logger)
	require.Empty(t, errs)
@ -49,7 +49,7 @@ type Helper interface {

type helper struct {
	discoveryClient discovery.DiscoveryInterface
	logger          *logrus.Logger
	logger          logrus.FieldLogger

	// lock guards mapper, resources and resourcesMap
	lock sync.RWMutex

@ -60,7 +60,7 @@ type helper struct {

var _ Helper = &helper{}

func NewHelper(discoveryClient discovery.DiscoveryInterface, logger *logrus.Logger) (Helper, error) {
func NewHelper(discoveryClient discovery.DiscoveryInterface, logger logrus.FieldLogger) (Helper, error) {
	h := &helper{
		discoveryClient: discoveryClient,
	}
@ -70,7 +70,7 @@ type kubernetesRestorer struct {
	namespaceClient    corev1.NamespaceInterface
	resourcePriorities []string
	fileSystem         FileSystem
	logger             *logrus.Logger
	logger             logrus.FieldLogger
}

// prioritizeResources returns an ordered, fully-resolved list of resources to restore based on

@ -140,7 +140,7 @@ func NewKubernetesRestorer(
	resourcePriorities []string,
	backupClient arkv1client.BackupsGetter,
	namespaceClient corev1.NamespaceInterface,
	logger *logrus.Logger,
	logger logrus.FieldLogger,
) (Restorer, error) {
	return &kubernetesRestorer{
		discoveryHelper: discoveryHelper,
@ -23,8 +23,6 @@ import (
	"testing"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus/hooks/test"
	testlogger "github.com/sirupsen/logrus/hooks/test"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

@ -83,7 +81,7 @@ func TestPrioritizeResources(t *testing.T) {
		},
	}

	logger, _ := test.NewNullLogger()
	logger := arktest.NewLogger()

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {

@ -180,7 +178,7 @@ func TestRestoreNamespaceFiltering(t *testing.T) {

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			log, _ := testlogger.NewNullLogger()
			log := arktest.NewLogger()

			ctx := &context{
				restore: test.restore,

@ -272,7 +270,7 @@ func TestRestorePriority(t *testing.T) {

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			log, _ := testlogger.NewNullLogger()
			log := arktest.NewLogger()

			ctx := &context{
				restore: test.restore,
@ -122,3 +122,14 @@ func Exists(root map[string]interface{}, path string) bool {
	_, err := GetValue(root, path)
	return err == nil
}

// HasKeyAndVal returns true if root[path] exists and the value
// contained is equal to val, or false otherwise.
func HasKeyAndVal(root map[string]interface{}, path string, val interface{}) bool {
	valObj, err := GetValue(root, path)
	if err != nil {
		return false
	}

	return valObj == val
}
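A usage sketch for the new helper: because the controller tests unmarshal patches into generic JSON types, values arrive as string, float64, bool, and so on, and the comparison is a plain interface equality check:

patch := map[string]interface{}{
	"status": map[string]interface{}{
		"phase":  "Completed",
		"errors": float64(1),
	},
}

collections.HasKeyAndVal(patch, "status.phase", "Completed")  // true
collections.HasKeyAndVal(patch, "status.errors", float64(1))  // true
collections.HasKeyAndVal(patch, "status.phase", "InProgress") // false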
@ -22,7 +22,7 @@ import (
	"github.com/sirupsen/logrus"
)

func NewLogger() *logrus.Entry {
func NewLogger() logrus.FieldLogger {
	logger := logrus.New()
	logger.Out = ioutil.Discard
	return logrus.NewEntry(logger)
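The signature changes from *logrus.Logger (or *logrus.Entry) to logrus.FieldLogger throughout this commit work because FieldLogger is an interface that both concrete types satisfy, so callers can pass either one, including an Entry that already carries fields:

package main

import "github.com/sirupsen/logrus"

func doWork(log logrus.FieldLogger) {
	log.WithField("component", "example").Info("working")
}

func main() {
	logger := logrus.New()
	doWork(logger)                                // *logrus.Logger satisfies FieldLogger
	doWork(logger.WithField("request", "abc123")) // so does *logrus.Entry
}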
@ -39,12 +39,12 @@ type shortcutExpander struct {
	RESTMapper meta.RESTMapper

	discoveryClient discovery.DiscoveryInterface
	logger          *logrus.Logger
	logger          logrus.FieldLogger
}

var _ meta.RESTMapper = &shortcutExpander{}

func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface, logger *logrus.Logger) (shortcutExpander, error) {
func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface, logger logrus.FieldLogger) (shortcutExpander, error) {
	if client == nil {
		return shortcutExpander{}, errors.New("Please provide discovery client to shortcut expander")
	}
@ -1,95 +0,0 @@
// The Test package is used for testing logrus. It is here for backwards
// compatibility from when logrus' organization was upper-case. Please use
// lower-case logrus and the `null` package instead of this one.
package test

import (
	"io/ioutil"
	"sync"

	"github.com/sirupsen/logrus"
)

// Hook is a hook designed for dealing with logs in test scenarios.
type Hook struct {
	// Entries is an array of all entries that have been received by this hook.
	// For safe access, use the AllEntries() method, rather than reading this
	// value directly.
	Entries []*logrus.Entry
	mu      sync.RWMutex
}

// NewGlobal installs a test hook for the global logger.
func NewGlobal() *Hook {

	hook := new(Hook)
	logrus.AddHook(hook)

	return hook

}

// NewLocal installs a test hook for a given local logger.
func NewLocal(logger *logrus.Logger) *Hook {

	hook := new(Hook)
	logger.Hooks.Add(hook)

	return hook

}

// NewNullLogger creates a discarding logger and installs the test hook.
func NewNullLogger() (*logrus.Logger, *Hook) {

	logger := logrus.New()
	logger.Out = ioutil.Discard

	return logger, NewLocal(logger)

}

func (t *Hook) Fire(e *logrus.Entry) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.Entries = append(t.Entries, e)
	return nil
}

func (t *Hook) Levels() []logrus.Level {
	return logrus.AllLevels
}

// LastEntry returns the last entry that was logged or nil.
func (t *Hook) LastEntry() *logrus.Entry {
	t.mu.RLock()
	defer t.mu.RUnlock()
	i := len(t.Entries) - 1
	if i < 0 {
		return nil
	}
	// Make a copy, for safety
	e := *t.Entries[i]
	return &e
}

// AllEntries returns all entries that were logged.
func (t *Hook) AllEntries() []*logrus.Entry {
	t.mu.RLock()
	defer t.mu.RUnlock()
	// Make a copy so the returned value won't race with future log requests
	entries := make([]*logrus.Entry, len(t.Entries))
	for i, entry := range t.Entries {
		// Make a copy, for safety
		e := *entry
		entries[i] = &e
	}
	return entries
}

// Reset removes all Entries from this test hook.
func (t *Hook) Reset() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.Entries = make([]*logrus.Entry, 0)
}