refactor download request controller test and add test cases

Signed-off-by: Steve Kriss <steve@heptio.com>
pull/799/head
Steve Kriss 2018-08-16 16:52:52 -07:00
parent 8f5346150c
commit 7007f198e1
1 changed file with 239 additions and 168 deletions


@@ -17,7 +17,6 @@ limitations under the License.

package controller

import (
    "testing"
    "time"

@@ -26,223 +25,295 @@ import (

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/clock"

    "github.com/heptio/ark/pkg/apis/ark/v1"
    "github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
    informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
    "github.com/heptio/ark/pkg/plugin"
    pluginmocks "github.com/heptio/ark/pkg/plugin/mocks"
    kubeutil "github.com/heptio/ark/pkg/util/kube"
    arktest "github.com/heptio/ark/pkg/util/test"
)

type downloadRequestTestHarness struct {
    client          *fake.Clientset
    informerFactory informers.SharedInformerFactory
    pluginManager   *pluginmocks.Manager
    objectStore     *arktest.ObjectStore

    controller *downloadRequestController
}

func newDownloadRequestTestHarness(t *testing.T) *downloadRequestTestHarness {
    var (
        client          = fake.NewSimpleClientset()
        informerFactory = informers.NewSharedInformerFactory(client, 0)
        pluginManager   = new(pluginmocks.Manager)
        objectStore     = new(arktest.ObjectStore)

        controller = NewDownloadRequestController(
            client.ArkV1(),
            informerFactory.Ark().V1().DownloadRequests(),
            informerFactory.Ark().V1().Restores(),
            informerFactory.Ark().V1().BackupStorageLocations(),
            informerFactory.Ark().V1().Backups(),
            nil,
            arktest.NewLogger(),
        ).(*downloadRequestController)
    )

    clockTime, err := time.Parse(time.RFC1123, time.RFC1123)
    require.NoError(t, err)
    controller.clock = clock.NewFakeClock(clockTime)

    controller.newPluginManager = func(_ logrus.FieldLogger, _ logrus.Level, _ plugin.Registry) plugin.Manager {
        return pluginManager
    }

    pluginManager.On("CleanupClients").Return()
    objectStore.On("Init", mock.Anything).Return(nil)

    return &downloadRequestTestHarness{
        client:          client,
        informerFactory: informerFactory,
        pluginManager:   pluginManager,
        objectStore:     objectStore,
        controller:      controller,
    }
}

func newDownloadRequest(phase v1.DownloadRequestPhase, targetKind v1.DownloadTargetKind, targetName string) *v1.DownloadRequest {
    return &v1.DownloadRequest{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "a-download-request",
            Namespace: v1.DefaultNamespace,
        },
        Spec: v1.DownloadRequestSpec{
            Target: v1.DownloadTarget{
                Kind: targetKind,
                Name: targetName,
            },
        },
        Status: v1.DownloadRequestStatus{
            Phase: phase,
        },
    }
}

func newBackupLocation(name, provider, bucket string) *v1.BackupStorageLocation {
    return &v1.BackupStorageLocation{
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: v1.DefaultNamespace,
        },
        Spec: v1.BackupStorageLocationSpec{
            Provider: provider,
            StorageType: v1.StorageType{
                ObjectStorage: &v1.ObjectStorageLocation{
                    Bucket: bucket,
                },
            },
        },
    }
}

func TestProcessDownloadRequest(t *testing.T) {
    tests := []struct {
        name                    string
        key                     string
        downloadRequest         *v1.DownloadRequest
        backup                  *v1.Backup
        restore                 *v1.Restore
        backupLocation          *v1.BackupStorageLocation
        expired                 bool
        expectedErr             string
        expectedRequestedObject string
    }{
        {
            name: "empty key returns without error",
            key:  "",
        },
        {
            name: "bad key format returns without error",
            key:  "a/b/c",
        },
        {
            name: "no download request for key returns without error",
            key:  "nonexistent/key",
        },
        {
            name:            "backup contents request for nonexistent backup returns an error",
            downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"),
            backup:          arktest.NewTestBackup().WithName("non-matching-backup").WithStorageLocation("a-location").Backup,
            backupLocation:  newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedErr:     "backup.ark.heptio.com \"a-backup\" not found",
        },
        {
            name:            "restore log request for nonexistent restore returns an error",
            downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"),
            restore:         arktest.NewTestRestore(v1.DefaultNamespace, "non-matching-restore", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
            backup:          arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:  newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedErr:     "error getting Restore: restore.ark.heptio.com \"a-backup-20170912150214\" not found",
        },
        {
            name:            "backup contents request for backup with nonexistent location returns an error",
            downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"),
            backup:          arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:  newBackupLocation("non-matching-location", "a-provider", "a-bucket"),
            expectedErr:     "backupstoragelocation.ark.heptio.com \"a-location\" not found",
        },
        {
            name:                    "backup contents request with phase '' gets a url",
            downloadRequest:         newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"),
            backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedRequestedObject: "a-backup/a-backup.tar.gz",
        },
        {
            name:                    "backup contents request with phase 'New' gets a url",
            downloadRequest:         newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupContents, "a-backup"),
            backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedRequestedObject: "a-backup/a-backup.tar.gz",
        },
        {
            name:                    "backup log request with phase '' gets a url",
            downloadRequest:         newDownloadRequest("", v1.DownloadTargetKindBackupLog, "a-backup"),
            backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedRequestedObject: "a-backup/a-backup-logs.gz",
        },
        {
            name:                    "backup log request with phase 'New' gets a url",
            downloadRequest:         newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupLog, "a-backup"),
            backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedRequestedObject: "a-backup/a-backup-logs.gz",
        },
        {
            name:                    "restore log request with phase '' gets a url",
            downloadRequest:         newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"),
            restore:                 arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
            backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-logs.gz",
        },
        {
            name:                    "restore log request with phase 'New' gets a url",
            downloadRequest:         newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"),
            restore:                 arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
            backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-logs.gz",
        },
        {
            name:                    "restore results request with phase '' gets a url",
            downloadRequest:         newDownloadRequest("", v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"),
            restore:                 arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
            backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-results.gz",
        },
        {
            name:                    "restore results request with phase 'New' gets a url",
            downloadRequest:         newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"),
            restore:                 arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
            backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
            expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-results.gz",
        },
        {
            name:            "request with phase 'Processed' is not deleted if not expired",
            downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"),
            backup:          arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
        },
        {
            name:            "request with phase 'Processed' is deleted if expired",
            downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"),
            backup:          arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
            expired:         true,
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            harness := newDownloadRequestTestHarness(t)

            // set up test case data

            // Set .status.expiration properly for processed requests. Since "expired" is relative to the controller's
            // clock time, it's easier to do this here than as part of the test case definitions.
            if tc.downloadRequest != nil && tc.downloadRequest.Status.Phase == v1.DownloadRequestPhaseProcessed {
                if tc.expired {
                    tc.downloadRequest.Status.Expiration.Time = harness.controller.clock.Now().Add(-1 * time.Minute)
                } else {
                    tc.downloadRequest.Status.Expiration.Time = harness.controller.clock.Now().Add(time.Minute)
                }
            }

            if tc.downloadRequest != nil {
                require.NoError(t, harness.informerFactory.Ark().V1().DownloadRequests().Informer().GetStore().Add(tc.downloadRequest))

                _, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Create(tc.downloadRequest)
                require.NoError(t, err)
            }

            if tc.restore != nil {
                require.NoError(t, harness.informerFactory.Ark().V1().Restores().Informer().GetStore().Add(tc.restore))
            }

            if tc.backup != nil {
                require.NoError(t, harness.informerFactory.Ark().V1().Backups().Informer().GetStore().Add(tc.backup))
            }

            if tc.backupLocation != nil {
                require.NoError(t, harness.informerFactory.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(tc.backupLocation))

                harness.pluginManager.On("GetObjectStore", tc.backupLocation.Spec.Provider).Return(harness.objectStore, nil)
            }

            if tc.expectedRequestedObject != "" {
                harness.objectStore.On("CreateSignedURL", tc.backupLocation.Spec.ObjectStorage.Bucket, tc.expectedRequestedObject, mock.Anything).Return("a-url", nil)
            }

            // exercise method under test
            key := tc.key
            if key == "" && tc.downloadRequest != nil {
                key = kubeutil.NamespaceAndName(tc.downloadRequest)
            }

            err := harness.controller.processDownloadRequest(key)

            // verify results
            if tc.expectedErr != "" {
                require.Equal(t, tc.expectedErr, err.Error())
            } else {
                assert.Nil(t, err)
            }

            if tc.expectedRequestedObject != "" {
                output, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{})
                require.NoError(t, err)

                assert.Equal(t, string(v1.DownloadRequestPhaseProcessed), string(output.Status.Phase))
                assert.Equal(t, "a-url", output.Status.DownloadURL)
                assert.True(t, arktest.TimesAreEqual(harness.controller.clock.Now().Add(signedURLTTL), output.Status.Expiration.Time), "expiration does not match")
            }

            if tc.downloadRequest != nil && tc.downloadRequest.Status.Phase == v1.DownloadRequestPhaseProcessed {
                res, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{})

                if tc.expired {
                    assert.True(t, apierrors.IsNotFound(err))
                } else {
                    assert.NoError(t, err)
                    assert.Equal(t, tc.downloadRequest, res)
                }
            }
        })
    }
}
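
For readers skimming the diff, the sketch below shows how the harness, the helper constructors, and the mocks introduced above fit together for a single happy-path request, outside the table-driven loop. It is illustrative only and not part of the commit: the test function name is invented, and it assumes the snippet lives in the same controller package as the file above so it can reuse that file's imports.

// Illustrative sketch only, not part of the commit: a standalone test that
// wires one happy-path download request through the refactored harness.
func TestProcessDownloadRequestHappyPathSketch(t *testing.T) {
    harness := newDownloadRequestTestHarness(t)

    // A new request for the contents of "a-backup", which is stored in "a-location".
    req := newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupContents, "a-backup")
    backup := arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup
    location := newBackupLocation("a-location", "a-provider", "a-bucket")

    // Seed the informer caches and the fake client that the controller reads from.
    require.NoError(t, harness.informerFactory.Ark().V1().DownloadRequests().Informer().GetStore().Add(req))
    require.NoError(t, harness.informerFactory.Ark().V1().Backups().Informer().GetStore().Add(backup))
    require.NoError(t, harness.informerFactory.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(location))
    _, err := harness.client.ArkV1().DownloadRequests(req.Namespace).Create(req)
    require.NoError(t, err)

    // The mock plugin manager hands back the mock object store for the location's
    // provider, and the store returns a canned pre-signed URL for the tarball key.
    harness.pluginManager.On("GetObjectStore", "a-provider").Return(harness.objectStore, nil)
    harness.objectStore.On("CreateSignedURL", "a-bucket", "a-backup/a-backup.tar.gz", mock.Anything).Return("a-url", nil)

    // Process the request by its namespace/name key, then check that it was
    // marked Processed and carries the signed URL.
    require.NoError(t, harness.controller.processDownloadRequest(kubeutil.NamespaceAndName(req)))

    out, err := harness.client.ArkV1().DownloadRequests(req.Namespace).Get(req.Name, metav1.GetOptions{})
    require.NoError(t, err)
    assert.Equal(t, v1.DownloadRequestPhaseProcessed, out.Status.Phase)
    assert.Equal(t, "a-url", out.Status.DownloadURL)
}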