migrate restore PV tests

Signed-off-by: Steve Kriss <krisss@vmware.com>

parent e371ba78b0
commit adb93c33b1

@@ -45,12 +45,14 @@ import (
	"github.com/heptio/velero/pkg/backup"
	"github.com/heptio/velero/pkg/client"
	"github.com/heptio/velero/pkg/discovery"
	velerov1informers "github.com/heptio/velero/pkg/generated/informers/externalversions"
	"github.com/heptio/velero/pkg/kuberesource"
	"github.com/heptio/velero/pkg/plugin/velero"
	"github.com/heptio/velero/pkg/test"
	"github.com/heptio/velero/pkg/util/encode"
	kubeutil "github.com/heptio/velero/pkg/util/kube"
	testutil "github.com/heptio/velero/pkg/util/test"
	"github.com/heptio/velero/pkg/volume"
)

// TestRestoreResourceFiltering runs restores with different combinations
@@ -1659,6 +1661,425 @@ func assertRestoredItems(t *testing.T, h *harness, want []*test.APIResource) {
	}
}

// volumeSnapshotterGetter is a simple implementation of the VolumeSnapshotterGetter
// interface that returns velero.VolumeSnapshotters from a map if they exist.
type volumeSnapshotterGetter map[string]velero.VolumeSnapshotter

func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) {
	snapshotter, ok := vsg[name]
	if !ok {
		return nil, errors.New("volume snapshotter not found")
	}

	return snapshotter, nil
}

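// Example (hypothetical, for illustration only): a test case seeds the map with
// whatever providers it needs and looks them up by name:
//
//	getter := volumeSnapshotterGetter{"provider-1": &volumeSnapshotter{}}
//	vs, err := getter.GetVolumeSnapshotter("provider-1") // returns the fake, err == nil
//	_, err = getter.GetVolumeSnapshotter("unknown")      // err: "volume snapshotter not found"
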
// volumeSnapshotter is a test fake for the velero.VolumeSnapshotter interface
type volumeSnapshotter struct {
	// a map from snapshotID to volumeID
	snapshotVolumes map[string]string
}

// Init is a no-op.
func (vs *volumeSnapshotter) Init(config map[string]string) error {
	return nil
}

// CreateVolumeFromSnapshot looks up the specified snapshotID in the snapshotVolumes
// map and returns the corresponding volumeID if it exists, or an error otherwise.
func (vs *volumeSnapshotter) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) {
	volumeID, ok := vs.snapshotVolumes[snapshotID]
	if !ok {
		return "", errors.New("snapshot not found")
	}

	return volumeID, nil
}

// SetVolumeID sets the persistent volume's spec.awsElasticBlockStore.volumeID field
// with the provided volumeID.
func (*volumeSnapshotter) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) {
	unstructured.SetNestedField(pv.UnstructuredContent(), volumeID, "spec", "awsElasticBlockStore", "volumeID")
	return pv, nil
}

// GetVolumeID panics because it's not expected to be used for restores.
func (*volumeSnapshotter) GetVolumeID(pv runtime.Unstructured) (string, error) {
	panic("GetVolumeID should not be used for restores")
}

// CreateSnapshot panics because it's not expected to be used for restores.
func (*volumeSnapshotter) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (snapshotID string, err error) {
	panic("CreateSnapshot should not be used for restores")
}

// GetVolumeInfo panics because it's not expected to be used for restores.
func (*volumeSnapshotter) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
	panic("GetVolumeInfo should not be used for restores")
}

// DeleteSnapshot panics because it's not expected to be used for restores.
func (*volumeSnapshotter) DeleteSnapshot(snapshotID string) error {
	panic("DeleteSnapshot should not be used for restores")
}

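// Example (hypothetical, for illustration only): a restore that finds a completed
// snapshot is expected to provision a new volume and point the PV at it, which
// this fake supports through its two non-panicking methods:
//
//	vs := &volumeSnapshotter{snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}}
//	volumeID, _ := vs.CreateVolumeFromSnapshot("snapshot-1", "gp2", "us-east-1a", nil) // "new-volume"
//	pv, _ = vs.SetVolumeID(pv, volumeID) // rewrites spec.awsElasticBlockStore.volumeID
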
// TestRestorePersistentVolumes runs restores for persistent volumes and verifies that
// they are restored as expected, including restoring volumes from snapshots when expected.
// Verification is done by looking at the contents of the API and the metadata/spec/status of
// the items in the API.
func TestRestorePersistentVolumes(t *testing.T) {
	withReclaimPolicy := func(policy corev1api.PersistentVolumeReclaimPolicy) func(*corev1api.PersistentVolume) {
		return func(pv *corev1api.PersistentVolume) {
			pv.Spec.PersistentVolumeReclaimPolicy = policy
		}
	}
	withClaimRef := func(ns, name string) func(*corev1api.PersistentVolume) {
		return func(pv *corev1api.PersistentVolume) {
			pv.Spec.ClaimRef = &corev1api.ObjectReference{
				Namespace: ns,
				Name:      name,
			}
		}
	}
	withVolumeName := func(volumeName string) func(*corev1api.PersistentVolume) {
		return func(pv *corev1api.PersistentVolume) {
			pv.Spec.AWSElasticBlockStore = &corev1api.AWSElasticBlockStoreVolumeSource{
				VolumeID: volumeName,
			}
		}
	}
	withLabels := func(labels ...string) func(*corev1api.PersistentVolume) {
		return func(pv *corev1api.PersistentVolume) {
			test.WithLabels(labels...)(pv)
		}
	}

	newPV := func(name string, opts ...func(*corev1api.PersistentVolume)) *corev1api.PersistentVolume {
		pv := test.NewPV(name)
		for _, opt := range opts {
			opt(pv)
		}
		return pv
	}

	newPVC := func(ns, name, volumeName string, annotations map[string]string) *corev1api.PersistentVolumeClaim {
		pvc := test.NewPVC(ns, name)
		pvc.Spec.VolumeName = volumeName
		pvc.Annotations = annotations

		return pvc
	}

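	// Example (hypothetical, for illustration only): the helpers above follow the
	// functional-options pattern, so a test case can compose exactly the PV it needs:
	//
	//	pv := newPV("pv-1",
	//		withReclaimPolicy(corev1api.PersistentVolumeReclaimRetain),
	//		withClaimRef("ns-1", "pvc-1"),
	//	)
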
	tests := []struct {
		name                    string
		restore                 *velerov1api.Restore
		backup                  *velerov1api.Backup
		tarball                 io.Reader
		apiResources            []*test.APIResource
		volumeSnapshots         []*volume.Snapshot
		volumeSnapshotLocations []*velerov1api.VolumeSnapshotLocation
		volumeSnapshotterGetter volumeSnapshotterGetter
		want                    []*test.APIResource
	}{
		{
			name:    "when a PV with a reclaim policy of delete has no snapshot and does not exist in-cluster, it does not get restored, and its PVC gets reset for dynamic provisioning",
			restore: defaultRestore().Restore(),
			backup:  defaultBackup().Backup(),
			tarball: newTarWriter(t).
				addItems("persistentvolumes",
					newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimDelete), withClaimRef("ns-1", "pvc-1")),
				).
				addItems("persistentvolumeclaims",
					newPVC("ns-1", "pvc-1", "pv-1", map[string]string{
						"pv.kubernetes.io/bind-completed":      "true",
						"pv.kubernetes.io/bound-by-controller": "true",
						"foo":                                  "bar",
					}),
				).
				done(),
			apiResources: []*test.APIResource{
				test.PVs(),
				test.PVCs(),
			},
			want: []*test.APIResource{
				test.PVs(),
				test.PVCs(
					test.NewPVC(
						"ns-1", "pvc-1",
						test.WithAnnotations("foo", "bar"),
						test.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
					),
				),
			},
		},
		{
			name:    "when a PV with a reclaim policy of retain has no snapshot and does not exist in-cluster, it gets restored, without its claim ref",
			restore: defaultRestore().Restore(),
			backup:  defaultBackup().Backup(),
			tarball: newTarWriter(t).
				addItems("persistentvolumes",
					newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimRetain), withClaimRef("ns-1", "pvc-1")),
				).
				done(),
			apiResources: []*test.APIResource{
				test.PVs(),
				test.PVCs(),
			},
			want: []*test.APIResource{
				test.PVs(
					newPV(
						"pv-1",
						withReclaimPolicy(corev1api.PersistentVolumeReclaimRetain),
						withLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
					),
				),
			},
		},
		{
			name:    "when a PV with a reclaim policy of delete has a snapshot and does not exist in-cluster, the snapshot and PV are restored",
			restore: defaultRestore().Restore(),
			backup:  defaultBackup().Backup(),
			tarball: newTarWriter(t).
				addItems("persistentvolumes", newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimDelete), withVolumeName("old-volume"))).
				done(),
			apiResources: []*test.APIResource{
				test.PVs(),
				test.PVCs(),
			},
			volumeSnapshots: []*volume.Snapshot{
				{
					Spec: volume.SnapshotSpec{
						BackupName:           "backup-1",
						Location:             "default",
						PersistentVolumeName: "pv-1",
					},
					Status: volume.SnapshotStatus{
						Phase:              volume.SnapshotPhaseCompleted,
						ProviderSnapshotID: "snapshot-1",
					},
				},
			},
			volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{
				{
					ObjectMeta: metav1.ObjectMeta{
						Namespace: velerov1api.DefaultNamespace,
						Name:      "default",
					},
					Spec: velerov1api.VolumeSnapshotLocationSpec{
						Provider: "provider-1",
					},
				},
			},
			volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{
				"provider-1": &volumeSnapshotter{
					snapshotVolumes: map[string]string{"snapshot-1": "new-volume"},
				},
			},
			want: []*test.APIResource{
				test.PVs(
					newPV(
						"pv-1",
						withReclaimPolicy(corev1api.PersistentVolumeReclaimDelete),
						withVolumeName("new-volume"),
						withLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
					),
				),
			},
		},
		{
			name:    "when a PV with a reclaim policy of retain has a snapshot and does not exist in-cluster, the snapshot and PV are restored",
			restore: defaultRestore().Restore(),
			backup:  defaultBackup().Backup(),
			tarball: newTarWriter(t).
				addItems("persistentvolumes", newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimRetain), withVolumeName("old-volume"))).
				done(),
			apiResources: []*test.APIResource{
				test.PVs(),
				test.PVCs(),
			},
			volumeSnapshots: []*volume.Snapshot{
				{
					Spec: volume.SnapshotSpec{
						BackupName:           "backup-1",
						Location:             "default",
						PersistentVolumeName: "pv-1",
					},
					Status: volume.SnapshotStatus{
						Phase:              volume.SnapshotPhaseCompleted,
						ProviderSnapshotID: "snapshot-1",
					},
				},
			},
			volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{
				{
					ObjectMeta: metav1.ObjectMeta{
						Namespace: velerov1api.DefaultNamespace,
						Name:      "default",
					},
					Spec: velerov1api.VolumeSnapshotLocationSpec{
						Provider: "provider-1",
					},
				},
			},
			volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{
				"provider-1": &volumeSnapshotter{
					snapshotVolumes: map[string]string{"snapshot-1": "new-volume"},
				},
			},
			want: []*test.APIResource{
				test.PVs(
					newPV(
						"pv-1",
						withReclaimPolicy(corev1api.PersistentVolumeReclaimRetain),
						withVolumeName("new-volume"),
						withLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
					),
				),
			},
		},
		{
			name:    "when a PV with a reclaim policy of delete has a snapshot and exists in-cluster, neither the snapshot nor the PV are restored",
			restore: defaultRestore().Restore(),
			backup:  defaultBackup().Backup(),
			tarball: newTarWriter(t).
				addItems("persistentvolumes", newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimDelete), withVolumeName("old-volume"))).
				done(),
			apiResources: []*test.APIResource{
				test.PVs(
					newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimDelete), withVolumeName("old-volume")),
				),
				test.PVCs(),
			},
			volumeSnapshots: []*volume.Snapshot{
				{
					Spec: volume.SnapshotSpec{
						BackupName:           "backup-1",
						Location:             "default",
						PersistentVolumeName: "pv-1",
					},
					Status: volume.SnapshotStatus{
						Phase:              volume.SnapshotPhaseCompleted,
						ProviderSnapshotID: "snapshot-1",
					},
				},
			},
			volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{
				{
					ObjectMeta: metav1.ObjectMeta{
						Namespace: velerov1api.DefaultNamespace,
						Name:      "default",
					},
					Spec: velerov1api.VolumeSnapshotLocationSpec{
						Provider: "provider-1",
					},
				},
			},
			volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{
				// the volume snapshotter fake is not configured with any snapshotID -> volumeID
				// mappings as a way to verify that the snapshot is not restored, since if it were
				// restored, we'd get an error of "snapshot not found".
				"provider-1": &volumeSnapshotter{},
			},
			want: []*test.APIResource{
				test.PVs(
					newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimDelete), withVolumeName("old-volume")),
				),
			},
		},
		{
			name:    "when a PV with a reclaim policy of retain has a snapshot and exists in-cluster, neither the snapshot nor the PV are restored",
			restore: defaultRestore().Restore(),
			backup:  defaultBackup().Backup(),
			tarball: newTarWriter(t).
				addItems("persistentvolumes", newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimRetain), withVolumeName("old-volume"))).
				done(),
			apiResources: []*test.APIResource{
				test.PVs(
					newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimRetain), withVolumeName("old-volume")),
				),
				test.PVCs(),
			},
			volumeSnapshots: []*volume.Snapshot{
				{
					Spec: volume.SnapshotSpec{
						BackupName:           "backup-1",
						Location:             "default",
						PersistentVolumeName: "pv-1",
					},
					Status: volume.SnapshotStatus{
						Phase:              volume.SnapshotPhaseCompleted,
						ProviderSnapshotID: "snapshot-1",
					},
				},
			},
			volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{
				{
					ObjectMeta: metav1.ObjectMeta{
						Namespace: velerov1api.DefaultNamespace,
						Name:      "default",
					},
					Spec: velerov1api.VolumeSnapshotLocationSpec{
						Provider: "provider-1",
					},
				},
			},
			volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{
				// the volume snapshotter fake is not configured with any snapshotID -> volumeID
				// mappings as a way to verify that the snapshot is not restored, since if it were
				// restored, we'd get an error of "snapshot not found".
				"provider-1": &volumeSnapshotter{},
			},
			want: []*test.APIResource{
				test.PVs(
					newPV("pv-1", withReclaimPolicy(corev1api.PersistentVolumeReclaimRetain), withVolumeName("old-volume")),
				),
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			h := newHarness(t)
			h.restorer.resourcePriorities = []string{"persistentvolumes", "persistentvolumeclaims"}

			// set up the VolumeSnapshotLocation informer/lister and add test data to it
			vslInformer := velerov1informers.NewSharedInformerFactory(h.VeleroClient, 0).Velero().V1().VolumeSnapshotLocations()
			for _, vsl := range tc.volumeSnapshotLocations {
				require.NoError(t, vslInformer.Informer().GetStore().Add(vsl))
			}

			for _, r := range tc.apiResources {
				h.addItems(t, r)
			}

			// Collect the IDs of all of the wanted resources so we can ensure the
			// exact set exists in the API after restore.
			wantIDs := make(map[*test.APIResource][]string)
			for i, resource := range tc.want {
				wantIDs[tc.want[i]] = []string{}

				for _, item := range resource.Items {
					wantIDs[tc.want[i]] = append(wantIDs[tc.want[i]], fmt.Sprintf("%s/%s", item.GetNamespace(), item.GetName()))
				}
			}

			warnings, errs := h.restorer.Restore(
				h.log,
				tc.restore,
				tc.backup,
				tc.volumeSnapshots,
				tc.tarball,
				nil, // actions
				vslInformer.Lister(),
				tc.volumeSnapshotterGetter,
			)

			assertEmptyResults(t, warnings, errs)
			assertAPIContents(t, h, wantIDs)
			assertRestoredItems(t, h, tc.want)
		})
	}
}

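// Note (for illustration): because PVs are cluster-scoped, item.GetNamespace()
// is empty for them, so the wantIDs entries above take the form "/pv-1", while
// namespaced items such as PVCs yield "ns-1/pvc-1".
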
// assertResourceCreationOrder ensures that resources were created in the expected
// order. Any resources *not* in resourcePriorities are required to come *after* all
// prioritized resources, in any order.

@@ -21,29 +21,16 @@ import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
	discoveryfake "k8s.io/client-go/discovery/fake"
	kubefake "k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"

	api "github.com/heptio/velero/pkg/apis/velero/v1"
	pkgclient "github.com/heptio/velero/pkg/client"
	"github.com/heptio/velero/pkg/discovery"
	"github.com/heptio/velero/pkg/kuberesource"
	"github.com/heptio/velero/pkg/plugin/velero"
	"github.com/heptio/velero/pkg/test"
	"github.com/heptio/velero/pkg/util/collections"
	velerotest "github.com/heptio/velero/pkg/util/test"
	"github.com/heptio/velero/pkg/volume"
)

func TestPrioritizeResources(t *testing.T) {

@@ -128,322 +115,6 @@ func TestPrioritizeResources(t *testing.T) {
	}
}

func TestRestoringPVsWithoutSnapshots(t *testing.T) {
	pv := `apiVersion: v1
kind: PersistentVolume
metadata:
  annotations:
    EXPORT_block: "\nEXPORT\n{\n\tExport_Id = 1;\n\tPath = /export/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce;\n\tPseudo
      = /export/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce;\n\tAccess_Type = RW;\n\tSquash
      = no_root_squash;\n\tSecType = sys;\n\tFilesystem_id = 1.1;\n\tFSAL {\n\t\tName
      = VFS;\n\t}\n}\n"
    Export_Id: "1"
    Project_Id: "0"
    Project_block: ""
    Provisioner_Id: 5fdf4025-78a5-11e8-9ece-0242ac110004
    kubernetes.io/createdby: nfs-dynamic-provisioner
    pv.kubernetes.io/provisioned-by: example.com/nfs
    volume.beta.kubernetes.io/mount-options: vers=4.1
  creationTimestamp: 2018-06-25T18:27:35Z
  finalizers:
  - kubernetes.io/pv-protection
  name: pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce
  resourceVersion: "2576"
  selfLink: /api/v1/persistentvolumes/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce
  uid: 6ecd24e4-78a5-11e8-a0d8-e2ad1e9734ce
spec:
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 1Mi
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    name: nfs
    namespace: default
    resourceVersion: "2565"
    uid: 6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce
  nfs:
    path: /export/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce
    server: 10.103.235.254
  storageClassName: example-nfs
status:
  phase: Bound`

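	// Note (hypothetical clarification, not part of the original test): the PVC
	// below is bound to the PV above through spec.volumeName and the PV's claimRef
	// UID; the cases without a snapshot and with a Delete reclaim policy expect
	// that binding, along with the pv.kubernetes.io/bind-completed and
	// bound-by-controller annotations, to be cleared so the claim can be
	// dynamically re-provisioned.
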
	pvc := `apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"5fdf5572-78a5-11e8-9ece-0242ac110004","leaseDurationSeconds":15,"acquireTime":"2018-06-25T18:27:35Z","renewTime":"2018-06-25T18:27:37Z","leaderTransitions":0}'
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"nfs","namespace":"default"},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"1Mi"}},"storageClassName":"example-nfs"}}
    pv.kubernetes.io/bind-completed: "yes"
    pv.kubernetes.io/bound-by-controller: "yes"
    volume.beta.kubernetes.io/storage-provisioner: example.com/nfs
  creationTimestamp: 2018-06-25T18:27:28Z
  finalizers:
  - kubernetes.io/pvc-protection
  name: nfs
  namespace: default
  resourceVersion: "2578"
  selfLink: /api/v1/namespaces/default/persistentvolumeclaims/nfs
  uid: 6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
  storageClassName: example-nfs
  volumeName: pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce
status:
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 1Mi
  phase: Bound`

	tests := []struct {
		name                          string
		haveSnapshot                  bool
		reclaimPolicy                 string
		expectPVCVolumeName           bool
		expectedPVCAnnotationsMissing sets.String
		expectPVCreation              bool
		expectPVFound                 bool
	}{
		{
			name:                "backup has snapshot, reclaim policy delete, no existing PV found",
			haveSnapshot:        true,
			reclaimPolicy:       "Delete",
			expectPVCVolumeName: true,
			expectPVCreation:    true,
		},
		{
			name:                "backup has snapshot, reclaim policy delete, existing PV found",
			haveSnapshot:        true,
			reclaimPolicy:       "Delete",
			expectPVCVolumeName: true,
			expectPVCreation:    false,
			expectPVFound:       true,
		},
		{
			name:                "backup has snapshot, reclaim policy retain, no existing PV found",
			haveSnapshot:        true,
			reclaimPolicy:       "Retain",
			expectPVCVolumeName: true,
			expectPVCreation:    true,
		},
		{
			name:                "backup has snapshot, reclaim policy retain, existing PV found",
			haveSnapshot:        true,
			reclaimPolicy:       "Retain",
			expectPVCVolumeName: true,
			expectPVCreation:    false,
			expectPVFound:       true,
		},
		{
			name:                          "no snapshot, reclaim policy delete, no existing PV",
			haveSnapshot:                  false,
			reclaimPolicy:                 "Delete",
			expectPVCVolumeName:           false,
			expectedPVCAnnotationsMissing: sets.NewString("pv.kubernetes.io/bind-completed", "pv.kubernetes.io/bound-by-controller"),
		},
		{
			name:                "no snapshot, reclaim policy retain, no existing PV found",
			haveSnapshot:        false,
			reclaimPolicy:       "Retain",
			expectPVCVolumeName: true,
			expectPVCreation:    true,
		},
		{
			name:                "no snapshot, reclaim policy retain, existing PV found",
			haveSnapshot:        false,
			reclaimPolicy:       "Retain",
			expectPVCVolumeName: true,
			expectPVCreation:    false,
			expectPVFound:       true,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			dynamicFactory := &velerotest.FakeDynamicFactory{}
			gv := schema.GroupVersion{Group: "", Version: "v1"}

			pvClient := &velerotest.FakeDynamicClient{}
			defer pvClient.AssertExpectations(t)

			pvResource := metav1.APIResource{Name: "persistentvolumes", Namespaced: false}
			dynamicFactory.On("ClientForGroupVersionResource", gv, pvResource, "").Return(pvClient, nil)

			pvcClient := &velerotest.FakeDynamicClient{}
			defer pvcClient.AssertExpectations(t)

			pvcResource := metav1.APIResource{Name: "persistentvolumeclaims", Namespaced: true}
			dynamicFactory.On("ClientForGroupVersionResource", gv, pvcResource, "default").Return(pvcClient, nil)

			obj, _, err := scheme.Codecs.UniversalDecoder(v1.SchemeGroupVersion).Decode([]byte(pv), nil, nil)
			require.NoError(t, err)
			pvObj, ok := obj.(*v1.PersistentVolume)
			require.True(t, ok)
			pvObj.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(test.reclaimPolicy)
			pvBytes, err := json.Marshal(pvObj)
			require.NoError(t, err)

			obj, _, err = scheme.Codecs.UniversalDecoder(v1.SchemeGroupVersion).Decode([]byte(pvc), nil, nil)
			require.NoError(t, err)
			pvcObj, ok := obj.(*v1.PersistentVolumeClaim)
			require.True(t, ok)
			pvcBytes, err := json.Marshal(pvcObj)
			require.NoError(t, err)

			unstructuredPVCMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pvcObj)
			require.NoError(t, err)
			unstructuredPVC := &unstructured.Unstructured{Object: unstructuredPVCMap}

			nsClient := &velerotest.FakeNamespaceClient{}
			ns := newTestNamespace(pvcObj.Namespace).Namespace
			nsClient.On("Get", pvcObj.Namespace, mock.Anything).Return(ns, nil)

			backup := &api.Backup{}

			pvRestorer := new(mockPVRestorer)
			defer pvRestorer.AssertExpectations(t)

			ctx := &context{
				dynamicFactory: dynamicFactory,
				actions:        []resolvedAction{},
				fileSystem: velerotest.NewFakeFileSystem().
					WithFile("foo/resources/persistentvolumes/cluster/pv.json", pvBytes).
					WithFile("foo/resources/persistentvolumeclaims/default/pvc.json", pvcBytes),
				selector:                  labels.NewSelector(),
				resourceIncludesExcludes:  collections.NewIncludesExcludes(),
				namespaceIncludesExcludes: collections.NewIncludesExcludes(),
				prioritizedResources: []schema.GroupResource{
					kuberesource.PersistentVolumes,
					kuberesource.PersistentVolumeClaims,
				},
				restore: &api.Restore{
					ObjectMeta: metav1.ObjectMeta{
						Namespace: api.DefaultNamespace,
						Name:      "my-restore",
					},
				},
				backup:          backup,
				log:             velerotest.NewLogger(),
				pvsToProvision:  sets.NewString(),
				pvRestorer:      pvRestorer,
				namespaceClient: nsClient,
				resourceClients: make(map[resourceClientKey]pkgclient.Dynamic),
				restoredItems:   make(map[velero.ResourceIdentifier]struct{}),
			}

			if test.haveSnapshot {
				ctx.volumeSnapshots = append(ctx.volumeSnapshots, &volume.Snapshot{
					Spec: volume.SnapshotSpec{
						PersistentVolumeName: "pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce",
					},
					Status: volume.SnapshotStatus{
						ProviderSnapshotID: "snap",
					},
				})
			}

			unstructuredPVMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pvObj)
			require.NoError(t, err)
			unstructuredPV := &unstructured.Unstructured{Object: unstructuredPVMap}

			if test.expectPVFound {
				// Copy the PV so that later modifications don't affect what's returned by our faked calls.
				inClusterPV := unstructuredPV.DeepCopy()
				pvClient.On("Get", inClusterPV.GetName(), metav1.GetOptions{}).Return(inClusterPV, nil)
				pvClient.On("Create", mock.Anything).Return(inClusterPV, k8serrors.NewAlreadyExists(kuberesource.PersistentVolumes, inClusterPV.GetName()))
				inClusterPVC := unstructuredPVC.DeepCopy()
				pvcClient.On("Get", pvcObj.Name, mock.Anything).Return(inClusterPVC, nil)
			}

			// Only set up the client expectation if the test has the proper prerequisites
			if test.haveSnapshot || test.reclaimPolicy != "Delete" {
				pvClient.On("Get", unstructuredPV.GetName(), metav1.GetOptions{}).Return(&unstructured.Unstructured{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "persistentvolumes"}, unstructuredPV.GetName()))
			}

			pvToRestore := unstructuredPV.DeepCopy()
			restoredPV := unstructuredPV.DeepCopy()

			if test.expectPVCreation {
				// just to ensure we have the data flowing correctly
				restoredPV.Object["foo"] = "bar"
				pvRestorer.On("executePVAction", pvToRestore).Return(restoredPV, nil)
			}

			resetMetadataAndStatus(unstructuredPV)
			addRestoreLabels(unstructuredPV, ctx.restore.Name, ctx.restore.Spec.BackupName)
			unstructuredPV.Object["foo"] = "bar"

			if test.expectPVCreation {
				createdPV := unstructuredPV.DeepCopy()
				pvClient.On("Create", unstructuredPV).Return(createdPV, nil)
			}

			// Restore PV
			warnings, errors := ctx.restoreResource("persistentvolumes", "", "foo/resources/persistentvolumes/cluster/")

			assert.Empty(t, warnings.Velero)
			assert.Empty(t, warnings.Cluster)
			assert.Empty(t, warnings.Namespaces)
			assert.Equal(t, Result{}, errors)

			// Prep PVC restore
			// Handle expectations
			if !test.expectPVCVolumeName {
				pvcObj.Spec.VolumeName = ""
			}
			for _, key := range test.expectedPVCAnnotationsMissing.List() {
				delete(pvcObj.Annotations, key)
			}

			// Recreate the unstructured PVC since the object was edited.
			unstructuredPVCMap, err = runtime.DefaultUnstructuredConverter.ToUnstructured(pvcObj)
			require.NoError(t, err)
			unstructuredPVC = &unstructured.Unstructured{Object: unstructuredPVCMap}

			resetMetadataAndStatus(unstructuredPVC)
			addRestoreLabels(unstructuredPVC, ctx.restore.Name, ctx.restore.Spec.BackupName)

			createdPVC := unstructuredPVC.DeepCopy()
			// just to ensure we have the data flowing correctly
			createdPVC.Object["foo"] = "bar"

			pvcClient.On("Create", unstructuredPVC).Return(createdPVC, nil)

			// Restore PVC
			warnings, errors = ctx.restoreResource("persistentvolumeclaims", "default", "foo/resources/persistentvolumeclaims/default/")

			assert.Empty(t, warnings.Velero)
			assert.Empty(t, warnings.Cluster)
			assert.Empty(t, warnings.Namespaces)
			assert.Equal(t, Result{}, errors)
		})
	}
}

type mockPVRestorer struct {
	mock.Mock
}

func (r *mockPVRestorer) executePVAction(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	args := r.Called(obj)
	return args.Get(0).(*unstructured.Unstructured), args.Error(1)
}

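// Example (drawn from the test above, for illustration only): the mock is wired
// with testify's expectation API and verified on cleanup:
//
//	pvRestorer := new(mockPVRestorer)
//	pvRestorer.On("executePVAction", pvToRestore).Return(restoredPV, nil)
//	defer pvRestorer.AssertExpectations(t)
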
func TestResetMetadataAndStatus(t *testing.T) {
	tests := []struct {
		name string

@@ -670,65 +341,3 @@ func (obj *testUnstructured) withMapEntry(mapName, field string, value interface
	return obj
}

func toUnstructured(objs ...runtime.Object) []unstructured.Unstructured {
	res := make([]unstructured.Unstructured, 0, len(objs))

	for _, obj := range objs {
		jsonObj, err := json.Marshal(obj)
		if err != nil {
			panic(err)
		}

		var unstructuredObj unstructured.Unstructured

		if err := json.Unmarshal(jsonObj, &unstructuredObj); err != nil {
			panic(err)
		}

		metadata := unstructuredObj.Object["metadata"].(map[string]interface{})

		delete(metadata, "creationTimestamp")

		delete(unstructuredObj.Object, "status")

		res = append(res, unstructuredObj)
	}

	return res
}

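// Note (hypothetical clarification): dropping creationTimestamp and status in
// toUnstructured appears to mirror what resetMetadataAndStatus does to restored
// objects, so expected and actual unstructured objects compare equal in assertions.
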
type testNamespace struct {
	*v1.Namespace
}

func newTestNamespace(name string) *testNamespace {
	return &testNamespace{
		Namespace: &v1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
		},
	}
}

func (ns *testNamespace) ToJSON() []byte {
	bytes, _ := json.Marshal(ns.Namespace)
	return bytes
}

type fakeVolumeSnapshotterGetter struct {
	fakeVolumeSnapshotter *velerotest.FakeVolumeSnapshotter
	volumeMap             map[velerotest.VolumeBackupInfo]string
	volumeID              string
}

func (r *fakeVolumeSnapshotterGetter) GetVolumeSnapshotter(provider string) (velero.VolumeSnapshotter, error) {
	if r.fakeVolumeSnapshotter == nil {
		r.fakeVolumeSnapshotter = &velerotest.FakeVolumeSnapshotter{
			RestorableVolumes: r.volumeMap,
			VolumeID:          r.volumeID,
		}
	}
	return r.fakeVolumeSnapshotter, nil
}
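
// Note (for illustration): unlike the map-backed volumeSnapshotterGetter above,
// this getter lazily constructs a single velerotest.FakeVolumeSnapshotter on
// first use and returns it for every provider name.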