Add backup opt-in/opt-out E2E test

Signed-off-by: danfengl <danfengl@vmware.com>
pull/5331/head
danfengl 2022-09-13 04:42:06 +00:00
parent 745ebbe081
commit 081b70d0eb
18 changed files with 524 additions and 22 deletions

View File

@@ -0,0 +1 @@
Add opt-in and opt-out PersistentVolume backup to E2E tests
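
As context for the change above: opt-in/opt-out PV backup is driven by two pod annotations, backup.velero.io/backup-volumes (restic backs up only the volumes listed there) and backup.velero.io/backup-volumes-excludes (volumes listed there are skipped when the backup is created with --default-volumes-to-restic). Below is a minimal sketch of annotating a pod for the opt-in case with plain client-go; the helper name and wiring are assumptions for illustration only, and the new tests do the equivalent through the test framework's AddAnnotationToPod.

package k8sexample

import (
	"context"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// The two Velero pod annotations exercised by the new tests.
const (
	optInAnnotation  = "backup.velero.io/backup-volumes"          // comma-separated volume names to back up
	optOutAnnotation = "backup.velero.io/backup-volumes-excludes" // comma-separated volume names to skip
)

// annotateVolumesForBackup is a hypothetical helper: it marks the given volumes of a pod for
// opt-in restic backup by setting the opt-in annotation to a comma-separated volume list.
func annotateVolumesForBackup(ctx context.Context, c kubernetes.Interface, ns, pod string, volumes []string) error {
	p, err := c.CoreV1().Pods(ns).Get(ctx, pod, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if p.Annotations == nil {
		p.Annotations = map[string]string{}
	}
	p.Annotations[optInAnnotation] = strings.Join(volumes, ",")
	_, err = c.CoreV1().Pods(ns).Update(ctx, p, metav1.UpdateOptions{})
	return err
}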

View File

@@ -58,12 +58,11 @@ func (b *TTL) Init() {
}
func TTLTest() {
var err error
useVolumeSnapshots := true
test := new(TTL)
client, err := NewTestClient(VeleroCfg.DefaultCluster)
if err != nil {
println(err.Error())
}
client := *VeleroCfg.ClientToInstallVelero
//Expect(err).To(Succeed(), "Failed to instantiate cluster client for backup tests")
BeforeEach(func() {

View File

@@ -36,6 +36,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/builder"
veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
. "github.com/vmware-tanzu/velero/test/e2e"
. "github.com/vmware-tanzu/velero/test/e2e/util/common"
. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
. "github.com/vmware-tanzu/velero/test/e2e/util/velero"
)
@@ -464,14 +465,8 @@ func runEnableAPIGroupVersionsTests(ctx context.Context, client TestClient, reso
func installCRD(ctx context.Context, yaml string) error {
fmt.Printf("Install CRD with %s.\n", yaml)
cmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", yaml)
_, stderr, err := veleroexec.RunCommand(cmd)
if err != nil {
return errors.Wrap(err, stderr)
}
return nil
err := KubectlApplyFile(ctx, yaml)
return err
}
func deleteCRD(ctx context.Context, yaml string) error {

View File

@@ -35,6 +35,7 @@ import (
. "github.com/vmware-tanzu/velero/test/e2e/bsl-mgmt"
. "github.com/vmware-tanzu/velero/test/e2e/orderedresources"
. "github.com/vmware-tanzu/velero/test/e2e/privilegesmgmt"
. "github.com/vmware-tanzu/velero/test/e2e/pv-backup"
. "github.com/vmware-tanzu/velero/test/e2e/resource-filtering"
. "github.com/vmware-tanzu/velero/test/e2e/scale"
. "github.com/vmware-tanzu/velero/test/e2e/upgrade"
@@ -118,6 +119,7 @@ var _ = Describe("[BSL][Deletion][Snapshot] Local backups will be deleted once t
var _ = Describe("[BSL][Deletion][Restic] Local backups and restic repos will be deleted once the corresponding backup storage location is deleted", BslDeletionWithRestic)
var _ = Describe("[Migration][Restic]", MigrationWithRestic)
var _ = Describe("[Migration][Snapshot]", MigrationWithSnapshots)
var _ = Describe("[Schedule][OrederedResources] Backup resources should follow the specific order in schedule", ScheduleOrderedResources)
@@ -125,6 +127,9 @@ var _ = Describe("[Schedule][OrederedResources] Backup resources should follow t
var _ = Describe("[NamespaceMapping][Single] Backup resources should follow the specific order in schedule", OneNamespaceMappingTest)
var _ = Describe("[NamespaceMapping][Multiple] Backup resources should follow the specific order in schedule", MultiNamespacesMappingTest)
var _ = Describe("[pv-backup][Opt-In] Backup resources should follow the specific order in schedule", OptInPVBackupTest)
var _ = Describe("[pv-backup][Opt-Out] Backup resources should follow the specific order in schedule", OptOutPVBackupTest)
func GetKubeconfigContext() error {
var err error
var tcDefault, tcStandby TestClient

View File

@@ -85,7 +85,7 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version)
DeleteNamespace(context.Background(), *VeleroCfg.StandbyClient, migrationNamespace, true)
})
}
By(fmt.Sprintf("Switch to default kubeconfig context %s", VeleroCfg.DefaultCluster), func() {
By(fmt.Sprintf("Switch to default kubeconfig context %s", VeleroCfg.DefaultClient), func() {
Expect(KubectlConfigUseContext(context.Background(), VeleroCfg.DefaultCluster)).To(Succeed())
VeleroCfg.ClientToInstallVelero = VeleroCfg.DefaultClient
})

View File

@@ -112,10 +112,8 @@ func ScheduleOrderedResources() {
func (o *OrderedResources) Init() error {
rand.Seed(time.Now().UnixNano())
UUIDgen, _ = uuid.NewRandom()
client, err := NewTestClient(VeleroCfg.DefaultCluster)
if err != nil {
return fmt.Errorf("failed to init ordered resources test with err %v", err)
}
client := *VeleroCfg.ClientToInstallVelero
o.Client = client
o.ScheduleName = "schedule-ordered-resources-" + UUIDgen.String()
o.NSBaseName = "schedule-ordered-resources"

View File

@@ -0,0 +1,209 @@
package basic
import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
. "github.com/vmware-tanzu/velero/test/e2e"
. "github.com/vmware-tanzu/velero/test/e2e/test"
. "github.com/vmware-tanzu/velero/test/e2e/util/common"
. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
)
type PVBackupFiltering struct {
TestCase
annotation string
podsList [][]string
volumesList [][]string
id string
}
const POD_COUNT, VOLUME_COUNT_PER_POD = 2, 3
const OPT_IN_ANN, OPT_OUT_ANN = "backup.velero.io/backup-volumes", "backup.velero.io/backup-volumes-excludes"
const FILE_NAME = "test-data.txt"
var OptInPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_IN_ANN, id: "opt-in"})
var OptOutPVBackupTest func() = TestFunc(&PVBackupFiltering{annotation: OPT_OUT_ANN, id: "opt-out"})
func (p *PVBackupFiltering) Init() error {
p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
p.Client = TestClientInstance
p.NSBaseName = "ns"
p.NSIncluded = &[]string{fmt.Sprintf("%s-%s-%d", p.NSBaseName, p.id, 1), fmt.Sprintf("%s-%s-%d", p.NSBaseName, p.id, 2)}
p.TestMsg = &TestMSG{
Desc: "Backup PVs filtering by opt-in/opt-out annotation",
FailedMSG: "Failed to PVs filtering by opt-in/opt-out annotation",
Text: fmt.Sprintf("Should backup PVs in namespace %s according to annotation %s", *p.NSIncluded, p.annotation),
}
return nil
}
func (p *PVBackupFiltering) StartRun() error {
err := installStorageClass(p.Ctx, fmt.Sprintf("testdata/storage-class/%s.yaml", VeleroCfg.CloudProvider))
if err != nil {
return err
}
p.BackupName = p.BackupName + "backup-opt-in-" + UUIDgen.String()
p.RestoreName = p.RestoreName + "restore-opt-in-" + UUIDgen.String()
p.BackupArgs = []string{
"create", "--namespace", VeleroCfg.VeleroNamespace, "backup", p.BackupName,
"--include-namespaces", strings.Join(*p.NSIncluded, ","),
"--snapshot-volumes=false", "--wait",
}
if p.annotation == OPT_OUT_ANN {
p.BackupArgs = append(p.BackupArgs, "--default-volumes-to-restic")
}
p.RestoreArgs = []string{
"create", "--namespace", VeleroCfg.VeleroNamespace, "restore", p.RestoreName,
"--from-backup", p.BackupName, "--wait",
}
return nil
}
func (p *PVBackupFiltering) CreateResources() error {
p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
for _, ns := range *p.NSIncluded {
By(fmt.Sprintf("Create namespaces %s for workload\n", ns), func() {
Expect(CreateNamespace(p.Ctx, p.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns))
})
var pods []string
By(fmt.Sprintf("Deploy a few pods with several PVs in namespace %s", ns), func() {
var volumesToAnnotation string
// Make sure PVC names are unique across tests to avoid PVC creation errors
for i := 0; i <= POD_COUNT-1; i++ {
var volumeToAnnotationList []string
var volumes []string
for j := 0; j <= VOLUME_COUNT_PER_POD-1; j++ {
volume := fmt.Sprintf("volume-%s-%d-%d", p.id, i, j)
volumes = append(volumes, volume)
// Cherry-pick every other volume (even index) for the opt-in/opt-out annotation, so each pod has both annotated and unannotated volumes
if j%2 == 0 {
volumeToAnnotationList = append(volumeToAnnotationList, volume)
}
}
p.volumesList = append(p.volumesList, volumes)
volumesToAnnotation = strings.Join(volumeToAnnotationList, ",")
podName := fmt.Sprintf("pod-%d", i)
pods = append(pods, podName)
By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() {
pod, err := CreatePodWithPVC(p.Client, ns, podName, "e2e-storage-class", volumes)
Expect(err).To(Succeed())
ann := map[string]string{
p.annotation: volumesToAnnotation,
}
By(fmt.Sprintf("Add annotation to pod %s of namespace %s", pod.Name, ns), func() {
_, err := AddAnnotationToPod(p.Ctx, p.Client, ns, pod.Name, ann)
Expect(err).To(Succeed())
})
})
}
})
p.podsList = append(p.podsList, pods)
}
By(fmt.Sprintf("Waiting for all pods to start %s\n", p.podsList), func() {
for index, ns := range *p.NSIncluded {
By(fmt.Sprintf("Waiting for all pods to start %d in namespace %s", index, ns), func() {
WaitForPods(p.Ctx, p.Client, ns, p.podsList[index])
})
}
})
By(fmt.Sprintf("Polulate all pods %s with file %s", p.podsList, FILE_NAME), func() {
for index, ns := range *p.NSIncluded {
By(fmt.Sprintf("Creating file in all pods to start %d in namespace %s", index, ns), func() {
WaitForPods(p.Ctx, p.Client, ns, p.podsList[index])
for i, pod := range p.podsList[index] {
for j := range p.volumesList[i] {
Expect(CreateFileToPod(p.Ctx, ns, pod, p.volumesList[i][j],
FILE_NAME, fileContent(ns, pod, p.volumesList[i][j]))).To(Succeed())
}
}
})
}
})
return nil
}
func (p *PVBackupFiltering) Verify() error {
p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
By(fmt.Sprintf("Waiting for all pods to start %s", p.podsList), func() {
for index, ns := range *p.NSIncluded {
By(fmt.Sprintf("Waiting for all pods to start %d in namespace %s", index, ns), func() {
WaitForPods(p.Ctx, p.Client, ns, p.podsList[index])
})
}
})
for k, ns := range *p.NSIncluded {
By("Verify PV backed up according to annotation", func() {
for i := 0; i <= POD_COUNT-1; i++ {
for j := 0; j <= VOLUME_COUNT_PER_POD-1; j++ {
// Mirror the annotation cherry-pick policy (even-index volumes annotated) when verifying the backup result
if j%2 == 0 {
if p.annotation == OPT_IN_ANN {
By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect")
})
} else {
By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect")
})
}
} else {
if p.annotation == OPT_OUT_ANN {
By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect")
})
} else {
By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect")
})
}
}
}
}
})
}
return nil
}
func fileContent(namespace, podName, volume string) string {
return fmt.Sprintf("ns-%s pod-%s volume-%s", namespace, podName, volume)
}
func fileExist(ctx context.Context, namespace, podName, volume string) error {
c, err := ReadFileFromPodVolume(ctx, namespace, podName, volume, FILE_NAME)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s ",
FILE_NAME, volume, podName, namespace))
}
c = strings.Replace(c, "\n", "", -1)
originContent := strings.Replace(fileContent(namespace, podName, volume), "\n", "", -1)
if c == originContent {
return nil
}
return errors.Errorf("UNEXPECTED: file %s in volume %s of pod %s in namespace %s exists, but its content does not match what was written",
FILE_NAME, volume, podName, namespace)
}
func fileNotExist(ctx context.Context, namespace, podName, volume string) error {
_, err := ReadFileFromPodVolume(ctx, namespace, podName, volume, FILE_NAME)
if err != nil {
return nil
}
return errors.Errorf("UNEXPECTED: file %s exists in volume %s of pod %s in namespace %s",
FILE_NAME, volume, podName, namespace)
}
func installStorageClass(ctx context.Context, yaml string) error {
fmt.Printf("Install storage class with %s.\n", yaml)
err := KubectlApplyFile(ctx, yaml)
return err
}

View File

@@ -164,10 +164,12 @@ func (t *TestCase) Destroy() error {
}
func (t *TestCase) Restore() error {
if err := VeleroCmdExec(t.Ctx, VeleroCfg.VeleroCLI, t.RestoreArgs); err != nil {
RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, t.BackupName, "")
return errors.Wrapf(err, "Failed to restore resources")
}
By("Start to restore ......", func() {
Expect(VeleroCmdExec(t.Ctx, VeleroCfg.VeleroCLI, t.RestoreArgs)).To(Succeed(), func() string {
RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, t.BackupName, "")
return "Fail to restore workload"
})
})
return nil
}

View File

@@ -0,0 +1,9 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: e2e-storage-class
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

View File

@@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: e2e-storage-class
provisioner: kubernetes.io/azure-disk
parameters:
cachingmode: ReadOnly
kind: Managed
storageaccounttype: StandardSSD_LRS
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

View File

@@ -0,0 +1,13 @@
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
labels:
addonmanager.kubernetes.io/mode: EnsureExists
name: e2e-storage-class
parameters:
type: pd-standard
provisioner: kubernetes.io/gce-pd
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

View File

@@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: e2e-storage-class
annotations:
storageclass.kubernetes.io/is-default-class: "false"
parameters:
StoragePolicyName: "vSAN Default Storage Policy"
provisioner: csi.vsphere.vmware.com
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

View File

@@ -6,6 +6,10 @@ import (
"context"
"fmt"
"os/exec"
"github.com/pkg/errors"
veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
)
type OsCommandLine struct {
@@ -52,3 +56,15 @@ func GetListBy2Pipes(ctx context.Context, cmdline1, cmdline2, cmdline3 OsCommand
return ret, nil
}
func KubectlApplyFile(ctx context.Context, yaml string) error {
fmt.Printf("Kube apply file %s.\n", yaml)
cmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", yaml)
_, stderr, err := veleroexec.RunCommand(cmd)
if err != nil {
return errors.Wrap(err, stderr)
}
return nil
}

View File

@@ -24,6 +24,7 @@ import (
"github.com/pkg/errors"
"golang.org/x/net/context"
corev1 "k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@@ -217,3 +218,67 @@ func GetAPIVersions(client *TestClient, name string) ([]string, error) {
}
return nil, errors.New("Server API groups is empty")
}
func GetPVByPodName(client TestClient, namespace, podName string) (string, error) {
pvcList, err := GetPvcByPodName(context.Background(), namespace, podName)
if err != nil {
return "", err
}
if len(pvcList) != 1 {
return "", errors.New(fmt.Sprintf("Only 1 PVC of pod %s should be found under namespace %s", podName, namespace))
}
pvList, err := GetPvByPvc(context.Background(), namespace, pvcList[0])
if err != nil {
return "", err
}
if len(pvList) != 1 {
return "", errors.New(fmt.Sprintf("Only 1 PV of PVC %s pod %s should be found under namespace %s", pvcList[0], podName, namespace))
}
pvValue, err := GetPersistentVolume(context.Background(), client, "", pvList[0])
if err != nil {
return "", err
}
fmt.Println(pvValue.Annotations["pv.kubernetes.io/provisioned-by"])
return pvValue.Name, nil
}
func CreatePodWithPVC(client TestClient, ns, podName, sc string, volumeNameList []string) (*corev1.Pod, error) {
volumes := []corev1.Volume{}
for _, volume := range volumeNameList {
pvc, err := CreatePVC(client, ns, fmt.Sprintf("pvc-%s", volume), sc)
if err != nil {
return nil, err
}
volumes = append(volumes, corev1.Volume{
Name: volume,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
})
}
pod, err := CreatePod(client, ns, podName, volumes)
if err != nil {
return nil, err
}
return pod, nil
}
func CreateFileToPod(ctx context.Context, namespace, podName, volume, filename, content string) error {
// Write the provided content into the volume so it can be read back for verification after restore.
arg := []string{"exec", "-n", namespace, "-c", podName, podName,
"--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > /%s/%s", content, volume, filename)}
cmd := exec.CommandContext(ctx, "kubectl", arg...)
fmt.Printf("Kubectl exec cmd =%v\n", cmd)
return cmd.Run()
}
func ReadFileFromPodVolume(ctx context.Context, namespace, podName, volume, filename string) (string, error) {
arg := []string{"exec", "-n", namespace, "-c", podName, podName,
"--", "cat", fmt.Sprintf("/%s/%s", volume, filename)}
cmd := exec.CommandContext(ctx, "kubectl", arg...)
fmt.Printf("Kubectl exec cmd =%v\n", cmd)
stdout, stderr, err := veleroexec.RunCommand(cmd)
fmt.Print(stdout)
fmt.Print(stderr)
return stdout, err
}

View File

@@ -100,7 +100,7 @@ func CleanupNamespacesWithPoll(ctx context.Context, client TestClient, nsBaseNam
if err != nil {
return errors.Wrapf(err, "Could not delete namespace %s", checkNamespace.Name)
}
fmt.Printf("Delete namespace %s", checkNamespace.Name)
fmt.Printf("Delete namespace %s\n", checkNamespace.Name)
}
}
return nil

View File

@@ -18,11 +18,49 @@ package k8s
import (
"context"
"fmt"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func CreatePersistentVolume(client TestClient, name string) (*corev1.PersistentVolume, error) {
p := &corev1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: corev1.PersistentVolumeSpec{
StorageClassName: "manual",
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
Capacity: corev1.ResourceList{corev1.ResourceName(corev1.ResourceStorage): resource.MustParse("2Gi")},
PersistentVolumeSource: corev1.PersistentVolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/demo",
},
},
},
}
return client.ClientGo.CoreV1().PersistentVolumes().Create(context.TODO(), p, metav1.CreateOptions{})
}
func GetPersistentVolume(ctx context.Context, client TestClient, namespace string, persistentVolume string) (*corev1.PersistentVolume, error) {
return client.ClientGo.CoreV1().PersistentVolumes().Get(ctx, persistentVolume, metav1.GetOptions{})
}
func AddAnnotationToPersistentVolume(ctx context.Context, client TestClient, namespace string, persistentVolume, key string) (*corev1.PersistentVolume, error) {
newPV, err := GetPersistentVolume(ctx, client, "", persistentVolume)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("Fail to ge PV %s", persistentVolume))
}
ann := newPV.ObjectMeta.Annotations
if ann == nil {
ann = make(map[string]string)
}
ann[key] = persistentVolume
newPV.Annotations = ann
return client.ClientGo.CoreV1().PersistentVolumes().Update(ctx, newPV, metav1.UpdateOptions{})
}

test/e2e/util/k8s/pod.go (new file, 79 lines)
View File

@@ -0,0 +1,79 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8s
import (
"context"
"fmt"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*corev1.Pod, error) {
vmList := []corev1.VolumeMount{}
for _, v := range volumes {
vmList = append(vmList, corev1.VolumeMount{
Name: v.Name,
MountPath: "/" + v.Name,
})
}
p := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: name,
Image: "gcr.io/velero-gcp/busybox",
Command: []string{"sleep", "3600"},
VolumeMounts: vmList,
},
},
Volumes: volumes,
},
}
return client.ClientGo.CoreV1().Pods(ns).Create(context.TODO(), p, metav1.CreateOptions{})
}
func GetPod(ctx context.Context, client TestClient, namespace string, pod string) (*corev1.Pod, error) {
return client.ClientGo.CoreV1().Pods(namespace).Get(ctx, pod, metav1.GetOptions{})
}
func AddAnnotationToPod(ctx context.Context, client TestClient, namespace, podName string, ann map[string]string) (*corev1.Pod, error) {
newPod, err := GetPod(ctx, client, namespace, podName)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("Fail to ge pod %s in namespace %s", podName, namespace))
}
newAnn := newPod.ObjectMeta.Annotations
if newAnn == nil {
newAnn = make(map[string]string)
}
for k, v := range ann {
fmt.Println(k, v)
newAnn[k] = v
}
newPod.Annotations = newAnn
fmt.Println(newPod.Annotations)
return client.ClientGo.CoreV1().Pods(namespace).Update(ctx, newPod, metav1.UpdateOptions{})
}

test/e2e/util/k8s/pvc.go (new file, 51 lines)
View File

@@ -0,0 +1,51 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8s
import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func CreatePVC(client TestClient, ns, name, sc string) (*corev1.PersistentVolumeClaim, error) {
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
},
},
StorageClassName: &sc,
},
}
return client.ClientGo.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
}
func GetPVC(ctx context.Context, client TestClient, namespace string, pvcName string) (*corev1.PersistentVolumeClaim, error) {
return client.ClientGo.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
}