Merge pull request #6047 from danfengliu/debug-azure-issue
Add changing PVC selected node E2E testpull/6050/head
commit
c9af70aff3
|
@ -52,7 +52,7 @@ func (b *TTL) Init() {
|
|||
b.testNS = "backup-ttl-test-" + UUIDgen.String()
|
||||
b.backupName = "backup-ttl-test-" + UUIDgen.String()
|
||||
b.restoreName = "restore-ttl-test-" + UUIDgen.String()
|
||||
b.ctx, _ = context.WithTimeout(context.Background(), time.Hour)
|
||||
b.ctx, _ = context.WithTimeout(context.Background(), 2*time.Hour)
|
||||
b.ttl = 20 * time.Minute
|
||||
|
||||
}
|
||||
|
|
|
@ -29,7 +29,6 @@ var OneNamespaceMappingSnapshotTest func() = TestFunc(&NamespaceMapping{TestCase
|
|||
var MultiNamespacesMappingSnapshotTest func() = TestFunc(&NamespaceMapping{TestCase: TestCase{NSBaseName: NamespaceBaseName, NSIncluded: &[]string{NamespaceBaseName + "1", NamespaceBaseName + "2"}, UseVolumeSnapshots: true}})
|
||||
|
||||
func (n *NamespaceMapping) Init() error {
|
||||
//n.Client = TestClientInstance
|
||||
n.VeleroCfg = VeleroCfg
|
||||
n.Client = *n.VeleroCfg.ClientToInstallVelero
|
||||
n.VeleroCfg.UseVolumeSnapshots = n.UseVolumeSnapshots
|
||||
|
|
|
@ -38,7 +38,6 @@ func (n *NodePort) Init() error {
|
|||
n.VeleroCfg = VeleroCfg
|
||||
n.Client = *n.VeleroCfg.ClientToInstallVelero
|
||||
n.NSBaseName = NodeportBaseName
|
||||
n.NamespacesTotal = 1
|
||||
n.TestMsg = &TestMSG{
|
||||
Desc: fmt.Sprintf("Nodeport preservation"),
|
||||
FailedMSG: "Failed to restore with nodeport preservation",
|
||||
|
|
|
@ -0,0 +1,153 @@
|
|||
package basic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
|
||||
. "github.com/vmware-tanzu/velero/test/e2e"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/test"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/util/velero"
|
||||
)
|
||||
|
||||
// PVCSelectedNodeChanging is an E2E test case for the
// velero.io/change-pvc-node-selector RestoreItemAction: a PVC pinned to one
// worker node through the volume.kubernetes.io/selected-node annotation is
// remapped to a different node while being restored into a mapped namespace.
type PVCSelectedNodeChanging struct {
	TestCase
	// labels registers the ConfigMap as plugin configuration for the
	// change-pvc-node-selector RestoreItemAction.
	labels map[string]string
	// data is the ConfigMap payload: old node name -> new node name.
	data map[string]string
	// configmaptName is the node-selector ConfigMap name (note: existing
	// "configmapt" spelling kept; renaming would touch every method).
	configmaptName string
	// namespace is the source namespace the workload is created in.
	namespace string
	// oldNodeName is the node the PVC is originally pinned to.
	oldNodeName string
	// newNodeName is the node the restored PVC should point at.
	newNodeName string
	volume  string
	podName string
	// mappedNS is the restore target namespace (namespace + "-mapped").
	mappedNS string
	pvcName  string
	// ann is the PVC annotation key under test:
	// volume.kubernetes.io/selected-node.
	ann string
}
|
||||
|
||||
// PSNCBaseName is the name prefix for every resource this test creates.
const PSNCBaseName string = "psnc-"

// PVCSelectedNodeChangingTest is the Ginkgo entry point for the
// PVC-selected-node-changing test case.
var PVCSelectedNodeChangingTest func() = TestFunc(&PVCSelectedNodeChanging{
	namespace: PSNCBaseName + "1", TestCase: TestCase{NSBaseName: PSNCBaseName}})
|
||||
|
||||
// Init wires the test case to the shared Velero configuration and
// pre-computes every name (namespaces, backup/restore names, workload
// resources, plugin ConfigMap) the test will use. Always returns nil.
func (p *PVCSelectedNodeChanging) Init() error {
	p.VeleroCfg = VeleroCfg
	p.Client = *p.VeleroCfg.ClientToInstallVelero
	p.NSBaseName = PSNCBaseName
	// Unique source namespace, plus the "-mapped" namespace the restore
	// will land in (see --namespace-mappings in StartRun).
	p.namespace = p.NSBaseName + UUIDgen.String()
	p.mappedNS = p.namespace + "-mapped"
	p.TestMsg = &TestMSG{
		Desc: "Changing PVC node selector",
		FailedMSG: "Failed to changing PVC node selector",
		Text: "Change node selectors of persistent volume claims during restores",
	}
	// NOTE(review): the "backup-sc-" prefix looks copied from the
	// storage-class-changing test — confirm whether "backup-psnc-" was meant.
	p.BackupName = "backup-sc-" + UUIDgen.String()
	p.RestoreName = "restore-" + UUIDgen.String()
	// Labels that mark the ConfigMap below as plugin configuration for the
	// change-pvc-node-selector RestoreItemAction.
	p.labels = map[string]string{"velero.io/plugin-config": "",
		"velero.io/change-pvc-node-selector": "RestoreItemAction"}
	p.configmaptName = "change-pvc-node-selector-config"
	p.volume = "volume-1"
	p.podName = "pod-1"
	p.pvcName = "pvc-1"
	// Scheduler annotation that pins a PVC to a node; the value of this
	// annotation is what the restore is expected to rewrite.
	p.ann = "volume.kubernetes.io/selected-node"
	return nil
}
|
||||
|
||||
// StartRun finalizes unique backup/restore names for this run and assembles
// the velero CLI argument lists for the backup and the namespace-mapped
// restore. Always returns nil.
func (p *PVCSelectedNodeChanging) StartRun() error {
	// NOTE(review): this appends another "backup-<uuid>"/"restore-<uuid>"
	// segment onto names Init already made unique, producing e.g.
	// "backup-sc-<uuid>backup-<uuid>" — confirm the concatenation is intended.
	p.BackupName = p.BackupName + "backup-" + UUIDgen.String()
	p.RestoreName = p.RestoreName + "restore-" + UUIDgen.String()
	p.BackupArgs = []string{
		"create", "--namespace", VeleroCfg.VeleroNamespace, "backup", p.BackupName,
		"--include-namespaces", p.namespace,
		// Snapshots are disabled: this test exercises the RestoreItemAction,
		// not volume data movement.
		"--snapshot-volumes=false", "--wait",
	}
	p.RestoreArgs = []string{
		"create", "--namespace", VeleroCfg.VeleroNamespace, "restore", p.RestoreName,
		"--from-backup", p.BackupName, "--namespace-mappings", fmt.Sprintf("%s:%s", p.namespace, p.mappedNS), "--wait",
	}
	return nil
}
|
||||
// CreateResources prepares the test fixture: the source namespace, a pod
// whose PVC is pinned to one worker node via the selected-node annotation,
// and the plugin ConfigMap mapping that node's name to a different worker.
func (p *PVCSelectedNodeChanging) CreateResources() error {
	// NOTE(review): the cancel func is discarded (`go vet` lostcancel). The
	// context must outlive this method, so fixing it would require storing
	// the cancel on the struct and calling it at teardown.
	p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
	By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
		Expect(CreateNamespace(context.Background(), p.Client, p.namespace)).To(Succeed(),
			fmt.Sprintf("Failed to create namespace %s", p.namespace))
	})

	By(fmt.Sprintf("Create pod %s in namespace %s", p.podName, p.namespace), func() {
		nodeNameList, err := GetWorkerNodes(context.Background())
		Expect(err).To(Succeed())
		// Pin the PVC to the first worker node; the loop body breaks after
		// one iteration, so only nodeNameList[0] is ever used.
		for _, nodeName := range nodeNameList {
			p.oldNodeName = nodeName
			fmt.Printf("Create PVC on node %s\n", p.oldNodeName)
			pvcAnn := map[string]string{p.ann: nodeName}
			_, err := CreatePodWithPVC(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn)
			Expect(err).To(Succeed())
			err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
			Expect(err).To(Succeed())
			break
		}
	})

	By("Prepare ConfigMap data", func() {
		nodeNameList, err := GetWorkerNodes(context.Background())
		Expect(err).To(Succeed())
		// NOTE(review): only two distinct workers are needed (old + new), so
		// requiring more than 2 looks stricter than necessary — confirm
		// whether `>= 2` was intended.
		Expect(len(nodeNameList) > 2).To(Equal(true))
		// Pick any worker other than the one the PVC is currently pinned to.
		for _, nodeName := range nodeNameList {
			if nodeName != p.oldNodeName {
				p.newNodeName = nodeName
				break
			}
		}
		// ConfigMap payload consumed by the RestoreItemAction: old -> new.
		p.data = map[string]string{p.oldNodeName: p.newNodeName}
	})

	By(fmt.Sprintf("Create ConfigMap %s in namespace %s", p.configmaptName, p.VeleroCfg.VeleroNamespace), func() {
		cm, err := CreateConfigMap(p.Client.ClientGo, p.VeleroCfg.VeleroNamespace, p.configmaptName, p.labels, p.data)
		Expect(err).To(Succeed(), fmt.Sprintf("failed to create configmap in the namespace %q", p.VeleroCfg.VeleroNamespace))
		fmt.Printf("Configmap: %v", cm)
	})
	return nil
}
|
||||
|
||||
func (p *PVCSelectedNodeChanging) Destroy() error {
|
||||
By(fmt.Sprintf("Start to destroy namespace %s......", p.NSBaseName), func() {
|
||||
Expect(CleanupNamespacesWithPoll(context.Background(), p.Client, p.NSBaseName)).To(Succeed(),
|
||||
fmt.Sprintf("Failed to delete namespace %s", p.NSBaseName))
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restore executes the velero restore, expecting it to reach the Completed
// phase, then waits for the restored pod to be running in the mapped
// namespace. On failure the Gomega message func dumps velero debug output.
func (p *PVCSelectedNodeChanging) Restore() error {
	By(fmt.Sprintf("Start to restore %s .....", p.RestoreName), func() {
		Expect(VeleroRestoreExec(context.Background(), p.VeleroCfg.VeleroCLI,
			p.VeleroCfg.VeleroNamespace, p.RestoreName,
			p.RestoreArgs, velerov1api.RestorePhaseCompleted)).To(
			Succeed(),
			func() string {
				// Only runs when the restore failed: collect velero debug
				// info before returning the failure message.
				RunDebug(context.Background(), p.VeleroCfg.VeleroCLI,
					p.VeleroCfg.VeleroNamespace, "", p.RestoreName)
				return "Fail to restore workload"
			})
		err := WaitForPods(p.Ctx, p.Client, p.mappedNS, []string{p.podName})
		Expect(err).To(Succeed())
	})
	return nil
}
|
||||
// Verify asserts that exactly one PVC exists in the mapped namespace and
// that its selected-node annotation now points at the NEW node, proving the
// RestoreItemAction rewrote the node selector.
func (p *PVCSelectedNodeChanging) Verify() error {
	By(fmt.Sprintf("PVC selected node should be %s", p.newNodeName), func() {
		// NOTE(review): GetPvcByPodName is passed p.pvcName ("pvc-1"), not
		// p.podName — confirm against the helper whether its last argument
		// is really a pod name; this may only work by prefix matching.
		pvcNameList, err := GetPvcByPodName(context.Background(), p.mappedNS, p.pvcName)
		Expect(err).To(Succeed())
		Expect(len(pvcNameList)).Should(Equal(1))
		pvc, err := GetPVC(context.Background(), p.Client, p.mappedNS, pvcNameList[0])
		Expect(err).To(Succeed())
		Expect(pvc.Annotations[p.ann]).To(Equal(p.newNodeName))
	})
	return nil
}
|
|
@ -40,7 +40,6 @@ func (s *StorageClasssChanging) Init() error {
|
|||
s.NSBaseName = SCCBaseName
|
||||
s.namespace = s.NSBaseName + UUIDgen.String()
|
||||
s.mappedNS = s.namespace + "-mapped"
|
||||
s.NamespacesTotal = 1
|
||||
s.TestMsg = &TestMSG{
|
||||
Desc: "Changing PV/PVC Storage Classes",
|
||||
FailedMSG: "Failed to changing PV/PVC Storage Classes",
|
||||
|
@ -80,13 +79,13 @@ func (s *StorageClasssChanging) CreateResources() error {
|
|||
Expect(InstallStorageClass(context.Background(), fmt.Sprintf("testdata/storage-class/%s.yaml",
|
||||
s.VeleroCfg.CloudProvider))).To(Succeed())
|
||||
})
|
||||
By(fmt.Sprintf("Create namespace %s", s.namespace), func() {
|
||||
By(fmt.Sprintf("Create namespace %s", s.namespace), func() {
|
||||
Expect(CreateNamespace(s.Ctx, s.Client, s.namespace)).To(Succeed(),
|
||||
fmt.Sprintf("Failed to create namespace %s", s.namespace))
|
||||
})
|
||||
|
||||
By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() {
|
||||
_, err := CreatePodWithPVC(s.Client, s.namespace, s.podName, s.srcStorageClass, []string{s.volume})
|
||||
_, err := CreatePodWithPVC(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil)
|
||||
Expect(err).To(Succeed())
|
||||
})
|
||||
By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.configmaptName, s.VeleroCfg.VeleroNamespace), func() {
|
||||
|
@ -130,6 +129,7 @@ func (s *StorageClasssChanging) Restore() error {
|
|||
}
|
||||
func (s *StorageClasssChanging) Verify() error {
|
||||
By(fmt.Sprintf("Expect storage class of PV %s to be %s ", s.volume, s.desStorageClass), func() {
|
||||
time.Sleep(1 * time.Minute)
|
||||
pvName, err := GetPVByPodName(s.Client, s.mappedNS, s.volume)
|
||||
Expect(err).To(Succeed(), fmt.Sprintf("Failed to get PV name by pod name %s", s.podName))
|
||||
pv, err := GetPersistentVolume(s.Ctx, s.Client, s.mappedNS, pvName)
|
||||
|
|
|
@ -136,6 +136,7 @@ var _ = Describe("[pv-backup][Opt-Out] Backup resources should follow the specif
|
|||
|
||||
var _ = Describe("[Basic][Nodeport] Service nodeport reservation during restore is configurable", NodePortTest)
|
||||
var _ = Describe("[Basic][StorageClass] Storage class of persistent volumes and persistent volume claims can be changed during restores", StorageClasssChangingTest)
|
||||
var _ = Describe("[Basic][SelectedNode] Node selectors of persistent volume claims can be changed during restores", PVCSelectedNodeChangingTest)
|
||||
|
||||
func GetKubeconfigContext() error {
|
||||
var err error
|
||||
|
|
|
@ -97,7 +97,7 @@ func (p *PVBackupFiltering) CreateResources() error {
|
|||
podName := fmt.Sprintf("pod-%d", i)
|
||||
pods = append(pods, podName)
|
||||
By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() {
|
||||
pod, err := CreatePodWithPVC(p.Client, ns, podName, "e2e-storage-class", volumes)
|
||||
pod, err := CreatePodWithPVC(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil)
|
||||
Expect(err).To(Succeed())
|
||||
ann := map[string]string{
|
||||
p.annotation: volumesToAnnotation,
|
||||
|
|
|
@ -48,7 +48,6 @@ func (f *FilteringCase) Init() error {
|
|||
f.replica = int32(2)
|
||||
f.labels = map[string]string{"resourcefiltering": "true"}
|
||||
f.labelSelector = "resourcefiltering"
|
||||
//f.Client = TestClientInstance
|
||||
f.VeleroCfg = VeleroCfg
|
||||
f.Client = *f.VeleroCfg.ClientToInstallVelero
|
||||
f.NamespacesTotal = 3
|
||||
|
|
|
@ -28,7 +28,6 @@ type ScheduleBackup struct {
|
|||
var ScheduleBackupTest func() = TestFunc(&ScheduleBackup{TestCase: TestCase{NSBaseName: "schedule-test"}})
|
||||
|
||||
func (n *ScheduleBackup) Init() error {
|
||||
//n.Client = TestClientInstance
|
||||
n.VeleroCfg = VeleroCfg
|
||||
n.Client = *n.VeleroCfg.ClientToInstallVelero
|
||||
n.Period = 3 // Unit is minute
|
||||
|
|
|
@ -75,15 +75,10 @@ type TestCase struct {
|
|||
RestorePhaseExpect velerov1api.RestorePhase
|
||||
}
|
||||
|
||||
var TestClientInstance TestClient
|
||||
|
||||
func TestFunc(test VeleroBackupRestoreTest) func() {
|
||||
return func() {
|
||||
Expect(test.Init()).To(Succeed(), "Failed to instantiate test cases")
|
||||
veleroCfg := test.GetTestCase().VeleroCfg
|
||||
By("Create test client instance", func() {
|
||||
TestClientInstance = *veleroCfg.ClientToInstallVelero
|
||||
})
|
||||
BeforeEach(func() {
|
||||
flag.Parse()
|
||||
veleroCfg := test.GetTestCase().VeleroCfg
|
||||
|
@ -117,9 +112,6 @@ func TestFuncWithMultiIt(tests []VeleroBackupRestoreTest) func() {
|
|||
for k := range tests {
|
||||
Expect(tests[k].Init()).To(Succeed(), fmt.Sprintf("Failed to instantiate test %s case", tests[k].GetTestMsg().Desc))
|
||||
veleroCfg = tests[k].GetTestCase().VeleroCfg
|
||||
By("Create test client instance", func() {
|
||||
TestClientInstance = *veleroCfg.ClientToInstallVelero
|
||||
})
|
||||
useVolumeSnapshots = tests[k].GetTestCase().UseVolumeSnapshots
|
||||
}
|
||||
|
||||
|
|
|
@ -4,7 +4,9 @@ import (
|
|||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
|
@ -55,3 +57,33 @@ func GetListByCmdPipes(ctx context.Context, cmdlines []*OsCommandLine) ([]string
|
|||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func CMDExecWithOutput(checkCMD *exec.Cmd) (*[]byte, error) {
|
||||
stdoutPipe, err := checkCMD.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
jsonBuf := make([]byte, 128*1024) // If the YAML is bigger than 64K, there's probably something bad happening
|
||||
|
||||
err = checkCMD.Start()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bytesRead, err := io.ReadFull(stdoutPipe, jsonBuf)
|
||||
|
||||
if err != nil && err != io.ErrUnexpectedEOF {
|
||||
return nil, err
|
||||
}
|
||||
if bytesRead == len(jsonBuf) {
|
||||
return nil, errors.New("yaml returned bigger than max allowed")
|
||||
}
|
||||
|
||||
jsonBuf = jsonBuf[0:bytesRead]
|
||||
err = checkCMD.Wait()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &jsonBuf, err
|
||||
}
|
||||
|
|
|
@ -255,10 +255,16 @@ func GetPVByPodName(client TestClient, namespace, podName string) (string, error
|
|||
}
|
||||
return pv_value.Name, nil
|
||||
}
|
||||
func CreatePodWithPVC(client TestClient, ns, podName, sc string, volumeNameList []string) (*corev1.Pod, error) {
|
||||
func CreatePodWithPVC(client TestClient, ns, podName, sc, pvcName string, volumeNameList []string, pvcAnn map[string]string) (*corev1.Pod, error) {
|
||||
volumes := []corev1.Volume{}
|
||||
for _, volume := range volumeNameList {
|
||||
pvc, err := CreatePVC(client, ns, fmt.Sprintf("pvc-%s", volume), sc)
|
||||
var _pvcName string
|
||||
if pvcName == "" {
|
||||
_pvcName = fmt.Sprintf("pvc-%s", volume)
|
||||
} else {
|
||||
_pvcName = pvcName
|
||||
}
|
||||
pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
package k8s
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
common "github.com/vmware-tanzu/velero/test/e2e/util/common"
|
||||
)
|
||||
|
||||
func GetWorkerNodes(ctx context.Context) ([]string, error) {
|
||||
getCMD := exec.CommandContext(ctx, "kubectl", "get", "node", "-o", "json")
|
||||
|
||||
fmt.Printf("kubectl get node cmd =%v\n", getCMD)
|
||||
jsonBuf, err := common.CMDExecWithOutput(getCMD)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodes := &unstructured.UnstructuredList{}
|
||||
err = json.Unmarshal(*jsonBuf, &nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var nodeNameList []string
|
||||
for nodeIndex, node := range nodes.Items {
|
||||
// := v1.Node{}
|
||||
fmt.Println(nodeIndex)
|
||||
fmt.Println(node.GetName())
|
||||
anns := node.GetAnnotations()
|
||||
fmt.Println(anns)
|
||||
fmt.Println(anns["cluster.x-k8s.io/owner-kind"])
|
||||
//"MachineSet"
|
||||
if anns["cluster.x-k8s.io/owner-kind"] == "KubeadmControlPlane" {
|
||||
continue
|
||||
}
|
||||
nodeNameList = append(nodeNameList, node.GetName())
|
||||
}
|
||||
return nodeNameList, nil
|
||||
}
|
|
@ -20,16 +20,18 @@ import (
|
|||
"context"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func CreatePVC(client TestClient, ns, name, sc string) (*corev1.PersistentVolumeClaim, error) {
|
||||
func CreatePVC(client TestClient, ns, name, sc string, ann map[string]string) (*corev1.PersistentVolumeClaim, error) {
|
||||
oMeta := metav1.ObjectMeta{}
|
||||
oMeta = metav1.ObjectMeta{Name: name}
|
||||
if ann != nil {
|
||||
oMeta.Annotations = ann
|
||||
}
|
||||
pvc := &corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
ObjectMeta: oMeta,
|
||||
Spec: corev1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []corev1.PersistentVolumeAccessMode{
|
||||
corev1.ReadWriteOnce,
|
||||
|
@ -42,10 +44,8 @@ func CreatePVC(client TestClient, ns, name, sc string) (*corev1.PersistentVolume
|
|||
StorageClassName: &sc,
|
||||
},
|
||||
}
|
||||
|
||||
return client.ClientGo.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
|
||||
}
|
||||
|
||||
func GetPVC(ctx context.Context, client TestClient, namespace string, persistentVolume string) (*corev1.PersistentVolume, error) {
|
||||
return client.ClientGo.CoreV1().PersistentVolumes().Get(ctx, persistentVolume, metav1.GetOptions{})
|
||||
func GetPVC(ctx context.Context, client TestClient, namespace string, pvcName string) (*corev1.PersistentVolumeClaim, error) {
|
||||
return client.ClientGo.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
|
||||
}
|
||||
|
|
|
@ -168,36 +168,6 @@ func getProviderVeleroInstallOptions(veleroCfg *VeleroConfig,
|
|||
return io, nil
|
||||
}
|
||||
|
||||
func CMDExecWithOutput(checkCMD *exec.Cmd) (*[]byte, error) {
|
||||
stdoutPipe, err := checkCMD.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
jsonBuf := make([]byte, 16*1024) // If the YAML is bigger than 16K, there's probably something bad happening
|
||||
|
||||
err = checkCMD.Start()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bytesRead, err := io.ReadFull(stdoutPipe, jsonBuf)
|
||||
|
||||
if err != nil && err != io.ErrUnexpectedEOF {
|
||||
return nil, err
|
||||
}
|
||||
if bytesRead == len(jsonBuf) {
|
||||
return nil, errors.New("yaml returned bigger than max allowed")
|
||||
}
|
||||
|
||||
jsonBuf = jsonBuf[0:bytesRead]
|
||||
err = checkCMD.Wait()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &jsonBuf, err
|
||||
}
|
||||
|
||||
// checkBackupPhase uses VeleroCLI to inspect the phase of a Velero backup.
|
||||
func checkBackupPhase(ctx context.Context, veleroCLI string, veleroNamespace string, backupName string,
|
||||
expectedPhase velerov1api.BackupPhase) error {
|
||||
|
@ -205,7 +175,7 @@ func checkBackupPhase(ctx context.Context, veleroCLI string, veleroNamespace str
|
|||
backupName)
|
||||
|
||||
fmt.Printf("get backup cmd =%v\n", checkCMD)
|
||||
jsonBuf, err := CMDExecWithOutput(checkCMD)
|
||||
jsonBuf, err := common.CMDExecWithOutput(checkCMD)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -227,7 +197,7 @@ func checkRestorePhase(ctx context.Context, veleroCLI string, veleroNamespace st
|
|||
restoreName)
|
||||
|
||||
fmt.Printf("get restore cmd =%v\n", checkCMD)
|
||||
jsonBuf, err := CMDExecWithOutput(checkCMD)
|
||||
jsonBuf, err := common.CMDExecWithOutput(checkCMD)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -245,7 +215,7 @@ func checkRestorePhase(ctx context.Context, veleroCLI string, veleroNamespace st
|
|||
func checkSchedulePhase(ctx context.Context, veleroCLI, veleroNamespace, scheduleName string) error {
|
||||
return wait.PollImmediate(time.Second*5, time.Minute*2, func() (bool, error) {
|
||||
checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "schedule", "get", scheduleName, "-ojson")
|
||||
jsonBuf, err := CMDExecWithOutput(checkCMD)
|
||||
jsonBuf, err := common.CMDExecWithOutput(checkCMD)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -265,7 +235,7 @@ func checkSchedulePhase(ctx context.Context, veleroCLI, veleroNamespace, schedul
|
|||
|
||||
func checkSchedulePause(ctx context.Context, veleroCLI, veleroNamespace, scheduleName string, pause bool) error {
|
||||
checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "schedule", "get", scheduleName, "-ojson")
|
||||
jsonBuf, err := CMDExecWithOutput(checkCMD)
|
||||
jsonBuf, err := common.CMDExecWithOutput(checkCMD)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -283,7 +253,7 @@ func checkSchedulePause(ctx context.Context, veleroCLI, veleroNamespace, schedul
|
|||
}
|
||||
func CheckScheduleWithResourceOrder(ctx context.Context, veleroCLI, veleroNamespace, scheduleName string, order map[string]string) error {
|
||||
checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "schedule", "get", scheduleName, "-ojson")
|
||||
jsonBuf, err := CMDExecWithOutput(checkCMD)
|
||||
jsonBuf, err := common.CMDExecWithOutput(checkCMD)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -305,7 +275,7 @@ func CheckScheduleWithResourceOrder(ctx context.Context, veleroCLI, veleroNamesp
|
|||
|
||||
func CheckBackupWithResourceOrder(ctx context.Context, veleroCLI, veleroNamespace, backupName string, order map[string]string) error {
|
||||
checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "get", "backup", backupName, "-ojson")
|
||||
jsonBuf, err := CMDExecWithOutput(checkCMD)
|
||||
jsonBuf, err := common.CMDExecWithOutput(checkCMD)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue