Add namespace mapping E2E test
Signed-off-by: danfengl <danfengl@vmware.com>pull/5201/head
parent
fb445b3c0d
commit
c8544ea212
|
@ -105,7 +105,7 @@ func runBackupDeletionTests(client TestClient, veleroCfg VerleroConfig, backupNa
|
|||
}
|
||||
|
||||
if err := KibishiiPrepareBeforeBackup(oneHourTimeout, client, providerName, deletionTest,
|
||||
registryCredentialFile, veleroFeatures, kibishiiDirectory, useVolumeSnapshots); err != nil {
|
||||
registryCredentialFile, veleroFeatures, kibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData); err != nil {
|
||||
return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", deletionTest)
|
||||
}
|
||||
err := ObjectsShouldNotBeInBucket(VeleroCfg.CloudProvider, VeleroCfg.CloudCredentialsFile, VeleroCfg.BSLBucket, VeleroCfg.BSLPrefix, VeleroCfg.BSLConfig, backupName, BackupObjectsPrefix, 1)
|
||||
|
|
|
@ -95,7 +95,7 @@ func TTLTest() {
|
|||
By("Deploy sample workload of Kibishii", func() {
|
||||
Expect(KibishiiPrepareBeforeBackup(test.ctx, client, VeleroCfg.CloudProvider,
|
||||
test.testNS, VeleroCfg.RegistryCredentialFile, VeleroCfg.Features,
|
||||
VeleroCfg.KibishiiDirectory, useVolumeSnapshots)).To(Succeed())
|
||||
VeleroCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed())
|
||||
})
|
||||
|
||||
var BackupCfg BackupConfig
|
||||
|
|
|
@ -0,0 +1,101 @@
|
|||
package basic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
. "github.com/vmware-tanzu/velero/test/e2e"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/test"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
|
||||
. "github.com/vmware-tanzu/velero/test/e2e/util/kibishii"
|
||||
)
|
||||
|
||||
// NamespaceMapping is an E2E test case that backs up a set of namespaces
// and restores them under different names via velero's
// --namespace-mappings flag.
type NamespaceMapping struct {
	TestCase
	// MappedNamespaceList holds the target namespace names that the
	// backed-up namespaces are mapped to on restore.
	MappedNamespaceList []string
	// kibishiiData describes the sample data set generated in each
	// namespace before backup and verified after restore.
	kibishiiData *KibishiiData
}

// OneNamespaceMappingTest restores a single namespace under a mapped name.
var OneNamespaceMappingTest func() = TestFunc(&NamespaceMapping{TestCase: TestCase{NSBaseName: "ns", NSIncluded: &[]string{"ns1"}}})

// MultiNamespacesMappingTest restores two namespaces under mapped names.
var MultiNamespacesMappingTest func() = TestFunc(&NamespaceMapping{TestCase: TestCase{NSBaseName: "ns", NSIncluded: &[]string{"ns1", "ns2"}}})
|
||||
|
||||
func (n *NamespaceMapping) Init() error {
|
||||
n.Client = TestClientInstance
|
||||
n.kibishiiData = &KibishiiData{2, 10, 10, 1024, 1024, 0, 2}
|
||||
|
||||
n.TestMsg = &TestMSG{
|
||||
Desc: "Backup resources with include namespace test",
|
||||
FailedMSG: "Failed to backup with namespace include",
|
||||
Text: fmt.Sprintf("should backup namespaces %s", *n.NSIncluded),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NamespaceMapping) StartRun() error {
|
||||
var mappedNS string
|
||||
var mappedNSList []string
|
||||
|
||||
for index, ns := range *n.NSIncluded {
|
||||
mappedNS = mappedNS + ns + ":" + ns + UUIDgen.String()
|
||||
mappedNSList = append(mappedNSList, ns+UUIDgen.String())
|
||||
if index+1 != len(*n.NSIncluded) {
|
||||
mappedNS = mappedNS + ","
|
||||
}
|
||||
n.BackupName = n.BackupName + ns
|
||||
n.RestoreName = n.RestoreName + ns
|
||||
}
|
||||
n.BackupName = n.BackupName + "backup-ns-mapping-" + UUIDgen.String()
|
||||
n.RestoreName = n.RestoreName + "restore-ns-mapping-" + UUIDgen.String()
|
||||
|
||||
n.MappedNamespaceList = mappedNSList
|
||||
fmt.Println(mappedNSList)
|
||||
n.BackupArgs = []string{
|
||||
"create", "--namespace", VeleroCfg.VeleroNamespace, "backup", n.BackupName,
|
||||
"--include-namespaces", strings.Join(*n.NSIncluded, ","),
|
||||
"--default-volumes-to-restic", "--wait",
|
||||
}
|
||||
n.RestoreArgs = []string{
|
||||
"create", "--namespace", VeleroCfg.VeleroNamespace, "restore", n.RestoreName,
|
||||
"--from-backup", n.BackupName, "--namespace-mappings", mappedNS,
|
||||
"--wait",
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// CreateResources creates each namespace to be backed up and deploys a
// kibishii workload with a distinct data set into it.
func (n *NamespaceMapping) CreateResources() error {
	// NOTE(review): the cancel func is discarded because the context is
	// stored on the struct for later phases — confirm the 60-minute
	// timeout is an acceptable upper bound for the whole test.
	n.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)

	for index, ns := range *n.NSIncluded {
		// Give every namespace a different data depth; Verify recomputes
		// the same value per mapped namespace, so the two loops must stay
		// in sync.
		n.kibishiiData.Levels = len(*n.NSIncluded) + index
		By(fmt.Sprintf("Creating namespaces ...%s\n", ns), func() {
			Expect(CreateNamespace(n.Ctx, n.Client, ns)).To(Succeed(), fmt.Sprintf("Failed to create namespace %s", ns))
		})
		By("Deploy sample workload of Kibishii", func() {
			// useVolumeSnapshots is hard-coded to false: this test always
			// exercises the restic (file-level) data path.
			Expect(KibishiiPrepareBeforeBackup(n.Ctx, n.Client, VeleroCfg.CloudProvider,
				ns, VeleroCfg.RegistryCredentialFile, VeleroCfg.Features,
				VeleroCfg.KibishiiDirectory, false, n.kibishiiData)).To(Succeed())
		})
	}
	return nil
}
|
||||
|
||||
// Verify checks that the kibishii data was restored into each mapped
// namespace and that the original source namespaces no longer exist.
func (n *NamespaceMapping) Verify() error {
	// NOTE(review): cancel func discarded (context kept on the struct);
	// same pattern as CreateResources.
	n.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
	for index, ns := range n.MappedNamespaceList {
		// Recompute the per-namespace data depth exactly as
		// CreateResources did, so verification matches generation.
		n.kibishiiData.Levels = len(*n.NSIncluded) + index
		By(fmt.Sprintf("Verify workload %s after restore ", ns), func() {
			Expect(KibishiiVerifyAfterRestore(n.Client, ns,
				n.Ctx, n.kibishiiData)).To(Succeed(), "Fail to verify workload after restore")
		})
	}
	for _, ns := range *n.NSIncluded {
		// The source namespaces were deleted before restore; with a
		// namespace mapping they must NOT be recreated.
		By(fmt.Sprintf("Verify namespace %s for backup is no longer exist after restore with namespace mapping", ns), func() {
			Expect(NamespaceShouldNotExist(n.Ctx, n.Client, ns)).To(Succeed())
		})
	}
	return nil
}
|
|
@ -147,7 +147,7 @@ func BslDeletionTest(useVolumeSnapshots bool) {
|
|||
By("Deploy sample workload of Kibishii", func() {
|
||||
Expect(KibishiiPrepareBeforeBackup(oneHourTimeout, *VeleroCfg.ClientToInstallVelero, VeleroCfg.CloudProvider,
|
||||
bslDeletionTestNs, VeleroCfg.RegistryCredentialFile, VeleroCfg.Features,
|
||||
VeleroCfg.KibishiiDirectory, useVolumeSnapshots)).To(Succeed())
|
||||
VeleroCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed())
|
||||
})
|
||||
|
||||
// Restic can not backup PV only, so pod need to be labeled also
|
||||
|
|
|
@ -122,6 +122,9 @@ var _ = Describe("[Migration][Snapshot]", MigrationWithSnapshots)
|
|||
|
||||
var _ = Describe("[Schedule][OrederedResources] Backup resources should follow the specific order in schedule", ScheduleOrderedResources)
|
||||
|
||||
var _ = Describe("[NamespaceMapping][Single] Backup resources should follow the specific order in schedule", OneNamespaceMappingTest)
|
||||
var _ = Describe("[NamespaceMapping][Multiple] Backup resources should follow the specific order in schedule", MultiNamespacesMappingTest)
|
||||
|
||||
func GetKubeconfigContext() error {
|
||||
var err error
|
||||
var tcDefault, tcStandby TestClient
|
||||
|
|
|
@ -144,7 +144,7 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version)
|
|||
By("Deploy sample workload of Kibishii", func() {
|
||||
Expect(KibishiiPrepareBeforeBackup(oneHourTimeout, *VeleroCfg.DefaultClient, VeleroCfg.CloudProvider,
|
||||
migrationNamespace, VeleroCfg.RegistryCredentialFile, VeleroCfg.Features,
|
||||
VeleroCfg.KibishiiDirectory, useVolumeSnapshots)).To(Succeed())
|
||||
VeleroCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed())
|
||||
})
|
||||
|
||||
By(fmt.Sprintf("Backup namespace %s", migrationNamespace), func() {
|
||||
|
@ -249,7 +249,7 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version)
|
|||
|
||||
By(fmt.Sprintf("Verify workload %s after restore ", migrationNamespace), func() {
|
||||
Expect(KibishiiVerifyAfterRestore(*VeleroCfg.StandbyClient, migrationNamespace,
|
||||
oneHourTimeout)).To(Succeed(), "Fail to verify workload after restore")
|
||||
oneHourTimeout, DefaultKibishiiData)).To(Succeed(), "Fail to verify workload after restore")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
|
|
@ -174,7 +174,7 @@ func (t *TestCase) Verify() error {
|
|||
}
|
||||
|
||||
func (t *TestCase) Clean() error {
|
||||
return CleanupNamespaces(t.Ctx, t.Client, t.NSBaseName)
|
||||
return CleanupNamespacesWithPoll(t.Ctx, t.Client, t.NSBaseName)
|
||||
}
|
||||
|
||||
func (t *TestCase) GetTestMsg() *TestMSG {
|
||||
|
|
|
@ -130,7 +130,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
|
|||
By("Deploy sample workload of Kibishii", func() {
|
||||
Expect(KibishiiPrepareBeforeBackup(oneHourTimeout, *VeleroCfg.ClientToInstallVelero, tmpCfg.CloudProvider,
|
||||
upgradeNamespace, tmpCfg.RegistryCredentialFile, tmpCfg.Features,
|
||||
tmpCfg.KibishiiDirectory, useVolumeSnapshots)).To(Succeed())
|
||||
tmpCfg.KibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData)).To(Succeed())
|
||||
})
|
||||
|
||||
By(fmt.Sprintf("Backup namespace %s", upgradeNamespace), func() {
|
||||
|
@ -206,7 +206,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
|
|||
|
||||
By(fmt.Sprintf("Verify workload %s after restore ", upgradeNamespace), func() {
|
||||
Expect(KibishiiVerifyAfterRestore(*VeleroCfg.ClientToInstallVelero, upgradeNamespace,
|
||||
oneHourTimeout)).To(Succeed(), "Fail to verify workload after restore")
|
||||
oneHourTimeout, DefaultKibishiiData)).To(Succeed(), "Fail to verify workload after restore")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
|
|
@ -67,7 +67,7 @@ func GetNamespace(ctx context.Context, client TestClient, namespace string) (*co
|
|||
}
|
||||
|
||||
func DeleteNamespace(ctx context.Context, client TestClient, namespace string, wait bool) error {
|
||||
oneMinuteTimeout, _ := context.WithTimeout(context.Background(), time.Minute*1)
|
||||
tenMinuteTimeout, _ := context.WithTimeout(context.Background(), time.Minute*10)
|
||||
if err := client.ClientGo.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}); err != nil {
|
||||
return errors.Wrap(err, fmt.Sprintf("failed to delete the namespace %q", namespace))
|
||||
}
|
||||
|
@ -76,7 +76,7 @@ func DeleteNamespace(ctx context.Context, client TestClient, namespace string, w
|
|||
}
|
||||
return waitutil.PollImmediateInfinite(5*time.Second,
|
||||
func() (bool, error) {
|
||||
if _, err := client.ClientGo.CoreV1().Namespaces().Get(oneMinuteTimeout, namespace, metav1.GetOptions{}); err != nil {
|
||||
if _, err := client.ClientGo.CoreV1().Namespaces().Get(tenMinuteTimeout, namespace, metav1.GetOptions{}); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
|
@ -90,6 +90,7 @@ func DeleteNamespace(ctx context.Context, client TestClient, namespace string, w
|
|||
|
||||
func CleanupNamespacesWithPoll(ctx context.Context, client TestClient, nsBaseName string) error {
|
||||
namespaces, err := client.ClientGo.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Could not retrieve namespaces")
|
||||
}
|
||||
|
@ -99,6 +100,7 @@ func CleanupNamespacesWithPoll(ctx context.Context, client TestClient, nsBaseNam
|
|||
if err != nil {
|
||||
return errors.Wrapf(err, "Could not delete namespace %s", checkNamespace.Name)
|
||||
}
|
||||
fmt.Printf("Delete namespace %s", checkNamespace.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -135,3 +137,16 @@ func WaitAllSelectedNSDeleted(ctx context.Context, client TestClient, label stri
|
|||
}
|
||||
})
|
||||
}
|
||||
|
||||
func NamespaceShouldNotExist(ctx context.Context, client TestClient, namespace string) error {
|
||||
namespaces, err := client.ClientGo.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Could not retrieve namespaces")
|
||||
}
|
||||
for _, checkNamespace := range namespaces.Items {
|
||||
if checkNamespace.Name == namespace {
|
||||
return errors.New(fmt.Sprintf("Namespace %s still exist", checkNamespace.Name))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -37,6 +37,17 @@ const (
|
|||
jumpPadPod = "jump-pad"
|
||||
)
|
||||
|
||||
// KibishiiData describes the synthetic data set that the kibishii workload
// generates before backup and verifies after restore; the fields are passed
// as positional arguments to kibishii's generate.sh / verify.sh.
type KibishiiData struct {
	Levels        int // depth of the generated directory tree
	DirsPerLevel  int // directories created per level
	FilesPerLevel int // files created per level
	FileLength    int // size of each file
	BlockSize     int // write block size
	PassNum       int // number of generation passes
	ExpectedNodes int // number of kibishii nodes expected to participate
}

// DefaultKibishiiData is the data set used when callers pass a nil
// *KibishiiData; callers that mutate fields must take their own copy.
var DefaultKibishiiData = &KibishiiData{2, 10, 10, 1024, 1024, 0, 2}

// KibishiiPodNameList names the kibishii pods expected after deployment.
var KibishiiPodNameList = []string{"kibishii-deployment-0", "kibishii-deployment-1"}
|
||||
|
||||
// RunKibishiiTests runs kibishii tests on the provider.
|
||||
|
@ -65,9 +76,10 @@ func RunKibishiiTests(client TestClient, veleroCfg VerleroConfig, backupName, re
|
|||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := KibishiiPrepareBeforeBackup(oneHourTimeout, client, providerName,
|
||||
kibishiiNamespace, registryCredentialFile, veleroFeatures,
|
||||
kibishiiDirectory, useVolumeSnapshots); err != nil {
|
||||
kibishiiDirectory, useVolumeSnapshots, DefaultKibishiiData); err != nil {
|
||||
return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", kibishiiNamespace)
|
||||
}
|
||||
|
||||
|
@ -123,7 +135,7 @@ func RunKibishiiTests(client TestClient, veleroCfg VerleroConfig, backupName, re
|
|||
return errors.Wrapf(err, "Restore %s failed from backup %s", restoreName, backupName)
|
||||
}
|
||||
|
||||
if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout); err != nil {
|
||||
if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData); err != nil {
|
||||
return errors.Wrapf(err, "Error verifying kibishii after restore")
|
||||
}
|
||||
|
||||
|
@ -164,11 +176,11 @@ func installKibishii(ctx context.Context, namespace string, cloudPlatform, veler
|
|||
return err
|
||||
}
|
||||
|
||||
func generateData(ctx context.Context, namespace string, levels int, filesPerLevel int, dirsPerLevel int, fileSize int,
|
||||
blockSize int, passNum int, expectedNodes int) error {
|
||||
func generateData(ctx context.Context, namespace string, kibishiiData *KibishiiData) error {
|
||||
kibishiiGenerateCmd := exec.CommandContext(ctx, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
|
||||
"/usr/local/bin/generate.sh", strconv.Itoa(levels), strconv.Itoa(filesPerLevel), strconv.Itoa(dirsPerLevel), strconv.Itoa(fileSize),
|
||||
strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
|
||||
"/usr/local/bin/generate.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel),
|
||||
strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength),
|
||||
strconv.Itoa(kibishiiData.BlockSize), strconv.Itoa(kibishiiData.PassNum), strconv.Itoa(kibishiiData.ExpectedNodes))
|
||||
fmt.Printf("kibishiiGenerateCmd cmd =%v\n", kibishiiGenerateCmd)
|
||||
|
||||
_, stderr, err := veleroexec.RunCommand(kibishiiGenerateCmd)
|
||||
|
@ -179,12 +191,13 @@ func generateData(ctx context.Context, namespace string, levels int, filesPerLev
|
|||
return nil
|
||||
}
|
||||
|
||||
func verifyData(ctx context.Context, namespace string, levels int, filesPerLevel int, dirsPerLevel int, fileSize int,
|
||||
blockSize int, passNum int, expectedNodes int) error {
|
||||
func verifyData(ctx context.Context, namespace string, kibishiiData *KibishiiData) error {
|
||||
timeout, _ := context.WithTimeout(context.Background(), time.Minute*5)
|
||||
kibishiiVerifyCmd := exec.CommandContext(timeout, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
|
||||
"/usr/local/bin/verify.sh", strconv.Itoa(levels), strconv.Itoa(filesPerLevel), strconv.Itoa(dirsPerLevel), strconv.Itoa(fileSize),
|
||||
strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
|
||||
"/usr/local/bin/verify.sh", strconv.Itoa(kibishiiData.Levels), strconv.Itoa(kibishiiData.DirsPerLevel),
|
||||
strconv.Itoa(kibishiiData.FilesPerLevel), strconv.Itoa(kibishiiData.FileLength),
|
||||
strconv.Itoa(kibishiiData.BlockSize), strconv.Itoa(kibishiiData.PassNum),
|
||||
strconv.Itoa(kibishiiData.ExpectedNodes))
|
||||
fmt.Printf("kibishiiVerifyCmd cmd =%v\n", kibishiiVerifyCmd)
|
||||
|
||||
stdout, stderr, err := veleroexec.RunCommand(kibishiiVerifyCmd)
|
||||
|
@ -200,7 +213,7 @@ func waitForKibishiiPods(ctx context.Context, client TestClient, kibishiiNamespa
|
|||
|
||||
func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClient,
|
||||
providerName, kibishiiNamespace, registryCredentialFile, veleroFeatures,
|
||||
kibishiiDirectory string, useVolumeSnapshots bool) error {
|
||||
kibishiiDirectory string, useVolumeSnapshots bool, kibishiiData *KibishiiData) error {
|
||||
serviceAccountName := "default"
|
||||
|
||||
// wait until the service account is created before patch the image pull secret
|
||||
|
@ -224,14 +237,20 @@ func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClie
|
|||
if err := waitForKibishiiPods(oneHourTimeout, client, kibishiiNamespace); err != nil {
|
||||
return errors.Wrapf(err, "Failed to wait for ready status of kibishii pods in %s", kibishiiNamespace)
|
||||
}
|
||||
|
||||
if err := generateData(oneHourTimeout, kibishiiNamespace, 2, 10, 10, 1024, 1024, 0, 2); err != nil {
|
||||
if kibishiiData == nil {
|
||||
kibishiiData = DefaultKibishiiData
|
||||
}
|
||||
if err := generateData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil {
|
||||
return errors.Wrap(err, "Failed to generate data")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, oneHourTimeout context.Context) error {
|
||||
func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, oneHourTimeout context.Context,
|
||||
kibishiiData *KibishiiData) error {
|
||||
if kibishiiData == nil {
|
||||
kibishiiData = DefaultKibishiiData
|
||||
}
|
||||
// wait for kibishii pod startup
|
||||
// TODO - Fix kibishii so we can check that it is ready to go
|
||||
fmt.Printf("Waiting for kibishii pods to be ready\n")
|
||||
|
@ -241,7 +260,7 @@ func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, one
|
|||
time.Sleep(60 * time.Second)
|
||||
// TODO - check that namespace exists
|
||||
fmt.Printf("running kibishii verify\n")
|
||||
if err := verifyData(oneHourTimeout, kibishiiNamespace, 2, 10, 10, 1024, 1024, 0, 2); err != nil {
|
||||
if err := verifyData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil {
|
||||
return errors.Wrap(err, "Failed to verify data generated by kibishii")
|
||||
}
|
||||
return nil
|
||||
|
|
Loading…
Reference in New Issue