commit eaef57ab82

@@ -34,7 +34,7 @@ RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.5
 
 # get goimports (the revision is pinned so we don't indiscriminately update, but the particular commit
 # is not important)
-RUN go install golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d9295508e7d
+RUN go install golang.org/x/tools/cmd/goimports@v0.33.0
 
 # get protoc compiler and golang plugin
 WORKDIR /root
@@ -119,6 +119,8 @@ VELERO_SERVER_DEBUG_MODE ?= false
 
 ITEM_BLOCK_WORKER_COUNT ?= 1
 
+WORKER_OS ?= linux
+
 # Parameters to run migration tests along with all other E2E tests, and both of them should
 # be provided or left them all empty to skip migration tests with no influence to other
 # E2E tests.
@@ -221,7 +223,8 @@ run-e2e: ginkgo
 		--standby-cls-service-account-name=$(STANDBY_CLS_SERVICE_ACCOUNT_NAME) \
 		--kibishii-directory=$(KIBISHII_DIRECTORY) \
 		--disable-informer-cache=$(DISABLE_INFORMER_CACHE) \
-		--image-registry-proxy=$(IMAGE_REGISTRY_PROXY)
+		--image-registry-proxy=$(IMAGE_REGISTRY_PROXY) \
+		--worker-os=$(WORKER_OS)
 
 .PHONY: run-perf
 run-perf: ginkgo
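With this change the `run-e2e` target forwards `WORKER_OS` to the suite as `--worker-os`. Since the variable defaults to `linux` and can be overridden on the command line, an invocation such as `make run-e2e WORKER_OS=windows ...` (other required variables omitted here) is what switches the E2E workloads to Windows worker nodes.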
@@ -79,6 +79,7 @@ These configuration parameters are expected as values to the following command l
 1. `--debug-velero-pod-restart`: A switch for debugging velero pod restart.
 1. `--fail-fast`: A switch for failing fast on meeting an error.
 1. `--has-vsphere-plugin`: A switch to indicate whether the Velero vSphere plugin is installed for the vSphere environment.
+1. `--worker-os`: A switch to indicate whether the workload should run on a Windows or Linux OS.
 
 These configurations or parameters are used to generate install options for Velero for each test suite.
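For example, adding `--worker-os=windows` to these flags (the default is `linux`) directs the suite to schedule its test workloads onto Windows worker nodes.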
@@ -131,6 +132,7 @@ Below is a mapping between `make` variables to E2E configuration flags.
 1. `DEBUG_VELERO_POD_RESTART`: `-debug-velero-pod-restart`. Optional.
 1. `FAIL_FAST`: `--fail-fast`. Optional.
 1. `HAS_VSPHERE_PLUGIN`: `--has-vsphere-plugin`. Optional.
+1. `WORKER_OS`: `--worker-os`. Optional.
 
@@ -140,7 +140,15 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
 veleroCfg.ProvideSnapshotsVolumeParam = provideSnapshotVolumesParmInBackup

 // Set DefaultVolumesToFsBackup to false since DefaultVolumesToFsBackup was set to true during installation
-Expect(RunKibishiiTests(veleroCfg, backupName, restoreName, "", kibishiiNamespace, useVolumeSnapshots, false)).To(Succeed(),
+Expect(RunKibishiiTests(
+    veleroCfg,
+    backupName,
+    restoreName,
+    "",
+    kibishiiNamespace,
+    useVolumeSnapshots,
+    false,
+)).To(Succeed(),
     "Failed to successfully backup and restore Kibishii namespace")
 })
@@ -212,7 +220,17 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
 }
 veleroCfg.ProvideSnapshotsVolumeParam = !provideSnapshotVolumesParmInBackup
 workloadNS := kibishiiNamespace + bsl
-Expect(RunKibishiiTests(veleroCfg, backupName, restoreName, bsl, workloadNS, useVolumeSnapshots, !useVolumeSnapshots)).To(Succeed(),
+Expect(
+    RunKibishiiTests(
+        veleroCfg,
+        backupName,
+        restoreName,
+        bsl,
+        workloadNS,
+        useVolumeSnapshots,
+        !useVolumeSnapshots,
+    ),
+).To(Succeed(),
     "Failed to successfully backup and restore Kibishii namespace using BSL %s", bsl)
 }
 })
@@ -125,6 +125,7 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
     kibishiiDirectory,
     DefaultKibishiiData,
     veleroCfg.ImageRegistryProxy,
+    veleroCfg.WorkerOS,
 ); err != nil {
     return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", ns)
 }
@@ -110,6 +110,7 @@ func TTLTest() {
     veleroCfg.KibishiiDirectory,
     DefaultKibishiiData,
     veleroCfg.ImageRegistryProxy,
+    veleroCfg.WorkerOS,
 )).To(Succeed())
 })
@@ -138,8 +138,16 @@ func (v *BackupVolumeInfo) CreateResources() error {
 // Hitting issue https://github.com/vmware-tanzu/velero/issues/7388
 // So populate data only to some of pods, leave other pods empty to verify empty PV datamover
 if i%2 == 0 {
-    Expect(CreateFileToPod(v.Ctx, createNSName, pod.Name, DefaultContainerName, vols[i].Name,
-        fmt.Sprintf("file-%s", pod.Name), CreateFileContent(createNSName, pod.Name, vols[i].Name))).To(Succeed())
+    Expect(CreateFileToPod(
+        v.Ctx,
+        createNSName,
+        pod.Name,
+        DefaultContainerName,
+        vols[i].Name,
+        fmt.Sprintf("file-%s", pod.Name),
+        CreateFileContent(createNSName, pod.Name, vols[i].Name),
+        WorkerOSLinux,
+    )).To(Succeed())
 }
 }
 }
@@ -101,6 +101,7 @@ func (n *NamespaceMapping) CreateResources() error {
     n.VeleroCfg.KibishiiDirectory,
     n.kibishiiData,
     n.VeleroCfg.ImageRegistryProxy,
+    n.VeleroCfg.WorkerOS,
 )).To(Succeed())
 })
 }
@@ -111,8 +112,14 @@ func (n *NamespaceMapping) Verify() error {
 for index, ns := range n.MappedNamespaceList {
     n.kibishiiData.Levels = len(*n.NSIncluded) + index
     By(fmt.Sprintf("Verify workload %s after restore ", ns), func() {
-        Expect(KibishiiVerifyAfterRestore(n.Client, ns,
-            n.Ctx, n.kibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
+        Expect(KibishiiVerifyAfterRestore(
+            n.Client,
+            ns,
+            n.Ctx,
+            n.kibishiiData,
+            "",
+            n.VeleroCfg.WorkerOS,
+        )).To(Succeed(), "Fail to verify workload after restore")
     })
 }
 for _, ns := range *n.NSIncluded {
@@ -31,7 +31,7 @@ import (

 type MultiNSBackup struct {
     TestCase
-    IsScalTest bool
+    IsScaleTest bool
     NSExcluded *[]string
     TimeoutDuration time.Duration
 }
@@ -43,7 +43,7 @@ func (m *MultiNSBackup) Init() error {
 m.RestoreName = "restore-" + m.CaseBaseName
 m.NSExcluded = &[]string{}

-if m.IsScalTest {
+if m.IsScaleTest {
     m.NamespacesTotal = 2500
     m.TimeoutDuration = time.Hour * 2
     m.TestMsg = &TestMSG{
@@ -39,7 +39,7 @@ import (
 func GetResourcesCheckTestCases() []VeleroBackupRestoreTest {
     return []VeleroBackupRestoreTest{
         &NSAnnotationCase{},
-        &MultiNSBackup{IsScalTest: false},
+        &MultiNSBackup{IsScaleTest: false},
         &RBACCase{},
     }
 }
@@ -162,6 +162,7 @@ func BslDeletionTest(useVolumeSnapshots bool) {
     veleroCfg.KibishiiDirectory,
     DefaultKibishiiData,
     veleroCfg.ImageRegistryProxy,
+    veleroCfg.WorkerOS,
 )).To(Succeed())
 })
@@ -356,6 +356,12 @@ func init() {
     "",
     "The image registry proxy, e.g. when the DockerHub access limitation is reached, can use available proxy to replace. Default is nil.",
 )
+flag.StringVar(
+    &test.VeleroCfg.WorkerOS,
+    "worker-os",
+    "linux",
+    "test k8s worker node OS version, should be either linux or windows.",
+)
 }

 // Add label [SkipVanillaZfs]:
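The flag registration above only declares the default; it does not validate the value. A minimal, hypothetical validation sketch (the helper name and the `main` wrapper are illustrative, not part of this change):

```go
package main

import "fmt"

// validateWorkerOS is a hypothetical helper: it checks a --worker-os value
// against the two settings the E2E suite understands.
func validateWorkerOS(workerOS string) error {
	switch workerOS {
	case "linux", "windows":
		return nil
	default:
		return fmt.Errorf("unsupported --worker-os value %q, expected linux or windows", workerOS)
	}
}

func main() {
	fmt.Println(validateWorkerOS("windows")) // <nil>
	fmt.Println(validateWorkerOS("darwin"))  // unsupported --worker-os value "darwin", ...
}
```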
@@ -621,12 +627,12 @@ var _ = Describe(

 var _ = Describe(
     "Backup resources should follow the specific order in schedule",
-    Label("PVBackup", "OptIn"),
+    Label("PVBackup", "OptIn", "FSB"),
     OptInPVBackupTest,
 )
 var _ = Describe(
     "Backup resources should follow the specific order in schedule",
-    Label("PVBackup", "OptOut"),
+    Label("PVBackup", "OptOut", "FSB"),
     OptOutPVBackupTest,
 )
@@ -23,9 +23,11 @@ import (

     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
+    "golang.org/x/mod/semver"

     "github.com/vmware-tanzu/velero/test"
     framework "github.com/vmware-tanzu/velero/test/e2e/test"
+    "github.com/vmware-tanzu/velero/test/util/common"
     util "github.com/vmware-tanzu/velero/test/util/csi"
     k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
     "github.com/vmware-tanzu/velero/test/util/kibishii"
@@ -160,6 +162,10 @@ func (m *migrationE2E) Backup() error {
 version, err := veleroutil.GetVeleroVersion(m.Ctx, OriginVeleroCfg.VeleroCLI, true)
 Expect(err).To(Succeed(), "Fail to get Velero version")
 OriginVeleroCfg.VeleroVersion = version
+if OriginVeleroCfg.WorkerOS == common.WorkerOSWindows &&
+    (version != "main" && semver.Compare(version, "v1.16") < 0) {
+    Skip(fmt.Sprintf("Velero CLI version %s doesn't support Windows migration test.", version))
+}

 if OriginVeleroCfg.SnapshotMoveData {
     OriginVeleroCfg.UseNodeAgent = true
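The skip guard relies on `golang.org/x/mod/semver`, which treats any string that is not a valid `v`-prefixed semantic version (such as `main`) as smaller than every valid version; that is why `main` has to be special-cased. A standalone sketch of the same comparison (illustrative only):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// supportsWindowsMigration mirrors the guard above: development builds ("main")
// and releases from v1.16 onward are treated as supporting the Windows migration test.
func supportsWindowsMigration(version string) bool {
	return version == "main" || semver.Compare(version, "v1.16") >= 0
}

func main() {
	for _, v := range []string{"v1.15.2", "v1.16.0", "main"} {
		fmt.Printf("%s -> %v\n", v, supportsWindowsMigration(v))
	}
	// Output:
	// v1.15.2 -> false
	// v1.16.0 -> true
	// main -> true
}
```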
@@ -197,6 +203,7 @@ func (m *migrationE2E) Backup() error {
     OriginVeleroCfg.KibishiiDirectory,
     &m.kibishiiData,
     OriginVeleroCfg.ImageRegistryProxy,
+    OriginVeleroCfg.WorkerOS,
 )).To(Succeed())
 })
@@ -401,6 +408,7 @@ func (m *migrationE2E) Verify() error {
     m.Ctx,
     &m.kibishiiData,
     "",
+    m.VeleroCfg.WorkerOS,
 )).To(Succeed(), "Fail to verify workload after restore")
 })
@@ -413,56 +421,66 @@ func (m *migrationE2E) Clean() error {
 })

 By("Clean resource on standby cluster.", func() {
+    defer func() {
+        By("Switch to default KubeConfig context", func() {
+            k8sutil.KubectlConfigUseContext(
+                m.Ctx,
+                m.VeleroCfg.DefaultClusterContext,
+            )
+        })
+    }()
+
     Expect(k8sutil.KubectlConfigUseContext(
         m.Ctx, m.VeleroCfg.StandbyClusterContext)).To(Succeed())
     m.VeleroCfg.ClientToInstallVelero = m.VeleroCfg.StandbyClient
     m.VeleroCfg.ClusterToInstallVelero = m.VeleroCfg.StandbyClusterName

     By("Delete StorageClasses created by E2E")
-    Expect(
-        k8sutil.DeleteStorageClass(
-            m.Ctx,
-            *m.VeleroCfg.ClientToInstallVelero,
-            test.StorageClassName,
-        ),
-    ).To(Succeed())
-    Expect(
-        k8sutil.DeleteStorageClass(
-            m.Ctx,
-            *m.VeleroCfg.ClientToInstallVelero,
-            test.StorageClassName2,
-        ),
-    ).To(Succeed())
+    if err := k8sutil.DeleteStorageClass(
+        m.Ctx,
+        *m.VeleroCfg.ClientToInstallVelero,
+        test.StorageClassName,
+    ); err != nil {
+        fmt.Println("Fail to delete StorageClass1: ", err)
+        return
+    }
+
+    if err := k8sutil.DeleteStorageClass(
+        m.Ctx,
+        *m.VeleroCfg.ClientToInstallVelero,
+        test.StorageClassName2,
+    ); err != nil {
+        fmt.Println("Fail to delete StorageClass2: ", err)
+        return
+    }

     if strings.EqualFold(m.VeleroCfg.Features, test.FeatureCSI) &&
         m.VeleroCfg.UseVolumeSnapshots {
         By("Delete VolumeSnapshotClass created by E2E")
-        Expect(
-            k8sutil.KubectlDeleteByFile(
-                m.Ctx,
-                fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml",
-                    m.VeleroCfg.StandbyClusterCloudProvider),
-            ),
-        ).To(Succeed())
+        if err := k8sutil.KubectlDeleteByFile(
+            m.Ctx,
+            fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml",
+                m.VeleroCfg.StandbyClusterCloudProvider),
+        ); err != nil {
+            fmt.Println("Fail to delete VolumeSnapshotClass: ", err)
+            return
+        }
     }

-    Expect(veleroutil.VeleroUninstall(m.Ctx, m.VeleroCfg)).To(Succeed())
+    if err := veleroutil.VeleroUninstall(m.Ctx, m.VeleroCfg); err != nil {
+        fmt.Println("Fail to uninstall Velero: ", err)
+        return
+    }

-    Expect(
-        k8sutil.DeleteNamespace(
-            m.Ctx,
-            *m.VeleroCfg.StandbyClient,
-            m.CaseBaseName,
-            true,
-        ),
-    ).To(Succeed())
+    if err := k8sutil.DeleteNamespace(
+        m.Ctx,
+        *m.VeleroCfg.StandbyClient,
+        m.CaseBaseName,
+        true,
+    ); err != nil {
+        fmt.Println("Fail to delete the workload namespace: ", err)
+        return
+    }
 })

-By("Switch to default KubeConfig context", func() {
-    Expect(k8sutil.KubectlConfigUseContext(
-        m.Ctx,
-        m.VeleroCfg.DefaultClusterContext,
-    )).To(Succeed())
-})
-
 return nil
@@ -115,8 +115,16 @@ func (p *PVBackupFiltering) CreateResources() error {
 Expect(WaitForPods(p.Ctx, p.Client, ns, p.podsList[index])).To(Succeed())
 for i, pod := range p.podsList[index] {
     for j := range p.volumesList[i] {
-        Expect(CreateFileToPod(p.Ctx, ns, pod, pod, p.volumesList[i][j],
-            FILE_NAME, CreateFileContent(ns, pod, p.volumesList[i][j]))).To(Succeed())
+        Expect(CreateFileToPod(
+            p.Ctx,
+            ns,
+            pod,
+            pod,
+            p.volumesList[i][j],
+            FILE_NAME,
+            CreateFileContent(ns, pod, p.volumesList[i][j]),
+            WorkerOSLinux,
+        )).To(Succeed())
     }
 }
 })
@@ -142,21 +150,45 @@ func (p *PVBackupFiltering) Verify() error {
 if j%2 == 0 {
     if p.annotation == OPT_IN_ANN {
         By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
-            Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect")
+            Expect(fileExist(
+                p.Ctx,
+                ns,
+                p.podsList[k][i],
+                p.volumesList[i][j],
+                p.VeleroCfg.WorkerOS,
+            )).To(Succeed(), "File not exist as expect")
         })
     } else {
         By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
-            Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect")
+            Expect(fileNotExist(
+                p.Ctx,
+                ns,
+                p.podsList[k][i],
+                p.volumesList[i][j],
+                p.VeleroCfg.WorkerOS,
+            )).To(Succeed(), "File exists, not as expect")
         })
     }
 } else {
     if p.annotation == OPT_OUT_ANN {
         By(fmt.Sprintf("File should exists in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
-            Expect(fileExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File not exist as expect")
+            Expect(fileExist(
+                p.Ctx,
+                ns,
+                p.podsList[k][i],
+                p.volumesList[i][j],
+                p.VeleroCfg.WorkerOS,
+            )).To(Succeed(), "File not exist as expect")
        })
     } else {
         By(fmt.Sprintf("File should not exist in PV %s of pod %s under namespace %s\n", p.volumesList[i][j], p.podsList[k][i], ns), func() {
-            Expect(fileNotExist(p.Ctx, ns, p.podsList[k][i], p.volumesList[i][j])).To(Succeed(), "File exists, not as expect")
+            Expect(fileNotExist(
+                p.Ctx,
+                ns,
+                p.podsList[k][i],
+                p.volumesList[i][j],
+                p.VeleroCfg.WorkerOS,
+            )).To(Succeed(), "File exists, not as expect")
         })
     }
 }
@@ -168,8 +200,14 @@ func (p *PVBackupFiltering) Verify() error {
     return nil
 }

-func fileExist(ctx context.Context, namespace, podName, volume string) error {
-    c, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
+func fileExist(
+    ctx context.Context,
+    namespace string,
+    podName string,
+    volume string,
+    workerOS string,
+) error {
+    c, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME, workerOS)
     if err != nil {
         return errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s ",
             FILE_NAME, volume, podName, namespace))
@@ -183,8 +221,14 @@ func fileExist(ctx context.Context, namespace, podName, volume string) error {
         FILE_NAME, volume, podName, namespace))
     }
 }
-func fileNotExist(ctx context.Context, namespace, podName, volume string) error {
-    _, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
+func fileNotExist(
+    ctx context.Context,
+    namespace string,
+    podName string,
+    volume string,
+    workerOS string,
+) error {
+    _, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME, workerOS)
     if err != nil {
         return nil
     } else {
@@ -28,6 +28,7 @@ import (

     . "github.com/vmware-tanzu/velero/test"
     . "github.com/vmware-tanzu/velero/test/e2e/test"
+    "github.com/vmware-tanzu/velero/test/util/common"
     . "github.com/vmware-tanzu/velero/test/util/k8s"
 )
@@ -151,7 +152,15 @@ func (r *ResourcePoliciesCase) Verify() error {
 if vol.Name != volName {
     continue
 }
-content, _, err := ReadFileFromPodVolume(r.Ctx, ns, pod.Name, "container-busybox", vol.Name, FileName)
+content, _, err := ReadFileFromPodVolume(
+    r.Ctx,
+    ns,
+    pod.Name,
+    "container-busybox",
+    vol.Name,
+    FileName,
+    r.VeleroCfg.WorkerOS,
+)
 if i%2 == 0 {
     Expect(err).To(HaveOccurred(), "Expected file not found") // File should not exist
 } else {
@@ -231,7 +240,16 @@ func (r *ResourcePoliciesCase) writeDataIntoPods(namespace, volName string) erro
 if vol.Name != volName {
     continue
 }
-err := CreateFileToPod(r.Ctx, namespace, pod.Name, "container-busybox", vol.Name, FileName, fmt.Sprintf("ns-%s pod-%s volume-%s", namespace, pod.Name, vol.Name))
+err := CreateFileToPod(
+    r.Ctx,
+    namespace,
+    pod.Name,
+    "container-busybox",
+    vol.Name,
+    FileName,
+    fmt.Sprintf("ns-%s pod-%s volume-%s", namespace, pod.Name, vol.Name),
+    common.WorkerOSLinux,
+)
 if err != nil {
     return errors.Wrap(err, fmt.Sprintf("failed to create file into pod %s in namespace: %q", pod.Name, namespace))
 }
@@ -21,4 +21,4 @@ import (
     . "github.com/vmware-tanzu/velero/test/e2e/test"
 )

-var MultiNSBackupRestore func() = TestFunc(&basic.MultiNSBackup{IsScalTest: true})
+var MultiNSBackupRestore func() = TestFunc(&basic.MultiNSBackup{IsScaleTest: true})
@@ -126,15 +126,6 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
 tmpCfgForOldVeleroInstall.UpgradeFromVeleroVersion = veleroCLI2Version.VeleroVersion
 tmpCfgForOldVeleroInstall.VeleroCLI = veleroCLI2Version.VeleroCLI

-// CLI under version v1.14.x
-if veleroCLI2Version.VeleroVersion < "v1.15" {
-    tmpCfgForOldVeleroInstall.BackupRepoConfigMap = ""
-    fmt.Printf(
-        "CLI version %s is lower than v1.15. Set BackupRepoConfigMap to empty, because it's not supported",
-        veleroCLI2Version.VeleroVersion,
-    )
-}
-
 tmpCfgForOldVeleroInstall, err = SetImagesToDefaultValues(
     tmpCfgForOldVeleroInstall,
     veleroCLI2Version.VeleroVersion,
@@ -176,6 +167,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
     tmpCfg.KibishiiDirectory,
     DefaultKibishiiData,
     tmpCfg.ImageRegistryProxy,
+    veleroCfg.WorkerOS,
 )).To(Succeed())
 })
@@ -269,8 +261,14 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
 })

 By(fmt.Sprintf("Verify workload %s after restore ", upgradeNamespace), func() {
-    Expect(KibishiiVerifyAfterRestore(*veleroCfg.ClientToInstallVelero, upgradeNamespace,
-        oneHourTimeout, DefaultKibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
+    Expect(KibishiiVerifyAfterRestore(
+        *veleroCfg.ClientToInstallVelero,
+        upgradeNamespace,
+        oneHourTimeout,
+        DefaultKibishiiData,
+        "",
+        veleroCfg.WorkerOS,
+    )).To(Succeed(), "Fail to verify workload after restore")
 })
 })
 })
@@ -129,6 +129,7 @@ type VeleroConfig struct {
     FailFast bool
     HasVspherePlugin bool
     ImageRegistryProxy string
+    WorkerOS string
 }

 type VeleroCfgInPerf struct {
@@ -11,6 +11,11 @@ import (
     "os/exec"
 )

+const (
+    WorkerOSLinux   string = "linux"
+    WorkerOSWindows string = "windows"
+)
+
 type OsCommandLine struct {
     Cmd string
     Args []string
@@ -322,21 +322,57 @@ func WriteRandomDataToFileInPod(ctx context.Context, namespace, podName, contain
     return cmd.Run()
 }

-func CreateFileToPod(ctx context.Context, namespace, podName, containerName, volume, filename, content string) error {
+func CreateFileToPod(
+    ctx context.Context,
+    namespace string,
+    podName string,
+    containerName string,
+    volume string,
+    filename string,
+    content string,
+    workerOS string,
+) error {
+    filePath := fmt.Sprintf("/%s/%s", volume, filename)
+    shell := "/bin/sh"
+    shellParameter := "-c"
+
+    if workerOS == common.WorkerOSWindows {
+        filePath = fmt.Sprintf("C:\\%s\\%s", volume, filename)
+        shell = "cmd"
+        shellParameter = "/c"
+    }
+
     arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
-        "--", "/bin/sh", "-c", fmt.Sprintf("echo ns-%s pod-%s volume-%s > /%s/%s", namespace, podName, volume, volume, filename)}
+        "--", shell, shellParameter, fmt.Sprintf("echo ns-%s pod-%s volume-%s > %s", namespace, podName, volume, filePath)}

     cmd := exec.CommandContext(ctx, "kubectl", arg...)
     fmt.Printf("Kubectl exec cmd =%v\n", cmd)
     return cmd.Run()
 }

-func FileExistInPV(ctx context.Context, namespace, podName, containerName, volume, filename string) (bool, error) {
-    stdout, stderr, err := ReadFileFromPodVolume(ctx, namespace, podName, containerName, volume, filename)
+func FileExistInPV(
+    ctx context.Context,
+    namespace string,
+    podName string,
+    containerName string,
+    volume string,
+    filename string,
+    workerOS string,
+) (bool, error) {
+    stdout, stderr, err := ReadFileFromPodVolume(ctx, namespace, podName, containerName, volume, filename, workerOS)

     output := fmt.Sprintf("%s:%s", stdout, stderr)

+    if workerOS == common.WorkerOSWindows {
+        if strings.Contains(output, "The system cannot find the file specified") {
+            return false, nil
+        }
+    }
+
     if strings.Contains(output, fmt.Sprintf("/%s/%s: No such file or directory", volume, filename)) {
         return false, nil
-    } else {
+    }
+
     if err == nil {
         return true, nil
     } else {
@@ -344,10 +380,22 @@ func FileExistInPV(ctx context.Context, namespace, podName, containerName, volum
             filename, volume, podName, namespace))
     }
 }
-}
-func ReadFileFromPodVolume(ctx context.Context, namespace, podName, containerName, volume, filename string) (string, string, error) {
+func ReadFileFromPodVolume(
+    ctx context.Context,
+    namespace string,
+    podName string,
+    containerName string,
+    volume string,
+    filename string,
+    workerOS string,
+) (string, string, error) {
     arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
         "--", "cat", fmt.Sprintf("/%s/%s", volume, filename)}
+    if workerOS == common.WorkerOSWindows {
+        arg = []string{"exec", "-n", namespace, "-c", containerName, podName,
+            "--", "cmd", "/c", fmt.Sprintf("type C:\\%s\\%s", volume, filename)}
+    }
+
     cmd := exec.CommandContext(ctx, "kubectl", arg...)
     fmt.Printf("Kubectl exec cmd =%v\n", cmd)
     stdout, stderr, err := veleroexec.RunCommand(cmd)
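With the extra trailing `workerOS` argument, callers choose the shell (`/bin/sh -c` vs `cmd /c`) and path convention (`/vol/file` vs `C:\vol\file`) per workload. A usage sketch against the updated helpers (the namespace, pod, and container names are made up for illustration):

```go
package main

import (
	"context"
	"fmt"

	"github.com/vmware-tanzu/velero/test/util/common"
	k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
)

func main() {
	ctx := context.Background()

	// Write a file into the "data" volume of a hypothetical Windows pod;
	// the helper switches to `cmd /c` and a C:\data\... path internally.
	if err := k8sutil.CreateFileToPod(
		ctx, "demo-ns", "demo-pod", "demo-container", "data",
		"hello.txt", "hello from the e2e suite", common.WorkerOSWindows,
	); err != nil {
		fmt.Println("create file failed:", err)
		return
	}

	// Read it back; on Windows a missing file is detected via
	// "The system cannot find the file specified" in the kubectl output.
	exist, err := k8sutil.FileExistInPV(
		ctx, "demo-ns", "demo-pod", "demo-container", "data",
		"hello.txt", common.WorkerOSWindows,
	)
	fmt.Println("exists:", exist, "err:", err)
}
```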
@@ -36,6 +36,7 @@ import (

     veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
     . "github.com/vmware-tanzu/velero/test"
+    "github.com/vmware-tanzu/velero/test/util/common"
     . "github.com/vmware-tanzu/velero/test/util/k8s"
     . "github.com/vmware-tanzu/velero/test/util/providers"
     . "github.com/vmware-tanzu/velero/test/util/velero"
@@ -117,6 +118,7 @@ func RunKibishiiTests(
     kibishiiDirectory,
     DefaultKibishiiData,
     veleroCfg.ImageRegistryProxy,
+    veleroCfg.WorkerOS,
 ); err != nil {
     return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", kibishiiNamespace)
 }
@@ -206,14 +208,22 @@ func RunKibishiiTests(
 // Modify PV data right after backup. If PV's reclaim policy is retain, PV will be restored with the origin resource config
 fileName := "file-" + kibishiiNamespace
 fileBaseContent := fileName
-fmt.Printf("Re-poulate volume %s\n", time.Now().Format("2006-01-02 15:04:05"))
+fmt.Printf("Re-populate volume %s\n", time.Now().Format("2006-01-02 15:04:05"))
 for _, pod := range KibishiiPodNameList {
     // To ensure Kibishii verification result is accurate
     ClearKibishiiData(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data")

     CreateFileContent := fileBaseContent + pod
-    err := CreateFileToPod(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data",
-        fileName, CreateFileContent)
+    err := CreateFileToPod(
+        oneHourTimeout,
+        kibishiiNamespace,
+        pod,
+        "kibishii",
+        "data",
+        fileName,
+        CreateFileContent,
+        veleroCfg.WorkerOS,
+    )
     if err != nil {
         return errors.Wrapf(err, "failed to create file %s", fileName)
     }
@@ -269,7 +279,7 @@ func RunKibishiiTests(
 }

 fmt.Printf("KibishiiVerifyAfterRestore %s\n", time.Now().Format("2006-01-02 15:04:05"))
-if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData, fileName); err != nil {
+if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData, fileName, veleroCfg.WorkerOS); err != nil {
     return errors.Wrapf(err, "Error verifying kibishii after restore")
 }
@@ -279,12 +289,13 @@ func RunKibishiiTests(

 func installKibishii(
     ctx context.Context,
-    namespace string,
+    namespace,
     cloudPlatform,
     veleroFeatures,
     kibishiiDirectory string,
     workerReplicas int,
     imageRegistryProxy string,
+    workerOS string,
 ) error {
     if strings.EqualFold(cloudPlatform, Azure) &&
         strings.EqualFold(veleroFeatures, FeatureCSI) {
@@ -295,15 +306,28 @@ func installKibishii(
     cloudPlatform = AwsCSI
 }

+targetKustomizeDir := path.Join(kibishiiDirectory, cloudPlatform)
+
 if strings.EqualFold(cloudPlatform, Vsphere) {
     if strings.HasPrefix(kibishiiDirectory, "https://") {
         return errors.New("vSphere needs to download the Kibishii repository first because it needs to inject some image patch file to work.")
     }

+    // TODO: blackpiglet debug
+    fmt.Printf("targetKustomizeDir %s, workerOS: %s, WorkerOSWindows: %s.\n", targetKustomizeDir, workerOS, common.WorkerOSWindows)
+
+    if workerOS == common.WorkerOSWindows {
+        targetKustomizeDir += "-windows"
+
+        // TODO: blackpiglet debug
+        fmt.Printf("targetKustomizeDir for windows %s\n", targetKustomizeDir)
+    }
+    fmt.Printf("The installed Kibishii Kustomize package directory is %s.\n", targetKustomizeDir)
+
     kibishiiImage := readBaseKibishiiImage(path.Join(kibishiiDirectory, "base", "kibishii.yaml"))
     if err := generateKibishiiImagePatch(
         path.Join(imageRegistryProxy, kibishiiImage),
-        path.Join(kibishiiDirectory, cloudPlatform, "worker-image-patch.yaml"),
+        path.Join(targetKustomizeDir, "worker-image-patch.yaml"),
     ); err != nil {
         return nil
     }
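In effect, with `--worker-os=windows` the kustomize overlay applied for a platform becomes `<kibishii-directory>/<platform>-windows` instead of `<kibishii-directory>/<platform>`; this assumes the Kibishii repository ships matching `*-windows` overlays for the platforms under test.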
@@ -311,22 +335,39 @@ func installKibishii(
     jumpPadImage := readBaseJumpPadImage(path.Join(kibishiiDirectory, "base", "jump-pad.yaml"))
     if err := generateJumpPadPatch(
         path.Join(imageRegistryProxy, jumpPadImage),
-        path.Join(kibishiiDirectory, cloudPlatform, "jump-pad-image-patch.yaml"),
+        path.Join(targetKustomizeDir, "jump-pad-image-patch.yaml"),
     ); err != nil {
         return nil
     }
 }

 // We use kustomize to generate YAML for Kibishii from the checked-in yaml directories

 kibishiiInstallCmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", namespace, "-k",
-    path.Join(kibishiiDirectory, cloudPlatform), "--timeout=90s")
+    targetKustomizeDir, "--timeout=90s")
 _, stderr, err := veleroexec.RunCommand(kibishiiInstallCmd)
 fmt.Printf("Install Kibishii cmd: %s\n", kibishiiInstallCmd)
 if err != nil {
     return errors.Wrapf(err, "failed to install kibishii, stderr=%s", stderr)
 }

-labelNamespaceCmd := exec.CommandContext(ctx, "kubectl", "label", "namespace", namespace, "pod-security.kubernetes.io/enforce=baseline", "pod-security.kubernetes.io/enforce-version=latest", "--overwrite=true")
+psa_enforce_policy := "baseline"
+if workerOS == common.WorkerOSWindows {
+    // Windows container volume mount root directories only allow privileged users to write.
+    // https://github.com/kubernetes/kubernetes/issues/131341
+    psa_enforce_policy = "privileged"
+}
+
+labelNamespaceCmd := exec.CommandContext(
+    ctx,
+    "kubectl",
+    "label",
+    "namespace",
+    namespace,
+    fmt.Sprintf("pod-security.kubernetes.io/enforce=%s", psa_enforce_policy),
+    "pod-security.kubernetes.io/enforce-version=latest",
+    "--overwrite=true",
+)
 _, stderr, err = veleroexec.RunCommand(labelNamespaceCmd)
 fmt.Printf("Label namespace with PSA policy: %s\n", labelNamespaceCmd)
 if err != nil {
@@ -558,7 +599,7 @@ func waitForKibishiiPods(ctx context.Context, client TestClient, kibishiiNamespa
     )
 }

-func KibishiiGenerateData(oneHourTimeout context.Context, kibishiiNamespace string, kibishiiData *KibishiiData) error {
+func kibishiiGenerateData(oneHourTimeout context.Context, kibishiiNamespace string, kibishiiData *KibishiiData) error {
     fmt.Printf("generateData %s\n", time.Now().Format("2006-01-02 15:04:05"))
     if err := generateData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil {
         return errors.Wrap(err, "Failed to generate data")
@@ -577,6 +618,7 @@ func KibishiiPrepareBeforeBackup(
     kibishiiDirectory string,
     kibishiiData *KibishiiData,
     imageRegistryProxy string,
+    workerOS string,
 ) error {
     fmt.Printf("installKibishii %s\n", time.Now().Format("2006-01-02 15:04:05"))
     serviceAccountName := "default"
@@ -599,6 +641,7 @@ func KibishiiPrepareBeforeBackup(
     kibishiiDirectory,
     kibishiiData.ExpectedNodes,
     imageRegistryProxy,
+    workerOS,
 ); err != nil {
     return errors.Wrap(err, "Failed to install Kibishii workload")
 }
@@ -611,12 +654,18 @@ func KibishiiPrepareBeforeBackup(
     if kibishiiData == nil {
         kibishiiData = DefaultKibishiiData
     }
-    KibishiiGenerateData(oneHourTimeout, kibishiiNamespace, kibishiiData)
+    kibishiiGenerateData(oneHourTimeout, kibishiiNamespace, kibishiiData)
     return nil
 }

-func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, oneHourTimeout context.Context,
-    kibishiiData *KibishiiData, incrementalFileName string) error {
+func KibishiiVerifyAfterRestore(
+    client TestClient,
+    kibishiiNamespace string,
+    oneHourTimeout context.Context,
+    kibishiiData *KibishiiData,
+    incrementalFileName string,
+    workerOS string,
+) error {
     if kibishiiData == nil {
         kibishiiData = DefaultKibishiiData
     }
@@ -628,7 +677,7 @@ func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, one
 }
 if incrementalFileName != "" {
     for _, pod := range KibishiiPodNameList {
-        exist, err := FileExistInPV(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data", incrementalFileName)
+        exist, err := FileExistInPV(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data", incrementalFileName, workerOS)
         if err != nil {
             return errors.Wrapf(err, "fail to get file %s", incrementalFileName)
         }
@@ -28,6 +28,7 @@ import (

     "github.com/pkg/errors"
     "golang.org/x/exp/slices"
+    "golang.org/x/mod/semver"
     appsv1api "k8s.io/api/apps/v1"
     corev1api "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -40,6 +41,7 @@ import (
     "github.com/vmware-tanzu/velero/pkg/cmd/cli/install"
     velerexec "github.com/vmware-tanzu/velero/pkg/util/exec"
     "github.com/vmware-tanzu/velero/test"
+    common "github.com/vmware-tanzu/velero/test/util/common"
     eksutil "github.com/vmware-tanzu/velero/test/util/eks"
     "github.com/vmware-tanzu/velero/test/util/k8s"
 )
@@ -51,6 +53,7 @@ type installOptions struct {
     RestoreHelperImage string
     VeleroServerDebugMode bool
     WithoutDisableInformerCacheParam bool
+    WorkerOS string
 }

 func VeleroInstall(ctx context.Context, veleroCfg *test.VeleroConfig, isStandbyCluster bool) error {
@@ -174,13 +177,14 @@ func VeleroInstall(ctx context.Context, veleroCfg *test.VeleroConfig, isStandbyC
 if err := installVeleroServer(
     ctx,
     veleroCfg.VeleroCLI,
-    veleroCfg.CloudProvider,
+    veleroCfg.VeleroVersion,
     &installOptions{
         Options: veleroInstallOptions,
         RegistryCredentialFile: veleroCfg.RegistryCredentialFile,
         RestoreHelperImage: veleroCfg.RestoreHelperImage,
         VeleroServerDebugMode: veleroCfg.VeleroServerDebugMode,
         WithoutDisableInformerCacheParam: veleroCfg.WithoutDisableInformerCacheParam,
+        WorkerOS: veleroCfg.WorkerOS,
     },
 ); err != nil {
     time.Sleep(1 * time.Minute)
@@ -282,7 +286,12 @@ func cleanVSpherePluginConfig(c clientset.Interface, ns, secretName, configMapNa
     return nil
 }

-func installVeleroServer(ctx context.Context, cli, cloudProvider string, options *installOptions) error {
+func installVeleroServer(
+    ctx context.Context,
+    cli string,
+    version string,
+    options *installOptions,
+) error {
     args := []string{"install"}
     namespace := "velero"
     if len(options.Namespace) > 0 {
@@ -295,6 +304,16 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options
 if options.UseNodeAgent {
     args = append(args, "--use-node-agent")
 }
+
+// TODO: need to consider aligning options.UseNodeAgentWindows usage
+// with options.UseNodeAgent
+// Only versions v1.16.0 and later support the Windows node agent.
+if options.WorkerOS == common.WorkerOSWindows &&
+    (semver.Compare(version, "v1.16") >= 0 || version == "main") {
+    fmt.Println("Install node-agent-windows. The Velero version is ", version)
+    args = append(args, "--use-node-agent-windows")
+}
+
 if options.DefaultVolumesToFsBackup {
     args = append(args, "--default-volumes-to-fs-backup")
 }
@@ -391,7 +410,10 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options
     args = append(args, fmt.Sprintf("--item-block-worker-count=%d", options.ItemBlockWorkerCount))
 }

-if options.BackupRepoConfigMap != "" {
+// Only versions no older than v1.15 support --backup-repository-configmap.
+if options.BackupRepoConfigMap != "" &&
+    (semver.Compare(version, "v1.15") >= 0 || version == "main") {
+    fmt.Println("Associate backup repository ConfigMap. The Velero version is ", version)
     args = append(args, fmt.Sprintf("--backup-repository-configmap=%s", options.BackupRepoConfigMap))
 }
@@ -399,7 +421,7 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options
     return err
 }

-return waitVeleroReady(ctx, namespace, options.UseNodeAgent)
+return waitVeleroReady(ctx, namespace, options.UseNodeAgent, options.UseNodeAgentWindows)
 }

 func createVeleroResources(ctx context.Context, cli, namespace string, args []string, options *installOptions) error {
@@ -617,7 +639,7 @@ func toUnstructured(res any) (unstructured.Unstructured, error) {
     return un, err
 }

-func waitVeleroReady(ctx context.Context, namespace string, useNodeAgent bool) error {
+func waitVeleroReady(ctx context.Context, namespace string, useNodeAgent bool, useNodeAgentWindows bool) error {
     fmt.Println("Waiting for Velero deployment to be ready.")
     // when doing upgrade by the "kubectl apply" the command "kubectl wait --for=condition=available deployment/velero -n velero --timeout=600s" returns directly
     // use "rollout status" instead to avoid this. For more detail information, refer to https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#complete-deployment
@@ -649,6 +671,28 @@ func waitVeleroReady(ctx context.Context, namespace string, useNodeAgent bool) e
     }
 }

+if useNodeAgentWindows {
+    fmt.Println("Waiting for node-agent-windows DaemonSet to be ready.")
+    err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
+        stdout, stderr, err := velerexec.RunCommand(exec.CommandContext(ctx, "kubectl", "get", "DaemonSet/node-agent-windows",
+            "-o", "json", "-n", namespace))
+        if err != nil {
+            return false, errors.Wrapf(err, "failed to get the node-agent-windows DaemonSet, stdout=%s, stderr=%s", stdout, stderr)
+        }
+        ds := &appsv1api.DaemonSet{}
+        if err = json.Unmarshal([]byte(stdout), ds); err != nil {
+            return false, errors.Wrapf(err, "failed to unmarshal the node-agent-windows DaemonSet")
+        }
+        if ds.Status.DesiredNumberScheduled == ds.Status.NumberAvailable {
+            return true, nil
+        }
+        return false, nil
+    })
+    if err != nil {
+        return errors.Wrap(err, "fail to wait for the node-agent-windows ready")
+    }
+}
+
 fmt.Printf("Velero is installed and ready to be tested in the %s namespace! ⛵ \n", namespace)
 return nil
 }
@@ -1393,7 +1393,7 @@ func VeleroUpgrade(ctx context.Context, veleroCfg VeleroConfig) error {
         return errors.Wrap(err, "Fail to update node agent")
     }
 }
-return waitVeleroReady(ctx, veleroCfg.VeleroNamespace, veleroCfg.UseNodeAgent)
+return waitVeleroReady(ctx, veleroCfg.VeleroNamespace, veleroCfg.UseNodeAgent, veleroCfg.UseNodeAgentWindows)
 }

 func ApplyCRDs(ctx context.Context, veleroCLI string) ([]string, error) {