Compare commits
18 Commits
main...v1.14.0-rc
Author | SHA1 | Date
---|---|---
 | 2fc6300f22 | 
 | 200f16e539 | 
 | 0d3657240a | 
 | 08fea6e994 | 
 | d20bd165a9 | 
 | bf778c7d21 | 
 | a65005996a | 
 | f61c8b9042 | 
 | 21366795d1 | 
 | f6367ca396 | 
 | ce16acb12e | 
 | 54d5dabdda | 
 | 952f713b3b | 
 | 1f1ccab948 | 
 | 9164bc95a4 | 
 | 74966d0e2c | 
 | 7d22548d7a | 
 | 892fa79051 | 
@@ -14,7 +14,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v5
       with:
-        go-version: '1.22'
+        go-version: '1.22.2'
       id: go
     # Look for a CLI that's made for this PR
     - name: Fetch built CLI
@@ -14,7 +14,7 @@ jobs:
     - name: Set up Go
      uses: actions/setup-go@v5
       with:
-        go-version: '1.22'
+        go-version: '1.22.2'
       id: go
     # Look for a CLI that's made for this PR
     - name: Fetch built CLI
@@ -82,7 +82,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v5
       with:
-        go-version: '1.22'
+        go-version: '1.22.2'
       id: go
     - name: Check out the code
       uses: actions/checkout@v4
@@ -10,7 +10,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v5
       with:
-        go-version: '1.22'
+        go-version: '1.22.2'
       id: go
     - name: Check out the code
       uses: actions/checkout@v4
@@ -15,7 +15,7 @@ jobs:
       with:
         # ignore the config/.../crd.go file as it's generated binary data that is edited elswhere.
         skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE
-        ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast
+        ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast,notin,sme
         check_filenames: true
         check_hidden: true
 
@@ -18,7 +18,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v5
       with:
-        go-version: '1.22'
+        go-version: '1.22.2'
       id: go
 
     - uses: actions/checkout@v4
@@ -5,7 +5,7 @@
 We as members, contributors, and leaders pledge to make participation in the Velero project and our
 community a harassment-free experience for everyone, regardless of age, body
 size, visible or invisible disability, ethnicity, sex characteristics, gender
-identity and expression, level of experience, education, socio-economic status,
+identity and expression, level of experience, education, socioeconomic status,
 nationality, personal appearance, race, religion, or sexual identity
 and orientation.
 
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.22-bookworm as velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.22.2-bookworm as velero-builder
 
 ARG GOPROXY
 ARG BIN
@@ -47,7 +47,7 @@ RUN mkdir -p /output/usr/bin && \
     go clean -modcache -cache
 
 # Restic binary build section
-FROM --platform=$BUILDPLATFORM golang:1.22-bookworm as restic-builder
+FROM --platform=$BUILDPLATFORM golang:1.22.2-bookworm as restic-builder
 
 ARG BIN
 ARG TARGETOS
@@ -70,7 +70,7 @@ RUN mkdir -p /output/usr/bin && \
     go clean -modcache -cache
 
 # Velero image packing section
-FROM paketobuildpacks/run-jammy-tiny:latest
+FROM paketobuildpacks/run-jammy-tiny:0.2.38
 
 LABEL maintainer="Xun Jiang <jxun@vmware.com>"
 
Tiltfile
@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
 
 tilt_helper_dockerfile_header = """
 # Tilt image
-FROM golang:1.22 as tilt-helper
+FROM golang:1.22.2 as tilt-helper
 
 # Support live reloading with Tilt
 RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
@@ -14,7 +14,7 @@ https://velero.io/docs/v1.14/upgrade-to-1.14/
 
 ### Highlights
 
-#### The maintenance work for kopia backup repositories is run in jobs
+#### The maintenance work for kopia/restic backup repositories is run in jobs
 Since velero started using kopia as the approach for filesystem-level backup/restore, we've noticed an issue when velero connects to the kopia backup repositories and performs maintenance, it sometimes consumes excessive memory that can cause the velero pod to get OOM Killed. To mitigate this issue, the maintenance work will be moved out of velero pod to a separate kubernetes job, and the user will be able to specify the resource request in "velero install".
 #### Volume Policies are extended to support more actions to handle volumes
 In an earlier release, a flexible volume policy was introduced to skip certain volumes from a backup. In v1.14 we've made enhancement to this policy to allow the user to set how the volumes should be backed up. The user will be able to set "fs-backup" or "snapshot" as value of "action" in the policy and velero will backup the volumes accordingly. This enhancement allows the user to achieve a fine-grained control like "opt-in/out" without having to update the target workload. For more details please refer to https://velero.io/docs/v1.14/resource-filtering/#supported-volumepolicy-actions
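For illustration only, a volume policy using the new "action" values might look like the sketch below. This is a minimal example assuming the `version: v1` policy format referenced elsewhere in this changeset; the storage class name and condition values are placeholders, not part of this change.

```yaml
version: v1
volumePolicies:
  - conditions:
      storageClass:
        - ebs-sc        # placeholder storage class name
    action:
      type: fs-backup   # or "snapshot", per the v1.14 highlight above
```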
@@ -38,6 +38,7 @@ Besides the service principal with secret(password)-based authentication, Velero
 * CSI plugin has been merged into velero repo in v1.14 release. It will be installed by default as an internal plugin, and should not be installed via "–plugins " parameter in "velero install" command.
 * The default resource requests and limitations for node agent are removed in v1.14, to make the node agent pods have the QoS class of "BestEffort", more details please refer to #7391
 * There's a change in namespace filtering behavior during backup: In v1.14, when the includedNamespaces/excludedNamespaces fields are not set and the labelSelector/OrLabelSelectors are set in the backup spec, the backup will only include the namespaces which contain the resources that match the label selectors, while in previous releases all namespaces will be included in the backup with such settings. More details refer to #7105
+* Patching the PV in the "Finalizing" state may cause the restore to be in "PartiallyFailed" state when the PV is blocked in "Pending" state, while in the previous release the restore may end up being in "Complete" state. For more details refer to #7866
 
 ### All Changes
 * Fix backup log to show error string, not index (#7805, @piny940)
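To illustrate the namespace-filtering note above: a backup spec that sets only a label selector and leaves includedNamespaces/excludedNamespaces unset, as sketched below, will in v1.14 back up only the namespaces containing matching resources. The backup name and label values are hypothetical.

```yaml
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: label-selector-only-backup   # hypothetical name
  namespace: velero
spec:
  # includedNamespaces / excludedNamespaces intentionally left unset
  labelSelector:
    matchLabels:
      app: nginx                     # hypothetical label
```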
@@ -65,7 +65,7 @@ This page contains a pre-migration checklist for ensuring a repo migration goes
 
 #### Updating Netlify
 
-The settings for Netflify should remain the same, except that it now needs to be installed in the new repo. The instructions on how to install Netlify on the new repo are here: https://www.netlify.com/docs/github-permissions/.
+The settings for Netlify should remain the same, except that it now needs to be installed in the new repo. The instructions on how to install Netlify on the new repo are here: https://www.netlify.com/docs/github-permissions/.
 
 #### Communication strategy
 
@@ -27,7 +27,7 @@ Moreover, we would like to create a general workflow to variations during the da
 - Support different data accesses, i.e., file system level and block level
 - Support different snapshot types, i.e., CSI snapshot, volume snapshot API from storage vendors
 - Support different snapshot accesses, i.e., through PV generated from snapshots, and through direct access API from storage vendors
-- Reuse the existing Velero generic data path as creatd in [Unified Repository design][1]
+- Reuse the existing Velero generic data path as created in [Unified Repository design][1]
 
 ## Non-Goals
 
go.mod
@@ -1,6 +1,6 @@
 module github.com/vmware-tanzu/velero
 
-go 1.22.0
+go 1.22.2
 
 require (
     cloud.google.com/go/storage v1.40.0
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=$TARGETPLATFORM golang:1.22-bookworm
+FROM --platform=$TARGETPLATFORM golang:1.22.2-bookworm
 
 ARG GOPROXY
 
@@ -99,3 +99,6 @@ RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/i
 RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl
 RUN chmod +x ./kubectl
 RUN mv ./kubectl /usr/local/bin
+
+# Fix the "dubious ownership" issue from git when running goreleaser.sh
+RUN echo "[safe] \n\t directory = *" > /.gitconfig
@@ -182,7 +182,7 @@ func TestGetResourceMatchedAction(t *testing.T) {
             expectedAction: &Action{Type: "snapshot"},
         },
         {
-            name: "dismatch all policies",
+            name: "mismatch all policies",
             volume: &structuredVolume{
                 capacity:     *resource.NewQuantity(50<<30, resource.BinarySI),
                 storageClass: "ebs-sc",
@@ -394,7 +394,7 @@ volumePolicies:
             skip: true,
         },
         {
-            name: "dismatch volume by types",
+            name: "mismatch volume by types",
             yamlData: `version: v1
 volumePolicies:
 - conditions:
@@ -165,7 +165,7 @@ func TestNFSConditionMatch(t *testing.T) {
             expectedMatch: true,
         },
         {
-            name:          "server dismatch",
+            name:          "server mismatch",
             condition:     &nfsCondition{&nFSVolumeSource{Server: "192.168.10.20", Path: ""}},
             volume:        setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: ""}, nil),
             expectedMatch: false,
@@ -662,17 +662,17 @@ type RestoreVolumeInfoTracker struct {
 
     // map of PV name to the NativeSnapshotInfo from which the PV is restored
     pvNativeSnapshotMap map[string]*NativeSnapshotInfo
-    // map of PV name to the CSISnapshot object from which the PV is restored
-    pvCSISnapshotMap map[string]snapshotv1api.VolumeSnapshot
-    datadownloadList *velerov2alpha1.DataDownloadList
-    pvrs             []*velerov1api.PodVolumeRestore
+    // map of PVC object to the CSISnapshot object from which the PV is restored
+    // the key is in the form of $pvc-ns/$pvc-name
+    pvcCSISnapshotMap map[string]snapshotv1api.VolumeSnapshot
+    datadownloadList  *velerov2alpha1.DataDownloadList
+    pvrs              []*velerov1api.PodVolumeRestore
 }
 
 // Populate data objects in the tracker, which will be used to generate the RestoreVolumeInfo array in Result()
 // The input param resourceList should be the final result of the restore.
 func (t *RestoreVolumeInfoTracker) Populate(ctx context.Context, restoredResourceList map[string][]string) {
     pvcs := RestoredPVCFromRestoredResourceList(restoredResourceList)
 
     t.Lock()
     defer t.Unlock()
     for item := range pvcs {
@@ -684,25 +684,26 @@ func (t *RestoreVolumeInfoTracker) Populate(ctx context.Context, restoredResourc
             log.WithError(err).Error("Failed to get PVC")
             continue
         }
-        if pvc.Status.Phase != corev1api.ClaimBound || pvc.Spec.VolumeName == "" {
-            log.Info("PVC is not bound or has no volume name")
-            continue
-        }
-        pv := &corev1api.PersistentVolume{}
-        if err := t.client.Get(ctx, kbclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv); err != nil {
-            log.WithError(err).Error("Failed to get PV")
-        } else {
-            t.pvPvc.insert(*pv, pvcName, pvcNS)
-        }
         // Collect the CSI VolumeSnapshot objects referenced by the restored PVCs,
         if pvc.Spec.DataSource != nil && pvc.Spec.DataSource.Kind == "VolumeSnapshot" {
             vs := &snapshotv1api.VolumeSnapshot{}
             if err := t.client.Get(ctx, kbclient.ObjectKey{Namespace: pvcNS, Name: pvc.Spec.DataSource.Name}, vs); err != nil {
                 log.WithError(err).Error("Failed to get VolumeSnapshot")
             } else {
-                t.pvCSISnapshotMap[pv.Name] = *vs
+                t.pvcCSISnapshotMap[pvc.Namespace+"/"+pvcName] = *vs
             }
         }
+        if pvc.Status.Phase == corev1api.ClaimBound && pvc.Spec.VolumeName != "" {
+            pv := &corev1api.PersistentVolume{}
+            if err := t.client.Get(ctx, kbclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv); err != nil {
+                log.WithError(err).Error("Failed to get PV")
+            } else {
+                t.pvPvc.insert(*pv, pvcName, pvcNS)
+            }
+        } else {
+            log.Warn("PVC is not bound or has no volume name")
+            continue
+        }
     }
     if err := t.client.List(ctx, t.datadownloadList, &kbclient.ListOptions{
         Namespace: t.restore.Namespace,
@@ -761,21 +762,35 @@ func (t *RestoreVolumeInfoTracker) Result() []*RestoreVolumeInfo {
     }
 
     // Generate RestoreVolumeInfo for PVs restored from CSISnapshots
-    for pvName, csiSnapshot := range t.pvCSISnapshotMap {
+    for pvc, csiSnapshot := range t.pvcCSISnapshotMap {
+        n := strings.Split(pvc, "/")
+        if len(n) != 2 {
+            t.log.Warnf("Invalid PVC key '%s' in the pvc-CSISnapshot map, skip populating it to volume info", pvc)
+            continue
+        }
+        pvcNS, pvcName := n[0], n[1]
+        var restoreSize int64 = 0
+        if csiSnapshot.Status != nil && csiSnapshot.Status.RestoreSize != nil {
+            restoreSize = csiSnapshot.Status.RestoreSize.Value()
+        }
+        vscName := ""
+        if csiSnapshot.Spec.Source.VolumeSnapshotContentName != nil {
+            vscName = *csiSnapshot.Spec.Source.VolumeSnapshotContentName
+        }
         volumeInfo := &RestoreVolumeInfo{
-            PVName:            pvName,
+            PVCNamespace:      pvcNS,
+            PVCName:           pvcName,
             SnapshotDataMoved: false,
             RestoreMethod:     CSISnapshot,
             CSISnapshotInfo: &CSISnapshotInfo{
                 SnapshotHandle: csiSnapshot.Annotations[VolumeSnapshotHandleAnnotation],
-                Size:           csiSnapshot.Status.RestoreSize.Value(),
+                Size:           restoreSize,
                 Driver:         csiSnapshot.Annotations[CSIDriverNameAnnotation],
-                VSCName:        *csiSnapshot.Spec.Source.VolumeSnapshotContentName,
+                VSCName:        vscName,
             },
         }
-        if pvcPVInfo := t.pvPvc.retrieve(pvName, "", ""); pvcPVInfo != nil {
-            volumeInfo.PVCName = pvcPVInfo.PVCName
-            volumeInfo.PVCNamespace = pvcPVInfo.PVCNamespace
+        if pvcPVInfo := t.pvPvc.retrieve("", pvcName, pvcNS); pvcPVInfo != nil {
+            volumeInfo.PVName = pvcPVInfo.PV.Name
         }
         volumeInfos = append(volumeInfos, volumeInfo)
     }
@@ -829,7 +844,7 @@ func NewRestoreVolInfoTracker(restore *velerov1api.Restore, logger logrus.FieldL
             data: make(map[string]pvcPvInfo),
         },
         pvNativeSnapshotMap: make(map[string]*NativeSnapshotInfo),
-        pvCSISnapshotMap:    make(map[string]snapshotv1api.VolumeSnapshot),
+        pvcCSISnapshotMap:   make(map[string]snapshotv1api.VolumeSnapshot),
         datadownloadList:    &velerov2alpha1.DataDownloadList{},
     }
 }
@@ -933,7 +933,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
                 data: make(map[string]pvcPvInfo),
             },
             pvNativeSnapshotMap: map[string]*NativeSnapshotInfo{},
-            pvCSISnapshotMap:    map[string]snapshotv1api.VolumeSnapshot{},
+            pvcCSISnapshotMap:   map[string]snapshotv1api.VolumeSnapshot{},
             datadownloadList:    &velerov2alpha1.DataDownloadList{},
             pvrs:                []*velerov1api.PodVolumeRestore{},
         },
@@ -968,8 +968,8 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
                     IOPS:         "10000",
                 },
             },
-            pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
-            datadownloadList: &velerov2alpha1.DataDownloadList{},
+            pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
+            datadownloadList:  &velerov2alpha1.DataDownloadList{},
             pvrs: []*velerov1api.PodVolumeRestore{
                 builder.ForPodVolumeRestore("velero", "testRestore-1234").
                     PodNamespace("testNS").
@@ -1031,8 +1031,8 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
                 },
             },
             pvNativeSnapshotMap: map[string]*NativeSnapshotInfo{},
-            pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{
-                "testPV": *builder.ForVolumeSnapshot("sourceNS", "testCSISnapshot").
+            pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{
+                "testNS/testPVC": *builder.ForVolumeSnapshot("sourceNS", "testCSISnapshot").
                     ObjectMeta(
                         builder.WithAnnotations(VolumeSnapshotHandleAnnotation, "csi-snap-001",
                             CSIDriverNameAnnotation, "test-csi-driver"),
@@ -1101,7 +1101,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
                 },
             },
             pvNativeSnapshotMap: map[string]*NativeSnapshotInfo{},
-            pvCSISnapshotMap:    map[string]snapshotv1api.VolumeSnapshot{},
+            pvcCSISnapshotMap:   map[string]snapshotv1api.VolumeSnapshot{},
             datadownloadList: &velerov2alpha1.DataDownloadList{
                 Items: []velerov2alpha1.DataDownload{
                     *builder.ForDataDownload("velero", "testDataDownload-1").
@@ -427,7 +427,7 @@ func TestReconcile(t *testing.T) {
             notCreateFSBR: true,
         },
         {
-            name: "Dataupload should not be cancel with dismatch node",
+            name: "Dataupload should not be cancel with mismatch node",
             pod:  builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
             du: func() *velerov2alpha1api.DataUpload {
                 du := dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).SnapshotType(fakeSnapshotType).Cancel(true).Result()
@@ -182,10 +182,6 @@ func (m *manager) PruneRepo(repo *velerov1api.BackupRepository) error {
     m.repoLocker.LockExclusive(repo.Name)
     defer m.repoLocker.UnlockExclusive(repo.Name)
 
-    prd, err := m.getRepositoryProvider(repo)
-    if err != nil {
-        return errors.WithStack(err)
-    }
     param, err := m.assembleRepoParam(repo)
     if err != nil {
         return errors.WithStack(err)
@@ -208,11 +204,7 @@ func (m *manager) PruneRepo(repo *velerov1api.BackupRepository) error {
         return nil
     }
 
-    if err := prd.BoostRepoConnect(context.Background(), param); err != nil {
-        return errors.WithStack(err)
-    }
-
-    log.Info("Start to maintence repo")
+    log.Info("Start to maintenance repo")
 
     maintenanceJob, err := buildMaintenanceJob(m.maintenanceCfg, param, m.client, m.namespace)
     if err != nil {
@@ -581,7 +581,7 @@ func TestGetPodVolumeNameForPVC(t *testing.T) {
         expectedVolumeName string
     }{
         {
-            name: "should get volume name for pod with multuple PVCs",
+            name: "should get volume name for pod with multiple PVCs",
             pod: v1.Pod{
                 Spec: v1.PodSpec{
                     Volumes: []v1.Volume{
@@ -19,7 +19,7 @@ package test
 import (
     "context"
     "fmt"
-    "math/rand"
+    "math/rand/v2"
     "strings"
     "time"
 
@@ -105,8 +105,7 @@ func (t *TestCase) Init() error {
 }
 
 func (t *TestCase) GenerateUUID() string {
-    rand.Seed(time.Now().UnixNano())
-    return fmt.Sprintf("%08d", rand.Intn(100000000))
+    return fmt.Sprintf("%08d", rand.IntN(100000000))
 }
 
 func (t *TestCase) CreateResources() error {
@@ -168,9 +167,16 @@ func (t *TestCase) Verify() error {
 func (t *TestCase) Start() error {
     t.Ctx, t.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour)
     veleroCfg := t.GetTestCase().VeleroCfg
-    if (veleroCfg.CloudProvider == Azure || veleroCfg.CloudProvider == AWS) && strings.Contains(t.GetTestCase().CaseBaseName, "nodeport") {
+
+    if (veleroCfg.CloudProvider == Azure || veleroCfg.CloudProvider == AWS) &&
+        strings.Contains(t.GetTestCase().CaseBaseName, "nodeport") {
         Skip("Skip due to issue https://github.com/kubernetes/kubernetes/issues/114384 on AKS")
     }
+
+    if veleroCfg.UploaderType == UploaderTypeRestic &&
+        strings.Contains(t.GetTestCase().CaseBaseName, "ParallelFiles") {
+        Skip("Skip Parallel Files upload and download test cases for environments using Restic as uploader.")
+    }
     return nil
 }
 
@@ -178,11 +184,15 @@ func (t *TestCase) Clean() error {
     veleroCfg := t.GetTestCase().VeleroCfg
     if !veleroCfg.Debug {
         By(fmt.Sprintf("Clean namespace with prefix %s after test", t.CaseBaseName), func() {
-            CleanupNamespaces(t.Ctx, t.Client, t.CaseBaseName)
+            if err := CleanupNamespaces(t.Ctx, t.Client, t.CaseBaseName); err != nil {
+                fmt.Println("Fail to cleanup namespaces: ", err)
+            }
         })
         By("Clean backups after test", func() {
             veleroCfg.ClientToInstallVelero = &t.Client
-            DeleteAllBackups(t.Ctx, &veleroCfg)
+            if err := DeleteAllBackups(t.Ctx, &veleroCfg); err != nil {
+                fmt.Println("Fail to clean backups after test: ", err)
+            }
         })
     }
     return nil
@@ -38,6 +38,8 @@ const AWS = "aws"
 const Gcp = "gcp"
 const Vsphere = "vsphere"
 
+const UploaderTypeRestic = "restic"
+
 var PublicCloudProviders = []string{AWS, Azure, Gcp, Vsphere}
 var LocalCloudProviders = []string{Kind, VanillaZFS}
 var CloudProviders = append(PublicCloudProviders, LocalCloudProviders...)