Compare commits

...

18 Commits

Author SHA1 Message Date
lyndon-li 2fc6300f22
Merge pull request #7860 from blackpiglet/update_e2e_for_1_14
Skip parallel files upload and download test for Restic case
2024-06-13 10:08:32 +08:00
Xun Jiang/Bruce Jiang 200f16e539
Merge branch 'release-1.14' into update_e2e_for_1_14 2024-06-12 21:25:19 +08:00
Xun Jiang/Bruce Jiang 0d3657240a
Merge pull request #7876 from reasonerjt/update-release-note-1.14
Update release note of 1.14
2024-06-12 20:14:23 +08:00
Xun Jiang/Bruce Jiang 08fea6e994
Merge branch 'release-1.14' into update-release-note-1.14 2024-06-12 20:04:13 +08:00
Xun Jiang d20bd165a9 Skip parallel files upload and download test for Restic case.
Signed-off-by: Xun Jiang <blackpigletbruce@gmail.com>
2024-06-12 19:52:22 +08:00
Xun Jiang/Bruce Jiang bf778c7d21
Merge pull request #7875 from reasonerjt/fix-restore-crash-1.14
Add checks for csisnapshot for vol_info population
2024-06-12 19:41:14 +08:00
Daniel Jiang a65005996a Update release note of 1.14
Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-06-12 19:13:11 +08:00
Daniel Jiang f61c8b9042 Add checks for csisnapshot for vol_info population
fixes #7874

Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-06-12 18:57:29 +08:00
Daniel Jiang 21366795d1
Merge pull request #7852 from reasonerjt/fix-7849-1.14
Use PVC to track the CSI snapshot in restore
2024-06-04 13:33:27 +08:00
Daniel Jiang f6367ca396 Use PVC to track the CSI snapshot in restore
This commit fixes #7849.
It uses the PVC instead of the PV to track CSI snapshots when generating
restore volume info metadata, so that the metadata can be populated
correctly even when the PVC is not bound to a PV.

Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-06-04 10:10:36 +08:00
Daniel Jiang ce16acb12e
Merge pull request #7847 from Lyndon-Li/release-1.14
Avoid unnecessary repo connect for maintenance
2024-05-31 13:11:46 +08:00
Lyndon-Li 54d5dabdda avoid unnecessary repo connect for maintenance
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
2024-05-31 11:20:12 +08:00
Daniel Jiang 952f713b3b
Merge pull request #7830 from reasonerjt/fix-git-release-issue-1.14
Fix issue in "git status" in goreleaser.sh
2024-05-27 17:01:51 +08:00
Daniel Jiang 1f1ccab948 Fix issue in "git status" in goreleaser.sh
When dry-running tag-release.sh, there's an error
"fatal: detected dubious ownership in repository at
'/github.com/vmware-tanzu/velero'"

This commit works around this issue to make sure "tag-release.sh"
can finish successfully

Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-05-27 16:49:47 +08:00
Daniel Jiang 9164bc95a4
Merge pull request #7821 from reasonerjt/pin-image-1.14
Pin the version of Golang and base image for v1.14.0
2024-05-24 13:48:14 +08:00
Daniel Jiang 74966d0e2c Pin the version of Golang and base image
Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-05-24 13:25:23 +08:00
Daniel Jiang 7d22548d7a
Merge pull request #7824 from reasonerjt/fix-codespell-1.14
Fix the problems found by codespell
2024-05-24 13:24:34 +08:00
Daniel Jiang 892fa79051 Fix the problems found by codespell
Signed-off-by: Daniel Jiang <daniel.jiang@broadcom.com>
2024-05-24 11:32:26 +08:00
22 changed files with 89 additions and 66 deletions

View File

@@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
-go-version: '1.22'
+go-version: '1.22.2'
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI

View File

@@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
-go-version: '1.22'
+go-version: '1.22.2'
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI
@@ -82,7 +82,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
-go-version: '1.22'
+go-version: '1.22.2'
id: go
- name: Check out the code
uses: actions/checkout@v4

View File

@@ -10,7 +10,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
-go-version: '1.22'
+go-version: '1.22.2'
id: go
- name: Check out the code
uses: actions/checkout@v4

View File

@@ -15,7 +15,7 @@ jobs:
with:
# ignore the config/.../crd.go file as it's generated binary data that is edited elsewhere.
skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE
-ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast
+ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast,notin,sme
check_filenames: true
check_hidden: true

View File

@@ -18,7 +18,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
-go-version: '1.22'
+go-version: '1.22.2'
id: go
- uses: actions/checkout@v4

View File

@@ -5,7 +5,7 @@
We as members, contributors, and leaders pledge to make participation in the Velero project and our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
-identity and expression, level of experience, education, socio-economic status,
+identity and expression, level of experience, education, socioeconomic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

View File

@@ -13,7 +13,7 @@
# limitations under the License.
# Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.22-bookworm as velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.22.2-bookworm as velero-builder
ARG GOPROXY
ARG BIN
@@ -47,7 +47,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache
# Restic binary build section
-FROM --platform=$BUILDPLATFORM golang:1.22-bookworm as restic-builder
+FROM --platform=$BUILDPLATFORM golang:1.22.2-bookworm as restic-builder
ARG BIN
ARG TARGETOS
@@ -70,7 +70,7 @@ RUN mkdir -p /output/usr/bin && \
go clean -modcache -cache
# Velero image packing section
-FROM paketobuildpacks/run-jammy-tiny:latest
+FROM paketobuildpacks/run-jammy-tiny:0.2.38
LABEL maintainer="Xun Jiang <jxun@vmware.com>"

View File

@@ -52,7 +52,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
tilt_helper_dockerfile_header = """
# Tilt image
-FROM golang:1.22 as tilt-helper
+FROM golang:1.22.2 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \

View File

@@ -14,7 +14,7 @@ https://velero.io/docs/v1.14/upgrade-to-1.14/
### Highlights
-#### The maintenance work for kopia backup repositories is run in jobs
+#### The maintenance work for kopia/restic backup repositories is run in jobs
Since velero started using kopia for filesystem-level backup/restore, we've noticed that when velero connects to the kopia backup repositories and performs maintenance, it sometimes consumes excessive memory, which can cause the velero pod to get OOM-killed. To mitigate this issue, the maintenance work will be moved out of the velero pod to a separate Kubernetes job, and the user will be able to specify the resource request in "velero install".
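To picture the change, the sketch below shows roughly what such a maintenance run could look like as a plain batch/v1 Job; the image, args, and resource values are illustrative assumptions, not Velero's actual manifest (in the release the job is assembled in code by buildMaintenanceJob, which appears further down in this compare):

apiVersion: batch/v1
kind: Job
metadata:
  generateName: velero-repo-maintain-   # one Job per maintenance run
  namespace: velero
spec:
  backoffLimit: 0                       # fail fast; the next maintenance cycle retries
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: repo-maintenance
        image: velero/velero:v1.14.0    # assumed: the job reuses the velero image
        args: ["repo-maintenance"]      # hypothetical subcommand, for illustration only
        resources:
          requests:                     # the values the user would set via "velero install"
            cpu: 500m
            memory: 512Mi

Because the maintenance now runs in its own pod, a memory spike can at worst OOM-kill the job pod, not the velero server.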
#### Volume Policies are extended to support more actions to handle volumes
In an earlier release, a flexible volume policy was introduced to skip certain volumes from a backup. In v1.14 we've enhanced this policy to let the user set how volumes should be backed up: the user can set "fs-backup" or "snapshot" as the value of "action" in the policy, and velero will back up the volumes accordingly. This enhancement allows the user to achieve fine-grained control like "opt-in/out" without having to update the target workload. For more details please refer to https://velero.io/docs/v1.14/resource-filtering/#supported-volumepolicy-actions
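For example, a policy that forces file-system backup for every volume of a given storage class could look like the following sketch (schema as described in the resource-filtering docs linked above; the storage-class name is a placeholder):

version: v1
volumePolicies:
- conditions:
    storageClass:
    - ebs-sc           # placeholder storage class
  action:
    type: fs-backup    # v1.14 accepts "fs-backup" or "snapshot" here

Volumes matching the conditions are backed up with the requested method, with no opt-in/out annotations needed on the workload.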
@@ -38,6 +38,7 @@ Besides the service principal with secret(password)-based authentication, Velero
* The CSI plugin has been merged into the velero repo in the v1.14 release. It will be installed by default as an internal plugin, and should not be installed via the "plugins" parameter in the "velero install" command.
* The default resource requests and limits for node agent are removed in v1.14, to give the node agent pods the QoS class of "BestEffort"; for more details please refer to #7391
* There's a change in namespace filtering behavior during backup: in v1.14, when the includedNamespaces/excludedNamespaces fields are not set and the labelSelector/OrLabelSelectors are set in the backup spec, the backup will only include the namespaces which contain resources matching the label selectors, while in previous releases all namespaces would be included with such settings (see the sketch after this list). For more details refer to #7105
+* Patching the PV in the "Finalizing" state may cause the restore to be in "PartiallyFailed" state when the PV is blocked in "Pending" state, while in the previous release the restore may end up being in "Complete" state. For more details refer to #7866
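To illustrate the namespace-filtering change, a backup spec along these lines (all names are placeholders) now includes only the namespaces that contain resources matching the selector, whereas earlier releases would have included every namespace:

apiVersion: velero.io/v1
kind: Backup
metadata:
  name: label-only-backup    # placeholder name
  namespace: velero
spec:
  # includedNamespaces/excludedNamespaces intentionally left unset
  labelSelector:
    matchLabels:
      app: nginx             # placeholder label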
### All Changes
* Fix backup log to show error string, not index (#7805, @piny940)

View File

@@ -65,7 +65,7 @@ This page contains a pre-migration checklist for ensuring a repo migration goes
#### Updating Netlify
-The settings for Netflify should remain the same, except that it now needs to be installed in the new repo. The instructions on how to install Netlify on the new repo are here: https://www.netlify.com/docs/github-permissions/.
+The settings for Netlify should remain the same, except that it now needs to be installed in the new repo. The instructions on how to install Netlify on the new repo are here: https://www.netlify.com/docs/github-permissions/.
#### Communication strategy

View File

@@ -27,7 +27,7 @@ Moreover, we would like to create a general workflow to variations during the da
- Support different data accesses, i.e., file system level and block level
- Support different snapshot types, i.e., CSI snapshot, volume snapshot API from storage vendors
- Support different snapshot accesses, i.e., through PV generated from snapshots, and through direct access API from storage vendors
-- Reuse the existing Velero generic data path as creatd in [Unified Repository design][1]
+- Reuse the existing Velero generic data path as created in [Unified Repository design][1]
## Non-Goals

go.mod
View File

@@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero
-go 1.22.0
+go 1.22.2
require (
cloud.google.com/go/storage v1.40.0

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM --platform=$TARGETPLATFORM golang:1.22-bookworm
+FROM --platform=$TARGETPLATFORM golang:1.22.2-bookworm
ARG GOPROXY
@@ -99,3 +99,6 @@ RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/i
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl
RUN chmod +x ./kubectl
RUN mv ./kubectl /usr/local/bin
+# Fix the "dubious ownership" issue from git when running goreleaser.sh
+RUN echo "[safe] \n\t directory = *" > /.gitconfig

View File

@@ -182,7 +182,7 @@ func TestGetResourceMatchedAction(t *testing.T) {
expectedAction: &Action{Type: "snapshot"},
},
{
name: "dismatch all policies",
name: "mismatch all policies",
volume: &structuredVolume{
capacity: *resource.NewQuantity(50<<30, resource.BinarySI),
storageClass: "ebs-sc",
@@ -394,7 +394,7 @@
skip: true,
},
{
name: "dismatch volume by types",
name: "mismatch volume by types",
yamlData: `version: v1
volumePolicies:
- conditions:

View File

@@ -165,7 +165,7 @@ func TestNFSConditionMatch(t *testing.T) {
expectedMatch: true,
},
{
name: "server dismatch",
name: "server mismatch",
condition: &nfsCondition{&nFSVolumeSource{Server: "192.168.10.20", Path: ""}},
volume: setStructuredVolume(*resource.NewQuantity(0, resource.BinarySI), "", &nFSVolumeSource{Server: ""}, nil),
expectedMatch: false,

View File

@@ -662,17 +662,17 @@ type RestoreVolumeInfoTracker struct {
// map of PV name to the NativeSnapshotInfo from which the PV is restored
pvNativeSnapshotMap map[string]*NativeSnapshotInfo
-// map of PV name to the CSISnapshot object from which the PV is restored
-pvCSISnapshotMap map[string]snapshotv1api.VolumeSnapshot
-datadownloadList *velerov2alpha1.DataDownloadList
-pvrs []*velerov1api.PodVolumeRestore
+// map of PVC object to the CSISnapshot object from which the PV is restored
+// the key is in the form of $pvc-ns/$pvc-name
+pvcCSISnapshotMap map[string]snapshotv1api.VolumeSnapshot
+datadownloadList *velerov2alpha1.DataDownloadList
+pvrs []*velerov1api.PodVolumeRestore
}
// Populate data objects in the tracker, which will be used to generate the RestoreVolumeInfo array in Result()
// The input param resourceList should be the final result of the restore.
func (t *RestoreVolumeInfoTracker) Populate(ctx context.Context, restoredResourceList map[string][]string) {
pvcs := RestoredPVCFromRestoredResourceList(restoredResourceList)
t.Lock()
defer t.Unlock()
for item := range pvcs {
@@ -684,25 +684,26 @@
log.WithError(err).Error("Failed to get PVC")
continue
}
-if pvc.Status.Phase != corev1api.ClaimBound || pvc.Spec.VolumeName == "" {
-log.Info("PVC is not bound or has no volume name")
-continue
-}
-pv := &corev1api.PersistentVolume{}
-if err := t.client.Get(ctx, kbclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv); err != nil {
-log.WithError(err).Error("Failed to get PV")
-} else {
-t.pvPvc.insert(*pv, pvcName, pvcNS)
-}
// Collect the CSI VolumeSnapshot objects referenced by the restored PVCs,
if pvc.Spec.DataSource != nil && pvc.Spec.DataSource.Kind == "VolumeSnapshot" {
vs := &snapshotv1api.VolumeSnapshot{}
if err := t.client.Get(ctx, kbclient.ObjectKey{Namespace: pvcNS, Name: pvc.Spec.DataSource.Name}, vs); err != nil {
log.WithError(err).Error("Failed to get VolumeSnapshot")
} else {
-t.pvCSISnapshotMap[pv.Name] = *vs
+t.pvcCSISnapshotMap[pvc.Namespace+"/"+pvcName] = *vs
}
}
+if pvc.Status.Phase == corev1api.ClaimBound && pvc.Spec.VolumeName != "" {
+pv := &corev1api.PersistentVolume{}
+if err := t.client.Get(ctx, kbclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv); err != nil {
+log.WithError(err).Error("Failed to get PV")
+} else {
+t.pvPvc.insert(*pv, pvcName, pvcNS)
+}
+} else {
+log.Warn("PVC is not bound or has no volume name")
+continue
+}
}
if err := t.client.List(ctx, t.datadownloadList, &kbclient.ListOptions{
Namespace: t.restore.Namespace,
@@ -761,21 +762,35 @@ func (t *RestoreVolumeInfoTracker) Result() []*RestoreVolumeInfo {
}
// Generate RestoreVolumeInfo for PVs restored from CSISnapshots
-for pvName, csiSnapshot := range t.pvCSISnapshotMap {
+for pvc, csiSnapshot := range t.pvcCSISnapshotMap {
+n := strings.Split(pvc, "/")
+if len(n) != 2 {
+t.log.Warnf("Invalid PVC key '%s' in the pvc-CSISnapshot map, skip populating it to volume info", pvc)
+continue
+}
+pvcNS, pvcName := n[0], n[1]
+var restoreSize int64 = 0
+if csiSnapshot.Status != nil && csiSnapshot.Status.RestoreSize != nil {
+restoreSize = csiSnapshot.Status.RestoreSize.Value()
+}
+vscName := ""
+if csiSnapshot.Spec.Source.VolumeSnapshotContentName != nil {
+vscName = *csiSnapshot.Spec.Source.VolumeSnapshotContentName
+}
volumeInfo := &RestoreVolumeInfo{
-PVName: pvName,
+PVCNamespace: pvcNS,
+PVCName: pvcName,
SnapshotDataMoved: false,
RestoreMethod: CSISnapshot,
CSISnapshotInfo: &CSISnapshotInfo{
SnapshotHandle: csiSnapshot.Annotations[VolumeSnapshotHandleAnnotation],
-Size: csiSnapshot.Status.RestoreSize.Value(),
+Size: restoreSize,
Driver: csiSnapshot.Annotations[CSIDriverNameAnnotation],
-VSCName: *csiSnapshot.Spec.Source.VolumeSnapshotContentName,
+VSCName: vscName,
},
}
-if pvcPVInfo := t.pvPvc.retrieve(pvName, "", ""); pvcPVInfo != nil {
-volumeInfo.PVCName = pvcPVInfo.PVCName
-volumeInfo.PVCNamespace = pvcPVInfo.PVCNamespace
+if pvcPVInfo := t.pvPvc.retrieve("", pvcName, pvcNS); pvcPVInfo != nil {
+volumeInfo.PVName = pvcPVInfo.PV.Name
}
volumeInfos = append(volumeInfos, volumeInfo)
}
@@ -829,7 +844,7 @@ func NewRestoreVolInfoTracker(restore *velerov1api.Restore, logger logrus.FieldL
data: make(map[string]pvcPvInfo),
},
pvNativeSnapshotMap: make(map[string]*NativeSnapshotInfo),
-pvCSISnapshotMap: make(map[string]snapshotv1api.VolumeSnapshot),
+pvcCSISnapshotMap: make(map[string]snapshotv1api.VolumeSnapshot),
datadownloadList: &velerov2alpha1.DataDownloadList{},
}
}

View File

@@ -933,7 +933,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
data: make(map[string]pvcPvInfo),
},
pvNativeSnapshotMap: map[string]*NativeSnapshotInfo{},
-pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
+pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
datadownloadList: &velerov2alpha1.DataDownloadList{},
pvrs: []*velerov1api.PodVolumeRestore{},
},
@@ -968,8 +968,8 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
IOPS: "10000",
},
},
-pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
-datadownloadList: &velerov2alpha1.DataDownloadList{},
+pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
+datadownloadList: &velerov2alpha1.DataDownloadList{},
pvrs: []*velerov1api.PodVolumeRestore{
builder.ForPodVolumeRestore("velero", "testRestore-1234").
PodNamespace("testNS").
@@ -1031,8 +1031,8 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
},
},
pvNativeSnapshotMap: map[string]*NativeSnapshotInfo{},
-pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{
-"testPV": *builder.ForVolumeSnapshot("sourceNS", "testCSISnapshot").
+pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{
+"testNS/testPVC": *builder.ForVolumeSnapshot("sourceNS", "testCSISnapshot").
ObjectMeta(
builder.WithAnnotations(VolumeSnapshotHandleAnnotation, "csi-snap-001",
CSIDriverNameAnnotation, "test-csi-driver"),
@@ -1101,7 +1101,7 @@ func TestRestoreVolumeInfoResult(t *testing.T) {
},
},
pvNativeSnapshotMap: map[string]*NativeSnapshotInfo{},
-pvCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
+pvcCSISnapshotMap: map[string]snapshotv1api.VolumeSnapshot{},
datadownloadList: &velerov2alpha1.DataDownloadList{
Items: []velerov2alpha1.DataDownload{
*builder.ForDataDownload("velero", "testDataDownload-1").

View File

@@ -427,7 +427,7 @@ func TestReconcile(t *testing.T) {
notCreateFSBR: true,
},
{
name: "Dataupload should not be cancel with dismatch node",
name: "Dataupload should not be cancel with mismatch node",
pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).Result(),
du: func() *velerov2alpha1api.DataUpload {
du := dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).SnapshotType(fakeSnapshotType).Cancel(true).Result()

View File

@@ -182,10 +182,6 @@ func (m *manager) PruneRepo(repo *velerov1api.BackupRepository) error {
m.repoLocker.LockExclusive(repo.Name)
defer m.repoLocker.UnlockExclusive(repo.Name)
-prd, err := m.getRepositoryProvider(repo)
-if err != nil {
-return errors.WithStack(err)
-}
param, err := m.assembleRepoParam(repo)
if err != nil {
return errors.WithStack(err)
@@ -208,11 +204,7 @@ func (m *manager) PruneRepo(repo *velerov1api.BackupRepository) error {
return nil
}
-if err := prd.BoostRepoConnect(context.Background(), param); err != nil {
-return errors.WithStack(err)
-}
-log.Info("Start to maintence repo")
+log.Info("Start to maintenance repo")
maintenanceJob, err := buildMaintenanceJob(m.maintenanceCfg, param, m.client, m.namespace)
if err != nil {

View File

@@ -581,7 +581,7 @@ func TestGetPodVolumeNameForPVC(t *testing.T) {
expectedVolumeName string
}{
{
name: "should get volume name for pod with multuple PVCs",
name: "should get volume name for pod with multiple PVCs",
pod: v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{

View File

@@ -19,7 +19,7 @@ package test
import (
"context"
"fmt"
"math/rand"
"math/rand/v2"
"strings"
"time"
@@ -105,8 +105,7 @@
}
func (t *TestCase) GenerateUUID() string {
-rand.Seed(time.Now().UnixNano())
-return fmt.Sprintf("%08d", rand.Intn(100000000))
+return fmt.Sprintf("%08d", rand.IntN(100000000))
}
func (t *TestCase) CreateResources() error {
@@ -168,9 +167,16 @@ func (t *TestCase) Verify() error {
func (t *TestCase) Start() error {
t.Ctx, t.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour)
veleroCfg := t.GetTestCase().VeleroCfg
-if (veleroCfg.CloudProvider == Azure || veleroCfg.CloudProvider == AWS) && strings.Contains(t.GetTestCase().CaseBaseName, "nodeport") {
+if (veleroCfg.CloudProvider == Azure || veleroCfg.CloudProvider == AWS) &&
+strings.Contains(t.GetTestCase().CaseBaseName, "nodeport") {
Skip("Skip due to issue https://github.com/kubernetes/kubernetes/issues/114384 on AKS")
}
+if veleroCfg.UploaderType == UploaderTypeRestic &&
+strings.Contains(t.GetTestCase().CaseBaseName, "ParallelFiles") {
+Skip("Skip Parallel Files upload and download test cases for environments using Restic as uploader.")
+}
return nil
}
@@ -178,11 +184,15 @@ func (t *TestCase) Clean() error {
veleroCfg := t.GetTestCase().VeleroCfg
if !veleroCfg.Debug {
By(fmt.Sprintf("Clean namespace with prefix %s after test", t.CaseBaseName), func() {
-CleanupNamespaces(t.Ctx, t.Client, t.CaseBaseName)
+if err := CleanupNamespaces(t.Ctx, t.Client, t.CaseBaseName); err != nil {
+fmt.Println("Fail to cleanup namespaces: ", err)
+}
})
By("Clean backups after test", func() {
veleroCfg.ClientToInstallVelero = &t.Client
-DeleteAllBackups(t.Ctx, &veleroCfg)
+if err := DeleteAllBackups(t.Ctx, &veleroCfg); err != nil {
+fmt.Println("Fail to clean backups after test: ", err)
+}
})
}
return nil

View File

@@ -38,6 +38,8 @@ const AWS = "aws"
const Gcp = "gcp"
const Vsphere = "vsphere"
+const UploaderTypeRestic = "restic"
var PublicCloudProviders = []string{AWS, Azure, Gcp, Vsphere}
var LocalCloudProviders = []string{Kind, VanillaZFS}
var CloudProviders = append(PublicCloudProviders, LocalCloudProviders...)