From fb2012c09f04b627c3f4cbc8e2b9628015ff5f1d Mon Sep 17 00:00:00 2001 From: Lyndon-Li Date: Fri, 20 Oct 2023 18:37:07 +0800 Subject: [PATCH] udmrepo use region specified in BSL when s3URL is empty Signed-off-by: Lyndon-Li --- changelogs/CHANGELOG-1.11.md | 2 +- changelogs/CHANGELOG-1.8.md | 2 +- changelogs/unreleased/6992-Lyndon-Li | 1 + design/Implemented/delete-item-action.md | 2 +- pkg/controller/backup_controller.go | 8 ++++---- pkg/controller/backup_deletion_controller.go | 2 +- pkg/repository/provider/unified_repo.go | 8 +++++--- site/content/docs/v0.5.0/faq.md | 2 +- site/content/docs/v0.6.0/faq.md | 2 +- site/content/docs/v0.7.0/faq.md | 2 +- site/content/docs/v0.7.1/faq.md | 2 +- site/content/docs/v0.8.0/faq.md | 2 +- site/content/docs/v0.8.1/faq.md | 2 +- site/content/docs/v1.5/restic.md | 2 +- test/e2e/backups/ttl.go | 2 +- test/e2e/bsl-mgmt/deletion.go | 2 +- 16 files changed, 23 insertions(+), 20 deletions(-) create mode 100644 changelogs/unreleased/6992-Lyndon-Li diff --git a/changelogs/CHANGELOG-1.11.md b/changelogs/CHANGELOG-1.11.md index b12b23680..2f01c9c01 100644 --- a/changelogs/CHANGELOG-1.11.md +++ b/changelogs/CHANGELOG-1.11.md @@ -130,7 +130,7 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows: * Enable staticcheck linter. (#5788, @blackpiglet) * Set Kopia IgnoreUnknownTypes in ErrorHandlingPolicy to True for ignoring backup unknown file type (#5786, @qiuming-best) * Bump up Restic version to 0.15.0 (#5784, @qiuming-best) -* Add File system backup related matrics to Grafana dashboard +* Add File system backup related metrics to Grafana dashboard - Add metrics backup_warning_total for record of total warnings - Add metrics backup_last_status for record of last status of the backup (#5779, @allenxu404) * Design for Handling backup of volumes by resources filters (#5773, @qiuming-best) diff --git a/changelogs/CHANGELOG-1.8.md b/changelogs/CHANGELOG-1.8.md index 7c8f01946..e317849d1 100644 --- a/changelogs/CHANGELOG-1.8.md +++ b/changelogs/CHANGELOG-1.8.md @@ -61,7 +61,7 @@ in progress for 1.9. * Add rbac and annotation test cases (#4455, @mqiu) * remove --crds-version in velero install command. (#4446, @jxun) * Upgrade e2e test vsphere plugin (#4440, @mqiu) -* Fix e2e test failures for the inappropriate optimaze of velero install (#4438, @mqiu) +* Fix e2e test failures for the inappropriate optimize of velero install (#4438, @mqiu) * Limit backup namespaces on test resource filtering cases (#4437, @mqiu) * Bump up Go to 1.17 (#4431, @reasonerjt) * Added ``-itemsnapshots.json.gz to the backup format. This file exists diff --git a/changelogs/unreleased/6992-Lyndon-Li b/changelogs/unreleased/6992-Lyndon-Li new file mode 100644 index 000000000..6f79e1c9d --- /dev/null +++ b/changelogs/unreleased/6992-Lyndon-Li @@ -0,0 +1 @@ +Fix #6988, always get region from BSL if it is not empty \ No newline at end of file diff --git a/design/Implemented/delete-item-action.md b/design/Implemented/delete-item-action.md index 80baf685c..799173ff3 100644 --- a/design/Implemented/delete-item-action.md +++ b/design/Implemented/delete-item-action.md @@ -175,7 +175,7 @@ If there are one or more, download the backup tarball from backup storage, untar ## Alternatives Considered -Another proposal for higher level `DeleteItemActions` was initially included, which would require implementors to individually download the backup tarball themselves. 
+Another proposal for higher level `DeleteItemActions` was initially included, which would require implementers to individually download the backup tarball themselves. While this may be useful long term, it is not a good fit for the current goals as each plugin would be re-implementing a lot of boilerplate. See the deletion-plugins.md file for this alternative proposal in more detail. diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index f66c8f92e..74d3491ac 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -532,8 +532,8 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B if len(errors) > 0 { return nil, errors } - allLocations := &velerov1api.VolumeSnapshotLocationList{} - err := b.kbClient.List(context.Background(), allLocations, &kbclient.ListOptions{Namespace: backup.Namespace, LabelSelector: labels.Everything()}) + volumeSnapshotLocations := &velerov1api.VolumeSnapshotLocationList{} + err := b.kbClient.List(context.Background(), volumeSnapshotLocations, &kbclient.ListOptions{Namespace: backup.Namespace, LabelSelector: labels.Everything()}) if err != nil { errors = append(errors, fmt.Sprintf("error listing volume snapshot locations: %v", err)) return nil, errors @@ -541,8 +541,8 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B // build a map of provider->list of all locations for the provider allProviderLocations := make(map[string][]*velerov1api.VolumeSnapshotLocation) - for i := range allLocations.Items { - loc := allLocations.Items[i] + for i := range volumeSnapshotLocations.Items { + loc := volumeSnapshotLocations.Items[i] allProviderLocations[loc.Spec.Provider] = append(allProviderLocations[loc.Spec.Provider], &loc) } diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index 4511a50a0..f71349622 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -479,7 +479,7 @@ func (r *backupDeletionReconciler) patchDeleteBackupRequest(ctx context.Context, } func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *velerov1api.Backup, mutate func(*velerov1api.Backup)) (*velerov1api.Backup, error) { - //TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the bakcup resource is refactored + //TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the backup resource is refactored // Record original json oldData, err := json.Marshal(backup) diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go index 129f2ae6b..632a22fb1 100644 --- a/pkg/repository/provider/unified_repo.go +++ b/pkg/repository/provider/unified_repo.go @@ -477,9 +477,11 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo var err error if s3Url == "" { - region, err = getS3BucketRegion(bucket) - if err != nil { - return map[string]string{}, errors.Wrap(err, "error get s3 bucket region") + if region == "" { + region, err = getS3BucketRegion(bucket) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get s3 bucket region") + } } s3Url = fmt.Sprintf("s3-%s.amazonaws.com", region) diff --git a/site/content/docs/v0.5.0/faq.md b/site/content/docs/v0.5.0/faq.md index c96c6d549..b33998709 100644 --- a/site/content/docs/v0.5.0/faq.md +++ b/site/content/docs/v0.5.0/faq.md @@ -6,7 +6,7 @@ layout: 
docs ## When is it appropriate to use Ark instead of etcd's built in backup/restore? Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For -example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more +example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is generally a better approach. It gives you the ability to throw away an unstable cluster and restore your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up diff --git a/site/content/docs/v0.6.0/faq.md b/site/content/docs/v0.6.0/faq.md index 0fbfbbb35..07c9c9458 100644 --- a/site/content/docs/v0.6.0/faq.md +++ b/site/content/docs/v0.6.0/faq.md @@ -6,7 +6,7 @@ layout: docs ## When is it appropriate to use Ark instead of etcd's built in backup/restore? Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For -example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more +example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is generally a better approach. It gives you the ability to throw away an unstable cluster and restore your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up diff --git a/site/content/docs/v0.7.0/faq.md b/site/content/docs/v0.7.0/faq.md index 0fbfbbb35..07c9c9458 100644 --- a/site/content/docs/v0.7.0/faq.md +++ b/site/content/docs/v0.7.0/faq.md @@ -6,7 +6,7 @@ layout: docs ## When is it appropriate to use Ark instead of etcd's built in backup/restore? Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For -example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more +example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is generally a better approach. It gives you the ability to throw away an unstable cluster and restore your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up diff --git a/site/content/docs/v0.7.1/faq.md b/site/content/docs/v0.7.1/faq.md index 0fbfbbb35..07c9c9458 100644 --- a/site/content/docs/v0.7.1/faq.md +++ b/site/content/docs/v0.7.1/faq.md @@ -6,7 +6,7 @@ layout: docs ## When is it appropriate to use Ark instead of etcd's built in backup/restore? Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For -example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more +example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is generally a better approach. 
It gives you the ability to throw away an unstable cluster and restore your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up diff --git a/site/content/docs/v0.8.0/faq.md b/site/content/docs/v0.8.0/faq.md index 0fbfbbb35..07c9c9458 100644 --- a/site/content/docs/v0.8.0/faq.md +++ b/site/content/docs/v0.8.0/faq.md @@ -6,7 +6,7 @@ layout: docs ## When is it appropriate to use Ark instead of etcd's built in backup/restore? Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For -example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more +example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is generally a better approach. It gives you the ability to throw away an unstable cluster and restore your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up diff --git a/site/content/docs/v0.8.1/faq.md b/site/content/docs/v0.8.1/faq.md index 0fbfbbb35..07c9c9458 100644 --- a/site/content/docs/v0.8.1/faq.md +++ b/site/content/docs/v0.8.1/faq.md @@ -6,7 +6,7 @@ layout: docs ## When is it appropriate to use Ark instead of etcd's built in backup/restore? Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For -example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more +example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is generally a better approach. It gives you the ability to throw away an unstable cluster and restore your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up diff --git a/site/content/docs/v1.5/restic.md b/site/content/docs/v1.5/restic.md index 375994e8e..8265e0003 100644 --- a/site/content/docs/v1.5/restic.md +++ b/site/content/docs/v1.5/restic.md @@ -10,7 +10,7 @@ the supported cloud providers’ block storage offerings (Amazon EBS Volumes, Az It also provides a plugin model that enables anyone to implement additional object and block storage backends, outside the main Velero repository. -The restic intergation was added to give you an out-of-the-box solution for backing up and restoring almost any type of Kubernetes volume. This integration is an addition to Velero's capabilities, not a replacement for existing functionality. If you're running on AWS, and taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you need a volume snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir, +The restic integration was added to give you an out-of-the-box solution for backing up and restoring almost any type of Kubernetes volume. This integration is an addition to Velero's capabilities, not a replacement for existing functionality. If you're running on AWS, and taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you need a volume snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir, local, or any other volume type that doesn't have a native snapshot concept, restic might be for you. 
Restic is not tied to a specific storage platform, which means that this integration also paves the way for future work to enable diff --git a/test/e2e/backups/ttl.go b/test/e2e/backups/ttl.go index a513d22ca..7007d4e94 100644 --- a/test/e2e/backups/ttl.go +++ b/test/e2e/backups/ttl.go @@ -171,7 +171,7 @@ func TTLTest() { Expect(t).To(Equal(test.ttl)) }) - By(fmt.Sprintf("Waiting %s minutes for removing backup ralated resources by GC", test.ttl.String()), func() { + By(fmt.Sprintf("Waiting %s minutes for removing backup related resources by GC", test.ttl.String()), func() { time.Sleep(test.ttl) }) diff --git a/test/e2e/bsl-mgmt/deletion.go b/test/e2e/bsl-mgmt/deletion.go index b4d0b256c..a0fba63ff 100644 --- a/test/e2e/bsl-mgmt/deletion.go +++ b/test/e2e/bsl-mgmt/deletion.go @@ -168,7 +168,7 @@ func BslDeletionTest(useVolumeSnapshots bool) { Expect(AddLabelToPod(context.Background(), "kibishii-deployment-1", bslDeletionTestNs, label_2)).To(Succeed()) }) - By("Get all 2 PVCs of Kibishii and label them seprately ", func() { + By("Get all 2 PVCs of Kibishii and label them separately ", func() { pvc, err := GetPvcByPVCName(context.Background(), bslDeletionTestNs, podName_1) Expect(err).To(Succeed()) fmt.Println(pvc)
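
Note for reviewers: the functional change in this patch is the hunk in pkg/repository/provider/unified_repo.go. Previously, getStorageVariables called getS3BucketRegion whenever s3Url was empty, even if the BSL config already specified a region; with this change the configured region is honored and the bucket-region lookup only runs when both values are empty. The sketch below is a minimal, standalone illustration of that resolution order under those assumptions — resolveS3URL, lookupRegion, and the stub in main are hypothetical names for illustration, not Velero's API; only getStorageVariables and getS3BucketRegion are real identifiers from the diff.

```go
package main

import "fmt"

// resolveS3URL is a simplified sketch (not Velero's actual code) of the
// resolution order this patch establishes in getStorageVariables:
//  1. an explicit s3Url from the BSL config is used as-is;
//  2. otherwise, a non-empty region from the BSL config is trusted;
//  3. only when both are empty is the region discovered from the bucket.
// lookupRegion stands in for Velero's getS3BucketRegion helper.
func resolveS3URL(s3URL, region, bucket string, lookupRegion func(bucket string) (string, error)) (string, string, error) {
	if s3URL == "" {
		if region == "" {
			r, err := lookupRegion(bucket)
			if err != nil {
				return "", "", fmt.Errorf("error getting s3 bucket region: %w", err)
			}
			region = r
		}
		s3URL = fmt.Sprintf("s3-%s.amazonaws.com", region)
	}
	return s3URL, region, nil
}

func main() {
	// BSL config sets a region but no s3Url: the bucket-region lookup
	// should not be called at all, matching the patched behavior.
	stub := func(bucket string) (string, error) {
		return "", fmt.Errorf("unexpected region lookup for bucket %q", bucket)
	}
	url, region, err := resolveS3URL("", "us-west-2", "my-bucket", stub)
	fmt.Println(url, region, err) // s3-us-west-2.amazonaws.com us-west-2 <nil>
}
```

With this ordering, a region set on the BSL is authoritative and the bucket-region probe becomes a last resort rather than an unconditional call, which is what the changelog entry "always get region from BSL if it is not empty" describes.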