fix: Typos and add more spell checking rules to CI (#6415)

* fix: Typos and add more spell checking rules to CI

Signed-off-by: Mateus Oliveira <msouzaol@redhat.com>

---------

Signed-off-by: Mateus Oliveira <msouzaol@redhat.com>
pull/6481/head
Mateus Oliveira 2023-07-21 18:25:24 -03:00 committed by GitHub
parent 8396163e77
commit 913b610196
61 changed files with 164 additions and 127 deletions

View File

@ -49,7 +49,7 @@ jobs:
run: |
make local
# Check the common CLI against all kubernetes versions
# Check the common CLI against all Kubernetes versions
crd-check:
needs: build-cli
runs-on: ubuntu-latest

View File

@ -53,7 +53,7 @@ jobs:
run: |
IMAGE=velero VERSION=pr-test make container
docker save velero:pr-test -o ./velero.tar
# Run E2E test against all kubernetes versions on kind
# Run E2E test against all Kubernetes versions on kind
run-e2e-test:
needs: build
runs-on: ubuntu-latest

View File

@ -14,7 +14,44 @@ jobs:
uses: codespell-project/actions-codespell@master
with:
# ignore the config/.../crd.go file as it's generated binary data that is edited elsewhere.
skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE
skip: .git,*.png,*.jpg,*.woff,*.ttf,*.gif,*.ico,./config/crd/v1beta1/crds/crds.go,./config/crd/v1/crds/crds.go,./config/crd/v2alpha1/crds/crds.go,./go.sum,./LICENSE
ignore_words_list: iam,aks,ist,bridget,ue,shouldnot,atleast
check_filenames: true
check_hidden: true
- name: Velero.io word list check
shell: bash {0}
run: |
IGNORE_COMMENT="Velero.io word list : ignore"
FILES_TO_CHECK=$(find . -type f \
! -path "./.git/*" \
! -path "./site/content/docs/v*" \
! -path "./changelogs/CHANGELOG-*" \
! -path "./.github/workflows/pr-codespell.yml" \
! -path "./site/static/fonts/Metropolis/Open Font License.md" \
! -regex '.*\.\(png\|jpg\|woff\|ttf\|gif\|ico\|svg\)'
)
function check_word_in_files() {
local word=$1
xargs grep -Iinr "$word" <<< "$FILES_TO_CHECK" | \
grep -v "$IGNORE_COMMENT" | \
grep -i --color=always "$word" && \
EXIT_STATUS=1
}
function check_word_case_sensitive_in_files() {
local word=$1
xargs grep -Inr "$word" <<< "$FILES_TO_CHECK" | \
grep -v "$IGNORE_COMMENT" | \
grep --color=always "$word" && \
EXIT_STATUS=1
}
EXIT_STATUS=0
check_word_case_sensitive_in_files ' kubernetes '
check_word_in_files 'on-premise\b'
check_word_in_files 'back-up'
check_word_in_files 'plug-in'
check_word_in_files 'whitelist'
check_word_in_files 'blacklist'
exit $EXIT_STATUS

View File

@ -22,10 +22,10 @@ Below is a list of adopters of Velero in **production environments** that have
publicly shared the details of how they use it.
**[BitGo][20]**
BitGo uses Velero backup and restore capabilities to seamlessly provision and scale fullnode statefulsets on the fly as well as having it serve an integral piece for our kubernetes disaster-recovery story.
BitGo uses Velero backup and restore capabilities to seamlessly provision and scale fullnode statefulsets on the fly as well as having it serve as an integral piece of our Kubernetes disaster-recovery story.
**[Bugsnag][30]**
We use Velero for managing backups of an internal instance of our on-premise clustered solution. We also recommend our users of [on-premise Bugsnag installations][31] use Velero for [managing their own backups][32].
We use Velero for managing backups of an internal instance of our on-premise clustered solution. We also recommend our users of [on-premise Bugsnag installations](https://www.bugsnag.com/on-premise) use Velero for [managing their own backups](https://docs.bugsnag.com/on-premise/clustered/backup-restore/). <!-- Velero.io word list : ignore -->
**[Banzai Cloud][60]**
[Banzai Cloud Pipeline][61] is a Kubernetes-based microservices platform that integrates services needed for Day-1 and Day-2 operations along with first-class support both for on-prem and hybrid multi-cloud deployments. We use Velero to periodically [backup and restore these clusters in case of disasters][62].
@ -83,8 +83,6 @@ If you would like to add your logo to a future `Adopters of Velero` section on [
[20]: https://bitgo.com
[30]: https://bugsnag.com
[31]: https://www.bugsnag.com/on-premise
[32]: https://docs.bugsnag.com/on-premise/clustered/backup-restore/
[40]: https://kyma-project.io
[41]: https://kyma-project.io/docs/components/backup/#overview-overview

View File

@ -1,6 +1,6 @@
# Velero Assets
This folder contains logo images for Velero in gray (for light backgrounds) and white (for dark backgrounds like black tshirts or dark mode!) horizontal and stacked… in .eps and .svg.
This folder contains logo images for Velero in gray (for light backgrounds) and white (for dark backgrounds like black t-shirts or dark mode!) horizontal and stacked… in .eps and .svg.
## Some general guidelines for usage

View File

@ -0,0 +1 @@
fix: Typos and add more spell checking rules to CI

View File

@ -55,7 +55,7 @@ spec:
- CSIBackupVolumeSnapshotContents
type: string
name:
description: Name is the name of the kubernetes resource with
description: Name is the name of the Kubernetes resource with
which the file is associated.
type: string
required:

View File

@ -55,7 +55,7 @@ spec:
type: array
existingResourcePolicy:
description: ExistingResourcePolicy specifies the restore behavior
for the kubernetes resource to be restored
for the Kubernetes resource to be restored
nullable: true
type: string
hooks:

View File

@ -509,7 +509,7 @@ spec:
- CSIBackupVolumeSnapshotContents
type: string
name:
description: Name is the name of the kubernetes resource with
description: Name is the name of the Kubernetes resource with
which the file is associated.
type: string
required:

View File

@ -6,7 +6,7 @@ During backup process, user may need to back up resources of specific type in so
(Ex: primary-secondary database pods in a cluster).
## Goals
- Enable user to specify an order of back up resources belong to specific resource type
- Enable users to specify a backup order for resources belonging to a specific resource type
## Alternatives Considered
- Use a plugin to back up a resource and all its sub-resources. For example, use a plugin for StatefulSet and back up pods belonging to the StatefulSet in a specific order. This plugin solution is not generic and requires a plugin for each resource type.

View File

@ -1,6 +1,6 @@
# Add support for `ExistingResourcePolicy` to restore API
## Abstract
Velero currently does not support any restore policy on kubernetes resources that are already present in-cluster. Velero skips over the restore of the resource if it already exists in the namespace/cluster irrespective of whether the resource present in the restore is the same or different from the one present on the cluster. It is desired that Velero gives the option to the user to decide whether or not the resource in backup should overwrite the one present in the cluster.
Velero currently does not support any restore policy on Kubernetes resources that are already present in-cluster. Velero skips over the restore of the resource if it already exists in the namespace/cluster irrespective of whether the resource present in the restore is the same or different from the one present on the cluster. It is desired that Velero gives the option to the user to decide whether or not the resource in backup should overwrite the one present in the cluster.
## Background
As of today, Velero will skip over the restoration of resources that already exist in the cluster. The current workflow followed by Velero is (using a `service` that is backed up, for example):
@ -145,7 +145,7 @@ type RestoreSpec struct {
.
.
.
// ExistingResourcePolicy specifies the restore behaviour for the kubernetes resource to be restored
// ExistingResourcePolicy specifies the restore behaviour for the Kubernetes resource to be restored
// +optional
ExistingResourcePolicy PolicyType
@ -167,7 +167,7 @@ type RestoreSpec struct {
.
.
.
// ExistingResourcePolicyConfig specifies the restore behaviour for a particular/list of kubernetes resource(s) to be restored
// ExistingResourcePolicyConfig specifies the restore behaviour for a particular/list of Kubernetes resource(s) to be restored
// +optional
ExistingResourcePolicyConfig []PolicyConfiguration
@ -205,11 +205,11 @@ type RestoreSpec struct {
.
.
.
// ExistingResourceDefaultPolicy specifies the default restore behaviour for the kubernetes resource to be restored
// ExistingResourceDefaultPolicy specifies the default restore behaviour for the Kubernetes resource to be restored
// +optional
existingResourceDefaultPolicy PolicyType
// ExistingResourcePolicyOverrides specifies the restore behaviour for a particular/list of kubernetes resource(s) to be restored
// ExistingResourcePolicyOverrides specifies the restore behaviour for a particular/list of Kubernetes resource(s) to be restored
// +optional
existingResourcePolicyOverrides []PolicyConfiguration

View File

@ -5,9 +5,9 @@ This is intended as a replacement for the previously-approved Upload Progress Mo
snapshot uploads to include what was previously called Async Backup/Restore Item Actions. This
updated design should handle the combined set of use cases for those previously separate designs.
Volume snapshotter plug-in are used by Velero to take snapshots of persistent volume contents.
Volume snapshotter plugins are used by Velero to take snapshots of persistent volume contents.
Depending on the underlying storage system, those snapshots may be available to use immediately,
they may be uploaded to stable storage internally by the plug-in or they may need to be uploaded after
they may be uploaded to stable storage internally by the plugin or they may need to be uploaded after
the snapshot has been taken. We would like for Velero to continue on to the next part of the backup as quickly
as possible but we would also like the backup to not be marked as complete until it is a usable backup. We'd also
eventually like to bring the control of upload under the control of Velero and allow the user to make decisions
@ -36,7 +36,7 @@ backup and restore *could* make use of this framework if their actions are refac
Backup/RestoreItemActions.
- Data Movers
- Data movers are asynchronous processes executed inside backup/restore item actions that applies to a specific kubernetes resources. A common use case for data mover is to backup/restore PVCs whose data we want to move to some form of backup storage outside of using velero kopia/restic implementations.
- Data movers are asynchronous processes executed inside backup/restore item actions that apply to specific Kubernetes resources. A common use case for a data mover is to back up/restore PVCs whose data we want to move to some form of backup storage outside of Velero's kopia/restic implementations.
- Workflow
- User takes velero backup of PVC A
- BIA plugin applies to PVCs with compatible storage driver
@ -91,7 +91,7 @@ ID).
### Internal configuration and management
In this model, movement of the snapshot to stable storage is under the control of the snapshot
plug-in. Decisions about where and when the snapshot gets moved to stable storage are not
plugin. Decisions about where and when the snapshot gets moved to stable storage are not
directly controlled by Velero. This is the model for the current VolumeSnapshot plugins.
### Velero controlled management
@ -120,7 +120,7 @@ will remain in the "WaitingForPluginOperations" phase until all BIA/RIA operatio
(for example, for a volume snapshotter, until all data has been successfully moved to persistent
storage). The backup/restore will not fail once it reaches this phase, although an error return
from a plugin could cause a backup or restore to move to "PartiallyFailed". If the backup is
deleted (cancelled), the plug-ins will attempt to delete the snapshots and stop the data movement -
deleted (cancelled), the plugins will attempt to delete the snapshots and stop the data movement -
this may not be possible with all storage systems.
In addition, for backups (but not restores), there will also be two additional phases, "Finalizing"
@ -145,7 +145,7 @@ terminates
When work on the backup/restore begins, it moves to the "InProgress" phase. It remains in the
"InProgress" phase until all pre/post execution hooks have been executed, all snapshots have been
taken and the Kubernetes metadata and backup/restore info is safely written to the object store
plug-in.
plugin.
In the current implementation, Restic backups will move data during the "InProgress" phase. In the
future, it may be possible to combine a snapshot with a Restic (or equivalent) backup which would
@ -263,7 +263,7 @@ InProgress backups will not have a `velero-backup.json` present in the object st
reconciliation, backups which do not have a `velero-backup.json` object in the object store will be
ignored.
## Plug-in API changes
## Plugin API changes
### OperationProgress struct
@ -289,15 +289,15 @@ Two new methods will be added to the VolumeSnapshotter interface:
Cancel(snapshotID string) (error)
Progress will report the current status of a snapshot upload. This should be callable at
any time after the snapshot has been taken. In the event a plug-in is restarted, if the operationID
any time after the snapshot has been taken. In the event a plugin is restarted, if the operationID
(snapshot ID) continues to be valid it should be possible to retrieve the progress.
`error` is set if there is an issue retrieving progress. If the snapshot has encountered an
error during the upload, the error should be returned in OperationProgress and error should be nil.
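As a reading aid, here is a minimal Go sketch of the two methods and the progress struct this section describes. The exact field set of `OperationProgress` beyond completion and error is an assumption for illustration, not Velero's actual definition:

```go
// Sketch only; approximates the design text above rather than Velero's real types.
package sketch

// OperationProgress reports the state of an in-flight plugin operation.
type OperationProgress struct {
	Completed  bool   // true once the operation (e.g. an upload) has finished
	Err        string // set if the operation itself failed
	NCompleted int64  // assumed field: units of work done so far
	NTotal     int64  // assumed field: total units of work, when known
}

// Only the new methods are shown; the existing VolumeSnapshotter methods are omitted.
type VolumeSnapshotterOperations interface {
	// Progress must be callable at any time after the snapshot is taken,
	// including after a plugin restart, as long as the operationID
	// (snapshot ID) remains valid.
	Progress(snapshotID string) (OperationProgress, error)
	// Cancel stops the snapshot's data movement where the storage system supports it.
	Cancel(snapshotID string) error
}
```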
### BackupItemAction and RestoreItemAction plug-in changes
### BackupItemAction and RestoreItemAction plugin changes
Currently CSI snapshots and the Velero Plug-in for vSphere are implemented as BackupItemAction
Currently CSI snapshots and the Velero Plugin for vSphere are implemented as BackupItemAction
plugins. While the majority of BackupItemAction plugins do not take snapshots or upload data, this
functionality is useful for any long-running plugin operation managed by an external
process/controller, so we will modify BackupItemAction and RestoreItemAction to optionally return an
@ -333,7 +333,7 @@ will be modified:
// initiate asynchronous actions, and a second slice of ResourceIdentifiers specifying related items
// which should be backed up after all asynchronous operations have completed. This last field will be
// ignored if operationID is empty, and should not be filled in unless the resource must be updated in the
// backup after async operations complete (i.e. some of the item's kubernetes metadata will be updated
// backup after async operations complete (i.e. some of the item's Kubernetes metadata will be updated
// during the async operation which will be required during restore)
Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error)
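To make the new contract concrete, a hypothetical plugin implementing the modified signature might look like the sketch below; `asyncSnapshotAction` and `startAsyncSnapshot` are invented names, not part of Velero:

```go
package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"

	api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
)

type asyncSnapshotAction struct{}

// Execute starts the long-running work and returns immediately; Velero then
// polls Progress(operationID) until the operation completes.
func (a *asyncSnapshotAction) Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, string, []velero.ResourceIdentifier, error) {
	operationID := startAsyncSnapshot(item, backup)

	// Items listed here are re-captured into the backup only after the async
	// operation finishes, per the comment above (left empty in this sketch).
	var itemsToUpdate []velero.ResourceIdentifier

	return item, nil, operationID, itemsToUpdate, nil
}

// startAsyncSnapshot stands in for plugin-specific snapshot logic.
func startAsyncSnapshot(item runtime.Unstructured, backup *api.Backup) string {
	return "operation-" + backup.Name
}
```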
@ -464,10 +464,10 @@ snapshot to stable storage. CSI snapshots expose the _readyToUse_ state that, i
indicates that the snapshot has been transferred to durable storage and is ready to be used. The
CSI BackupItemAction.Progress method will poll that field and when completed, return completion.
## vSphere plug-in
## vSphere plugin
The vSphere Plug-in for Velero uploads snapshots to S3 in the background. This is also a
BackupItemAction plug-in, it will check the status of the Upload records for the snapshot and return
The vSphere Plugin for Velero uploads snapshots to S3 in the background. This is also a
BackupItemAction plugin; it will check the status of the Upload records for the snapshot and return
progress.
## Backup workflow changes
@ -553,14 +553,14 @@ RestoreItemAction new plugin APIs
New backup phases
New restore phases
Defer uploading `velero-backup.json`
AWS EBS plug-in Progress implementation
AWS EBS plugin Progress implementation
Operation monitoring
Implementation of `<backup-name>-itemoperations.json.gz` file
Implementation of `<restore-name>-itemoperations.json.gz` file
Restart logic
Change in reconciliation logic to ignore backups/restores that have not completed
CSI plug-in BackupItemAction Progress implementation
vSphere plug-in BackupItemAction Progress implementation (vSphere plug-in team)
CSI plugin BackupItemAction Progress implementation
vSphere plugin BackupItemAction Progress implementation (vSphere plugin team)
# Open Questions

View File

@ -3,7 +3,7 @@
As of today, Velero supports filtering of resources based on a single label selector per backup. It is desired that Velero
support backing up resources based on multiple labels (OR logic).
**Note:** This solution is required because kubernetes label selectors only allow AND logic of labels.
**Note:** This solution is required because Kubernetes label selectors only allow AND logic of labels.
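For intuition, the OR behavior being requested could look like this hedged Go sketch (the helper is illustrative, not Velero's API):

```go
package sketch

import "k8s.io/apimachinery/pkg/labels"

// matchesAny reports whether an item's labels satisfy at least one selector.
// A single Kubernetes LabelSelector cannot express this, because its
// requirements are always ANDed together.
func matchesAny(itemLabels map[string]string, selectors []labels.Selector) bool {
	set := labels.Set(itemLabels)
	for _, s := range selectors {
		if s.Matches(set) {
			return true
		}
	}
	return false
}
```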
## Background
Currently, Velero's Backup/Restore API has a spec field `LabelSelector` which helps in filtering of resources based on

View File

@ -393,7 +393,7 @@ Deletion of `VolumePluginBackup` CR can be delegated to plugin. Plugin can perfo
### 'core' Velero client/server required changes
- Creation of the VolumePluginBackup/VolumePluginRestore CRDs at installation time
- Persistence of VolumePluginBackup CRs towards the end of the back up operation
- Persistence of VolumePluginBackup CRs towards the end of the backup operation
- As part of backup synchronization, VolumePluginBackup CRs related to the backup will be synced.
- Deletion of VolumePluginBackup when volumesnapshotter's DeleteSnapshot is called
- Deletion of VolumePluginRestore as part of handling deletion of Restore CR

View File

@ -102,7 +102,7 @@ The code will consolidate the input parameters and execution context of the `vel
https://github.com/vmware-tanzu/crash-diagnostics/blob/v0.3.4/exec/executor.go#L17
## Alternatives Considered
The collection could be done via the kubernetes client-go API, but such integration is not necessarily trivial to implement, therefore, `crashd` is preferred approach
The collection could be done via the Kubernetes client-go API, but such integration is not necessarily trivial to implement; therefore, `crashd` is the preferred approach
## Security Considerations
- The Starlark script will be embedded into the velero binary, and the byte slice will be passed to the `exec.Execute` func directly, so there's little risk that the script will be modified before being executed.

View File

@ -160,10 +160,10 @@ while the cloud credential will always be used for the VolumeSnapshotter.
## Velero Plugin for vSphere compatibility
The vSphere plugin is implemented as a BackupItemAction and shares the credentials of the AWS plug-in for S3 access.
The vSphere plugin is implemented as a BackupItemAction and shares the credentials of the AWS plugin for S3 access.
The backup storage location is passed in _Backup.Spec.StorageLocation_. Currently the plugin retrieves the S3 bucket and
server from the BSL and creates a BackupRepositoryClaim with that and the credentials retrieved from the cloud credential.
The plug-in will need to be modified to retrieve the credentials field from the BSL and use that credential in the
The plugin will need to be modified to retrieve the credentials field from the BSL and use that credential in the
BackupRepositoryClaim.
## Backwards compatibility
@ -185,7 +185,7 @@ In order to support parallelism, Velero will need to be able to use multiple cre
ObjectStore. Currently backups are single threaded and a single BSL will be used throughout the entire backup. The only
existing points of parallelism are when a user downloads logs for a backup or the BackupStorageLocationReconciler
reconciles while a backup or restore is running. In the current code, `download_request_controller.go` and
`backup_storage_location_controller.go` create a new plug-in manager and hence another ObjectStore plugin in
`backup_storage_location_controller.go` create a new plugin manager and hence another ObjectStore plugin in
parallel with the ObjectStore plugin servicing a backup or restore (if one is running).
## Alternatives Considered

View File

@ -3,7 +3,7 @@
## Glossary & Abbreviation
**BR**: Backup & Restore
**Backup Storage**: The storage that meets BR requirements, for example, scalable, durable, cost-effective, etc., therefore, Backup Storage is usually implemented as Object storage or File System storage, it may be on-premise or in cloud. Backup Storage is not BR specific necessarily, so it usually doesn't provide most of the BR related features. On the other hand, storage vendors may provide BR specific storages that include some BR features like deduplication, compression, encryption, etc. For a standalone BR solution (i.e. Velero), the Backup Storage is not part of the solution, it is provided by users, so the BR solution should not assume the BR related features are always available from the Backup Storage.
**Backup Storage**: The storage that meets BR requirements, for example, scalable, durable, cost-effective, etc., therefore, Backup Storage is usually implemented as Object storage or File System storage, it may be on-premises or in cloud. Backup Storage is not BR specific necessarily, so it usually doesn't provide most of the BR related features. On the other hand, storage vendors may provide BR specific storages that include some BR features like deduplication, compression, encryption, etc. For a standalone BR solution (i.e. Velero), the Backup Storage is not part of the solution, it is provided by users, so the BR solution should not assume the BR related features are always available from the Backup Storage.
**Backup Repository**: Backup repository is layered between BR data movers and Backup Storage to provide BR related features. Backup Repository is a part of BR solution, so generally, BR solution by default leverages the Backup Repository to provide the features because Backup Repository is always available; when Backup Storage provides duplicated features, and the latter is more beneficial (i.e., performance is better), BR solution should have the ability to opt to use the Backup Storage's implementation.
**Data Mover**: The BR module to read/write data from/to workloads, the aim is to eliminate the differences of workloads.
**TCO**: Total Cost of Ownership. This is a general criterion for products/solutions, but also means a lot for BR solutions. For example, this means what kind of backup storage (and its cost) it requires, the retention policy of backup copies, the ways to remove backup data redundancy, etc.

View File

@ -1,8 +1,8 @@
# Upload Progress Monitoring
Volume snapshotter plug-in are used by Velero to take snapshots of persistent volume contents.
Volume snapshotter plugins are used by Velero to take snapshots of persistent volume contents.
Depending on the underlying storage system, those snapshots may be available to use immediately,
they may be uploaded to stable storage internally by the plug-in or they may need to be uploaded after
they may be uploaded to stable storage internally by the plugin or they may need to be uploaded after
the snapshot has been taken. We would like for Velero to continue on to the next part of the backup as quickly
as possible but we would also like the backup to not be marked as complete until it is a usable backup. We'd also
eventually like to bring the control of upload under the control of Velero and allow the user to make decisions
@ -23,7 +23,7 @@ Restic - Does not go through the volume snapshot path. Restic backups will bloc
- Enable monitoring of operations that continue after snapshotting operations have completed
- Keep non-usable backups (upload/persistence has not finished) from appearing as completed
- Minimize change to volume snapshot and BackupItemAction plug-ins
- Minimize change to volume snapshot and BackupItemAction plugins
## Non-goals
- Unification of BackupItemActions and VolumeSnapshotters
@ -32,7 +32,7 @@ Restic - Does not go through the volume snapshot path. Restic backups will bloc
### Internal configuration and management
In this model, movement of the snapshot to stable storage is under the control of the snapshot
plug-in. Decisions about where and when the snapshot gets moved to stable storage are not
plugin. Decisions about where and when the snapshot gets moved to stable storage are not
directly controlled by Velero. This is the model for the current VolumeSnapshot plugins.
### Velero controlled management
@ -56,7 +56,7 @@ slow the progress of the system without adding any actual benefit to the user.
A new backup phase, "Uploading" will be introduced. When a backup has entered this phase, Velero
is free to start another backup. The backup will remain in the "Uploading" phase until all data
has been successfully moved to persistent storage. The backup will not fail once it reaches
this phase, it will continuously retry moving the data. If the backup is deleted (cancelled), the plug-ins will
this phase, it will continuously retry moving the data. If the backup is deleted (cancelled), the plugins will
attempt to delete the snapshots and stop the data movement - this may not be possible with all
storage systems.
@ -74,7 +74,7 @@ If the backup request is incorrectly formed, it goes to the "FailedValidation" p
### InProgress
When work on the backup begins, it moves to the "InProgress" phase. It remains in the "InProgress"
phase until all pre/post execution hooks have been executed, all snapshots have been taken and the
Kubernetes metadata and backup info is safely written to the object store plug-in.
Kubernetes metadata and backup info is safely written to the object store plugin.
In the current implementation, Restic backups will move data during the "InProgress" phase.
In the future, it may be possible to combine a snapshot with a Restic (or equivalent) backup which
@ -146,7 +146,7 @@ Completed, Failed or PartialFailure
InProgress backups will not have a `velero-backup.json` present in the object store. During reconciliation, backups which
do not have a `velero-backup.json` object in the object store will be ignored.
## Plug-in API changes
## Plugin API changes
### UploadProgress struct
@ -166,23 +166,23 @@ do not have a `velero-backup.json` object in the object store will be ignored.
### VolumeSnapshotter changes
A new method will be added to the VolumeSnapshotter interface (details depending on plug-in versioning spec)
A new method will be added to the VolumeSnapshotter interface (details depending on plugin versioning spec)
UploadProgress(snapshotID string) (UploadProgress, error)
UploadProgress will report the current status of a snapshot upload. This should be callable at any time after the snapshot
has been taken. In the event a plug-in is restarted, if the snapshotID continues to be valid it should be possible to
has been taken. In the event a plugin is restarted, if the snapshotID continues to be valid it should be possible to
retrieve the progress.
`error` is set if there is an issue retrieving progress. If the snapshot has encountered an error during the upload,
the error should be returned in UploadProgress and error should be nil.
### SnapshotItemAction plug-in
### SnapshotItemAction plugin
Currently CSI snapshots and the Velero Plug-in for vSphere are implemented as BackupItemAction plugins. The majority of
Currently CSI snapshots and the Velero Plugin for vSphere are implemented as BackupItemAction plugins. The majority of
BackupItemAction plugins do not take snapshots or upload data so rather than modify BackupItemAction we introduce a new
plug-ins, SnapshotItemAction. SnapshotItemAction will be used in place of BackupItemAction for
the CSI snapshots and the Velero Plug-in for vSphere and will return a snapshot ID in addition to the item itself.
plugin, SnapshotItemAction. SnapshotItemAction will be used in place of BackupItemAction for
the CSI snapshots and the Velero Plugin for vSphere and will return a snapshot ID in addition to the item itself.
The SnapshotItemAction plugin identifier as well as the Item and Snapshot ID will be stored in the
`<backup-name>-itemsnapshots.json.gz`. When checking for progress, this info will be used to select the appropriate
@ -248,9 +248,9 @@ stable storage. CSI snapshots expose the _readyToUse_ state that, in the case o
has been transferred to durable storage and is ready to be used. The CSI BackupItemProgress.Progress method will
poll that field and when completed, return completion.
## vSphere plug-in
## vSphere plugin
The vSphere Plug-in for Velero uploads snapshots to S3 in the background. This is also a BackupItemAction plug-in,
The vSphere Plugin for Velero uploads snapshots to S3 in the background. This is also a BackupItemAction plugin;
it will check the status of the Upload records for the snapshot and return progress.
## Backup workflow changes
@ -281,13 +281,13 @@ VolumeSnapshotter new plugin APIs
BackupItemProgress new plugin interface
New backup phases
Defer uploading `velero-backup.json`
AWS EBS plug-in UploadProgress implementation
AWS EBS plugin UploadProgress implementation
Upload monitoring
Implementation of `<backup-name>-itemsnapshots.json.gz` file
Restart logic
Change in reconciliation logic to ignore backups that have not completed
CSI plug-in BackupItemProgress implementation
vSphere plug-in BackupItemProgress implementation (vSphere plug-in team)
CSI plugin BackupItemProgress implementation
vSphere plugin BackupItemProgress implementation (vSphere plugin team)
# Future Fragile/Durable snapshot tracking
Futures are here for reference, they may change radically when actually implemented.
@ -296,11 +296,11 @@ Some storage systems have the ability to provide different levels of protection
and "Durable". Currently, Velero expects snapshots to be Durable (they should be able to survive the destruction of the
cluster and the storage it is using). In the future we would like the ability to take advantage of snapshots that are
Fragile. For example, vSphere snapshots are Fragile (they reside in the same datastore as the virtual disk). The Velero
Plug-in for vSphere uses a vSphere local/fragile snapshot to get a consistent snapshot, then uploads the data to S3 to
Plugin for vSphere uses a vSphere local/fragile snapshot to get a consistent snapshot, then uploads the data to S3 to
make it Durable. In the current design, upload progress will not be complete until the snapshot is ready to use and
Durable. It is possible, however, to restore data from a vSphere snapshot before it has been made Durable, and this is a
capability we'd like to expose in the future. Other storage systems implement this functionality as well. We will be moving
the control of the data movement from the vSphere plug-in into Velero.
the control of the data movement from the vSphere plugin into Velero.
Some storage systems, such as EBS, are only capable of creating Durable snapshots. There is no usable intermediate Fragile stage.

View File

@ -201,12 +201,12 @@ linters-settings:
- shadow
disable-all: false
depguard:
list-type: blacklist
list-type: blacklist # Velero.io word list : ignore
include-go-root: false
packages:
- github.com/sirupsen/logrus
packages-with-error-message:
# specify an error message to output when a blacklisted package is used
# specify an error message to output when a denylisted package is used
- github.com/sirupsen/logrus: "logging is allowed only by logutils.Log"
lll:
# max line length, lines longer will be reported. Default is 120.

View File

@ -49,7 +49,7 @@ type DownloadTarget struct {
// Kind is the type of file to download.
Kind DownloadTargetKind `json:"kind"`
// Name is the name of the kubernetes resource with which the file is associated.
// Name is the name of the Kubernetes resource with which the file is associated.
Name string `json:"name"`
}

View File

@ -109,7 +109,7 @@ type RestoreSpec struct {
// +optional
Hooks RestoreHooks `json:"hooks,omitempty"`
// ExistingResourcePolicy specifies the restore behavior for the kubernetes resource to be restored
// ExistingResourcePolicy specifies the restore behavior for the Kubernetes resource to be restored
// +optional
// +nullable
ExistingResourcePolicy PolicyType `json:"existingResourcePolicy,omitempty"`

View File

@ -77,7 +77,7 @@ func (f FakeV1beta1ClusterRoleBindingLister) List() ([]ClusterRoleBinding, error
func TestServiceAccountActionAppliesTo(t *testing.T) {
// Instantiating the struct directly since using
// NewServiceAccountAction requires a full kubernetes clientset
// NewServiceAccountAction requires a full Kubernetes clientset
a := &ServiceAccountAction{}
actual, err := a.AppliesTo()

View File

@ -436,7 +436,7 @@ func (s *server) run() error {
}
// namespaceExists returns nil if namespace can be successfully
// gotten from the kubernetes API, or an error otherwise.
// gotten from the Kubernetes API, or an error otherwise.
func (s *server) namespaceExists(namespace string) error {
s.logger.WithField("namespace", namespace).Info("Checking existence of namespace.")

View File

@ -55,7 +55,7 @@ var kindToResource = map[string]string{
"VolumeSnapshotLocation": "volumesnapshotlocations",
}
// ResourceGroup represents a collection of kubernetes objects with a common ready condition
// ResourceGroup represents a collection of Kubernetes objects with a common ready condition
type ResourceGroup struct {
CRDResources []*unstructured.Unstructured
OtherResources []*unstructured.Unstructured
@ -164,7 +164,7 @@ func isAvailable(c appsv1.DeploymentCondition) bool {
return false
}
// DeploymentIsReady will poll the kubernetes API server to see if the velero deployment is ready to service user requests.
// DeploymentIsReady will poll the Kubernetes API server to see if the velero deployment is ready to service user requests.
func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, error) {
gvk := schema.FromAPIVersionAndKind(appsv1.SchemeGroupVersion.String(), "Deployment")
apiResource := metav1.APIResource{
@ -206,7 +206,7 @@ func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, e
return isReady, err
}
// DaemonSetIsReady will poll the kubernetes API server to ensure the node-agent daemonset is ready, i.e. that
// DaemonSetIsReady will poll the Kubernetes API server to ensure the node-agent daemonset is ready, i.e. that
// pods are scheduled and available on all of the desired nodes.
func DaemonSetIsReady(factory client.DynamicFactory, namespace string) (bool, error) {
gvk := schema.FromAPIVersionAndKind(appsv1.SchemeGroupVersion.String(), "DaemonSet")
@ -252,7 +252,7 @@ func DaemonSetIsReady(factory client.DynamicFactory, namespace string) (bool, er
return isReady, err
}
// GroupResources groups resources based on whether the resources are CustomResourceDefinitions or other types of kubernetes objects
// GroupResources groups resources based on whether the resources are CustomResourceDefinitions or other types of Kubernetes objects
// This is useful to wait for readiness before creating CRD objects
func GroupResources(resources *unstructured.UnstructuredList) *ResourceGroup {
rg := new(ResourceGroup)

View File

@ -27,7 +27,7 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
// GetValidName converts an input string to valid kubernetes label string in accordance to rfc1035 DNS Label spec
// GetValidName converts an input string to valid Kubernetes label string in accordance to rfc1035 DNS Label spec
// (https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md)
// Length of the label is adjusted based on the DNS1035LabelMaxLength (defined at k8s.io/apimachinery/pkg/util/validation)
// If length exceeds, we trim the label name to contain only max allowed characters
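An illustrative-only sketch of the trimming rule the comment describes follows; the real `GetValidName` may well do more (for example, appending a hash suffix), so treat this purely as a reading aid:

```go
package sketch

// Mirrors validation.DNS1035LabelMaxLength from k8s.io/apimachinery.
const dns1035LabelMaxLength = 63

// getValidName trims a label that exceeds the max allowed characters.
func getValidName(label string) string {
	if len(label) <= dns1035LabelMaxLength {
		return label
	}
	return label[:dns1035LabelMaxLength]
}
```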

View File

@ -128,7 +128,7 @@ func NewServer() Server {
func (s *server) BindFlags(flags *pflag.FlagSet) Server {
flags.Var(s.logLevelFlag, "log-level", fmt.Sprintf("The level at which to log. Valid values are %s.", strings.Join(s.logLevelFlag.AllowedValues(), ", ")))
s.flagSet = flags
s.flagSet.ParseErrorsWhitelist.UnknownFlags = true
s.flagSet.ParseErrorsWhitelist.UnknownFlags = true // Velero.io word list : ignore
return s
}

View File

@ -47,7 +47,7 @@ type BackupItemAction interface {
// initiate (asynchronous) operations, and a second slice of ResourceIdentifiers specifying related items
// which should be backed up after all operations have completed. This last field will be
// ignored if operationID is empty, and should not be filled in unless the resource must be updated in the
// backup after operations complete (i.e. some of the item's kubernetes metadata will be updated
// backup after operations complete (i.e. some of the item's Kubernetes metadata will be updated
// during the operation which will be required during restore)
// Note that (async) operations are not supported for items being backed up during Finalize phases,
// so a plugin should not return an OperationID if the backup phase is "Finalizing"

View File

@ -99,7 +99,7 @@ The following use cases must be included as part of the Velero restore hooks MVP
**Title: **Allow restore hook to run on non-Kubernetes databases
**Description: **As a user, I would like to run restore hook operations even on databases that are external to kubernetes (such as postgres, elastic, etc…).
**Description: **As a user, I would like to run restore hook operations even on databases that are external to Kubernetes (such as postgres, elastic, etc…).
**<span style="text-decoration:underline;">______________________________________________________________</span>**
@ -291,7 +291,7 @@ The following requirements are out of scope for the Velero Restore Hooks MVP:
1. Verifying the integrity of a backup, resource, or other artifact will not be included in the scope of this effort.
2. Verifying the integrity of a snapshot using kubernetes hash checks.
2. Verifying the integrity of a snapshot using Kubernetes hash checks.
3. Running concurrent restore operations (for the MVP); a secondary epic will be opened to align better with the concurrent workload operations currently set on the Velero roadmap for the Q4 timeframe.
**Questions**

View File

@ -29,7 +29,7 @@ hero:
secondary_ctas:
cta1:
title: Introduction to Velero
url: /blog/Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters/
url: /blog/Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters/ # Velero.io word list : ignore
content: Learn about Velero and how to protect your Kubernetes resources and volumes.
cta2:
title: How Do You Use Velero?

View File

@ -103,7 +103,7 @@ spec:
# so that the exposed port numbers on the node will remain the same after restore. Optional
preserveNodePorts: true
# existingResourcePolicy specifies the restore behaviour
# for the kubernetes resource to be restored. Optional
# for the Kubernetes resource to be restored. Optional
existingResourcePolicy: none
# Actions to perform during or post restore. The only hooks currently supported are
# adding an init container to a pod before it can be restored and executing a command in a

View File

@ -52,9 +52,9 @@ region=ap-guangzhou,s3ForcePathStyle="true",s3Url=https://cos.ap-guangzhou.myqcl
Description of the parameters:
- `--provider`: Declares the type of plug-in provided by "aws".
- `--provider`: Declares the type of plugin provided by "aws".
- `--plugins`: Use the AWS S3 compatible API plug-in "velero-plugin-for-aws".
- `--plugins`: Use the AWS S3 compatible API plugin "velero-plugin-for-aws".
- `--bucket`: The bucket name created at Tencent Cloud COS.

View File

@ -122,7 +122,7 @@ velero install \
### Update resource requests and limits after install
After installation you can adjust the resource requests and limits in the Velero Deployment spec or node-agent DeamonSet spec, if you are using the File System Backup.
After installation you can adjust the resource requests and limits in the Velero Deployment spec or node-agent DaemonSet spec, if you are using the File System Backup.
**Velero pod**
@ -135,7 +135,7 @@ kubectl patch deployment velero -n velero --patch \
**node-agent pod**
Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the node-agent DeamonSet spec.
Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the node-agent DaemonSet spec.
```bash
kubectl patch daemonset node-agent -n velero --patch \

View File

@ -125,7 +125,7 @@ To mount the correct hostpath to pods volumes, run the node-agent pod in `privil
If node-agent is not running in a privileged mode, it will not be able to access pods volumes within the mounted
hostpath directory because of the default enforced SELinux mode configured in the host system level. You can
[create a custom SCC](https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html) to relax the
security in your cluster so that node-agent pods are allowed to use the hostPath volume plug-in without granting
security in your cluster so that node-agent pods are allowed to use the hostPath volume plugin without granting
them access to the `privileged` SCC.
By default a userland openshift namespace will not schedule pods on all nodes in the cluster.

View File

@ -142,7 +142,7 @@ Compression is either disabled or not unavailable for both uploader.
| Kopia | 4c4g |1m35s | 75% |248 MB |10 GB |
| Restic | 4c4g |3m17s | 171% |126 MB |10 GB |
#### conclusion:
- This case involves a relatively large backup size, there is no significant time reduction by increasing resources from 1c2g to 4c4g for Kopia uploader, but for Restic upoader when increasing CPU from 1 core to 4, backup time-consuming was shortened by one-third, which means in this scenario should allocate more CPU resources for Restic uploader.
- This case involves a relatively large backup size; there is no significant time reduction from increasing resources from 1c2g to 4c4g for the Kopia uploader, but for the Restic uploader, increasing CPU from 1 core to 4 shortened backup time by one-third, which means this scenario should allocate more CPU resources for the Restic uploader.
- For the large backup size case, Restic uploader's repository size comes to normal
### Case 4: 900 files, 1 directory, 1.000GB per file total 900.000GB content

View File

@ -50,7 +50,7 @@ You will need to change this setting on the server to make it work.
## Skipping TLS verification
**Note:** The `--insecure-skip-tls-verify` flag is insecure and susceptible to man-in-the-middle attacks and meant to help your testing and developing scenarios in an on-premise environment. Using this flag in production is not recommended.
**Note:** The `--insecure-skip-tls-verify` flag is insecure and susceptible to man-in-the-middle attacks and meant to help your testing and developing scenarios in an on-premises environment. Using this flag in production is not recommended.
Velero provides a way for you to skip TLS verification on the object store when using the [AWS provider plugin](https://github.com/vmware-tanzu/velero-plugin-for-aws) or [File System Backup](file-system-backup.md) by passing the `--insecure-skip-tls-verify` flag with the following Velero commands,

View File

@ -261,17 +261,18 @@ nginx 1/1 Running 0 13s 10.200.0.4 worker0
A list of Velero-specific terms and words to be used consistently across the site.
{{< table caption="Velero.io word list" >}}
|Trem|Usage|
|Term|Usage|
|--- |--- |
|Kubernetes|Kubernetes should always be capitalized.|
|Docker|Docker should always be capitalized.|
|Velero|Velero should always be capitalized.|
|VMware|VMware should always be correctly capitalized.|
|On-premises|On-premises or on-prem rather than on-premise or other variations.|
|Backup|Backup rather than back up, back-up or other variations.|
|Plugin|Plugin rather than plug-in or other variations.|
|Allowlist|Use allowlist instead of whitelist.|
|Denylist|Use denylist instead of blacklist.|
|On-premises|On-premises or on-prem rather than on-premise or other variations.| <!-- Velero.io word list : ignore -->
|Backup|Backup for noun or adjective, rather than back-up, back up or other variations.| <!-- Velero.io word list : ignore -->
|Back up|Back up for verb, rather than other variations.|
|Plugin|Plugin rather than plug-in or other variations.| <!-- Velero.io word list : ignore -->
|Allowlist|Use allowlist instead of whitelist.| <!-- Velero.io word list : ignore -->
|Denylist|Use denylist instead of blacklist.| <!-- Velero.io word list : ignore -->
{{< /table >}}
## Markdown elements

View File

@ -44,7 +44,7 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw
velero install --crds-only --dry-run -o yaml | kubectl apply -f -
```
**NOTE:** Since velero v1.10.0 only v1 CRD will be supported during installation, therefore, the v1.10.0 will only work on kubernetes version >= v1.16
**NOTE:** Since velero v1.10.0, only v1 CRDs will be supported during installation; therefore, v1.10.0 will only work on Kubernetes version >= v1.16
3. Update the container image and objects fields used by the Velero deployment and, optionally, the restic daemon set:

View File

@ -3,7 +3,7 @@ title: "Velero Install CLI"
layout: docs
---
This document serves as a guide to using the `velero install` CLI command to install `velero` server components into your kubernetes cluster.
This document serves as a guide to using the `velero install` CLI command to install `velero` server components into your Kubernetes cluster.
_NOTE_: `velero install` will, by default, use the CLI's version information to determine the version of the server components to deploy. This behavior may be overridden by using the `--image` flag. Refer to [Building Server Component Container Images][1].

View File

@ -103,7 +103,7 @@ spec:
# so that the exposed port numbers on the node will remain the same after restore. Optional
preserveNodePorts: true
# existingResourcePolicy specifies the restore behaviour
# for the kubernetes resource to be restored. Optional
# for the Kubernetes resource to be restored. Optional
existingResourcePolicy: none
# Actions to perform during or post restore. The only hooks currently supported are
# adding an init container to a pod before it can be restored and executing a command in a

View File

@ -52,9 +52,9 @@ region=ap-guangzhou,s3ForcePathStyle="true",s3Url=https://cos.ap-guangzhou.myqcl
Description of the parameters:
- `--provider`: Declares the type of plug-in provided by "aws".
- `--provider`: Declares the type of plugin provided by "aws".
- `--plugins`: Use the AWS S3 compatible API plug-in "velero-plugin-for-aws".
- `--plugins`: Use the AWS S3 compatible API plugin "velero-plugin-for-aws".
- `--bucket`: The bucket name created at Tencent Cloud COS.

View File

@ -122,7 +122,7 @@ velero install \
### Update resource requests and limits after install
After installation you can adjust the resource requests and limits in the Velero Deployment spec or node-agent DeamonSet spec, if you are using the File System Backup.
After installation you can adjust the resource requests and limits in the Velero Deployment spec or node-agent DaemonSet spec, if you are using the File System Backup.
**Velero pod**
@ -135,7 +135,7 @@ kubectl patch deployment velero -n velero --patch \
**node-agent pod**
Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the node-agent DeamonSet spec.
Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the node-agent DaemonSet spec.
```bash
kubectl patch daemonset node-agent -n velero --patch \

View File

@ -125,7 +125,7 @@ To mount the correct hostpath to pods volumes, run the node-agent pod in `privil
If node-agent is not running in a privileged mode, it will not be able to access pods volumes within the mounted
hostpath directory because of the default enforced SELinux mode configured in the host system level. You can
[create a custom SCC](https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html) to relax the
security in your cluster so that node-agent pods are allowed to use the hostPath volume plug-in without granting
security in your cluster so that node-agent pods are allowed to use the hostPath volume plugin without granting
them access to the `privileged` SCC.
By default a userland openshift namespace will not schedule pods on all nodes in the cluster.

View File

@ -142,7 +142,7 @@ Compression is either disabled or not unavailable for both uploader.
| Kopia | 4c4g |1m35s | 75% |248 MB |10 GB |
| Restic | 4c4g |3m17s | 171% |126 MB |10 GB |
#### conclusion:
- This case involves a relatively large backup size, there is no significant time reduction by increasing resources from 1c2g to 4c4g for Kopia uploader, but for Restic upoader when increasing CPU from 1 core to 4, backup time-consuming was shortened by one-third, which means in this scenario should allocate more CPU resources for Restic uploader.
- This case involves a relatively large backup size; there is no significant time reduction from increasing resources from 1c2g to 4c4g for the Kopia uploader, but for the Restic uploader, increasing CPU from 1 core to 4 shortened backup time by one-third, which means this scenario should allocate more CPU resources for the Restic uploader.
- For the large backup size case, Restic uploader's repository size comes to normal
### Case 4: 900 files, 1 directory, 1.000GB per file total 900.000GB content

View File

@ -50,7 +50,7 @@ You will need to change this setting on the server to make it work.
## Skipping TLS verification
**Note:** The `--insecure-skip-tls-verify` flag is insecure and susceptible to man-in-the-middle attacks and meant to help your testing and developing scenarios in an on-premise environment. Using this flag in production is not recommended.
**Note:** The `--insecure-skip-tls-verify` flag is insecure and susceptible to man-in-the-middle attacks and meant to help your testing and developing scenarios in an on-premises environment. Using this flag in production is not recommended.
Velero provides a way for you to skip TLS verification on the object store when using the [AWS provider plugin](https://github.com/vmware-tanzu/velero-plugin-for-aws) or [File System Backup](file-system-backup.md) by passing the `--insecure-skip-tls-verify` flag with the following Velero commands,

View File

@ -261,17 +261,17 @@ nginx 1/1 Running 0 13s 10.200.0.4 worker0
A list of Velero-specific terms and words to be used consistently across the site.
{{< table caption="Velero.io word list" >}}
|Trem|Usage|
|Term|Usage|
|--- |--- |
|Kubernetes|Kubernetes should always be capitalized.|
|Docker|Docker should always be capitalized.|
|Velero|Velero should always be capitalized.|
|VMware|VMware should always be correctly capitalized.|
|On-premises|On-premises or on-prem rather than on-premise or other variations.|
|Backup|Backup rather than back up, back-up or other variations.|
|Plugin|Plugin rather than plug-in or other variations.|
|Allowlist|Use allowlist instead of whitelist.|
|Denylist|Use denylist instead of blacklist.|
|On-premises|On-premises or on-prem rather than on-premise or other variations.| <!-- Velero.io word list : ignore -->
|Backup|Backup rather than back up, back-up or other variations.| <!-- Velero.io word list : ignore -->
|Plugin|Plugin rather than plug-in or other variations.| <!-- Velero.io word list : ignore -->
|Allowlist|Use allowlist instead of whitelist.| <!-- Velero.io word list : ignore -->
|Denylist|Use denylist instead of blacklist.| <!-- Velero.io word list : ignore -->
{{< /table >}}
## Markdown elements

View File

@ -44,7 +44,7 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw
velero install --crds-only --dry-run -o yaml | kubectl apply -f -
```
**NOTE:** Since velero v1.10.0 only v1 CRD will be supported during installation, therefore, the v1.10.0 will only work on kubernetes version >= v1.16
**NOTE:** Since velero v1.10.0, only v1 CRDs will be supported during installation; therefore, v1.10.0 will only work on Kubernetes version >= v1.16
3. Update the container image and objects fields used by the Velero deployment and, optionally, the restic daemon set:

View File

@ -3,7 +3,7 @@ title: "Velero Install CLI"
layout: docs
---
This document serves as a guide to using the `velero install` CLI command to install `velero` server components into your kubernetes cluster.
This document serves as a guide to using the `velero install` CLI command to install `velero` server components into your Kubernetes cluster.
_NOTE_: `velero install` will, by default, use the CLI's version information to determine the version of the server components to deploy. This behavior may be overridden by using the `--image` flag. Refer to [Building Server Component Container Images][1].

View File

@ -1,6 +1,6 @@
---
title: Velero is an Open Source Tool to Back up and Migrate Kubernetes Clusters
slug: Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters
slug: Velero-is-an-Open-Source-Tool-to-Back-up-and-Migrate-Kubernetes-Clusters # Velero.io word list : ignore
# image: https://placehold.it/200x200
excerpt: Velero is an open source tool to safely back up, recover, and migrate Kubernetes clusters and persistent volumes. It works both on premises and in a public cloud.
author_name: Velero Team
@ -31,4 +31,4 @@ Since Velero was initially released in August 2017, weve had nearly 70 contri
We are continuing to work towards Velero 1.0 and would love your help working on the items in our roadmap. If you're interested in contributing, we have a number of GitHub issues labeled as [Good First Issue](https://github.com/vmware-tanzu/velero/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+first+issue%22) and [Help Wanted](https://github.com/vmware-tanzu/velero/issues?q=is%3Aopen+is%3Aissue+label%3A%22Help+wanted%22), including items related to Prometheus metrics, the CLI UX, improved documentation, and more. We are more than happy to work with new and existing contributors alike.
_Previously posted at: <https://blogs.vmware.com/cloudnative/2019/02/28/velero-v0-11-delivers-an-open-source-tool-to-back-up-and-migrate-kubernetes-clusters/>_
_Previously posted at: <https://blogs.vmware.com/cloudnative/2019/02/28/velero-v0-11-delivers-an-open-source-tool-to-back-up-and-migrate-kubernetes-clusters/>_ <!-- Velero.io word list : ignore -->

View File

@ -28,7 +28,7 @@ A big focus of our work this cycle was continuing to improve support for restic.
Along with our bug fixes, we've provided an easier way to move restic backups between storage providers. Different providers often have different StorageClasses, requiring user intervention to make restores successfully complete.
To make cross-provider moves simpler, we've introduced a StorageClass remapping plug-in. It allows you to automatically translate one StorageClass on PersistentVolumeClaims and PersistentVolumes to another. You can read more about it in our [documentation](https://velero.io/docs/v1.1.0/restore-reference/#changing-pv-pvc-storage-classes).
To make cross-provider moves simpler, we've introduced a StorageClass remapping plugin. It allows you to automatically translate one StorageClass on PersistentVolumeClaims and PersistentVolumes to another. You can read more about it in our [documentation](https://velero.io/docs/v1.1.0/restore-reference/#changing-pv-pvc-storage-classes).
## Quality-of-Life Improvements
@ -42,7 +42,7 @@ In order to help you better understand what resources have been backed up, we
In the same vein, we've added the ability to put custom tags on cloud-provider snapshots. This approach should provide a better way to keep track of the resources being created in your cloud account. To add a label to a snapshot at backup time, use the `--labels` argument in the `velero backup create` command.
Our final change for increasing visibility into your Velero installation is the `velero plugin get` command. This command will report all the plug-ins within the Velero deployment.
Our final change for increasing visibility into your Velero installation is the `velero plugin get` command. This command will report all the plugins within the Velero deployment.
Velero has previously used a restore-only flag on the server to control whether a cluster could write backups to object storage. With Velero 1.1, we've now moved the restore-only behavior into read-only BackupStorageLocations. This move means that the Velero server can use a BackupStorageLocation as a source to restore from, but not for backups, while still retaining the ability to back up to other configured locations. In the future, the `--restore-only` flag will be removed in favor of configuring read-only BackupStorageLocations.
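Concretely, listing plugins and marking a location read-only might look like the following sketch; the location name `default` and the `velero` namespace are assumptions:

```bash
velero plugin get   # report the plugins in the Velero deployment
# Mark a BackupStorageLocation read-only; name and namespace are placeholders.
kubectl -n velero patch backupstoragelocation default \
  --type merge -p '{"spec":{"accessMode":"ReadOnly"}}'
```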

View File

@ -274,7 +274,7 @@ No resources found.
$ kubectl get pods -n cassandra
No resources found.
$ kubestl get pvc -n cassandra
$ kubectl get pvc -n cassandra
No resources found.
```

View File

@ -86,7 +86,7 @@ When deploying Velero on-premises, users have often asked for supporting a custo
Some users may have noticed that when restoring a backup containing CustomResourceDefinitions, the corresponding custom resources were not always restored. However, when running another restore, everything ran successfully.
With Velero v1.4.0, we've revisited our Kuberneters API server group discovery code and allowed the restore code to detect CustomResourceDefinition groups as they get restored, rather than simply relying on time-based refreshes.
With Velero v1.4.0, we've revisited our Kubernetes API server group discovery code and allowed the restore code to detect CustomResourceDefinition groups as they get restored, rather than simply relying on time-based refreshes.
## Refactored CRD backup code

View File

@ -44,7 +44,7 @@ In this release, we introduce a new plugin type, DeleteItemAction plugin, that o
The [velero-plugin-for-csi](https://github.com/vmware-tanzu/velero-plugin-for-csi) introduced a new pattern for backing up and restoring volume snapshots using BackupItemAction and RestoreItemAction plugins. To allow the community to adopt a similar pattern for their custom resources, Velero had to provide an extension point to clean up both in-cluster and external resources created by their BackupItemAction plugins. This is now possible with DeleteItemAction plugins. The interface for this new plugin type is similar to that of BackupItemAction and RestoreItemAction plugins. You can read more about the design for this plugin in the [design documents of our repository on GitHub](https://github.com/vmware-tanzu/velero/blob/main/design/delete-item-action.md).
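For readers exploring the new plugin type, a hedged sketch of its shape, mirroring the BackupItemAction and RestoreItemAction pattern; the design document linked above is the authoritative reference, and the type names below are illustrative:

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
)

// DeleteItemActionExecuteInput bundles the backed-up item being deleted
// with the Backup it belongs to.
type DeleteItemActionExecuteInput struct {
	Item   runtime.Unstructured
	Backup *velerov1.Backup
}

// DeleteItemAction runs when a backup is deleted, giving plugins a hook to
// clean up in-cluster or external resources their BackupItemAction created.
type DeleteItemAction interface {
	// AppliesTo selects which resources this plugin handles.
	AppliesTo() (velero.ResourceSelector, error)
	// Execute performs the cleanup for one item from the backup.
	Execute(input *DeleteItemActionExecuteInput) error
}
```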
### Code Mordernization
### Code Modernization
Velero has been helping its users with disaster recovery for their Kubernetes clusters since its first release in August 2017. Over the past three years, there have been major improvements in the ecosystem, including new frameworks that make it easier to develop solutions for Kubernetes. This release marks the first steps in our journey to modernize the Velero codebase and take advantage of newer frameworks as we begin the adoption of [kubebuilder](https://book.kubebuilder.io/), the most popular framework to build custom Kubernetes APIs and their respective controllers. As this effort continues, we would like to invite more folks to be a part of our growing contributor base.

View File

@ -115,7 +115,7 @@ run: ginkgo
@[ "${BSL_BUCKET}" ] && echo "Using bucket ${BSL_BUCKET} to store backups from E2E tests" || \
(echo "Bucket to store the backups from E2E tests is required, please re-run with BSL_BUCKET=<BucketName>"; exit 1 )
@[ "${CLOUD_PROVIDER}" ] && echo "Using cloud provider ${CLOUD_PROVIDER}" || \
(echo "Cloud provider for target cloud/plug-in provider is required, please rerun with CLOUD_PROVIDER=<aws,azure,kind,vsphere>"; exit 1)
(echo "Cloud provider for target cloud/plugin provider is required, please rerun with CLOUD_PROVIDER=<aws,azure,kind,vsphere>"; exit 1)
@$(GINKGO) -v $(FOCUS_STR) $(SKIP_STR) . -- -velerocli=$(VELERO_CLI) \
-velero-image=$(VELERO_IMAGE) \
-plugins=$(PLUGINS) \
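As a usage sketch, the two variables checked above can be supplied on the command line when running this target; both values are placeholders:

```bash
# Sketch: assumes the E2E Makefile's `run` target shown above.
make run BSL_BUCKET=my-e2e-bucket CLOUD_PROVIDER=aws
```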

View File

@ -9,7 +9,7 @@ If you previously ran unit tests using the `go test ./...` command or any of its
## Prerequisites
Running the E2E tests requires:
1. A running kubernetes cluster:
1. A running Kubernetes cluster:
1. With DNS and CNI installed.
1. Compatible with Velero: running Kubernetes v1.10 or later.
1. With necessary storage drivers/provisioners installed (a minimal local setup is sketched below).
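For a local run, a kind cluster is one way to satisfy all of these, assuming kind is acceptable for your tests; the cluster name and node image tag are placeholders:

```bash
# kind ships with DNS, a default CNI, and a local storage provisioner.
kind create cluster --name velero-e2e --image kindest/node:v1.24.0
```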
@ -31,7 +31,7 @@ These configuration parameters are expected as values to the following command l
1. `-credentials-file`: File containing credentials for backup and volume provider. Required.
1. `-bucket`: Name of the object storage bucket where backups from e2e tests should be stored. Required.
1. `-cloud-provider`: The cloud the tests will be run in. Appropriate plug-ins will be installed except for kind which requires
1. `-cloud-provider`: The cloud the tests will be run in. Appropriate plugins will be installed except for kind which requires
the object-store-provider to be specified.
1. `-object-store-provider`: Object store provider to use. Required when kind is the cloud provider.
1. `-velerocli`: Path to the velero application to use. Optional; by default uses `velero` in the `$PATH` (an example invocation follows this list).
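Tying the flags together, an invocation in the style of the Makefile's `run` target might look like this sketch; every value below is a placeholder:

```bash
# Sketch: kind as the cloud provider requires -object-store-provider too.
ginkgo -v . -- \
  -credentials-file=./credentials-velero \
  -bucket=my-e2e-bucket \
  -cloud-provider=kind \
  -object-store-provider=aws \
  -velerocli=$(which velero)
```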

View File

@ -91,7 +91,7 @@ func BackupRestoreTest(useVolumeSnapshots bool) {
}
backupName = "backup-" + UUIDgen.String()
restoreName = "restore-" + UUIDgen.String()
// Even though we are using Velero's CloudProvider plugin for object storage, the kubernetes cluster is running on
// Even though we are using Velero's CloudProvider plugin for object storage, the Kubernetes cluster is running on
// KinD. So use the kind installation for Kibishii.
// if set ProvideSnapshotsVolumeParam to false here, make sure set it true in other tests of this case

View File

@ -132,7 +132,7 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupNam
})
if providerName == "vsphere" && useVolumeSnapshots {
// Wait for uploads started by the Velero Plug-in for vSphere to complete
// Wait for uploads started by the Velero Plugin for vSphere to complete
// TODO - remove after upload progress monitoring is implemented
fmt.Println("Waiting for vSphere uploads to complete")
if err := WaitForVSphereUploadCompletion(oneHourTimeout, time.Hour, deletionTest, 2); err != nil {

View File

@ -179,7 +179,7 @@ func GetKubeconfigContext() error {
func TestE2e(t *testing.T) {
// Skip running E2E tests when running only "short" tests because:
// 1. E2E tests are long running tests involving installation of Velero and performing backup and restore operations.
// 2. E2E tests require a kubernetes cluster to install and run velero which further requires more configuration. See above referenced command line flags.
// 2. E2E tests require a Kubernetes cluster to install and run velero which further requires more configuration. See above referenced command line flags.
if testing.Short() {
t.Skip("Skipping E2E tests")
}

View File

@ -33,7 +33,7 @@ import (
"github.com/vmware-tanzu/velero/test/e2e/util/common"
)
// EnsureClusterExists returns whether or not a kubernetes cluster exists for tests to be run on.
// EnsureClusterExists returns whether or not a Kubernetes cluster exists for tests to be run on.
func EnsureClusterExists(ctx context.Context) error {
return exec.CommandContext(ctx, "kubectl", "cluster-info").Run()
}

View File

@ -108,7 +108,7 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc
fmt.Printf("VeleroBackupNamespace done %s\n", time.Now().Format("2006-01-02 15:04:05"))
if useVolumeSnapshots {
if providerName == "vsphere" {
// Wait for uploads started by the Velero Plug-in for vSphere to complete
// Wait for uploads started by the Velero Plugin for vSphere to complete
// TODO - remove after upload progress monitoring is implemented
fmt.Println("Waiting for vSphere uploads to complete")
if err := WaitForVSphereUploadCompletion(oneHourTimeout, time.Hour, kibishiiNamespace, 2); err != nil {
@ -134,7 +134,7 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc
return errors.New(fmt.Sprintf("PVB count %d should be %d in namespace %s", len(pvbs), pvCount, kibishiiNamespace))
}
if providerName == "vsphere" {
// Wait for uploads started by the Velero Plug-in for vSphere to complete
// Wait for uploads started by the Velero Plugin for vSphere to complete
// TODO - remove after upload progress monitoring is implemented
// TODO[High] - uncomment code block below when vSphere plugin PR #500 is included in release version.

View File

@ -80,7 +80,7 @@ func VeleroInstall(ctx context.Context, veleroCfg *VeleroConfig, isStandbyCluste
}
} else {
if veleroCfg.ObjectStoreProvider == "" {
return errors.New("No object store provider specified - must be specified when using kind as the cloud provider") // Gotta have an object store provider
return errors.New("No object store provider specified - must be specified when using kind as the cloud provider") // Must have an object store provider
}
}

View File

@ -677,7 +677,7 @@ func VeleroAddPluginsForProvider(ctx context.Context, veleroCLI string, veleroNa
return nil
}
// WaitForVSphereUploadCompletion waits for uploads started by the Velero Plug-in for vSphere to complete
// WaitForVSphereUploadCompletion waits for uploads started by the Velero Plugin for vSphere to complete
// TODO - remove after upload progress monitoring is implemented
func WaitForVSphereUploadCompletion(ctx context.Context, timeout time.Duration, namespace string, expectCount int) error {
err := wait.PollImmediate(time.Second*5, timeout, func() (bool, error) {