Merge branch 'main' into 5048-CSI-snapshot-timeout-configurable

pull/5104/head
Xun Jiang/Bruce Jiang 2022-08-08 17:18:44 +08:00 committed by GitHub
commit a5f4f8f9fc
137 changed files with 4459 additions and 3293 deletions

View File

@ -16,6 +16,7 @@ reviewers:
- blackpiglet
- qiuming-best
- shubham-pampattiwar
- Lyndon-Li
tech-writer:
- a-mccarthy

View File

@ -1,5 +1,9 @@
name: Pull Request Changelog Check
on: [pull_request]
# by setting `on: [pull_request]`, the action will be triggered when a PR is opened, synchronized, or reopened.
# Add labeled and unlabeled events too.
on:
pull_request:
types: [opened, synchronize, reopened, labeled, unlabeled]
jobs:
build:

View File

@ -13,6 +13,7 @@
| Xun Jiang | [blackpiglet](https://github.com/blackpiglet) | [VMware](https://www.github.com/vmware/) |
| Ming Qiu | [qiuming-best](https://github.com/qiuming-best) | [VMware](https://www.github.com/vmware/) |
| Shubham Pampattiwar | [shubham-pampattiwar](https://github.com/shubham-pampattiwar) | [OpenShift](https://github.com/openshift)
| Yonghui Li | [Lyndon-Li](https://github.com/Lyndon-Li) | [VMware](https://www.github.com/vmware/) |
## Emeritus Maintainers
* Adnan Abdulhussein ([prydonius](https://github.com/prydonius))

View File

@ -163,6 +163,7 @@ shell: build-dirs build-env
@# under $GOPATH).
@docker run \
-e GOFLAGS \
-e GOPROXY \
-i $(TTY) \
--rm \
-u $$(id -u):$$(id -g) \

View File

@ -7,7 +7,7 @@ k8s_yaml([
'config/crd/v1/bases/velero.io_downloadrequests.yaml',
'config/crd/v1/bases/velero.io_podvolumebackups.yaml',
'config/crd/v1/bases/velero.io_podvolumerestores.yaml',
'config/crd/v1/bases/velero.io_resticrepositories.yaml',
'config/crd/v1/bases/velero.io_backuprepositories.yaml',
'config/crd/v1/bases/velero.io_restores.yaml',
'config/crd/v1/bases/velero.io_schedules.yaml',
'config/crd/v1/bases/velero.io_serverstatusrequests.yaml',

View File

@ -0,0 +1 @@
Unified Repository Design

View File

@ -0,0 +1 @@
Fix typo in doc, in https://velero.io/docs/main/restore-reference/ "Restore order" section, "Mamespace" should be "Namespace".

View File

@ -0,0 +1 @@
Move 'velero.io/exclude-from-backup' label string to const

View File

@ -0,0 +1 @@
Fix bsl validation bug: the BSL is validated continually and doesn't respect the validation period configured

View File

@ -0,0 +1 @@
Dump stack trace when the plugin server handles panic

View File

@ -0,0 +1 @@
Modify BackupStoreGetter to avoid BSL spec changes

View File

@ -0,0 +1 @@
Let "make shell xxx" respect GOPROXY

View File

@ -0,0 +1 @@
Update the CRD for kopia integration

View File

@ -0,0 +1,4 @@
Kopia Integration: Add the Unified Repository Interface definition.
Kopia Integration: Add the changes for Unified Repository storage config.
Related Issues: #5076, #5080

View File

@ -0,0 +1 @@
This commit splits the pkg/restic package into several packages to support the Kopia integration work

View File

@ -0,0 +1 @@
Delay CA file deletion in PVB controller.

View File

@ -0,0 +1 @@
VolumeSnapshotLocation refactor with kubebuilder.

View File

@ -0,0 +1 @@
Add labeled and unlabeled events for PR changelog check action.

View File

@ -0,0 +1 @@
Skip registering "crd-remap-version" plugin when feature flag "EnableAPIGroupVersions" is set

View File

@ -0,0 +1 @@
Add changes for Kopia Integration: Unified Repository Provider - Repo Password

View File

@ -0,0 +1 @@
Fix restic backups to multiple backup storage locations bug

View File

@ -0,0 +1 @@
Reduce CRD size.

View File

@ -0,0 +1,2 @@
Treat namespaces with exclude label as excludedNamespaces
Related issue: #2413

View File

@ -0,0 +1 @@
Add annotation "pv.kubernetes.io/migrated-to" for CSI checking.

View File

@ -6,20 +6,23 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: resticrepositories.velero.io
name: backuprepositories.velero.io
spec:
group: velero.io
names:
kind: ResticRepository
listKind: ResticRepositoryList
plural: resticrepositories
singular: resticrepository
kind: BackupRepository
listKind: BackupRepositoryList
plural: backuprepositories
singular: backuprepository
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .spec.repositoryType
name: Repository Type
type: string
name: v1
schema:
openAPIV3Schema:
@ -37,7 +40,7 @@ spec:
metadata:
type: object
spec:
description: ResticRepositorySpec is the specification for a ResticRepository.
description: BackupRepositorySpec is the specification for a BackupRepository.
properties:
backupStorageLocation:
description: BackupStorageLocation is the name of the BackupStorageLocation
@ -47,12 +50,19 @@ spec:
description: MaintenanceFrequency is how often maintenance should
be run.
type: string
repositoryType:
description: RepositoryType indicates the type of the backend repository
enum:
- kopia
- restic
- ""
type: string
resticIdentifier:
description: ResticIdentifier is the full restic-compatible string
for identifying this repository.
type: string
volumeNamespace:
description: VolumeNamespace is the namespace this restic repository
description: VolumeNamespace is the namespace this backup repository
contains pod volume backups for.
type: string
required:
@ -62,7 +72,7 @@ spec:
- volumeNamespace
type: object
status:
description: ResticRepositoryStatus is the current status of a ResticRepository.
description: BackupRepositoryStatus is the current status of a BackupRepository.
properties:
lastMaintenanceTime:
description: LastMaintenanceTime is the last time maintenance was
@ -72,10 +82,10 @@ spec:
type: string
message:
description: Message is a message about the current status of the
ResticRepository.
BackupRepository.
type: string
phase:
description: Phase is the current state of the ResticRepository.
description: Phase is the current state of the BackupRepository.
enum:
- New
- Ready

View File

@ -37,9 +37,13 @@ spec:
jsonPath: .spec.volume
name: Volume
type: string
- description: Restic repository identifier for this backup
- description: Backup repository identifier for this backup
jsonPath: .spec.repoIdentifier
name: Restic Repo
name: Repository ID
type: string
- description: The type of the uploader to handle data transfer
jsonPath: .spec.uploaderType
name: Uploader Type
type: string
- description: Name of the Backup Storage Location where this backup should be
stored
@ -70,7 +74,7 @@ spec:
properties:
backupStorageLocation:
description: BackupStorageLocation is the name of the backup storage
location where the restic repository is stored.
location where the backup repository is stored.
type: string
node:
description: Node is the name of the node that the Pod is running
@ -114,7 +118,7 @@ spec:
type: string
type: object
repoIdentifier:
description: RepoIdentifier is the restic repository identifier.
description: RepoIdentifier is the backup repository identifier.
type: string
tags:
additionalProperties:
@ -122,6 +126,14 @@ spec:
description: Tags are a map of key-value pairs that should be applied
to the volume backup as tags.
type: object
uploaderType:
description: UploaderType is the type of the uploader to handle the
data transfer.
enum:
- kopia
- restic
- ""
type: string
volume:
description: Volume is the name of the volume within the Pod to be
backed up.

View File

@ -25,6 +25,10 @@ spec:
jsonPath: .spec.pod.name
name: Pod
type: string
- description: The type of the uploader to handle data transfer
jsonPath: .spec.uploaderType
name: Uploader Type
type: string
- description: Name of the volume to be restored
jsonPath: .spec.volume
name: Volume
@ -67,7 +71,7 @@ spec:
properties:
backupStorageLocation:
description: BackupStorageLocation is the name of the backup storage
location where the restic repository is stored.
location where the backup repository is stored.
type: string
pod:
description: Pod is a reference to the pod containing the volume to
@ -107,11 +111,19 @@ spec:
type: string
type: object
repoIdentifier:
description: RepoIdentifier is the restic repository identifier.
description: RepoIdentifier is the backup repository identifier.
type: string
snapshotID:
description: SnapshotID is the ID of the volume snapshot to be restored.
type: string
uploaderType:
description: UploaderType is the type of the uploader to handle the
data transfer.
enum:
- kopia
- restic
- ""
type: string
volume:
description: Volume is the name of the volume within the Pod to be
restored.

File diff suppressed because it is too large

View File

@ -13,6 +13,8 @@ spec:
kind: VolumeSnapshotLocation
listKind: VolumeSnapshotLocationList
plural: volumesnapshotlocations
shortNames:
- vsl
singular: volumesnapshotlocation
scope: Namespaced
versions:

View File

@ -24,6 +24,26 @@ rules:
- pods
verbs:
- get
- apiGroups:
- velero.io
resources:
- backuprepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- velero.io
resources:
- backuprepositories/status
verbs:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
@ -131,26 +151,6 @@ rules:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
- resticrepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- velero.io
resources:
- resticrepositories/status
verbs:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
@ -191,3 +191,15 @@ rules:
- get
- patch
- update
- apiGroups:
- velero.io
resources:
- volumesnapshotlocations
verbs:
- create
- delete
- get
- list
- patch
- update
- watch

7 binary files (images) not shown

View File

@ -0,0 +1,469 @@
# Unified Repository & Kopia Integration Design
## Glossary & Abbreviation
**BR**: Backup & Restore
**Backup Storage**: The storage that meets BR requirements, for example, being scalable, durable, and cost-effective. Backup Storage is therefore usually implemented as object storage or file system storage, and it may be on-premises or in the cloud. Backup Storage is not necessarily BR specific, so it usually doesn't provide most BR-related features. On the other hand, storage vendors may provide BR-specific storage that includes some BR features like deduplication, compression, encryption, etc. For a standalone BR solution (e.g., Velero), the Backup Storage is not part of the solution but is provided by users, so the BR solution should not assume that BR-related features are always available from the Backup Storage.
**Backup Repository**: The backup repository is layered between BR data movers and Backup Storage to provide BR-related features. The Backup Repository is part of the BR solution, so the BR solution by default leverages the Backup Repository to provide these features, because the Backup Repository is always available; when the Backup Storage provides duplicated features and the latter is more beneficial (e.g., better performance), the BR solution should have the ability to opt to use the Backup Storage's implementation.
**Data Mover**: The BR module that reads/writes data from/to workloads; the aim is to eliminate the differences between workloads.
**TCO**: Total Cost of Ownership. This is a general criterion for products/solutions, but it also matters a great deal for BR solutions. For example, it covers what kind of backup storage (and its cost) the solution requires, the retention policy of backup copies, the ways to remove backup data redundancy, etc.
**RTO**: Recovery Time Objective. This is the duration of time within which the user's business should recover after a disaster.
## Background
As a Kubernetes BR solution, Velero is pursuing the capability to back up data from the volatile and limited production environment into the durable, heterogeneous and scalable backup storage. This relies on two parts:
- Move data from various production workloads. The data mover has this role. Depending on the type of workload, Velero needs different data movers. For example, file system data mover, block data mover, and data movers for specific applications. At present, Velero supports moving file system data from PVs through Restic, which plays the role of the File System Data Mover.
- Persist data in backup storage. For a BR solution, this is the responsibility of the backup repository. Specifically, the backup repository is required to:
- Efficiently save data so as to reduce TCO. For example, deduplicate and compress the data before saving it
- Securely save data so as to meet security criteria. For example, encrypt the data at rest, make the data immutable after backup, and detect/protect against ransomware
- Efficiently retrieve data during restore so as to meet RTO. For example, restore a small unit of data or data associated with a small span of time
- Effectively manage data from all kinds of data movers in all kinds of backup storage. This means two things: first, backup storages differ from each other; second, some data movers may save quite different data from others, for example, some data movers save a portion of the logical object for each backup and need to visit and manage the portions as an entire logical object (i.e., incremental backup). The backup repository needs to provide unified functionalities to eliminate the differences from both ends
- Provide scalability so that users could assign resources (CPU, memory, network, etc.) to the backup repository in a flexible way, since the backup repository contains resource-consuming modules
At present, Velero provides some of these capabilities by leveraging Restic (e.g., deduplication and encryption at rest). This means that in addition to being a data mover for file system level data, Restic also plays the role of a backup repository, albeit one that is incomplete and limited:
- Restic is an inseparable unit made up of a file system data mover and a repository. This means that the repository capabilities are only available for Restic file system backup. We cannot provide the same capabilities to other data movers using Restic.
- The backup storage Velero supports through the Restic backup path depends on the storage Restic supports. As a result, if there is a requirement to introduce backup storage that Restic doesn't support, we have no way to do so.
- There is no way to enhance or extend the repository capabilities for the same reason: Restic is an inseparable unit, so we cannot insert one or more customized layers to make the enhancements and extensions.
Moreover, as reflected by user-reported issues, Restic seems to have many performance issues on both the file system data mover side and the repository side.
On the other hand, based on previous analysis and testing, we found that Kopia has better performance, more features, and is more suitable to fulfill Velero's repository targets (Kopia's architecture divides modules more clearly according to their responsibilities, and every module plays a complete role with clear interfaces; this makes it easier to bring individual modules into Velero without losing critical functionalities).
## Goals
- Define a Unified Repository Interface that various data movers could interact with. This is for the following purposes:
  - All kinds of data movers acquire the same set of backup repository capabilities very easily
  - Provide the possibility to plug in different backup repositories/backup storages without affecting the upper layers
  - Provide the possibility to plug in modules between the data mover and the backup repository, so as to extend the repository capabilities
  - Provide the possibility to scale the backup repository without affecting the upper layers
- Use Kopia repository to implement the Unified Repository
- Use Kopia uploader as the file system data mover for Pod Volume Backup
- Have Kopia uploader call the Unified Repository Interface and save/retrieve data to/from the Unified Repository
- Make Kopia uploader generic enough to move any file system data so that other data movement cases could use it
- Use the existing logic or add new logic to manage the unified repository and Kopia uploader
- Preserve the legacy Restic path for backward compatibility
## Non-Goals
- The Unified Repository supports all kinds of data movers saving logical objects into it. How these logical objects are organized for a specific data mover (for example, how a volume's block data is organized and represented by a unified repository object) should be included in the related data mover design.
- At present, Velero saves Kubernetes resources, backup metadata, and debug logs separately. Eventually, we want to save them in the Unified Repository. How to organize these data into the Unified Repository should be included in a separate design.
- For PodVolume BR, this design focuses on the data path only; other parts beyond data read/write and data persistency are irrelevant and kept unchanged.
- Kopia uploader is made generic enough to move any file system data. How it is integrated in other cases is irrelevant to this design. Taking CSI snapshot backup as an example, how the snapshot is taken and exposed to the Kopia uploader should be included in the related data mover design.
- The advanced modes of the Unified Repository, for example, backup repository/storage plugins, backup repository extensions, etc., are not included in this design. We will have separate designs to cover them whenever necessary.
## Architecture of Unified Repository
Below are the primary modules and their responsibilities:
- Kopia uploader, as it is well isolated, could move all file system data either from the production PV (as Velero's PodVolume BR does) or from any kind of snapshot (e.g., a CSI snapshot).
- Unified Repository Interface, data movers call the Unified Repository Interface to write/read data to/from the Unified Repository.
- Kopia repository layers, CAOS and CABS, work as the backup repository and expose the Kopia Repository interface.
- A Kopia Repository Library works as an adapter between Unified Repository Interface and Kopia Repository interface. Specifically, it implements Unified Repository Interface and calls Kopia Repository interface.
- At present, there is only one kind of backup repository -- Kopia Repository. If a new backup repository/storage is required, we need to create a new Library as an adapter to the Unified Repository Interface
- At present, the Kopia Repository works as a single piece in the same process as the caller; in the future, we may run its CABS in a dedicated process or node.
- At present, we don't have a requirement to extend the backup repository; if needed, an extra module could be added as an upper layer into the Unified Repository without changing the data movers.
Neither the Kopia uploader nor the Kopia Repository is invoked through the CLI; instead, they are invoked through code interfaces, because we need to do lots of customizations.
The Unified Repository takes two kinds of data:
- Unified Repository Object: This is the user's logical data, for example, files/directories, blocks of a volume, data of a database, etc.
- Unified Repository Manifest: This could include all other data to maintain the object data, for example, snapshot information, etc.
For Unified Repository Objects/Manifests, brief guidance for data movers is as follows:
- Data movers treat the simple unit of data they recognize as an Object. For example, file system data movers treat a file or a directory as an Object; block data movers treat a volume as an Object. However, it is not necessary for every data mover to have a unique data format in the Unified Repository; on the contrary, it is recommended that data movers share data formats unless there is a reason not to, so that the data generated by one data mover could be used by other data movers.
- Data movers don't need to care about the differences between full and incremental backups regarding the data organization. Data movers always have full views of their objects; if an object is partially written, they use the object writer's Seek function to skip the unchanged parts
- The Unified Repository may divide the data movers' logical Object into sub-objects or slices, or append internal metadata, but these are transparent to data movers
- Every Object has a unified identifier; in order to retrieve the Object later, data movers need to save the identifiers in the snapshot information. The snapshot information is saved as a Manifest.
- Manifests could hold any kind of small piece of data in a key-value manner. Inside the backup repository, these kinds of data may be processed differently from Object data, but this is transparent to data movers.
- A Manifest also has a unified identifier; the Unified Repository provides the capabilities to list all the Manifests, to get a specific Manifest by its identifier or its name, or to get a set of Manifests by their labels.
![A Unified Repository Architecture](unified-repo.png)
Velero by default uses the Unified Repository for all kinds of data movement; it is also able to integrate with other data movement paths from any party, for any purpose. Details are as follows:
- Built-in Data Path: this is the default data movement path, which uses Velero built-in data movers to back up/restore workloads; the data is written to/read from the Unified Repository.
- Data Mover Replacement: Any party could write its own data movers and plug them into Velero. Meanwhile, these plugin data movers could also write/read data to/from Velero's Unified Repository so that these data movers could expose the same capabilities provided by the Unified Repository. In order to do this, the data mover providers need to call the Unified Repository Interface from inside their plugin data movers.
- Data Path Replacement: Some vendors may already have their own data movers and backup repository and want to replace Velero's entire data path (including data movers and backup repository). In this case, the providers only need to implement their plugin data movers; everything downwards is a black box to Velero and managed by the providers themselves (including API calls, data transport, installation, lifecycle management, etc.). Therefore, this case is out of the scope of the Unified Repository.
![A Scope](scope.png)
# Detailed Design
## The Unified Repository Interface
Below are the definitions of the Unified Repository Interface. All the functions are synchronous.
```
///BackupRepoService is used to initialize, open or maintain a backup repository
type BackupRepoService interface {
	///Create a backup repository or connect to an existing backup repository
	///repoOption: option to the backup repository and the underlying backup storage
	///createNew: indicates whether to create a new or connect to an existing backup repository
	///result: the backup repository specific output that could be used to open the backup repository later
	Init(ctx context.Context, repoOption RepoOptions, createNew bool) error

	///Open a backup repository that has been created/connected
	///repoOption: options to open the backup repository and the underlying storage
	Open(ctx context.Context, repoOption RepoOptions) (BackupRepo, error)

	///Periodically called to maintain the backup repository to eliminate redundant data and improve performance
	///repoOption: options to maintain the backup repository
	Maintain(ctx context.Context, repoOption RepoOptions) error
}

///BackupRepo provides the access to the backup repository
type BackupRepo interface {
	///Open an existing object for read
	///id: the object's unified identifier
	OpenObject(ctx context.Context, id ID) (ObjectReader, error)

	///Get a manifest data
	GetManifest(ctx context.Context, id ID, mani *RepoManifest) error

	///Get one or more manifest data that match the given labels
	FindManifests(ctx context.Context, filter ManifestFilter) ([]*ManifestEntryMetadata, error)

	///Create a new object and return the object's writer interface
	///return: A unified identifier of the object on success
	NewObjectWriter(ctx context.Context, opt ObjectWriteOptions) ObjectWriter

	///Save a manifest object
	PutManifest(ctx context.Context, mani RepoManifest) (ID, error)

	///Delete a manifest object
	DeleteManifest(ctx context.Context, id ID) error

	///Flush all the backup repository data
	Flush(ctx context.Context) error

	///Get the local time of the backup repository. It may be different from the time of the caller
	Time() time.Time

	///Close the backup repository
	Close(ctx context.Context) error
}

type ObjectReader interface {
	io.ReadCloser
	io.Seeker

	///Length returns the logical size of the object
	Length() int64
}

type ObjectWriter interface {
	io.WriteCloser

	///For some cases, i.e. block incremental, the object is not written sequentially
	io.Seeker

	// Periodically called to preserve the state of data written to the repo so far
	// Return a unified identifier that represent the current state
	// An empty ID could be returned on success if the backup repository doesn't support this
	Checkpoint() (ID, error)

	///Wait for the completion of the object write
	///Result returns the object's unified identifier after the write completes
	Result() (ID, error)
}
```
Some data structure & constants used by the interfaces:
```
type RepoOptions struct {
	///A repository specific string to identify a backup storage, i.e., "s3", "filesystem"
	StorageType string
	///Backup repository password, if any
	RepoPassword string
	///A custom path to save the repository's configuration, if any
	ConfigFilePath string
	///Other repository specific options
	GeneralOptions map[string]string
	///Storage specific options
	StorageOptions map[string]string
}

///ObjectWriteOptions defines the options when creating an object for write
type ObjectWriteOptions struct {
	FullPath    string ///Full logical path of the object
	Description string ///A description of the object, could be empty
	Prefix      ID     ///A prefix of the name used to save the object
	AccessMode  int    ///OBJECT_DATA_ACCESS_*
	BackupMode  int    ///OBJECT_DATA_BACKUP_*
}

const (
	///Below consts defines the access mode when creating an object for write
	OBJECT_DATA_ACCESS_MODE_UNKNOWN int = 0
	OBJECT_DATA_ACCESS_MODE_FILE    int = 1
	OBJECT_DATA_ACCESS_MODE_BLOCK   int = 2

	OBJECT_DATA_BACKUP_MODE_UNKNOWN int = 0
	OBJECT_DATA_BACKUP_MODE_FULL    int = 1
	OBJECT_DATA_BACKUP_MODE_INC     int = 2
)

///ManifestEntryMetadata is the metadata describing one manifest data
type ManifestEntryMetadata struct {
	ID      ID                ///The ID of the manifest data
	Length  int32             ///The data size of the manifest data
	Labels  map[string]string ///Labels saved together with the manifest data
	ModTime time.Time         ///Modified time of the manifest data
}

type RepoManifest struct {
	Payload  interface{}            ///The user data of manifest
	Metadata *ManifestEntryMetadata ///The metadata data of manifest
}

type ManifestFilter struct {
	Labels map[string]string
}
```
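The following is a minimal, hypothetical sketch of how a data mover might use these interfaces to back up one stream of data and record it in a snapshot manifest. It assumes the types above are in scope (e.g., in the udmrepo package); the function name, the label keys and the manifest payload layout are illustrative only and not part of the interface.
```
// Hypothetical usage sketch, not part of the interface definition.
// Assumes the repository has already been initialized/connected via Init().
func backupOneObject(ctx context.Context, svc BackupRepoService, opt RepoOptions, path string, data io.Reader) (ID, error) {
	var empty ID

	repo, err := svc.Open(ctx, opt)
	if err != nil {
		return empty, err
	}
	defer repo.Close(ctx)

	// Write the object data.
	writer := repo.NewObjectWriter(ctx, ObjectWriteOptions{
		FullPath:   path,
		AccessMode: OBJECT_DATA_ACCESS_MODE_FILE,
		BackupMode: OBJECT_DATA_BACKUP_MODE_FULL,
	})
	defer writer.Close()

	if _, err := io.Copy(writer, data); err != nil {
		return empty, err
	}

	// Result waits for completion and returns the object's unified identifier.
	objID, err := writer.Result()
	if err != nil {
		return empty, err
	}

	// Save the identifier in a manifest so the object can be found at restore time.
	// The payload layout and labels are illustrative.
	if _, err := repo.PutManifest(ctx, RepoManifest{
		Payload:  map[string]ID{"rootObject": objID},
		Metadata: &ManifestEntryMetadata{Labels: map[string]string{"type": "snapshot", "path": path}},
	}); err != nil {
		return empty, err
	}

	return objID, repo.Flush(ctx)
}
```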
## Workflow
### Backup & Restore Workflow
We preserve the skeleton of the existing BR workflow, that is:
- Still use the Velero Server pod and VeleroNodeAgent daemonSet (originally called Restic daemonset) pods to hold the corresponding controllers and modules
- Still use the Backup/Restore CR and BackupRepository CR (originally called ResticRepository CR) to drive the BR workflow
The modules in gray in the diagram below are existing modules with no significant changes.
In the new design, we will have separate and independent modules/logics for backup repository and uploader (data mover), specifically:
- Repository Provider provides functionalities to manage the backup repository. For example, initialize a repository, connect to a repository, manage the snapshots in the repository, maintain a repository, etc.
- Uploader Provider provides functionalities to run a backup or restore.
The Repository Provider and Uploader Provider use options to choose the path --- legacy path vs. new path (Kopia uploader + Unified Repository). Specifically, for legacy path, Repository Provider will manage Restic Repository only, otherwise, it manages Unified Repository only; for legacy path, Uploader Provider calls Restic to do the BR, otherwise, it calls Kopia uploader to do the BR.
In order to manage Restic Repository, the Repository Provider calls Restic Repository Provider, the latter invokes the existing Restic CLIs.
In order to manage the Unified Repository, the Repository Provider calls the Unified Repository Provider; the latter calls the Unified Repository module through the udmrepo.BackupRepoService interface. It doesn't necessarily know how the Unified Repository is implemented.
In order to use Restic to do BR, the Uploader Provider calls Restic Uploader Provider, the latter invokes the existing Restic CLIs.
In order to use Kopia to do BR, the Uploader Provider calls the Kopia Uploader Provider; the latter does the following:
- Call the Unified Repository through the udmrepo.BackupRepoService interface to open the unified repository for read/write. Again, it doesn't necessarily know how the Unified Repository is implemented. It gets a BackupRepo read/write handle after the call succeeds
- Wrap the BackupRepo handle into a Kopia Shim which implements Kopia Repository interface
- Call the Kopia Uploader. Kopia Uploader is a Kopia module without any change, so it only understands Kopia Repository interface
- Kopia Uploader starts to back up/restore the corresponding PV's file system data and writes/reads data to/from the provided Kopia Repository implementation, that is, the Kopia Shim here
- When read/write calls go into Kopia Shim, it in turn calls the BackupRepo handle for read/write
- Finally, the read/write calls flow to Unified Repository module
The Unified Repository provides all-in-one functionalities of a Backup Repository and exposes the Unified Repository Interface. Inside, Kopia Library is an adapter for Kopia Repository to translate the Unified Repository Interface calls to Kopia Repository interface calls.
Both Kopia Shim and Kopia Library rely on Kopia Repository interface, so we need to have some Kopia version control. We may need to change Kopia Shim and Kopia Library when upgrading Kopia to a new version and the Kopia Repository interface has some changes in the new version.
![A BR Workflow](br-workflow.png)
The modules in blue in the diagram represent the newly added or reorganized modules/logics.
The modules in yellow in the diagram represent the Kopia modules that are called without changes.
### Delete Snapshot Workflow
The Delete Snapshot workflow follows a similar manner to the BR workflow, that is, we preserve the upper-level workflows until the calls reach the BackupDeletionController, then:
- Leverage Repository Provider to switch between Restic implementation and Unified Repository implementation in the same mechanism as BR
- For Restic implementation, the Restic Repository Provider invokes the existing “Forget” Restic CLI
- For the Unified Repository implementation, the Unified Repository Provider calls udmrepo.BackupRepo's DeleteManifest to delete a snapshot (see the sketch below)
![A Snapshot Deletion Workflow](snapshot-deletion-workflow.png)
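For illustration, a hypothetical sketch of that deletion call against the udmrepo.BackupRepo interface follows; the function name and the label key used to locate the snapshot manifest are illustrative, not the controller's actual code.
```
// Hypothetical sketch: delete the manifest(s) that belong to one snapshot.
func deleteSnapshot(ctx context.Context, repo BackupRepo, snapshotID string) error {
	// Locate the snapshot manifest(s) by label; the label key is illustrative.
	entries, err := repo.FindManifests(ctx, ManifestFilter{
		Labels: map[string]string{"snapshot": snapshotID},
	})
	if err != nil {
		return err
	}

	for _, entry := range entries {
		if err := repo.DeleteManifest(ctx, entry.ID); err != nil {
			return err
		}
	}

	return repo.Flush(ctx)
}
```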
### Maintenance Workflow
A Backup Repository/Backup Storage may need to periodically reorganize its data so that it can guarantee its QoS during long-term service. Some Backup Repositories/Backup Storages do this in the background automatically, so the user doesn't need to interfere; others need the caller to explicitly call their maintenance interface periodically. Restic and Kopia both go the second way, that is, Velero needs to periodically call their maintenance interface.
Velero already has an existing workflow to call Restic maintenance (it is called “Prune” in Restic, so Velero uses the same word). The existing workflow is as follows:
- The Prune is triggered at the time of the backup
- When a BackupRepository CR (originally called ResticRepository CR) is created by the PodVolumeBackup/Restore Controller, the BackupRepository controller checks whether the Prune Due Time has been reached; if so, it calls PruneRepo
- In the new design, the Repository Provider implements the PruneRepo call; it uses the same mechanism to switch between the Restic Repository Provider and the Unified Repository Provider, then:
- For Restic Repository, Restic Repository Provider invokes the existing “Prune” CLI of Restic
- For Unified Repository, the Unified Repository Provider calls udmrepo.BackupRepoService's Maintain function
Kopia has two maintenance modes: full maintenance and quick maintenance. There are many differences between the full and quick modes, but briefly speaking, quick mode only processes the hottest data (primarily the metadata and index data), so quick maintenance is much faster than full maintenance. On the other hand, quick maintenance also scatters the burden of full maintenance so that the full maintenance could finish faster and have less impact. We will also adopt quick maintenance in Velero.
We will add a new Due Time to Velero, so that there are two Prune Due Times:
- Normal Due Time: For Restic, this will invoke Restic Prune; for Unified Repository, this will invoke udmrepo.BackupRepoService's Maintain(full) call and finally call Kopia's full maintenance
- Quick Due Time: For Restic, this does nothing; for Unified Repository, this will invoke udmrepo.BackupRepoService's Maintain(quick) call and finally call Kopia's quick maintenance
We assign different values to Normal Due Time and Quick Due Time, so that quick maintenance happens more frequently than full maintenance (see the sketch below).
![A Maintenance Workflow](maintenance-workflow.png)
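A minimal sketch of the dual due-time check is shown below. The mode names and the bookkeeping of the last maintenance times are illustrative; the real values would come from the BackupRepository CR and the controller's configuration, and the selected mode would be passed through to udmrepo.BackupRepoService's Maintain call.
```
// Hypothetical sketch: decide which maintenance (if any) is due.
func dueMaintenanceMode(now, lastFull, lastQuick time.Time, normalDue, quickDue time.Duration) string {
	switch {
	case now.Sub(lastFull) >= normalDue:
		return "full" // Restic Prune, or Maintain(full) -> Kopia full maintenance
	case now.Sub(lastQuick) >= quickDue:
		return "quick" // no-op for Restic, or Maintain(quick) -> Kopia quick maintenance
	default:
		return "" // nothing is due
	}
}
```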
### Progress Update
Because Kopia Uploader is an unchanged Kopia module, we need to find a way to get its progress during the BR.
Kopia Uploader accepts a Progress interface to report rich information during the BR, so the Kopia Uploader Provider will implement Kopia's Progress interface and then pass it to the Kopia Uploader during its initialization.
In this way, Velero will be able to get the progress as shown in the diagram below.
![A Progress Update](progress-update.png)
### Logs
In the current design, Velero is using two unchanged Kopia modules --- the Kopia Uploader and the Kopia Repository. Both will generate debug logs during their run. Velero will collect these logs in order to aid debugging.
Kopia's Uploader and Repository both get the Logger information from the current Go context; therefore, the Kopia Uploader Provider/Kopia Library could set the Logger interface into the current context and pass the context to the Kopia Uploader/Kopia Repository.
Velero will set Logger interfaces separately for Kopia Uploader and Kopia Repository. In this way, the Unified Repository could serve other data movers without losing the debug log capability, and the Kopia Uploader could write to any repository without losing the debug log capability.
Kopia's debug logs will be written to the same log file as the Velero server or VeleroNodeAgent daemonset, so Velero doesn't need to upload/download these debug logs separately.
![A Debug Log for Uploader](debug-log-uploader.png)
![A Debug Log for Repository](debug-log-repository.png)
## Path Switch & Coexist
As mentioned above, there will be two paths. The related controllers need to identify the path at runtime and adjust their working mode.
According to the requirements, path changing is fulfilled at the backup/restore level. In order to let the controllers know the path, we need to add some option values. Specifically, there will be option/mode values for path selection in two places:
- Add the “uploader-type” option as a parameter of the Velero server. The parameters will be set by the installation. Currently the option has two values, either "restic" or "kopia" (in future, we may add other file system uploaders, then we will have more values).
- Add an "uploaderType" value in the PodVolume Backup/Restore CR and a "repositoryType" value in the BackupRepository CR. "uploaderType" currently has two values, either "restic" or "kopia"; "repositoryType" currently has two values, either "restic" or "kopia" (in future, the Unified Repository could opt among multiple backup repositories/backup storages, so there may be more values. This is a good reason that repositoryType is a multivariate flag; however, the way to opt among the backup repositories/backup storages is not covered in this PR). If the values are missing in the CRs, they default to "uploaderType=restic" and "repositoryType=restic", so the legacy CRs are handled correctly by Restic.
The corresponding controllers handle the CRs by checking the CRs' path value. Some examples are as below:
- The PodVolume BR controller checks the "uploaderType" value from PodVolume CRs and decides its working path
- The BackupRepository controller checks the "repositoryType" value from BackupRepository CRs and decides its working path
- The Backup controller that runs in the Velero server checks its "uploader-type" parameter to decide the path for the Backup it is going to create, and then creates the PodVolume Backup CR and BackupRepository CR
- The Restore controller checks the Backup, from which it is going to restore, for the path and then creates the PodVolume Restore CR and BackupRepository CR
As described above, the "uploader-type" parameter of the Velero server is only used to decide the path when creating a new Backup; for other cases, the path selection is driven by the related CRs. Therefore, we only need to add this parameter to the Velero server.
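As an illustration, a hypothetical sketch of the defaulting logic for the CR fields described above follows; the function name is illustrative, and unknown values would normally be rejected by the CRD enum.
```
// Hypothetical sketch: resolve the path from a CR's repositoryType/uploaderType field.
// An empty value falls back to "restic" so that legacy CRs keep working.
func resolvePath(value string) string {
	switch value {
	case "kopia":
		return "kopia" // new path: Kopia uploader + Unified Repository
	case "restic", "":
		return "restic" // legacy Restic path
	default:
		return "restic" // defensive fallback; CRD validation should prevent this
	}
}
```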
## Velero CR Name Changes
We will change the below CR's name to make it more generic:
- "ResticRepository" CR to "BackupRepository" CR
This means we add a new CR type and deprecate the old one. As a result, if users upgrade from an old release, the old CRs will be orphaned; Velero will neither refer to them nor manage them, and users need to delete these CRs manually.
As a side effect, when upgrading from an old release, even though the path is not changed, a new BackupRepository CR gets created, because Velero will not refer to the old CR's status. This seems as if it would cause the repository to be initialized more than once; however, it won't happen. In the BackupRepository controller, before initializing a repository, it always tries to connect to the repository first; if it is connectable, it won't do the initialization (as sketched at the end of this section).
When backing up with the new release, Velero always creates BackupRepository CRs instead of ResticRepository CRs.
When restoring from an old backup, Velero always creates BackupRepository CRs instead of ResticRepository CRs.
When there are already backups or restores running during the upgrade, since after upgrade, the Velero server pods and VeleroNodeAgent daemonset pods are restarted, the existing backups/restores will fail immediately.
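The "connect first, initialize only if necessary" behavior mentioned above could be expressed against the udmrepo.BackupRepoService interface roughly as follows; this is an illustrative sketch with a hypothetical function name, not the controller's actual code.
```
// Hypothetical sketch: make sure a repository exists and is connectable.
func ensureRepo(ctx context.Context, svc BackupRepoService, opt RepoOptions) error {
	// Try to connect to an existing repository first (createNew = false).
	if err := svc.Init(ctx, opt, false); err == nil {
		return nil
	}

	// Not connectable: create a new repository (createNew = true).
	return svc.Init(ctx, opt, true)
}
```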
## Storage Configuration
The backup repository needs some parameters to connect to various backup storages. For example, for an S3-compatible storage, the parameters may include bucket name, region, endpoint, etc. Different backup storages have totally different parameters. BackupRepository CRs, PodVolume Backup CRs and PodVolume Restore CRs save these parameters in their spec, as a string called repoIdentifier. The format of the string is for S3 storage only; it meets the Restic CLI's requirements but is not enough for other backup repositories. On the other hand, the parameters that are used to generate the repoIdentifier all come from the BackupStorageLocation. The latter has a map structure that could take parameters from any storage kind.
Therefore, for the new path, Velero uses the information in the BackupStorageLocation directly. That is, whenever Velero needs to initialize/connect to the Unified Repository, it acquires the storage configuration from the corresponding BackupStorageLocation (as sketched below). Consequently, no new elements will be added to BackupRepository CRs, PodVolume Backup CRs or PodVolume Restore CRs.
The legacy path will be kept as is. That is, Velero still sets/gets the repoIdentifier in BackupRepository CRs, PodVolume Backup CRs and PodVolume Restore CRs, and then passes it to the Restic CLI.
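A hypothetical sketch of how the storage configuration could be mapped from a BackupStorageLocation into RepoOptions is shown below; the function name, provider string and config keys are illustrative and depend on the object-store plugin in use.
```
// Hypothetical sketch: build RepoOptions from a BackupStorageLocation's
// provider name and config map instead of a repoIdentifier string.
func repoOptionsFromBSL(provider string, bslConfig map[string]string, repoPassword string) RepoOptions {
	opt := RepoOptions{
		StorageType:    provider, // e.g. "s3", "filesystem"
		RepoPassword:   repoPassword,
		GeneralOptions: map[string]string{},
		StorageOptions: map[string]string{},
	}

	// Pass the BSL config (e.g. "bucket", "region", "s3Url") through as storage options.
	for k, v := range bslConfig {
		opt.StorageOptions[k] = v
	}

	return opt
}
```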
## Installation
We will add a new flag "--pod-volume-backup-uploader" during installation. The flag has 3 meanings:
- It indicates PodVolume BR as the default method to protect PV data over other methods, e.g., durable snapshots. Therefore, the existing --use-restic option will be replaced
- It indicates the file system uploader to be used by PodVolume BR
- It implies the backup repository type: Restic if pod-volume-backup-uploader=restic, Unified Repository in all other cases
The flag has the following two values:
**"Restic"**: it means Velero will use Restic to do the pod volume backup. Therefore, the Velero server deployment will be created as below:
```
spec:
  containers:
  - args:
    - server
    - --features=
    - --uploader-type=restic
    command:
    - /velero
```
The BackupRepository CRs and PodVolume Backup/Restore CRs created in this case are as below:
```
spec:
  backupStorageLocation: default
  maintenanceFrequency: 168h0m0s
  repositoryType: restic
  volumeNamespace: nginx-example
```
```
spec:
  backupStorageLocation: default
  node: aks-agentpool-27359964-vmss000000
  pod:
    kind: Pod
    name: nginx-stateful-0
    namespace: nginx-example
    uid: 86aaec56-2b21-4736-9964-621047717133
  tags:
    ...
  uploaderType: restic
  volume: nginx-log
```
```
spec:
  backupStorageLocation: default
  pod:
    kind: Pod
    name: nginx-stateful-0
    namespace: nginx-example
    uid: e56d5872-3d94-4125-bfe8-8a222bf0fcf1
  snapshotID: 1741e5f1
  uploaderType: restic
  volume: nginx-log
```
**"Kopia"**: it means Velero will use Kopia uploader to do the pod volume backup (so it will use Unified Repository as the backup target). Therefore, the Velero server deployment will be created as below:
```
spec:
  containers:
  - args:
    - server
    - --features=
    - --uploader-type=kopia
    command:
    - /velero
```
The BackupRepository CRs created in this case are hard-set with "kopia" at present, since Kopia is the only option as a backup repository. The PodVolume Backup/Restore CRs are created with "kopia" as well:
```
spec:
  backupStorageLocation: default
  maintenanceFrequency: 168h0m0s
  repositoryType: kopia
  volumeNamespace: nginx-example
```
```
spec:
  backupStorageLocation: default
  node: aks-agentpool-27359964-vmss000000
  pod:
    kind: Pod
    name: nginx-stateful-0
    namespace: nginx-example
    uid: 86aaec56-2b21-4736-9964-621047717133
  tags:
    ...
  uploaderType: kopia
  volume: nginx-log
```
```
spec:
  backupStorageLocation: default
  pod:
    kind: Pod
    name: nginx-stateful-0
    namespace: nginx-example
    uid: e56d5872-3d94-4125-bfe8-8a222bf0fcf1
  snapshotID: 1741e5f1
  uploaderType: kopia
  volume: nginx-log
```
We will add the flag for both CLI installation and Helm Chart Installation. Specifically:
- Helm Chart Installation: add the "--pod-volume-backup-uploader" flag into its values.yaml and then generate the deployments according to the value. values.yaml is the user-provided configuration file, so users could set this value at the time of installation. The changes in values.yaml are as below:
```
command:
  - /velero
args:
  - server
  {{- with .Values.configuration }}
  {{- if eq (index . "pod-volume-backup-uploader") "restic" }}
  - --uploader-type=restic
  {{- end }}
```
- CLI Installation: add the "--pod-volume-backup-uploader" flag into the installation command line, and then create the two deployments accordingly. Users could change the option at the time of installation. The CLI is as below:
```velero install --pod-volume-backup-uploader=restic```
```velero install --pod-volume-backup-uploader=kopia```
## Upgrade
For upgrade, we allow users to change the path by specifying the "--pod-volume-backup-uploader" flag in the same way as in a fresh installation. Therefore, the flag change should be applied to the Velero server after upgrade. Additionally, we need to add a label to the Velero server to indicate the current path, so as to provide an easy way to query it.
Moreover, if users upgrade from the old release, we need to change the existing Restic Daemonset name to VeleroNodeAgent daemonSet. The name change should be applied after upgrade.
The recommended way for upgrade is to modify the related Velero resource directly through kubectl, the above changes will be applied in the same way. We need to modify the Velero doc for all these changes.
## CLI
The Velero CLIs below, or their output, need some changes:
- ```Velero backup describe```: the output should indicate the path
- ```Velero restore describe```: the output should indicate the path
- ```Velero restic repo get```: the name of this CLI should be changed to a generic one, for example, "Velero repo get"; the output of this CLI should print all the backup repositories if a Restic repository and a Unified Repository exist at the same time
At present, we don't have a requirement for selecting the path during backup, so we don't change the ```Velero backup create``` CLI for now. If there is a requirement in future, we could simply add a flag similar to "--pod-volume-backup-uploader" to select the path.
## CR Example
Below sample files demonstrate complete CRs with all the changes mentioned above:
- BackupRepository CR: https://gist.github.com/Lyndon-Li/f38ad69dd8c4785c046cd7ed0ef2b6ed#file-backup-repository-sample-yaml
- PodVolumeBackup CR: https://gist.github.com/Lyndon-Li/f38ad69dd8c4785c046cd7ed0ef2b6ed#file-pvb-sample-yaml
- PodVolumeRestore CR: https://gist.github.com/Lyndon-Li/f38ad69dd8c4785c046cd7ed0ef2b6ed#file-pvr-sample-yaml
## User Perspective
This design aims to provide a flexible backup repository layer and a generic file system uploader, which are fundamental for PodVolume and other data movements. Although this will make Velero more capable, at present we don't aim to expose differentiated features end to end. Specifically:
- By default, Velero still uses Restic for PodVolume BR
- Even when changing to the new path, Velero still allows users to restore from the data backed up by Restic
- The capability of PodVolume BR under the new path is kept the same as under the Restic path and the same as the existing PodVolume BR
- The operational experiences are kept the same as much as possible; the known changes are listed below
Below user experiences are changed for this design:
- Installation CLI change: a new option is added to the installation CLI, see the Installation section for details
- CR change: one or more existing CRs have been renamed; see the Velero CR Name Changes section for details
- Velero CLI name and output change, see the CLI section for details
- Velero daemonset name change
- Wording Alignment: currently, many places use the word "Restic", for example, the "default-volumes-to-restic" option; most of them are no longer accurate, so we will change these words and give a detailed list of the changes

Binary file not shown.

After

Width:  |  Height:  |  Size: 38 KiB

View File

@ -1,3 +0,0 @@
[
{ "op": "replace", "path": "/spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/hooks/properties/resources/items/properties/postHooks/items/properties/init/properties/initContainers/items/properties/ports/items/required", "value": [ "containerPort", "protocol"] }
]

View File

@ -1,3 +0,0 @@
[
{ "op": "replace", "path": "/spec/validation/openAPIV3Schema/properties/spec/properties/hooks/properties/resources/items/properties/postHooks/items/properties/init/properties/initContainers/items/properties/ports/items/required", "value": [ "containerPort", "protocol"] }
]

View File

@ -47,7 +47,7 @@ ${GOPATH}/src/k8s.io/code-generator/generate-groups.sh \
# Generate apiextensions.k8s.io/v1
# Generate manifests e.g. CRD, RBAC etc.
controller-gen \
crd:crdVersions=v1\
crd:crdVersions=v1 \
paths=./pkg/apis/velero/v1/... \
rbac:roleName=velero-perms \
paths=./pkg/controller/... \
@ -55,13 +55,4 @@ controller-gen \
object \
paths=./pkg/apis/velero/v1/...
# this is a super hacky workaround for https://github.com/kubernetes/kubernetes/issues/91395
# which a result of fixing the validation on CRD objects. The validation ensures the fields that are list map keys, are either marked
# as required or have default values to ensure merging of list map items work as expected.
# With "containerPort" and "protocol" being considered as x-kubernetes-list-map-keys in the container ports, and "protocol" was not
# a required field, the CRD would fail validation with errors similar to the one reported in https://github.com/kubernetes/kubernetes/issues/91395.
# once controller-gen (above) is able to generate CRDs with `protocol` as a required field, this hack can be removed.
kubectl patch -f config/crd/v1/bases/velero.io_restores.yaml -p "$(cat hack/restore-crd-patch-v1.json)" --type=json --local=true -o yaml > /tmp/velero.io_restores-yaml.patched
mv /tmp/velero.io_restores-yaml.patched config/crd/v1/bases/velero.io_restores.yaml
go generate ./config/crd/v1/crds

View File

@ -0,0 +1,24 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package credentials
// CredentialGetter is a collection of interfaces for interacting with credentials
// that are stored in different targets
type CredentialGetter struct {
FromFile FileStore
FromSecret SecretStore
}

View File

@ -0,0 +1,49 @@
// Code generated by mockery v2.14.0. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
v1 "k8s.io/api/core/v1"
)
// FileStore is an autogenerated mock type for the FileStore type
type FileStore struct {
mock.Mock
}
// Path provides a mock function with given fields: selector
func (_m *FileStore) Path(selector *v1.SecretKeySelector) (string, error) {
ret := _m.Called(selector)
var r0 string
if rf, ok := ret.Get(0).(func(*v1.SecretKeySelector) string); ok {
r0 = rf(selector)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(*v1.SecretKeySelector) error); ok {
r1 = rf(selector)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type mockConstructorTestingTNewFileStore interface {
mock.TestingT
Cleanup(func())
}
// NewFileStore creates a new instance of FileStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewFileStore(t mockConstructorTestingTNewFileStore) *FileStore {
mock := &FileStore{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -0,0 +1,49 @@
// Code generated by mockery v2.14.0. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
v1 "k8s.io/api/core/v1"
)
// SecretStore is an autogenerated mock type for the SecretStore type
type SecretStore struct {
mock.Mock
}
// Get provides a mock function with given fields: selector
func (_m *SecretStore) Get(selector *v1.SecretKeySelector) (string, error) {
ret := _m.Called(selector)
var r0 string
if rf, ok := ret.Get(0).(func(*v1.SecretKeySelector) string); ok {
r0 = rf(selector)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(*v1.SecretKeySelector) error); ok {
r1 = rf(selector)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type mockConstructorTestingTNewSecretStore interface {
mock.TestingT
Cleanup(func())
}
// NewSecretStore creates a new instance of SecretStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewSecretStore(t mockConstructorTestingTNewSecretStore) *SecretStore {
mock := &SecretStore{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -0,0 +1,56 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package credentials
import (
"github.com/pkg/errors"
corev1api "k8s.io/api/core/v1"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
// SecretStore defines operations for interacting with credentials
// that are stored in Secret.
type SecretStore interface {
// Get returns the secret key defined by the given selector
Get(selector *corev1api.SecretKeySelector) (string, error)
}
type namespacedSecretStore struct {
client kbclient.Client
namespace string
}
// NewNamespacedSecretStore returns a SecretStore which can interact with credentials
// for the given namespace.
func NewNamespacedSecretStore(client kbclient.Client, namespace string) (SecretStore, error) {
return &namespacedSecretStore{
client: client,
namespace: namespace,
}, nil
}
// Get returns the secret key defined by the given selector.
func (n *namespacedSecretStore) Get(selector *corev1api.SecretKeySelector) (string, error) {
creds, err := kube.GetSecretKey(n.client, n.namespace, selector)
if err != nil {
return "", errors.Wrap(err, "unable to get key for secret")
}
return string(creds), nil
}

View File

@ -131,10 +131,10 @@ func (i *InitContainerRestoreHookHandler) HandleRestoreHooks(
pod.Spec.InitContainers = pod.Spec.InitContainers[1:]
}
hooksFromAnnotations := getInitRestoreHookFromAnnotation(kube.NamespaceAndName(pod), metadata.GetAnnotations(), log)
if hooksFromAnnotations != nil {
initContainerFromAnnotations := getInitContainerFromAnnotation(kube.NamespaceAndName(pod), metadata.GetAnnotations(), log)
if initContainerFromAnnotations != nil {
log.Infof("Handling InitRestoreHooks from pod annotations")
initContainers = append(initContainers, hooksFromAnnotations.InitContainers...)
initContainers = append(initContainers, *initContainerFromAnnotations)
} else {
log.Infof("Handling InitRestoreHooks from RestoreSpec")
// pod did not have the annotations appropriate for restore hooks
@ -155,7 +155,22 @@ func (i *InitContainerRestoreHookHandler) HandleRestoreHooks(
}
for _, hook := range rh.RestoreHooks {
if hook.Init != nil {
initContainers = append(initContainers, hook.Init.InitContainers...)
containers := make([]corev1api.Container, 0)
for _, raw := range hook.Init.InitContainers {
container := corev1api.Container{}
err := ValidateContainer(raw.Raw)
if err != nil {
log.Errorf("invalid Restore Init hook: %s", err.Error())
return nil, err
}
err = json.Unmarshal(raw.Raw, &container)
if err != nil {
log.Errorf("fail to Unmarshal hook Init into container: %s", err.Error())
return nil, errors.WithStack(err)
}
containers = append(containers, container)
}
initContainers = append(initContainers, containers...)
}
}
}
@ -350,7 +365,7 @@ type ResourceRestoreHook struct {
RestoreHooks []velerov1api.RestoreResourceHook
}
func getInitRestoreHookFromAnnotation(podName string, annotations map[string]string, log logrus.FieldLogger) *velerov1api.InitRestoreHook {
func getInitContainerFromAnnotation(podName string, annotations map[string]string, log logrus.FieldLogger) *corev1api.Container {
containerImage := annotations[podRestoreHookInitContainerImageAnnotationKey]
containerName := annotations[podRestoreHookInitContainerNameAnnotationKey]
command := annotations[podRestoreHookInitContainerCommandAnnotationKey]
@ -373,15 +388,13 @@ func getInitRestoreHookFromAnnotation(podName string, annotations map[string]str
log.Infof("Pod %s has no %s annotation, using generated name %s for initContainer", podName, podRestoreHookInitContainerNameAnnotationKey, containerName)
}
return &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
{
Image: containerImage,
Name: containerName,
Command: parseStringToCommand(command),
},
},
initContainer := corev1api.Container{
Image: containerImage,
Name: containerName,
Command: parseStringToCommand(command),
}
return &initContainer
}
// GetRestoreHooksFromSpec returns a list of ResourceRestoreHooks from the restore Spec.
@ -406,7 +419,7 @@ func GetRestoreHooksFromSpec(hooksSpec *velerov1api.RestoreHooks) ([]ResourceRes
if rs.LabelSelector != nil {
ls, err := metav1.LabelSelectorAsSelector(rs.LabelSelector)
if err != nil {
return nil, errors.WithStack(err)
return []ResourceRestoreHook{}, errors.WithStack(err)
}
rh.Selector.LabelSelector = ls
}
@ -526,3 +539,17 @@ func GroupRestoreExecHooks(
return byContainer, nil
}
// ValidateContainer validates whether a raw container definition contains the mandatory k8s Container fields.
// Mandatory fields are name, image, and command.
func ValidateContainer(raw []byte) error {
container := corev1api.Container{}
err := json.Unmarshal(raw, &container)
if err != nil {
return err
}
if len(container.Command) <= 0 || len(container.Name) <= 0 || len(container.Image) <= 0 {
return fmt.Errorf("invalid InitContainer in restore hook, it doesn't have Command, Name or Image field")
}
return nil
}

View File

@ -1191,11 +1191,11 @@ func TestGroupRestoreExecHooks(t *testing.T) {
}
}
func TestGetInitRestoreHookFromAnnotations(t *testing.T) {
func TestGetInitContainerFromAnnotations(t *testing.T) {
testCases := []struct {
name string
inputAnnotations map[string]string
expected velerov1api.InitRestoreHook
expected *corev1api.Container
expectNil bool
}{
{
@ -1223,12 +1223,8 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) {
podRestoreHookInitContainerNameAnnotationKey: "",
podRestoreHookInitContainerCommandAnnotationKey: "/usr/bin/data-populator /user-data full",
},
expected: velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init1", "busy-box").
Command([]string{"/usr/bin/data-populator /user-data full"}).Result(),
},
},
expected: builder.ForContainer("restore-init1", "busy-box").
Command([]string{"/usr/bin/data-populator /user-data full"}).Result(),
},
{
name: "should generate container name when container name is missing",
@ -1237,22 +1233,14 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) {
podRestoreHookInitContainerImageAnnotationKey: "busy-box",
podRestoreHookInitContainerCommandAnnotationKey: "/usr/bin/data-populator /user-data full",
},
expected: velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init1", "busy-box").
Command([]string{"/usr/bin/data-populator /user-data full"}).Result(),
},
},
expected: builder.ForContainer("restore-init1", "busy-box").
Command([]string{"/usr/bin/data-populator /user-data full"}).Result(),
},
{
name: "should return expected init container when all annotations are specified",
expectNil: false,
expected: velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init1", "busy-box").
Command([]string{"/usr/bin/data-populator /user-data full"}).Result(),
},
},
expected: builder.ForContainer("restore-init1", "busy-box").
Command([]string{"/usr/bin/data-populator /user-data full"}).Result(),
inputAnnotations: map[string]string{
podRestoreHookInitContainerImageAnnotationKey: "busy-box",
podRestoreHookInitContainerNameAnnotationKey: "restore-init",
@ -1262,12 +1250,8 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) {
{
name: "should return expected init container when all annotations are specified with command as a JSON array",
expectNil: false,
expected: velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init1", "busy-box").
Command([]string{"a", "b", "c"}).Result(),
},
},
expected: builder.ForContainer("restore-init1", "busy-box").
Command([]string{"a", "b", "c"}).Result(),
inputAnnotations: map[string]string{
podRestoreHookInitContainerImageAnnotationKey: "busy-box",
podRestoreHookInitContainerNameAnnotationKey: "restore-init",
@ -1277,12 +1261,8 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) {
{
name: "should return expected init container when all annotations are specified with command as a malformed JSON array",
expectNil: false,
expected: velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init1", "busy-box").
Command([]string{"[foobarbaz"}).Result(),
},
},
expected: builder.ForContainer("restore-init1", "busy-box").
Command([]string{"[foobarbaz"}).Result(),
inputAnnotations: map[string]string{
podRestoreHookInitContainerImageAnnotationKey: "busy-box",
podRestoreHookInitContainerNameAnnotationKey: "restore-init",
@ -1293,15 +1273,14 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
actual := getInitRestoreHookFromAnnotation("test/pod1", tc.inputAnnotations, velerotest.NewLogger())
actualInitContainer := getInitContainerFromAnnotation("test/pod1", tc.inputAnnotations, velerotest.NewLogger())
if tc.expectNil {
assert.Nil(t, actual)
assert.Nil(t, actualInitContainer)
return
}
assert.NotEmpty(t, actual.InitContainers[0].Name)
assert.Equal(t, len(tc.expected.InitContainers), len(actual.InitContainers))
assert.Equal(t, tc.expected.InitContainers[0].Image, actual.InitContainers[0].Image)
assert.Equal(t, tc.expected.InitContainers[0].Command, actual.InitContainers[0].Command)
assert.NotEmpty(t, actualInitContainer.Name)
assert.Equal(t, tc.expected.Image, actualInitContainer.Image)
assert.Equal(t, tc.expected.Command, actualInitContainer.Command)
})
}
}
@ -1347,11 +1326,11 @@ func TestGetRestoreHooksFromSpec(t *testing.T) {
PostHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init1", "busy-box").
Command([]string{"foobarbaz"}).Result(),
*builder.ForContainer("restore-init2", "busy-box").
Command([]string{"foobarbaz"}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init1", "busy-box").
Command([]string{"foobarbaz"}).ResultRawExtension(),
builder.ForContainer("restore-init2", "busy-box").
Command([]string{"foobarbaz"}).ResultRawExtension(),
},
},
},
@ -1369,11 +1348,11 @@ func TestGetRestoreHooksFromSpec(t *testing.T) {
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init1", "busy-box").
Command([]string{"foobarbaz"}).Result(),
*builder.ForContainer("restore-init2", "busy-box").
Command([]string{"foobarbaz"}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init1", "busy-box").
Command([]string{"foobarbaz"}).ResultRawExtension(),
builder.ForContainer("restore-init2", "busy-box").
Command([]string{"foobarbaz"}).ResultRawExtension(),
},
},
},
@ -1539,9 +1518,9 @@ func TestHandleRestoreHooks(t *testing.T) {
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("should-not exist", "does-not-matter").
Command([]string{""}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("should-not exist", "does-not-matter").
Command([]string{""}).ResultRawExtension(),
},
},
},
@ -1556,6 +1535,9 @@ func TestHandleRestoreHooks(t *testing.T) {
Name: "app1",
Namespace: "default",
},
Spec: corev1api.PodSpec{
InitContainers: []corev1api.Container{},
},
},
expectedError: nil,
expectedPod: &corev1api.Pod{
@ -1582,11 +1564,11 @@ func TestHandleRestoreHooks(t *testing.T) {
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
*builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
},
},
},
@ -1643,11 +1625,11 @@ func TestHandleRestoreHooks(t *testing.T) {
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
*builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
},
},
},
@ -1680,11 +1662,11 @@ func TestHandleRestoreHooks(t *testing.T) {
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
*builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
},
},
},
@ -1733,11 +1715,11 @@ func TestHandleRestoreHooks(t *testing.T) {
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
*builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
},
},
},
@ -1795,11 +1777,11 @@ func TestHandleRestoreHooks(t *testing.T) {
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
*builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
builder.ForContainer("restore-init-container-2", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
},
},
},
@ -1868,9 +1850,9 @@ func TestHandleRestoreHooks(t *testing.T) {
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
},
},
},
@ -1911,9 +1893,9 @@ func TestHandleRestoreHooks(t *testing.T) {
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []corev1api.Container{
*builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).Result(),
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init-container-1", "nginx").
Command([]string{"a", "b", "c"}).ResultRawExtension(),
},
},
},
@ -1922,6 +1904,37 @@ func TestHandleRestoreHooks(t *testing.T) {
},
namespaceMapping: map[string]string{"default": "new"},
},
{
name: "Invalid InitContainer in restore hook should return a nil pod and an error.",
podInput: corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "new",
},
Spec: corev1api.PodSpec{},
},
expectedError: fmt.Errorf("invalid InitContainer in restore hook, it doesn't have Command, Name or Image field"),
expectedPod: nil,
restoreHooks: []ResourceRestoreHook{
{
Name: "hook1",
Selector: ResourceHookSelector{
Namespaces: collections.NewIncludesExcludes().Includes("new"),
Resources: collections.NewIncludesExcludes().Includes(kuberesource.Pods.Resource),
},
RestoreHooks: []velerov1api.RestoreResourceHook{
{
Init: &velerov1api.InitRestoreHook{
InitContainers: []runtime.RawExtension{
builder.ForContainer("restore-init-container-1", "nginx").
ResultRawExtension(),
},
},
},
},
},
},
},
}
for _, tc := range testCases {
@ -1931,10 +1944,32 @@ func TestHandleRestoreHooks(t *testing.T) {
assert.NoError(t, err)
actual, err := handler.HandleRestoreHooks(velerotest.NewLogger(), kuberesource.Pods, &unstructured.Unstructured{Object: podMap}, tc.restoreHooks, tc.namespaceMapping)
assert.Equal(t, tc.expectedError, err)
actualPod := new(corev1api.Pod)
err = runtime.DefaultUnstructuredConverter.FromUnstructured(actual.UnstructuredContent(), actualPod)
assert.NoError(t, err)
assert.Equal(t, tc.expectedPod, actualPod)
if actual != nil {
actualPod := new(corev1api.Pod)
err = runtime.DefaultUnstructuredConverter.FromUnstructured(actual.UnstructuredContent(), actualPod)
assert.NoError(t, err)
assert.Equal(t, tc.expectedPod, actualPod)
}
})
}
}
func TestValidateContainer(t *testing.T) {
valid := `{"name": "test", "image": "busybox", "command": ["pwd"]}`
noName := `{"image": "busybox", "command": ["pwd"]}`
noImage := `{"name": "test", "command": ["pwd"]}`
noCommand := `{"name": "test", "image": "busybox"}`
expectedError := fmt.Errorf("invalid InitContainer in restore hook, it doesn't have Command, Name or Image field")
// valid string should return nil as result.
assert.Equal(t, nil, ValidateContainer([]byte(valid)))
// noName string should return expected error as result.
assert.Equal(t, expectedError, ValidateContainer([]byte(noName)))
// noImage string should return expected error as result.
assert.Equal(t, expectedError, ValidateContainer([]byte(noImage)))
// noCommand string should return expected error as result.
assert.Equal(t, expectedError, ValidateContainer([]byte(noCommand)))
}

View File

@ -20,9 +20,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ResticRepositorySpec is the specification for a ResticRepository.
type ResticRepositorySpec struct {
// VolumeNamespace is the namespace this restic repository contains
// BackupRepositorySpec is the specification for a BackupRepository.
type BackupRepositorySpec struct {
// VolumeNamespace is the namespace this backup repository contains
// pod volume backups for.
VolumeNamespace string `json:"volumeNamespace"`
@ -30,6 +30,11 @@ type ResticRepositorySpec struct {
// that should contain this repository.
BackupStorageLocation string `json:"backupStorageLocation"`
// RepositoryType indicates the type of the backend repository
// +kubebuilder:validation:Enum=kopia;restic;""
// +optional
RepositoryType string `json:"repositoryType"`
// ResticIdentifier is the full restic-compatible string for identifying
// this repository.
ResticIdentifier string `json:"resticIdentifier"`
@ -38,23 +43,23 @@ type ResticRepositorySpec struct {
MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"`
}
// ResticRepositoryPhase represents the lifecycle phase of a ResticRepository.
// BackupRepositoryPhase represents the lifecycle phase of a BackupRepository.
// +kubebuilder:validation:Enum=New;Ready;NotReady
type ResticRepositoryPhase string
type BackupRepositoryPhase string
const (
ResticRepositoryPhaseNew ResticRepositoryPhase = "New"
ResticRepositoryPhaseReady ResticRepositoryPhase = "Ready"
ResticRepositoryPhaseNotReady ResticRepositoryPhase = "NotReady"
BackupRepositoryPhaseNew BackupRepositoryPhase = "New"
BackupRepositoryPhaseReady BackupRepositoryPhase = "Ready"
BackupRepositoryPhaseNotReady BackupRepositoryPhase = "NotReady"
)
// ResticRepositoryStatus is the current status of a ResticRepository.
type ResticRepositoryStatus struct {
// Phase is the current state of the ResticRepository.
// BackupRepositoryStatus is the current status of a BackupRepository.
type BackupRepositoryStatus struct {
// Phase is the current state of the BackupRepository.
// +optional
Phase ResticRepositoryPhase `json:"phase,omitempty"`
Phase BackupRepositoryPhase `json:"phase,omitempty"`
// Message is a message about the current status of the ResticRepository.
// Message is a message about the current status of the BackupRepository.
// +optional
Message string `json:"message,omitempty"`
@ -72,33 +77,35 @@ type ResticRepositoryStatus struct {
// +kubebuilder:object:generate=true
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Repository Type",type="string",JSONPath=".spec.repositoryType"
//
type ResticRepository struct {
type BackupRepository struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
// +optional
Spec ResticRepositorySpec `json:"spec,omitempty"`
Spec BackupRepositorySpec `json:"spec,omitempty"`
// +optional
Status ResticRepositoryStatus `json:"status,omitempty"`
Status BackupRepositoryStatus `json:"status,omitempty"`
}
// TODO(2.0) After converting all resources to use the runtime-controller client,
// the k8s:deepcopy marker will no longer be needed and should be removed.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:rbac:groups=velero.io,resources=resticrepositories,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=velero.io,resources=resticrepositories/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=velero.io,resources=backuprepositories,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=velero.io,resources=backuprepositories/status,verbs=get;update;patch
// ResticRepositoryList is a list of ResticRepositories.
type ResticRepositoryList struct {
// BackupRepositoryList is a list of BackupRepositories.
type BackupRepositoryList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty"`
Items []ResticRepository `json:"items"`
Items []BackupRepository `json:"items"`
}
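
To illustrate the renamed API, a minimal Go sketch constructing a BackupRepository with the new repositoryType field, using only fields defined in this hunk; the object name, namespace, and maintenance frequency are illustrative.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// exampleBackupRepository builds one of the renamed objects with the new
// repositoryType field set; the names and frequency are illustrative.
func exampleBackupRepository() *velerov1api.BackupRepository {
	return &velerov1api.BackupRepository{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "velero",
			Name:      "ns1-default-kopia", // illustrative
		},
		Spec: velerov1api.BackupRepositorySpec{
			VolumeNamespace:       "ns1",
			BackupStorageLocation: "default",
			RepositoryType:        "kopia", // may also be "restic" or "" per the enum marker
			MaintenanceFrequency:  metav1.Duration{Duration: time.Hour},
		},
	}
}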

View File

@ -34,12 +34,17 @@ type PodVolumeBackupSpec struct {
Volume string `json:"volume"`
// BackupStorageLocation is the name of the backup storage location
// where the restic repository is stored.
// where the backup repository is stored.
BackupStorageLocation string `json:"backupStorageLocation"`
// RepoIdentifier is the restic repository identifier.
// RepoIdentifier is the backup repository identifier.
RepoIdentifier string `json:"repoIdentifier"`
// UploaderType is the type of the uploader to handle the data transfer.
// +kubebuilder:validation:Enum=kopia;restic;""
// +optional
UploaderType string `json:"uploaderType"`
// Tags are a map of key-value pairs that should be applied to the
// volume backup as tags.
// +optional
@ -107,7 +112,8 @@ type PodVolumeBackupStatus struct {
// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be backed up"
// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be backed up"
// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be backed up"
// +kubebuilder:printcolumn:name="Restic Repo",type="string",JSONPath=".spec.repoIdentifier",description="Restic repository identifier for this backup"
// +kubebuilder:printcolumn:name="Repository ID",type="string",JSONPath=".spec.repoIdentifier",description="Backup repository identifier for this backup"
// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer"
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:object:root=true
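
A small sketch of the spec with the new uploaderType field populated, assuming the velerov1api alias used elsewhere in this changeset; only fields visible in this hunk are set and the values are illustrative.

package example

import (
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// examplePodVolumeBackupSpec sets only the fields visible in this hunk; the
// uploaderType value selects the data mover and may be "kopia", "restic", or "".
func examplePodVolumeBackupSpec() velerov1api.PodVolumeBackupSpec {
	return velerov1api.PodVolumeBackupSpec{
		Volume:                "data",    // illustrative volume name
		BackupStorageLocation: "default", // BSL holding the backup repository
		RepoIdentifier:        "s3:s3.amazonaws.com/my-bucket/restic/ns1", // illustrative
		UploaderType:          "kopia",
	}
}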

View File

@ -30,12 +30,17 @@ type PodVolumeRestoreSpec struct {
Volume string `json:"volume"`
// BackupStorageLocation is the name of the backup storage location
// where the restic repository is stored.
// where the backup repository is stored.
BackupStorageLocation string `json:"backupStorageLocation"`
// RepoIdentifier is the restic repository identifier.
// RepoIdentifier is the backup repository identifier.
RepoIdentifier string `json:"repoIdentifier"`
// UploaderType is the type of the uploader to handle the data transfer.
// +kubebuilder:validation:Enum=kopia;restic;""
// +optional
UploaderType string `json:"uploaderType"`
// SnapshotID is the ID of the volume snapshot to be restored.
SnapshotID string `json:"snapshotID"`
}
@ -89,6 +94,7 @@ type PodVolumeRestoreStatus struct {
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be restored"
// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be restored"
// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer"
// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be restored"
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Restore status such as New/InProgress"
// +kubebuilder:printcolumn:name="TotalBytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Pod Volume Restore status such as New/InProgress"

View File

@ -52,7 +52,7 @@ func CustomResources() map[string]typeInfo {
"DeleteBackupRequest": newTypeInfo("deletebackuprequests", &DeleteBackupRequest{}, &DeleteBackupRequestList{}),
"PodVolumeBackup": newTypeInfo("podvolumebackups", &PodVolumeBackup{}, &PodVolumeBackupList{}),
"PodVolumeRestore": newTypeInfo("podvolumerestores", &PodVolumeRestore{}, &PodVolumeRestoreList{}),
"ResticRepository": newTypeInfo("resticrepositories", &ResticRepository{}, &ResticRepositoryList{}),
"BackupRepository": newTypeInfo("backuprepositories", &BackupRepository{}, &BackupRepositoryList{}),
"BackupStorageLocation": newTypeInfo("backupstoragelocations", &BackupStorageLocation{}, &BackupStorageLocationList{}),
"VolumeSnapshotLocation": newTypeInfo("volumesnapshotlocations", &VolumeSnapshotLocation{}, &VolumeSnapshotLocationList{}),
"ServerStatusRequest": newTypeInfo("serverstatusrequests", &ServerStatusRequest{}, &ServerStatusRequestList{}),

View File

@ -17,8 +17,8 @@ limitations under the License.
package v1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// RestoreSpec defines the specification for a Velero restore.
@ -208,9 +208,10 @@ type ExecRestoreHook struct {
// InitRestoreHook is a hook that adds an init container to a PodSpec to run commands before the
// workload pod is able to start.
type InitRestoreHook struct {
// +kubebuilder:pruning:PreserveUnknownFields
// InitContainers is a list of init containers to be added to a pod during its restore.
// +optional
InitContainers []v1.Container `json:"initContainers"`
InitContainers []runtime.RawExtension `json:"initContainers"`
// Timeout defines the maximum amount of time Velero should wait for the initContainers to complete.
// +optional
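
Because InitContainers is now a list of runtime.RawExtension rather than typed Containers, callers serialize the container themselves before it is validated and unmarshaled by the handler; a minimal sketch of that round trip under the same imports used in this changeset (the builder's ResultRawExtension helper later in this diff does the equivalent).

package example

import (
	"encoding/json"

	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// exampleInitRestoreHook serializes a typed container into the RawExtension
// slice, which is what HandleRestoreHooks later validates and unmarshals back.
func exampleInitRestoreHook() (*velerov1api.InitRestoreHook, error) {
	container := corev1api.Container{
		Name:    "restore-init",
		Image:   "busybox",
		Command: []string{"/usr/bin/data-populator", "/user-data", "full"},
	}
	raw, err := json.Marshal(container)
	if err != nil {
		return nil, err
	}
	return &velerov1api.InitRestoreHook{
		InitContainers: []runtime.RawExtension{{Raw: raw}},
	}, nil
}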

View File

@ -1,5 +1,5 @@
/*
Copyright 2018 the Velero contributors.
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -20,6 +20,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=vsl
// +kubebuilder:object:generate=true
// +kubebuilder:storageversion
// VolumeSnapshotLocation is a location where Velero stores volume snapshots.
type VolumeSnapshotLocation struct {
@ -36,6 +40,8 @@ type VolumeSnapshotLocation struct {
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// +kubebuilder:rbac:groups=velero.io,resources=volumesnapshotlocations,verbs=get;list;watch;create;update;patch;delete
// VolumeSnapshotLocationList is a list of VolumeSnapshotLocations.
type VolumeSnapshotLocationList struct {

View File

@ -107,6 +107,100 @@ func (in *BackupProgress) DeepCopy() *BackupProgress {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupRepository) DeepCopyInto(out *BackupRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepository.
func (in *BackupRepository) DeepCopy() *BackupRepository {
if in == nil {
return nil
}
out := new(BackupRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BackupRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupRepositoryList) DeepCopyInto(out *BackupRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]BackupRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryList.
func (in *BackupRepositoryList) DeepCopy() *BackupRepositoryList {
if in == nil {
return nil
}
out := new(BackupRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BackupRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupRepositorySpec) DeepCopyInto(out *BackupRepositorySpec) {
*out = *in
out.MaintenanceFrequency = in.MaintenanceFrequency
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositorySpec.
func (in *BackupRepositorySpec) DeepCopy() *BackupRepositorySpec {
if in == nil {
return nil
}
out := new(BackupRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupRepositoryStatus) DeepCopyInto(out *BackupRepositoryStatus) {
*out = *in
if in.LastMaintenanceTime != nil {
in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryStatus.
func (in *BackupRepositoryStatus) DeepCopy() *BackupRepositoryStatus {
if in == nil {
return nil
}
out := new(BackupRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupResourceHook) DeepCopyInto(out *BackupResourceHook) {
*out = *in
@ -671,7 +765,7 @@ func (in *InitRestoreHook) DeepCopyInto(out *InitRestoreHook) {
*out = *in
if in.InitContainers != nil {
in, out := &in.InitContainers, &out.InitContainers
*out = make([]corev1.Container, len(*in))
*out = make([]runtime.RawExtension, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@ -966,100 +1060,6 @@ func (in *PodVolumeRestoreStatus) DeepCopy() *PodVolumeRestoreStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResticRepository) DeepCopyInto(out *ResticRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepository.
func (in *ResticRepository) DeepCopy() *ResticRepository {
if in == nil {
return nil
}
out := new(ResticRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResticRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResticRepositoryList) DeepCopyInto(out *ResticRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResticRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryList.
func (in *ResticRepositoryList) DeepCopy() *ResticRepositoryList {
if in == nil {
return nil
}
out := new(ResticRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResticRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResticRepositorySpec) DeepCopyInto(out *ResticRepositorySpec) {
*out = *in
out.MaintenanceFrequency = in.MaintenanceFrequency
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositorySpec.
func (in *ResticRepositorySpec) DeepCopy() *ResticRepositorySpec {
if in == nil {
return nil
}
out := new(ResticRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResticRepositoryStatus) DeepCopyInto(out *ResticRepositoryStatus) {
*out = *in
if in.LastMaintenanceTime != nil {
in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryStatus.
func (in *ResticRepositoryStatus) DeepCopy() *ResticRepositoryStatus {
if in == nil {
return nil
}
out := new(ResticRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Restore) DeepCopyInto(out *Restore) {
*out = *in

View File

@ -46,7 +46,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/plugin/framework"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
"github.com/vmware-tanzu/velero/pkg/podexec"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/podvolume"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/collections"
)
@ -74,7 +74,7 @@ type kubernetesBackupper struct {
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
podCommandExecutor podexec.PodCommandExecutor
resticBackupperFactory restic.BackupperFactory
resticBackupperFactory podvolume.BackupperFactory
resticTimeout time.Duration
defaultVolumesToRestic bool
clientPageSize int
@ -100,7 +100,7 @@ func NewKubernetesBackupper(
discoveryHelper discovery.Helper,
dynamicFactory client.DynamicFactory,
podCommandExecutor podexec.PodCommandExecutor,
resticBackupperFactory restic.BackupperFactory,
resticBackupperFactory podvolume.BackupperFactory,
resticTimeout time.Duration,
defaultVolumesToRestic bool,
clientPageSize int,
@ -234,7 +234,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
ctx, cancelFunc := context.WithTimeout(context.Background(), podVolumeTimeout)
defer cancelFunc()
var resticBackupper restic.Backupper
var resticBackupper podvolume.Backupper
if kb.resticBackupperFactory != nil {
resticBackupper, err = kb.resticBackupperFactory.NewBackupper(ctx, backupRequest.Backup)
if err != nil {

View File

@ -47,7 +47,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/discovery"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/podvolume"
"github.com/vmware-tanzu/velero/pkg/test"
testutil "github.com/vmware-tanzu/velero/pkg/test"
kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
@ -2595,7 +2595,7 @@ func TestBackupWithHooks(t *testing.T) {
type fakeResticBackupperFactory struct{}
func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup) (restic.Backupper, error) {
func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup) (podvolume.Backupper, error) {
return &fakeResticBackupper{}, nil
}

View File

@ -42,7 +42,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/features"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/podvolume"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/volume"
)
@ -53,7 +53,7 @@ type itemBackupper struct {
tarWriter tarWriter
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
resticBackupper restic.Backupper
resticBackupper podvolume.Backupper
resticSnapshotTracker *pvcSnapshotTracker
volumeSnapshotterGetter VolumeSnapshotterGetter
@ -61,6 +61,11 @@ type itemBackupper struct {
snapshotLocationVolumeSnapshotters map[string]velero.VolumeSnapshotter
}
const (
// veleroExcludeFromBackupLabel marks items that should be excluded by Velero during backup.
veleroExcludeFromBackupLabel = "velero.io/exclude-from-backup"
)
// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
// namespaces IncludesExcludes list.
// In addition to the error return, backupItem also returns a bool indicating whether the item
@ -78,8 +83,8 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
log = log.WithField("resource", groupResource.String())
log = log.WithField("namespace", namespace)
if metadata.GetLabels()["velero.io/exclude-from-backup"] == "true" {
log.Info("Excluding item because it has label velero.io/exclude-from-backup=true")
if metadata.GetLabels()[veleroExcludeFromBackupLabel] == "true" {
log.Infof("Excluding item because it has label %s=true", veleroExcludeFromBackupLabel)
return false, nil
}
@ -144,7 +149,7 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr
// Get the list of volumes to back up using restic from the pod's annotations. Remove from this list
// any volumes that use a PVC that we've already backed up (this would be in a read-write-many scenario,
// where it's been backed up from another pod), since we don't need >1 backup per PVC.
for _, volume := range restic.GetPodVolumesUsingRestic(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToRestic)) {
for _, volume := range podvolume.GetPodVolumesUsingRestic(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToRestic)) {
if found, pvcName := ib.resticSnapshotTracker.HasPVCForPodVolume(pod, volume); found {
log.WithFields(map[string]interface{}{
"podVolume": volume,

View File

@ -17,9 +17,11 @@ limitations under the License.
package builder
import (
"encoding/json"
"strings"
corev1api "k8s.io/api/core/v1"
apimachineryRuntime "k8s.io/apimachinery/pkg/runtime"
)
// ContainerBuilder builds Container objects
@ -89,6 +91,17 @@ func (b *ContainerBuilder) Result() *corev1api.Container {
return b.object
}
// ResultRawExtension returns the Container as runtime.RawExtension.
func (b *ContainerBuilder) ResultRawExtension() apimachineryRuntime.RawExtension {
result, err := json.Marshal(b.object)
if err != nil {
return apimachineryRuntime.RawExtension{}
}
return apimachineryRuntime.RawExtension{
Raw: result,
}
}
// Args sets the container's Args.
func (b *ContainerBuilder) Args(args ...string) *ContainerBuilder {
b.object.Args = append(b.object.Args, args...)

View File

@ -151,8 +151,8 @@ func findAssociatedBackups(client kbclient.Client, bslName, ns string) (velerov1
return backups, err
}
func findAssociatedResticRepos(client kbclient.Client, bslName, ns string) (velerov1api.ResticRepositoryList, error) {
var repos velerov1api.ResticRepositoryList
func findAssociatedResticRepos(client kbclient.Client, bslName, ns string) (velerov1api.BackupRepositoryList, error) {
var repos velerov1api.BackupRepositoryList
err := client.List(context.Background(), &repos, &kbclient.ListOptions{
Namespace: ns,
Raw: &metav1.ListOptions{LabelSelector: bslLabelKey + "=" + bslName},
@ -172,7 +172,7 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err
return errs
}
func deleteResticRepos(client kbclient.Client, repos velerov1api.ResticRepositoryList) []error {
func deleteResticRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error {
var errs []error
for _, repo := range repos.Items {
if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil {

View File

@ -41,16 +41,16 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command {
veleroClient, err := f.Client()
cmd.CheckError(err)
var repos *api.ResticRepositoryList
var repos *api.BackupRepositoryList
if len(args) > 0 {
repos = new(api.ResticRepositoryList)
repos = new(api.BackupRepositoryList)
for _, name := range args {
repo, err := veleroClient.VeleroV1().ResticRepositories(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{})
repo, err := veleroClient.VeleroV1().BackupRepositories(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{})
cmd.CheckError(err)
repos.Items = append(repos.Items, *repo)
}
} else {
repos, err = veleroClient.VeleroV1().ResticRepositories(f.Namespace()).List(context.TODO(), listOptions)
repos, err = veleroClient.VeleroV1().BackupRepositories(f.Namespace()).List(context.TODO(), listOptions)
cmd.CheckError(err)
}

View File

@ -63,6 +63,7 @@ type CreateOptions struct {
func NewCreateOptions() *CreateOptions {
return &CreateOptions{
Config: flag.NewMap(),
Labels: flag.NewMap(),
}
}

View File

@ -19,9 +19,11 @@ package plugin
import (
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/features"
"github.com/vmware-tanzu/velero/pkg/backup"
"github.com/vmware-tanzu/velero/pkg/client"
velerodiscovery "github.com/vmware-tanzu/velero/pkg/discovery"
@ -36,11 +38,10 @@ func NewCommand(f client.Factory) *cobra.Command {
Hidden: true,
Short: "INTERNAL COMMAND ONLY - not intended to be run directly by users",
Run: func(c *cobra.Command, args []string) {
pluginServer.
pluginServer = pluginServer.
RegisterBackupItemAction("velero.io/pv", newPVBackupItemAction).
RegisterBackupItemAction("velero.io/pod", newPodBackupItemAction).
RegisterBackupItemAction("velero.io/service-account", newServiceAccountBackupItemAction(f)).
RegisterBackupItemAction("velero.io/crd-remap-version", newRemapCRDVersionAction(f)).
RegisterRestoreItemAction("velero.io/job", newJobRestoreItemAction).
RegisterRestoreItemAction("velero.io/pod", newPodRestoreItemAction).
RegisterRestoreItemAction("velero.io/restic", newResticRestoreItemAction(f)).
@ -55,13 +56,15 @@ func NewCommand(f client.Factory) *cobra.Command {
RegisterRestoreItemAction("velero.io/crd-preserve-fields", newCRDV1PreserveUnknownFieldsItemAction).
RegisterRestoreItemAction("velero.io/change-pvc-node-selector", newChangePVCNodeSelectorItemAction(f)).
RegisterRestoreItemAction("velero.io/apiservice", newAPIServiceRestoreItemAction).
RegisterRestoreItemAction("velero.io/admission-webhook-configuration", newAdmissionWebhookConfigurationAction).
Serve()
RegisterRestoreItemAction("velero.io/admission-webhook-configuration", newAdmissionWebhookConfigurationAction)
if !features.IsEnabled(velerov1api.APIGroupVersionsFeatureFlag) {
// Do not register crd-remap-version BIA if the API Group feature flag is enabled, so that the v1 CRD can be backed up
pluginServer = pluginServer.RegisterBackupItemAction("velero.io/crd-remap-version", newRemapCRDVersionAction(f))
}
pluginServer.Serve()
},
}
pluginServer.BindFlags(c.Flags())
return c
}

View File

@ -80,6 +80,7 @@ import (
"github.com/vmware-tanzu/velero/internal/storage"
"github.com/vmware-tanzu/velero/internal/util/managercontroller"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
repokey "github.com/vmware-tanzu/velero/pkg/repository/keys"
)
const (
@ -522,7 +523,7 @@ func (s *server) initRestic() error {
}
// ensure the repo key secret is set up
if err := restic.EnsureCommonRepositoryKey(s.kubeClient.CoreV1(), s.namespace); err != nil {
if err := repokey.EnsureCommonRepositoryKey(s.kubeClient.CoreV1(), s.namespace); err != nil {
return err
}
@ -530,7 +531,7 @@ func (s *server) initRestic() error {
s.ctx,
s.namespace,
s.veleroClient,
s.sharedInformerFactory.Velero().V1().ResticRepositories(),
s.sharedInformerFactory.Velero().V1().BackupRepositories(),
s.veleroClient.VeleroV1(),
s.mgr.GetClient(),
s.kubeClient.CoreV1(),

View File

@ -177,15 +177,15 @@ func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) {
ColumnDefinitions: scheduleColumns,
Rows: printScheduleList(obj.(*velerov1api.ScheduleList)),
}
case *velerov1api.ResticRepository:
case *velerov1api.BackupRepository:
table = &metav1.Table{
ColumnDefinitions: resticRepoColumns,
Rows: printResticRepo(obj.(*velerov1api.ResticRepository)),
Rows: printResticRepo(obj.(*velerov1api.BackupRepository)),
}
case *velerov1api.ResticRepositoryList:
case *velerov1api.BackupRepositoryList:
table = &metav1.Table{
ColumnDefinitions: resticRepoColumns,
Rows: printResticRepoList(obj.(*velerov1api.ResticRepositoryList)),
Rows: printResticRepoList(obj.(*velerov1api.BackupRepositoryList)),
}
case *velerov1api.BackupStorageLocation:
table = &metav1.Table{

View File

@ -33,7 +33,7 @@ var (
}
)
func printResticRepoList(list *v1.ResticRepositoryList) []metav1.TableRow {
func printResticRepoList(list *v1.BackupRepositoryList) []metav1.TableRow {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
@ -42,14 +42,14 @@ func printResticRepoList(list *v1.ResticRepositoryList) []metav1.TableRow {
return rows
}
func printResticRepo(repo *v1.ResticRepository) []metav1.TableRow {
func printResticRepo(repo *v1.BackupRepository) []metav1.TableRow {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: repo},
}
status := repo.Status.Phase
if status == "" {
status = v1.ResticRepositoryPhaseNew
status = v1.BackupRepositoryPhaseNew
}
var lastMaintenance string

View File

@ -70,6 +70,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/logging"
"github.com/vmware-tanzu/velero/pkg/volume"
corev1api "k8s.io/api/core/v1"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)
@ -260,7 +261,6 @@ func (c *backupController) processBackup(key string) error {
log.Debug("Preparing backup request")
request := c.prepareBackupRequest(original)
if len(request.Status.ValidationErrors) > 0 {
request.Status.Phase = velerov1api.BackupPhaseFailedValidation
} else {
@ -444,6 +444,17 @@ func (c *backupController) prepareBackupRequest(backup *velerov1api.Backup) *pkg
request.Annotations[velerov1api.SourceClusterK8sMajorVersionAnnotation] = c.discoveryHelper.ServerVersion().Major
request.Annotations[velerov1api.SourceClusterK8sMinorVersionAnnotation] = c.discoveryHelper.ServerVersion().Minor
// Add namespaces with the label velero.io/exclude-from-backup=true to request.Spec.ExcludedNamespaces.
// Labeling a namespace this way is equivalent to listing it in spec.excludedNamespaces.
namespaces := corev1api.NamespaceList{}
if err := c.kbClient.List(context.Background(), &namespaces, kbclient.MatchingLabels{"velero.io/exclude-from-backup": "true"}); err == nil {
for _, ns := range namespaces.Items {
request.Spec.ExcludedNamespaces = append(request.Spec.ExcludedNamespaces, ns.Name)
}
} else {
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("error getting namespace list: %v", err))
}
// validate the included/excluded resources
for _, err := range collections.ValidateIncludesExcludes(request.Spec.IncludedResources, request.Spec.ExcludedResources) {
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded resource lists: %v", err))
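
With this change, any namespace carrying the label is folded into Spec.ExcludedNamespaces when the backup request is prepared. A minimal client-go sketch of applying that label, assuming an existing clientset; it is the programmatic equivalent of `kubectl label ns <name> velero.io/exclude-from-backup=true`.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// excludeNamespaceFromBackups applies the velero.io/exclude-from-backup=true
// label so prepareBackupRequest folds the namespace into Spec.ExcludedNamespaces.
func excludeNamespaceFromBackups(ctx context.Context, client kubernetes.Interface, ns string) error {
	patch := []byte(`{"metadata":{"labels":{"velero.io/exclude-from-backup":"true"}}}`)
	_, err := client.CoreV1().Namespaces().Patch(ctx, ns, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}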

View File

@ -198,7 +198,7 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, err
}
// if the request object has no labels defined, initialise an empty map since
// if the request object has no labels defined, initialize an empty map since
// we will be updating labels
if dbr.Labels == nil {
dbr.Labels = map[string]string{}

View File

@ -24,12 +24,10 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"github.com/vmware-tanzu/velero/internal/storage"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
@ -39,7 +37,10 @@ import (
)
const (
backupStorageLocationSyncPeriod = 1 * time.Minute
// Keep the enqueue period small so that each BSL is validated on schedule.
// The default BSL validation frequency is 1 minute; if the enqueue period were also 1 minute,
// the effective validation interval for each BSL could stretch to 2 minutes.
bslValidationEnqueuePeriod = 10 * time.Second
)
// BackupStorageLocationReconciler reconciles a BackupStorageLocation object
@ -185,7 +186,7 @@ func (r *BackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) err
r.Log,
mgr.GetClient(),
&velerov1api.BackupStorageLocationList{},
backupStorageLocationSyncPeriod,
bslValidationEnqueuePeriod,
// Add filter function to enqueue BSL per ValidationFrequency setting.
func(object client.Object) bool {
location := object.(*velerov1api.BackupStorageLocation)
@ -193,22 +194,8 @@ func (r *BackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) err
},
)
return ctrl.NewControllerManagedBy(mgr).
For(&velerov1api.BackupStorageLocation{}).
// Handle BSL's creation event and spec update event to let changed BSL got validation immediately.
WithEventFilter(predicate.Funcs{
CreateFunc: func(ce event.CreateEvent) bool {
return true
},
UpdateFunc: func(ue event.UpdateEvent) bool {
return ue.ObjectNew.GetGeneration() != ue.ObjectOld.GetGeneration()
},
DeleteFunc: func(de event.DeleteEvent) bool {
return false
},
GenericFunc: func(ge event.GenericEvent) bool {
return false
},
}).
// Because the "status.LastValidationTime" field is updated on every reconcile, each update event would trigger another reconcile; skip update events with no spec change to avoid that loop.
For(&velerov1api.BackupStorageLocation{}, builder.WithPredicates(kube.SpecChangePredicate{})).
Watches(g, nil).
Complete(r)
}
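
The SpecChangePredicate referenced above is not part of this hunk; a plausible sketch of its shape, under the assumption that it compares only the Spec of the old and new objects, is shown below. This is an assumed implementation for context, not the actual one.

package kube

import (
	"reflect"

	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// SpecChangePredicate (assumed shape, not the actual implementation) admits an
// update event only when the object's Spec changed, so status-only writes such
// as LastValidationTime do not requeue the BSL.
type SpecChangePredicate struct {
	predicate.Funcs
}

func (SpecChangePredicate) Update(e event.UpdateEvent) bool {
	oldSpec := reflect.ValueOf(e.ObjectOld).Elem().FieldByName("Spec")
	newSpec := reflect.ValueOf(e.ObjectNew).Elem().FieldByName("Spec")
	if !oldSpec.IsValid() || !newSpec.IsValid() {
		// No Spec field: fall back to always reconciling.
		return true
	}
	return !reflect.DeepEqual(oldSpec.Interface(), newSpec.Interface())
}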

View File

@ -36,6 +36,7 @@ import (
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/metrics"
repokey "github.com/vmware-tanzu/velero/pkg/repository/keys"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
@ -124,7 +125,11 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
if err != nil {
return r.updateStatusToFailed(ctx, &pvb, err, "building Restic command", log)
}
defer os.Remove(resticDetails.credsFile)
defer func() {
os.Remove(resticDetails.credsFile)
os.Remove(resticDetails.caCertFile)
}()
backupLocation := &velerov1api.BackupStorageLocation{}
if err := r.Client.Get(context.Background(), client.ObjectKey{
@ -204,19 +209,6 @@ func (r *PodVolumeBackupReconciler) SetupWithManager(mgr ctrl.Manager) error {
Complete(r)
}
func (r *PodVolumeBackupReconciler) singlePathMatch(path string) (string, error) {
matches, err := r.FileSystem.Glob(path)
if err != nil {
return "", errors.WithStack(err)
}
if len(matches) != 1 {
return "", errors.Errorf("expected one matching path: %s, got %d", path, len(matches))
}
return matches[0], nil
}
// getParentSnapshot finds the most recent completed PodVolumeBackup for the
// specified PVC and returns its Restic snapshot ID. Any errors encountered are
// logged but not returned since they do not prevent a backup from proceeding.
@ -237,7 +229,7 @@ func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log l
// Go through all the podvolumebackups for the PVC and look for the most
// recent completed one to use as the parent.
var mostRecentPVB *velerov1api.PodVolumeBackup
var mostRecentPVB velerov1api.PodVolumeBackup
for _, pvb := range pvbList.Items {
if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseCompleted {
continue
@ -254,12 +246,12 @@ func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log l
continue
}
if mostRecentPVB == nil || pvb.Status.StartTimestamp.After(mostRecentPVB.Status.StartTimestamp.Time) {
mostRecentPVB = &pvb
if mostRecentPVB.Status == (velerov1api.PodVolumeBackupStatus{}) || pvb.Status.StartTimestamp.After(mostRecentPVB.Status.StartTimestamp.Time) {
mostRecentPVB = pvb
}
}
if mostRecentPVB == nil {
if mostRecentPVB.Status == (velerov1api.PodVolumeBackupStatus{}) {
log.Info("No completed PodVolumeBackup found for PVC")
return ""
}
@ -313,14 +305,14 @@ func (r *PodVolumeBackupReconciler) buildResticCommand(ctx context.Context, log
pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(pvb.Spec.Pod.UID), volDir)
log.WithField("pathGlob", pathGlob).Debug("Looking for path matching glob")
path, err := r.singlePathMatch(pathGlob)
path, err := kube.SinglePathMatch(pathGlob, r.FileSystem, log)
if err != nil {
return nil, errors.Wrap(err, "identifying unique volume path on host")
}
log.WithField("path", path).Debugf("Found path matching glob")
// Temporary credentials.
details.credsFile, err = r.CredsFileStore.Path(restic.RepoKeySelector())
details.credsFile, err = r.CredsFileStore.Path(repokey.RepoKeySelector())
if err != nil {
return nil, errors.Wrap(err, "creating temporary Restic credentials file")
}
@ -344,8 +336,6 @@ func (r *PodVolumeBackupReconciler) buildResticCommand(ctx context.Context, log
if err != nil {
log.WithError(err).Error("creating temporary caCert file")
}
defer os.Remove(details.caCertFile)
}
cmd.CACertFile = details.caCertFile

View File

@ -39,6 +39,7 @@ import (
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
repokey "github.com/vmware-tanzu/velero/pkg/repository/keys"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
@ -215,19 +216,6 @@ func getResticInitContainerIndex(pod *corev1api.Pod) int {
return -1
}
func singlePathMatch(path string) (string, error) {
matches, err := filepath.Glob(path)
if err != nil {
return "", errors.WithStack(err)
}
if len(matches) != 1 {
return "", errors.Errorf("expected one matching path: %s, got %d", path, len(matches))
}
return matches[0], nil
}
func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *velerov1api.PodVolumeRestore, pod *corev1api.Pod, log logrus.FieldLogger) error {
volumeDir, err := kube.GetVolumeDirectory(ctx, log, pod, req.Spec.Volume, c.Client)
if err != nil {
@ -236,12 +224,14 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve
// Get the full path of the new volume's directory as mounted in the daemonset pod, which
// will look like: /host_pods/<new-pod-uid>/volumes/<volume-plugin-name>/<volume-dir>
volumePath, err := singlePathMatch(fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(req.Spec.Pod.UID), volumeDir))
volumePath, err := kube.SinglePathMatch(
fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(req.Spec.Pod.UID), volumeDir),
c.fileSystem, log)
if err != nil {
return errors.Wrap(err, "error identifying path of volume")
}
credsFile, err := c.credentialsFileStore.Path(restic.RepoKeySelector())
credsFile, err := c.credentialsFileStore.Path(repokey.RepoKeySelector())
if err != nil {
return errors.Wrap(err, "error creating temp restic credentials file")
}

View File

@ -26,11 +26,11 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
@ -68,16 +68,16 @@ func NewResticRepoReconciler(namespace string, logger logrus.FieldLogger, client
}
func (r *ResticRepoReconciler) SetupWithManager(mgr ctrl.Manager) error {
s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.ResticRepositoryList{}, repoSyncPeriod)
s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.BackupRepositoryList{}, repoSyncPeriod)
return ctrl.NewControllerManagedBy(mgr).
For(&velerov1api.ResticRepository{}).
For(&velerov1api.BackupRepository{}).
Watches(s, nil).
Complete(r)
}
func (r *ResticRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.logger.WithField("resticRepo", req.String())
resticRepo := &velerov1api.ResticRepository{}
resticRepo := &velerov1api.BackupRepository{}
if err := r.Get(ctx, req.NamespacedName, resticRepo); err != nil {
if apierrors.IsNotFound(err) {
log.Warnf("restic repository %s in namespace %s is not found", req.Name, req.Namespace)
@ -87,7 +87,7 @@ func (r *ResticRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, err
}
if resticRepo.Status.Phase == "" || resticRepo.Status.Phase == velerov1api.ResticRepositoryPhaseNew {
if resticRepo.Status.Phase == "" || resticRepo.Status.Phase == velerov1api.BackupRepositoryPhaseNew {
if err := r.initializeRepo(ctx, resticRepo, log); err != nil {
log.WithError(err).Error("error initializing repository")
return ctrl.Result{}, errors.WithStack(err)
@ -105,16 +105,16 @@ func (r *ResticRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
switch resticRepo.Status.Phase {
case velerov1api.ResticRepositoryPhaseReady:
case velerov1api.BackupRepositoryPhaseReady:
return ctrl.Result{}, r.runMaintenanceIfDue(ctx, resticRepo, log)
case velerov1api.ResticRepositoryPhaseNotReady:
case velerov1api.BackupRepositoryPhaseNotReady:
return ctrl.Result{}, r.checkNotReadyRepo(ctx, resticRepo, log)
}
return ctrl.Result{}, nil
}
func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1api.ResticRepository, log logrus.FieldLogger) error {
func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
log.Info("Initializing restic repository")
// confirm the repo's BackupStorageLocation is valid
@ -127,11 +127,11 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
return r.patchResticRepository(ctx, req, repoNotReady(err.Error()))
}
repoIdentifier, err := restic.GetRepoIdentifier(loc, req.Spec.VolumeNamespace)
repoIdentifier, err := repoconfig.GetRepoIdentifier(loc, req.Spec.VolumeNamespace)
if err != nil {
return r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) {
return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Status.Message = err.Error()
rr.Status.Phase = velerov1api.ResticRepositoryPhaseNotReady
rr.Status.Phase = velerov1api.BackupRepositoryPhaseNotReady
if rr.Spec.MaintenanceFrequency.Duration <= 0 {
rr.Spec.MaintenanceFrequency = metav1.Duration{Duration: r.defaultMaintenanceFrequency}
@ -140,7 +140,7 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
}
// defaulting - if the patch fails, return an error so the item is returned to the queue
if err := r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) {
if err := r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Spec.ResticIdentifier = repoIdentifier
if rr.Spec.MaintenanceFrequency.Duration <= 0 {
@ -154,8 +154,8 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
return r.patchResticRepository(ctx, req, repoNotReady(err.Error()))
}
return r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) {
rr.Status.Phase = velerov1api.ResticRepositoryPhaseReady
return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Status.Phase = velerov1api.BackupRepositoryPhaseReady
rr.Status.LastMaintenanceTime = &metav1.Time{Time: time.Now()}
})
}
@ -163,7 +163,7 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
// ensureRepo checks to see if a repository exists, and attempts to initialize it if
// it does not exist. An error is returned if the repository can't be connected to
// or initialized.
func ensureRepo(repo *velerov1api.ResticRepository, repoManager restic.RepositoryManager) error {
func ensureRepo(repo *velerov1api.BackupRepository, repoManager restic.RepositoryManager) error {
if err := repoManager.ConnectToRepo(repo); err != nil {
// If the repository has not yet been initialized, the error message will always include
// the following string. This is the only scenario where we should try to initialize it.
@ -179,7 +179,7 @@ func ensureRepo(repo *velerov1api.ResticRepository, repoManager restic.Repositor
return nil
}
func (r *ResticRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.ResticRepository, log logrus.FieldLogger) error {
func (r *ResticRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
log.Debug("resticRepositoryController.runMaintenanceIfDue")
now := r.clock.Now()
@ -196,21 +196,21 @@ func (r *ResticRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *vel
log.Debug("Pruning repo")
if err := r.repositoryManager.PruneRepo(req); err != nil {
log.WithError(err).Warn("error pruning repository")
return r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) {
return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Status.Message = err.Error()
})
}
return r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) {
return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
rr.Status.LastMaintenanceTime = &metav1.Time{Time: now}
})
}
func dueForMaintenance(req *velerov1api.ResticRepository, now time.Time) bool {
func dueForMaintenance(req *velerov1api.BackupRepository, now time.Time) bool {
return req.Status.LastMaintenanceTime == nil || req.Status.LastMaintenanceTime.Add(req.Spec.MaintenanceFrequency.Duration).Before(now)
}
func (r *ResticRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.ResticRepository, log logrus.FieldLogger) error {
func (r *ResticRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
// no identifier: can't possibly be ready, so just return
if req.Spec.ResticIdentifier == "" {
return nil
@ -226,16 +226,16 @@ func (r *ResticRepoReconciler) checkNotReadyRepo(ctx context.Context, req *veler
return r.patchResticRepository(ctx, req, repoReady())
}
func repoNotReady(msg string) func(*velerov1api.ResticRepository) {
return func(r *velerov1api.ResticRepository) {
r.Status.Phase = velerov1api.ResticRepositoryPhaseNotReady
func repoNotReady(msg string) func(*velerov1api.BackupRepository) {
return func(r *velerov1api.BackupRepository) {
r.Status.Phase = velerov1api.BackupRepositoryPhaseNotReady
r.Status.Message = msg
}
}
func repoReady() func(*velerov1api.ResticRepository) {
return func(r *velerov1api.ResticRepository) {
r.Status.Phase = velerov1api.ResticRepositoryPhaseReady
func repoReady() func(*velerov1api.BackupRepository) {
return func(r *velerov1api.BackupRepository) {
r.Status.Phase = velerov1api.BackupRepositoryPhaseReady
r.Status.Message = ""
}
}
@ -243,7 +243,7 @@ func repoReady() func(*velerov1api.ResticRepository) {
// patchResticRepository mutates req with the provided mutate function, and patches it
// through the Kube API. After executing this function, req will be updated with both
// the mutation and the results of the Patch() API call.
func (r *ResticRepoReconciler) patchResticRepository(ctx context.Context, req *velerov1api.ResticRepository, mutate func(*velerov1api.ResticRepository)) error {
func (r *ResticRepoReconciler) patchResticRepository(ctx context.Context, req *velerov1api.BackupRepository, mutate func(*velerov1api.BackupRepository)) error {
original := req.DeepCopy()
mutate(req)
if err := r.Patch(ctx, req, client.MergeFrom(original)); err != nil {
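For orientation, here is a minimal sketch of the custom resource this controller now reconciles. It only uses spec fields that appear in the hunks above (VolumeNamespace, MaintenanceFrequency, ResticIdentifier); the BackupStorageLocation field name and the example values are assumptions, since the full BackupRepositorySpec is not shown in this diff.
repo := &velerov1api.BackupRepository{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: velerov1api.DefaultNamespace,
		Name:      "ns1-default-repo", // hypothetical name
	},
	Spec: velerov1api.BackupRepositorySpec{
		VolumeNamespace:       "ns1",     // namespace whose pod volumes back up into this repository
		BackupStorageLocation: "default", // assumption: field name not visible in this hunk
		MaintenanceFrequency:  metav1.Duration{Duration: 7 * 24 * time.Hour},
	},
}
// The reconciler picks this up while Status.Phase is "" or BackupRepositoryPhaseNew,
// runs initializeRepo, and then patches the phase to Ready (or NotReady with a message).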

View File

@ -30,7 +30,7 @@ import (
const defaultMaintenanceFrequency = 10 * time.Minute
func mockResticRepoReconciler(t *testing.T, rr *velerov1api.ResticRepository, mockOn string, arg interface{}, ret interface{}) *ResticRepoReconciler {
func mockResticRepoReconciler(t *testing.T, rr *velerov1api.BackupRepository, mockOn string, arg interface{}, ret interface{}) *ResticRepoReconciler {
mgr := &resticmokes.RepositoryManager{}
if mockOn != "" {
mgr.On(mockOn, arg).Return(ret)
@ -44,13 +44,13 @@ func mockResticRepoReconciler(t *testing.T, rr *velerov1api.ResticRepository, mo
)
}
func mockResticRepositoryCR() *velerov1api.ResticRepository {
return &velerov1api.ResticRepository{
func mockResticRepositoryCR() *velerov1api.BackupRepository {
return &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1api.DefaultNamespace,
Name: "repo",
},
Spec: velerov1api.ResticRepositorySpec{
Spec: velerov1api.BackupRepositorySpec{
MaintenanceFrequency: metav1.Duration{Duration: defaultMaintenanceFrequency},
},
}
@ -64,10 +64,10 @@ func TestPatchResticRepository(t *testing.T) {
assert.NoError(t, err)
err = reconciler.patchResticRepository(context.Background(), rr, repoReady())
assert.NoError(t, err)
assert.Equal(t, rr.Status.Phase, velerov1api.ResticRepositoryPhaseReady)
assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady)
err = reconciler.patchResticRepository(context.Background(), rr, repoNotReady("not ready"))
assert.NoError(t, err)
assert.NotEqual(t, rr.Status.Phase, velerov1api.ResticRepositoryPhaseReady)
assert.NotEqual(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady)
}
func TestCheckNotReadyRepo(t *testing.T) {
@ -77,11 +77,11 @@ func TestCheckNotReadyRepo(t *testing.T) {
assert.NoError(t, err)
err = reconciler.checkNotReadyRepo(context.TODO(), rr, reconciler.logger)
assert.NoError(t, err)
assert.Equal(t, rr.Status.Phase, velerov1api.ResticRepositoryPhase(""))
assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhase(""))
rr.Spec.ResticIdentifier = "s3:test.amazonaws.com/bucket/restic"
err = reconciler.checkNotReadyRepo(context.TODO(), rr, reconciler.logger)
assert.NoError(t, err)
assert.Equal(t, rr.Status.Phase, velerov1api.ResticRepositoryPhaseReady)
assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady)
}
func TestRunMaintenanceIfDue(t *testing.T) {
@ -121,23 +121,23 @@ func TestInitializeRepo(t *testing.T) {
assert.NoError(t, err)
err = reconciler.initializeRepo(context.TODO(), rr, reconciler.logger)
assert.NoError(t, err)
assert.Equal(t, rr.Status.Phase, velerov1api.ResticRepositoryPhaseReady)
assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady)
}
func TestResticRepoReconcile(t *testing.T) {
tests := []struct {
name string
repo *velerov1api.ResticRepository
repo *velerov1api.BackupRepository
expectNil bool
}{
{
name: "test on api server not found",
repo: &velerov1api.ResticRepository{
repo: &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1api.DefaultNamespace,
Name: "unknown",
},
Spec: velerov1api.ResticRepositorySpec{
Spec: velerov1api.BackupRepositorySpec{
MaintenanceFrequency: metav1.Duration{Duration: defaultMaintenanceFrequency},
},
},
@ -145,12 +145,12 @@ func TestResticRepoReconcile(t *testing.T) {
},
{
name: "test on initialize repo",
repo: &velerov1api.ResticRepository{
repo: &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1api.DefaultNamespace,
Name: "repo",
},
Spec: velerov1api.ResticRepositorySpec{
Spec: velerov1api.BackupRepositorySpec{
MaintenanceFrequency: metav1.Duration{Duration: defaultMaintenanceFrequency},
},
},
@ -158,16 +158,16 @@ func TestResticRepoReconcile(t *testing.T) {
},
{
name: "test on repo with new phase",
repo: &velerov1api.ResticRepository{
repo: &velerov1api.BackupRepository{
ObjectMeta: metav1.ObjectMeta{
Namespace: velerov1api.DefaultNamespace,
Name: "repo",
},
Spec: velerov1api.ResticRepositorySpec{
Spec: velerov1api.BackupRepositorySpec{
MaintenanceFrequency: metav1.Duration{Duration: defaultMaintenanceFrequency},
},
Status: velerov1api.ResticRepositoryStatus{
Phase: velerov1api.ResticRepositoryPhaseNew,
Status: velerov1api.BackupRepositoryStatus{
Phase: velerov1api.BackupRepositoryPhaseNew,
},
},
expectNil: true,

View File

@ -38,6 +38,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
hook "github.com/vmware-tanzu/velero/internal/hook"
api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
@ -71,10 +72,14 @@ var nonRestorableResources = []string{
// https://github.com/vmware-tanzu/velero/issues/622
"restores.velero.io",
// TODO: Remove this in v1.11 or v1.12
// Restic repositories are managed automatically by Velero and will be
// created as needed if they don't exist.
// https://github.com/vmware-tanzu/velero/issues/1113
"resticrepositories.velero.io",
// Backup repositories were renamed from Restic repositories
"backuprepositories.velero.io",
}
type restoreController struct {
@ -324,6 +329,22 @@ func (c *restoreController) validateAndComplete(restore *api.Restore, pluginMana
return backupInfo{}
}
// validate Restore Init Hook's InitContainers
restoreHooks, err := hook.GetRestoreHooksFromSpec(&restore.Spec.Hooks)
if err != nil {
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, err.Error())
}
for _, resource := range restoreHooks {
for _, h := range resource.RestoreHooks {
for _, container := range h.Init.InitContainers {
err = hook.ValidateContainer(container.Raw)
if err != nil {
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, err.Error())
}
}
}
}
// if ScheduleName is specified, fill in BackupName with the most recent successful backup from
// the schedule
if restore.Spec.ScheduleName != "" {

View File

@ -30,46 +30,46 @@ import (
rest "k8s.io/client-go/rest"
)
// ResticRepositoriesGetter has a method to return a ResticRepositoryInterface.
// BackupRepositoriesGetter has a method to return a BackupRepositoryInterface.
// A group's client should implement this interface.
type ResticRepositoriesGetter interface {
ResticRepositories(namespace string) ResticRepositoryInterface
type BackupRepositoriesGetter interface {
BackupRepositories(namespace string) BackupRepositoryInterface
}
// ResticRepositoryInterface has methods to work with ResticRepository resources.
type ResticRepositoryInterface interface {
Create(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.CreateOptions) (*v1.ResticRepository, error)
Update(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.UpdateOptions) (*v1.ResticRepository, error)
UpdateStatus(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.UpdateOptions) (*v1.ResticRepository, error)
// BackupRepositoryInterface has methods to work with BackupRepository resources.
type BackupRepositoryInterface interface {
Create(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.CreateOptions) (*v1.BackupRepository, error)
Update(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (*v1.BackupRepository, error)
UpdateStatus(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (*v1.BackupRepository, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ResticRepository, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.ResticRepositoryList, error)
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.BackupRepository, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.BackupRepositoryList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResticRepository, err error)
ResticRepositoryExpansion
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BackupRepository, err error)
BackupRepositoryExpansion
}
// resticRepositories implements ResticRepositoryInterface
type resticRepositories struct {
// backupRepositories implements BackupRepositoryInterface
type backupRepositories struct {
client rest.Interface
ns string
}
// newResticRepositories returns a ResticRepositories
func newResticRepositories(c *VeleroV1Client, namespace string) *resticRepositories {
return &resticRepositories{
// newBackupRepositories returns a BackupRepositories
func newBackupRepositories(c *VeleroV1Client, namespace string) *backupRepositories {
return &backupRepositories{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the resticRepository, and returns the corresponding resticRepository object, and an error if there is any.
func (c *resticRepositories) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResticRepository, err error) {
result = &v1.ResticRepository{}
// Get takes name of the backupRepository, and returns the corresponding backupRepository object, and an error if there is any.
func (c *backupRepositories) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.BackupRepository, err error) {
result = &v1.BackupRepository{}
err = c.client.Get().
Namespace(c.ns).
Resource("resticrepositories").
Resource("backuprepositories").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
@ -77,16 +77,16 @@ func (c *resticRepositories) Get(ctx context.Context, name string, options metav
return
}
// List takes label and field selectors, and returns the list of ResticRepositories that match those selectors.
func (c *resticRepositories) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResticRepositoryList, err error) {
// List takes label and field selectors, and returns the list of BackupRepositories that match those selectors.
func (c *backupRepositories) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BackupRepositoryList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.ResticRepositoryList{}
result = &v1.BackupRepositoryList{}
err = c.client.Get().
Namespace(c.ns).
Resource("resticrepositories").
Resource("backuprepositories").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
@ -94,8 +94,8 @@ func (c *resticRepositories) List(ctx context.Context, opts metav1.ListOptions)
return
}
// Watch returns a watch.Interface that watches the requested resticRepositories.
func (c *resticRepositories) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
// Watch returns a watch.Interface that watches the requested backupRepositories.
func (c *backupRepositories) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@ -103,34 +103,34 @@ func (c *resticRepositories) Watch(ctx context.Context, opts metav1.ListOptions)
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("resticrepositories").
Resource("backuprepositories").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a resticRepository and creates it. Returns the server's representation of the resticRepository, and an error, if there is any.
func (c *resticRepositories) Create(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.CreateOptions) (result *v1.ResticRepository, err error) {
result = &v1.ResticRepository{}
// Create takes the representation of a backupRepository and creates it. Returns the server's representation of the backupRepository, and an error, if there is any.
func (c *backupRepositories) Create(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.CreateOptions) (result *v1.BackupRepository, err error) {
result = &v1.BackupRepository{}
err = c.client.Post().
Namespace(c.ns).
Resource("resticrepositories").
Resource("backuprepositories").
VersionedParams(&opts, scheme.ParameterCodec).
Body(resticRepository).
Body(backupRepository).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a resticRepository and updates it. Returns the server's representation of the resticRepository, and an error, if there is any.
func (c *resticRepositories) Update(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.UpdateOptions) (result *v1.ResticRepository, err error) {
result = &v1.ResticRepository{}
// Update takes the representation of a backupRepository and updates it. Returns the server's representation of the backupRepository, and an error, if there is any.
func (c *backupRepositories) Update(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (result *v1.BackupRepository, err error) {
result = &v1.BackupRepository{}
err = c.client.Put().
Namespace(c.ns).
Resource("resticrepositories").
Name(resticRepository.Name).
Resource("backuprepositories").
Name(backupRepository.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(resticRepository).
Body(backupRepository).
Do(ctx).
Into(result)
return
@ -138,25 +138,25 @@ func (c *resticRepositories) Update(ctx context.Context, resticRepository *v1.Re
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *resticRepositories) UpdateStatus(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.UpdateOptions) (result *v1.ResticRepository, err error) {
result = &v1.ResticRepository{}
func (c *backupRepositories) UpdateStatus(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (result *v1.BackupRepository, err error) {
result = &v1.BackupRepository{}
err = c.client.Put().
Namespace(c.ns).
Resource("resticrepositories").
Name(resticRepository.Name).
Resource("backuprepositories").
Name(backupRepository.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(resticRepository).
Body(backupRepository).
Do(ctx).
Into(result)
return
}
// Delete takes name of the resticRepository and deletes it. Returns an error if one occurs.
func (c *resticRepositories) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
// Delete takes name of the backupRepository and deletes it. Returns an error if one occurs.
func (c *backupRepositories) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("resticrepositories").
Resource("backuprepositories").
Name(name).
Body(&opts).
Do(ctx).
@ -164,14 +164,14 @@ func (c *resticRepositories) Delete(ctx context.Context, name string, opts metav
}
// DeleteCollection deletes a collection of objects.
func (c *resticRepositories) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
func (c *backupRepositories) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("resticrepositories").
Resource("backuprepositories").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
@ -179,12 +179,12 @@ func (c *resticRepositories) DeleteCollection(ctx context.Context, opts metav1.D
Error()
}
// Patch applies the patch and returns the patched resticRepository.
func (c *resticRepositories) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResticRepository, err error) {
result = &v1.ResticRepository{}
// Patch applies the patch and returns the patched backupRepository.
func (c *backupRepositories) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BackupRepository, err error) {
result = &v1.BackupRepository{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("resticrepositories").
Resource("backuprepositories").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).

View File

@ -0,0 +1,142 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeBackupRepositories implements BackupRepositoryInterface
type FakeBackupRepositories struct {
Fake *FakeVeleroV1
ns string
}
var backuprepositoriesResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "backuprepositories"}
var backuprepositoriesKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "BackupRepository"}
// Get takes name of the backupRepository, and returns the corresponding backupRepository object, and an error if there is any.
func (c *FakeBackupRepositories) Get(ctx context.Context, name string, options v1.GetOptions) (result *velerov1.BackupRepository, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(backuprepositoriesResource, c.ns, name), &velerov1.BackupRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.BackupRepository), err
}
// List takes label and field selectors, and returns the list of BackupRepositories that match those selectors.
func (c *FakeBackupRepositories) List(ctx context.Context, opts v1.ListOptions) (result *velerov1.BackupRepositoryList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(backuprepositoriesResource, backuprepositoriesKind, c.ns, opts), &velerov1.BackupRepositoryList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &velerov1.BackupRepositoryList{ListMeta: obj.(*velerov1.BackupRepositoryList).ListMeta}
for _, item := range obj.(*velerov1.BackupRepositoryList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested backupRepositories.
func (c *FakeBackupRepositories) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(backuprepositoriesResource, c.ns, opts))
}
// Create takes the representation of a backupRepository and creates it. Returns the server's representation of the backupRepository, and an error, if there is any.
func (c *FakeBackupRepositories) Create(ctx context.Context, backupRepository *velerov1.BackupRepository, opts v1.CreateOptions) (result *velerov1.BackupRepository, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(backuprepositoriesResource, c.ns, backupRepository), &velerov1.BackupRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.BackupRepository), err
}
// Update takes the representation of a backupRepository and updates it. Returns the server's representation of the backupRepository, and an error, if there is any.
func (c *FakeBackupRepositories) Update(ctx context.Context, backupRepository *velerov1.BackupRepository, opts v1.UpdateOptions) (result *velerov1.BackupRepository, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(backuprepositoriesResource, c.ns, backupRepository), &velerov1.BackupRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.BackupRepository), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeBackupRepositories) UpdateStatus(ctx context.Context, backupRepository *velerov1.BackupRepository, opts v1.UpdateOptions) (*velerov1.BackupRepository, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(backuprepositoriesResource, "status", c.ns, backupRepository), &velerov1.BackupRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.BackupRepository), err
}
// Delete takes name of the backupRepository and deletes it. Returns an error if one occurs.
func (c *FakeBackupRepositories) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(backuprepositoriesResource, c.ns, name), &velerov1.BackupRepository{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeBackupRepositories) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(backuprepositoriesResource, c.ns, listOpts)
_, err := c.Fake.Invokes(action, &velerov1.BackupRepositoryList{})
return err
}
// Patch applies the patch and returns the patched backupRepository.
func (c *FakeBackupRepositories) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *velerov1.BackupRepository, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(backuprepositoriesResource, c.ns, name, pt, data, subresources...), &velerov1.BackupRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.BackupRepository), err
}
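The generated fake client above is what unit tests use in place of a real API server. A minimal test sketch, assuming the fake clientset is created with NewSimpleClientset from the standard client-gen package at pkg/generated/clientset/versioned/fake (only the BackupRepositories methods shown in this diff are exercised):
import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake"
)

func TestBackupRepositoryFakeClient(t *testing.T) {
	client := fake.NewSimpleClientset() // assumption: standard client-gen fake constructor
	repo := &velerov1.BackupRepository{
		ObjectMeta: metav1.ObjectMeta{Namespace: "velero", Name: "repo"},
	}
	if _, err := client.VeleroV1().BackupRepositories("velero").Create(context.TODO(), repo, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
	got, err := client.VeleroV1().BackupRepositories("velero").Get(context.TODO(), "repo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "repo" {
		t.Fatalf("unexpected name %q", got.Name)
	}
}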

View File

@ -1,142 +0,0 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeResticRepositories implements ResticRepositoryInterface
type FakeResticRepositories struct {
Fake *FakeVeleroV1
ns string
}
var resticrepositoriesResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "resticrepositories"}
var resticrepositoriesKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "ResticRepository"}
// Get takes name of the resticRepository, and returns the corresponding resticRepository object, and an error if there is any.
func (c *FakeResticRepositories) Get(ctx context.Context, name string, options v1.GetOptions) (result *velerov1.ResticRepository, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(resticrepositoriesResource, c.ns, name), &velerov1.ResticRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.ResticRepository), err
}
// List takes label and field selectors, and returns the list of ResticRepositories that match those selectors.
func (c *FakeResticRepositories) List(ctx context.Context, opts v1.ListOptions) (result *velerov1.ResticRepositoryList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(resticrepositoriesResource, resticrepositoriesKind, c.ns, opts), &velerov1.ResticRepositoryList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &velerov1.ResticRepositoryList{ListMeta: obj.(*velerov1.ResticRepositoryList).ListMeta}
for _, item := range obj.(*velerov1.ResticRepositoryList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested resticRepositories.
func (c *FakeResticRepositories) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(resticrepositoriesResource, c.ns, opts))
}
// Create takes the representation of a resticRepository and creates it. Returns the server's representation of the resticRepository, and an error, if there is any.
func (c *FakeResticRepositories) Create(ctx context.Context, resticRepository *velerov1.ResticRepository, opts v1.CreateOptions) (result *velerov1.ResticRepository, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(resticrepositoriesResource, c.ns, resticRepository), &velerov1.ResticRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.ResticRepository), err
}
// Update takes the representation of a resticRepository and updates it. Returns the server's representation of the resticRepository, and an error, if there is any.
func (c *FakeResticRepositories) Update(ctx context.Context, resticRepository *velerov1.ResticRepository, opts v1.UpdateOptions) (result *velerov1.ResticRepository, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(resticrepositoriesResource, c.ns, resticRepository), &velerov1.ResticRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.ResticRepository), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeResticRepositories) UpdateStatus(ctx context.Context, resticRepository *velerov1.ResticRepository, opts v1.UpdateOptions) (*velerov1.ResticRepository, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(resticrepositoriesResource, "status", c.ns, resticRepository), &velerov1.ResticRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.ResticRepository), err
}
// Delete takes name of the resticRepository and deletes it. Returns an error if one occurs.
func (c *FakeResticRepositories) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(resticrepositoriesResource, c.ns, name), &velerov1.ResticRepository{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeResticRepositories) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(resticrepositoriesResource, c.ns, listOpts)
_, err := c.Fake.Invokes(action, &velerov1.ResticRepositoryList{})
return err
}
// Patch applies the patch and returns the patched resticRepository.
func (c *FakeResticRepositories) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *velerov1.ResticRepository, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(resticrepositoriesResource, c.ns, name, pt, data, subresources...), &velerov1.ResticRepository{})
if obj == nil {
return nil, err
}
return obj.(*velerov1.ResticRepository), err
}

View File

@ -32,6 +32,10 @@ func (c *FakeVeleroV1) Backups(namespace string) v1.BackupInterface {
return &FakeBackups{c, namespace}
}
func (c *FakeVeleroV1) BackupRepositories(namespace string) v1.BackupRepositoryInterface {
return &FakeBackupRepositories{c, namespace}
}
func (c *FakeVeleroV1) BackupStorageLocations(namespace string) v1.BackupStorageLocationInterface {
return &FakeBackupStorageLocations{c, namespace}
}
@ -52,10 +56,6 @@ func (c *FakeVeleroV1) PodVolumeRestores(namespace string) v1.PodVolumeRestoreIn
return &FakePodVolumeRestores{c, namespace}
}
func (c *FakeVeleroV1) ResticRepositories(namespace string) v1.ResticRepositoryInterface {
return &FakeResticRepositories{c, namespace}
}
func (c *FakeVeleroV1) Restores(namespace string) v1.RestoreInterface {
return &FakeRestores{c, namespace}
}

View File

@ -20,6 +20,8 @@ package v1
type BackupExpansion interface{}
type BackupRepositoryExpansion interface{}
type BackupStorageLocationExpansion interface{}
type DeleteBackupRequestExpansion interface{}
@ -30,8 +32,6 @@ type PodVolumeBackupExpansion interface{}
type PodVolumeRestoreExpansion interface{}
type ResticRepositoryExpansion interface{}
type RestoreExpansion interface{}
type ScheduleExpansion interface{}

View File

@ -27,12 +27,12 @@ import (
type VeleroV1Interface interface {
RESTClient() rest.Interface
BackupsGetter
BackupRepositoriesGetter
BackupStorageLocationsGetter
DeleteBackupRequestsGetter
DownloadRequestsGetter
PodVolumeBackupsGetter
PodVolumeRestoresGetter
ResticRepositoriesGetter
RestoresGetter
SchedulesGetter
ServerStatusRequestsGetter
@ -48,6 +48,10 @@ func (c *VeleroV1Client) Backups(namespace string) BackupInterface {
return newBackups(c, namespace)
}
func (c *VeleroV1Client) BackupRepositories(namespace string) BackupRepositoryInterface {
return newBackupRepositories(c, namespace)
}
func (c *VeleroV1Client) BackupStorageLocations(namespace string) BackupStorageLocationInterface {
return newBackupStorageLocations(c, namespace)
}
@ -68,10 +72,6 @@ func (c *VeleroV1Client) PodVolumeRestores(namespace string) PodVolumeRestoreInt
return newPodVolumeRestores(c, namespace)
}
func (c *VeleroV1Client) ResticRepositories(namespace string) ResticRepositoryInterface {
return newResticRepositories(c, namespace)
}
func (c *VeleroV1Client) Restores(namespace string) RestoreInterface {
return newRestores(c, namespace)
}

View File

@ -55,6 +55,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
// Group=velero.io, Version=v1
case v1.SchemeGroupVersion.WithResource("backups"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().Backups().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("backuprepositories"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().BackupRepositories().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("backupstoragelocations"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().BackupStorageLocations().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("deletebackuprequests"):
@ -65,8 +67,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().PodVolumeBackups().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("podvolumerestores"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().PodVolumeRestores().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("resticrepositories"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().ResticRepositories().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("restores"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().Restores().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("schedules"):

View File

@ -32,59 +32,59 @@ import (
cache "k8s.io/client-go/tools/cache"
)
// ResticRepositoryInformer provides access to a shared informer and lister for
// ResticRepositories.
type ResticRepositoryInformer interface {
// BackupRepositoryInformer provides access to a shared informer and lister for
// BackupRepositories.
type BackupRepositoryInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.ResticRepositoryLister
Lister() v1.BackupRepositoryLister
}
type resticRepositoryInformer struct {
type backupRepositoryInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewResticRepositoryInformer constructs a new informer for ResticRepository type.
// NewBackupRepositoryInformer constructs a new informer for BackupRepository type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewResticRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredResticRepositoryInformer(client, namespace, resyncPeriod, indexers, nil)
func NewBackupRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredBackupRepositoryInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredResticRepositoryInformer constructs a new informer for ResticRepository type.
// NewFilteredBackupRepositoryInformer constructs a new informer for BackupRepository type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResticRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
func NewFilteredBackupRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.VeleroV1().ResticRepositories(namespace).List(context.TODO(), options)
return client.VeleroV1().BackupRepositories(namespace).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.VeleroV1().ResticRepositories(namespace).Watch(context.TODO(), options)
return client.VeleroV1().BackupRepositories(namespace).Watch(context.TODO(), options)
},
},
&velerov1.ResticRepository{},
&velerov1.BackupRepository{},
resyncPeriod,
indexers,
)
}
func (f *resticRepositoryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredResticRepositoryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
func (f *backupRepositoryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredBackupRepositoryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *resticRepositoryInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&velerov1.ResticRepository{}, f.defaultInformer)
func (f *backupRepositoryInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&velerov1.BackupRepository{}, f.defaultInformer)
}
func (f *resticRepositoryInformer) Lister() v1.ResticRepositoryLister {
return v1.NewResticRepositoryLister(f.Informer().GetIndexer())
func (f *backupRepositoryInformer) Lister() v1.BackupRepositoryLister {
return v1.NewBackupRepositoryLister(f.Informer().GetIndexer())
}

View File

@ -26,6 +26,8 @@ import (
type Interface interface {
// Backups returns a BackupInformer.
Backups() BackupInformer
// BackupRepositories returns a BackupRepositoryInformer.
BackupRepositories() BackupRepositoryInformer
// BackupStorageLocations returns a BackupStorageLocationInformer.
BackupStorageLocations() BackupStorageLocationInformer
// DeleteBackupRequests returns a DeleteBackupRequestInformer.
@ -36,8 +38,6 @@ type Interface interface {
PodVolumeBackups() PodVolumeBackupInformer
// PodVolumeRestores returns a PodVolumeRestoreInformer.
PodVolumeRestores() PodVolumeRestoreInformer
// ResticRepositories returns a ResticRepositoryInformer.
ResticRepositories() ResticRepositoryInformer
// Restores returns a RestoreInformer.
Restores() RestoreInformer
// Schedules returns a ScheduleInformer.
@ -64,6 +64,11 @@ func (v *version) Backups() BackupInformer {
return &backupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// BackupRepositories returns a BackupRepositoryInformer.
func (v *version) BackupRepositories() BackupRepositoryInformer {
return &backupRepositoryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// BackupStorageLocations returns a BackupStorageLocationInformer.
func (v *version) BackupStorageLocations() BackupStorageLocationInformer {
return &backupStorageLocationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
@ -89,11 +94,6 @@ func (v *version) PodVolumeRestores() PodVolumeRestoreInformer {
return &podVolumeRestoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// ResticRepositories returns a ResticRepositoryInformer.
func (v *version) ResticRepositories() ResticRepositoryInformer {
return &resticRepositoryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// Restores returns a RestoreInformer.
func (v *version) Restores() RestoreInformer {
return &restoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}

View File

@ -0,0 +1,99 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// BackupRepositoryLister helps list BackupRepositories.
// All objects returned here must be treated as read-only.
type BackupRepositoryLister interface {
// List lists all BackupRepositories in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1.BackupRepository, err error)
// BackupRepositories returns an object that can list and get BackupRepositories.
BackupRepositories(namespace string) BackupRepositoryNamespaceLister
BackupRepositoryListerExpansion
}
// backupRepositoryLister implements the BackupRepositoryLister interface.
type backupRepositoryLister struct {
indexer cache.Indexer
}
// NewBackupRepositoryLister returns a new BackupRepositoryLister.
func NewBackupRepositoryLister(indexer cache.Indexer) BackupRepositoryLister {
return &backupRepositoryLister{indexer: indexer}
}
// List lists all BackupRepositories in the indexer.
func (s *backupRepositoryLister) List(selector labels.Selector) (ret []*v1.BackupRepository, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1.BackupRepository))
})
return ret, err
}
// BackupRepositories returns an object that can list and get BackupRepositories.
func (s *backupRepositoryLister) BackupRepositories(namespace string) BackupRepositoryNamespaceLister {
return backupRepositoryNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// BackupRepositoryNamespaceLister helps list and get BackupRepositories.
// All objects returned here must be treated as read-only.
type BackupRepositoryNamespaceLister interface {
// List lists all BackupRepositories in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1.BackupRepository, err error)
// Get retrieves the BackupRepository from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1.BackupRepository, error)
BackupRepositoryNamespaceListerExpansion
}
// backupRepositoryNamespaceLister implements the BackupRepositoryNamespaceLister
// interface.
type backupRepositoryNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all BackupRepositories in the indexer for a given namespace.
func (s backupRepositoryNamespaceLister) List(selector labels.Selector) (ret []*v1.BackupRepository, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1.BackupRepository))
})
return ret, err
}
// Get retrieves the BackupRepository from the indexer for a given namespace and name.
func (s backupRepositoryNamespaceLister) Get(name string) (*v1.BackupRepository, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1.Resource("backuprepository"), name)
}
return obj.(*v1.BackupRepository), nil
}
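As a quick orientation, the lister can be exercised directly against a cache.Indexer. A hedged sketch, written as if from within the listers package (the metav1 and velero v1 API imports are assumed; the standard client-go key and index functions are used):
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
_ = indexer.Add(&v1.BackupRepository{ObjectMeta: metav1.ObjectMeta{Namespace: "velero", Name: "repo"}})
lister := NewBackupRepositoryLister(indexer)
repo, err := lister.BackupRepositories("velero").Get("repo") // returns the read-only cached object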

View File

@ -26,6 +26,14 @@ type BackupListerExpansion interface{}
// BackupNamespaceLister.
type BackupNamespaceListerExpansion interface{}
// BackupRepositoryListerExpansion allows custom methods to be added to
// BackupRepositoryLister.
type BackupRepositoryListerExpansion interface{}
// BackupRepositoryNamespaceListerExpansion allows custom methods to be added to
// BackupRepositoryNamespaceLister.
type BackupRepositoryNamespaceListerExpansion interface{}
// BackupStorageLocationListerExpansion allows custom methods to be added to
// BackupStorageLocationLister.
type BackupStorageLocationListerExpansion interface{}
@ -66,14 +74,6 @@ type PodVolumeRestoreListerExpansion interface{}
// PodVolumeRestoreNamespaceLister.
type PodVolumeRestoreNamespaceListerExpansion interface{}
// ResticRepositoryListerExpansion allows custom methods to be added to
// ResticRepositoryLister.
type ResticRepositoryListerExpansion interface{}
// ResticRepositoryNamespaceListerExpansion allows custom methods to be added to
// ResticRepositoryNamespaceLister.
type ResticRepositoryNamespaceListerExpansion interface{}
// RestoreListerExpansion allows custom methods to be added to
// RestoreLister.
type RestoreListerExpansion interface{}

View File

@ -1,99 +0,0 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// ResticRepositoryLister helps list ResticRepositories.
// All objects returned here must be treated as read-only.
type ResticRepositoryLister interface {
// List lists all ResticRepositories in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1.ResticRepository, err error)
// ResticRepositories returns an object that can list and get ResticRepositories.
ResticRepositories(namespace string) ResticRepositoryNamespaceLister
ResticRepositoryListerExpansion
}
// resticRepositoryLister implements the ResticRepositoryLister interface.
type resticRepositoryLister struct {
indexer cache.Indexer
}
// NewResticRepositoryLister returns a new ResticRepositoryLister.
func NewResticRepositoryLister(indexer cache.Indexer) ResticRepositoryLister {
return &resticRepositoryLister{indexer: indexer}
}
// List lists all ResticRepositories in the indexer.
func (s *resticRepositoryLister) List(selector labels.Selector) (ret []*v1.ResticRepository, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1.ResticRepository))
})
return ret, err
}
// ResticRepositories returns an object that can list and get ResticRepositories.
func (s *resticRepositoryLister) ResticRepositories(namespace string) ResticRepositoryNamespaceLister {
return resticRepositoryNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// ResticRepositoryNamespaceLister helps list and get ResticRepositories.
// All objects returned here must be treated as read-only.
type ResticRepositoryNamespaceLister interface {
// List lists all ResticRepositories in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1.ResticRepository, err error)
// Get retrieves the ResticRepository from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1.ResticRepository, error)
ResticRepositoryNamespaceListerExpansion
}
// resticRepositoryNamespaceLister implements the ResticRepositoryNamespaceLister
// interface.
type resticRepositoryNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all ResticRepositories in the indexer for a given namespace.
func (s resticRepositoryNamespaceLister) List(selector labels.Selector) (ret []*v1.ResticRepository, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1.ResticRepository))
})
return ret, err
}
// Get retrieves the ResticRepository from the indexer for a given namespace and name.
func (s resticRepositoryNamespaceLister) Get(name string) (*v1.ResticRepository, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1.Resource("resticrepository"), name)
}
return obj.(*v1.ResticRepository), nil
}

View File

@ -131,19 +131,25 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio
return nil, errors.Errorf("backup storage location's bucket name %q must not contain a '/' (if using a prefix, put it in the 'Prefix' field instead)", location.Spec.ObjectStorage.Bucket)
}
// Pass a new map into the object store rather than modifying the passed-in
// location. This prevents Velero controllers from accidentally modifying
// the in-cluster BSL with data which doesn't belong in Spec.Config
objectStoreConfig := make(map[string]string)
if location.Spec.Config != nil {
for key, val := range location.Spec.Config {
objectStoreConfig[key] = val
}
}
// add the bucket name and prefix to the config map so that object stores
// can use them when initializing. The AWS object store uses the bucket
// name to determine the bucket's region when setting up its client.
if location.Spec.Config == nil {
location.Spec.Config = make(map[string]string)
}
location.Spec.Config["bucket"] = bucket
location.Spec.Config["prefix"] = prefix
objectStoreConfig["bucket"] = bucket
objectStoreConfig["prefix"] = prefix
// Only include a CACert if it's specified in order to maintain compatibility with plugins that don't expect it.
if location.Spec.ObjectStorage.CACert != nil {
location.Spec.Config["caCert"] = string(location.Spec.ObjectStorage.CACert)
objectStoreConfig["caCert"] = string(location.Spec.ObjectStorage.CACert)
}
// If the BSL specifies a credential, fetch its path on disk and pass to
@ -154,7 +160,7 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio
return nil, errors.Wrap(err, "unable to get credentials")
}
location.Spec.Config["credentialsFile"] = credsFile
objectStoreConfig["credentialsFile"] = credsFile
}
objectStore, err := objectStoreGetter.GetObjectStore(location.Spec.Provider)
@ -162,7 +168,7 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio
return nil, err
}
if err := objectStore.Init(location.Spec.Config); err != nil {
if err := objectStore.Init(objectStoreConfig); err != nil {
return nil, err
}
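The point of the hunk above is that runtime-only keys (bucket, prefix, caCert, credentialsFile) go into a copied map instead of the cached BackupStorageLocation's Spec.Config. A self-contained sketch of that defensive-copy pattern (illustrative only, not the Velero implementation):
package main

import "fmt"

// copyConfig returns a fresh map so callers can add runtime-only keys without
// mutating the shared source map (standing in here for location.Spec.Config).
func copyConfig(src map[string]string) map[string]string {
	dst := make(map[string]string, len(src))
	for k, v := range src {
		dst[k] = v
	}
	return dst
}

func main() {
	bslConfig := map[string]string{"region": "us-east-1"}
	objectStoreConfig := copyConfig(bslConfig)
	objectStoreConfig["bucket"] = "my-bucket" // stays out of the BSL spec
	objectStoreConfig["prefix"] = "backups"
	fmt.Println(bslConfig) // map[region:us-east-1] — the original is untouched
}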

View File

@ -17,6 +17,8 @@ limitations under the License.
package framework
import (
"runtime/debug"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
)
@ -38,7 +40,8 @@ func handlePanic(p interface{}) error {
if _, ok := panicErr.(stackTracer); ok {
err = panicErr
} else {
err = errors.Wrap(panicErr, "plugin panicked")
errWithStacktrace := errors.Errorf("%v, stack trace: %s", panicErr, debug.Stack())
err = errors.Wrap(errWithStacktrace, "plugin panicked")
}
}
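For context, the wrapped error above is produced inside the plugin framework's panic recovery path. A hedged, self-contained sketch of the same defer/recover wiring; the function names here are hypothetical, only the errors.Errorf/errors.Wrap/debug.Stack pattern mirrors the hunk:
package main

import (
	"fmt"
	"runtime/debug"

	"github.com/pkg/errors"
)

// wrapPanic mirrors the handlePanic change above: attach a stack trace captured
// at recovery time to the recovered value before wrapping it.
func wrapPanic(p interface{}) error {
	if p == nil {
		return nil
	}
	errWithStack := errors.Errorf("%v, stack trace: %s", p, debug.Stack())
	return errors.Wrap(errWithStack, "plugin panicked")
}

// doRiskyWork shows the usual defer/recover wiring (hypothetical caller).
func doRiskyWork() (err error) {
	defer func() {
		if p := recover(); p != nil {
			err = wrapPanic(p)
		}
	}()
	panic("boom")
}

func main() {
	fmt.Println(doRiskyWork())
}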

View File

@ -25,7 +25,6 @@ import (
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
veleroflag "github.com/vmware-tanzu/velero/pkg/cmd/util/flag"
"github.com/vmware-tanzu/velero/pkg/util/logging"
)
@ -78,6 +77,7 @@ type Server interface {
// RegisterItemSnapshotters registers multiple Item Snapshotters
RegisterItemSnapshotters(map[string]HandlerInitializer) Server
// Serve runs the plugin server.
Serve()
}
@ -87,7 +87,6 @@ type server struct {
log *logrus.Logger
logLevelFlag *logging.LevelFlag
flagSet *pflag.FlagSet
featureSet *veleroflag.StringArray
backupItemAction *BackupItemActionPlugin
volumeSnapshotter *VolumeSnapshotterPlugin
objectStore *ObjectStorePlugin
@ -99,12 +98,10 @@ type server struct {
// NewServer returns a new Server
func NewServer() Server {
log := newLogger()
features := veleroflag.NewStringArray()
return &server{
log: log,
logLevelFlag: logging.LogLevelFlag(log.Level),
featureSet: &features,
backupItemAction: NewBackupItemActionPlugin(serverLogger(log)),
volumeSnapshotter: NewVolumeSnapshotterPlugin(serverLogger(log)),
objectStore: NewObjectStorePlugin(serverLogger(log)),
@ -116,7 +113,6 @@ func NewServer() Server {
func (s *server) BindFlags(flags *pflag.FlagSet) Server {
flags.Var(s.logLevelFlag, "log-level", fmt.Sprintf("The level at which to log. Valid values are %s.", strings.Join(s.logLevelFlag.AllowedValues(), ", ")))
flags.Var(s.featureSet, "features", "List of feature flags for this plugin")
s.flagSet = flags
s.flagSet.ParseErrorsWhitelist.UnknownFlags = true

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
package podvolume
import (
"context"
@ -30,7 +30,9 @@ import (
"k8s.io/client-go/tools/cache"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned"
"github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/repository"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
)
@ -41,11 +43,12 @@ type Backupper interface {
}
type backupper struct {
ctx context.Context
repoManager *repositoryManager
repoEnsurer *repositoryEnsurer
pvcClient corev1client.PersistentVolumeClaimsGetter
pvClient corev1client.PersistentVolumesGetter
ctx context.Context
repoLocker *repository.RepoLocker
repoEnsurer *repository.RepositoryEnsurer
veleroClient clientset.Interface
pvcClient corev1client.PersistentVolumeClaimsGetter
pvClient corev1client.PersistentVolumesGetter
results map[string]chan *velerov1api.PodVolumeBackup
resultsLock sync.Mutex
@ -53,19 +56,21 @@ type backupper struct {
func newBackupper(
ctx context.Context,
repoManager *repositoryManager,
repoEnsurer *repositoryEnsurer,
repoLocker *repository.RepoLocker,
repoEnsurer *repository.RepositoryEnsurer,
podVolumeBackupInformer cache.SharedIndexInformer,
veleroClient clientset.Interface,
pvcClient corev1client.PersistentVolumeClaimsGetter,
pvClient corev1client.PersistentVolumesGetter,
log logrus.FieldLogger,
) *backupper {
b := &backupper{
ctx: ctx,
repoManager: repoManager,
repoEnsurer: repoEnsurer,
pvcClient: pvcClient,
pvClient: pvClient,
ctx: ctx,
repoLocker: repoLocker,
repoEnsurer: repoEnsurer,
veleroClient: veleroClient,
pvcClient: pvcClient,
pvClient: pvClient,
results: make(map[string]chan *velerov1api.PodVolumeBackup),
}
@ -109,8 +114,8 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
// get a single non-exclusive lock since we'll wait for all individual
// backups to be complete before releasing it.
b.repoManager.repoLocker.Lock(repo.Name)
defer b.repoManager.repoLocker.Unlock(repo.Name)
b.repoLocker.Lock(repo.Name)
defer b.repoLocker.Unlock(repo.Name)
resultsChan := make(chan *velerov1api.PodVolumeBackup)
@ -177,8 +182,9 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.
continue
}
volumeBackup := newPodVolumeBackup(backup, pod, volume, repo.Spec.ResticIdentifier, pvc)
if volumeBackup, err = b.repoManager.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(context.TODO(), volumeBackup, metav1.CreateOptions{}); err != nil {
// TODO: Remove the hard-coded uploader type before v1.10 FC
volumeBackup := newPodVolumeBackup(backup, pod, volume, repo.Spec.ResticIdentifier, "restic", pvc)
if volumeBackup, err = b.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(context.TODO(), volumeBackup, metav1.CreateOptions{}); err != nil {
errs = append(errs, err)
continue
}
@ -236,7 +242,7 @@ func isHostPathVolume(volume *corev1api.Volume, pvc *corev1api.PersistentVolumeC
return pv.Spec.HostPath != nil, nil
}
func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume corev1api.Volume, repoIdentifier string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeBackup {
func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume corev1api.Volume, repoIdentifier, uploaderType string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeBackup {
pvb := &velerov1api.PodVolumeBackup{
ObjectMeta: metav1.ObjectMeta{
Namespace: backup.Namespace,
@ -274,6 +280,7 @@ func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume c
},
BackupStorageLocation: backup.Spec.StorageLocation,
RepoIdentifier: repoIdentifier,
UploaderType: uploaderType,
},
}

View File

@ -0,0 +1,88 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podvolume
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned"
velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1"
"github.com/vmware-tanzu/velero/pkg/repository"
)
// BackupperFactory can construct pod volumes backuppers.
type BackupperFactory interface {
// NewBackupper returns a pod volumes backupper for use during a single Velero backup.
NewBackupper(context.Context, *velerov1api.Backup) (Backupper, error)
}
func NewBackupperFactory(repoLocker *repository.RepoLocker,
repoEnsurer *repository.RepositoryEnsurer,
veleroClient clientset.Interface,
pvcClient corev1client.PersistentVolumeClaimsGetter,
pvClient corev1client.PersistentVolumesGetter,
repoInformerSynced cache.InformerSynced,
log logrus.FieldLogger) BackupperFactory {
return &backupperFactory{
repoLocker: repoLocker,
repoEnsurer: repoEnsurer,
veleroClient: veleroClient,
pvcClient: pvcClient,
pvClient: pvClient,
repoInformerSynced: repoInformerSynced,
log: log,
}
}
type backupperFactory struct {
repoLocker *repository.RepoLocker
repoEnsurer *repository.RepositoryEnsurer
veleroClient clientset.Interface
pvcClient corev1client.PersistentVolumeClaimsGetter
pvClient corev1client.PersistentVolumesGetter
repoInformerSynced cache.InformerSynced
log logrus.FieldLogger
}
func (bf *backupperFactory) NewBackupper(ctx context.Context, backup *velerov1api.Backup) (Backupper, error) {
informer := velerov1informers.NewFilteredPodVolumeBackupInformer(
bf.veleroClient,
backup.Namespace,
0,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
func(opts *metav1.ListOptions) {
opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.BackupUIDLabel, backup.UID)
},
)
b := newBackupper(ctx, bf.repoLocker, bf.repoEnsurer, informer, bf.veleroClient, bf.pvcClient, bf.pvClient, bf.log)
go informer.Run(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced, bf.repoInformerSynced) {
return nil, errors.New("timed out waiting for caches to sync")
}
return b, nil
}
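// Illustrative usage (a sketch, not part of this file; the surrounding wiring
// is an assumption): a controller builds the factory once and creates a
// backupper per backup, e.g.:
//
//	factory := NewBackupperFactory(repoLocker, repoEnsurer, veleroClient,
//		pvcClient, pvClient, repoInformerSynced, log)
//	backupper, err := factory.NewBackupper(ctx, backup)
//	if err != nil {
//		return err
//	}
//	// backupper.BackupPodVolumes(...) is then called for each pod in the backup.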

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
package podvolume
import (
"context"

View File

@ -2,8 +2,10 @@
package mocks
import mock "github.com/stretchr/testify/mock"
import restic "github.com/vmware-tanzu/velero/pkg/restic"
import (
mock "github.com/stretchr/testify/mock"
"github.com/vmware-tanzu/velero/pkg/podvolume"
)
// Restorer is an autogenerated mock type for the Restorer type
type Restorer struct {
@ -11,11 +13,11 @@ type Restorer struct {
}
// RestorePodVolumes provides a mock function with given fields: _a0
func (_m *Restorer) RestorePodVolumes(_a0 restic.RestoreData) []error {
func (_m *Restorer) RestorePodVolumes(_a0 podvolume.RestoreData) []error {
ret := _m.Called(_a0)
var r0 []error
if rf, ok := ret.Get(0).(func(restic.RestoreData) []error); ok {
if rf, ok := ret.Get(0).(func(podvolume.RestoreData) []error); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
package podvolume
import (
"context"
@ -28,7 +28,9 @@ import (
"k8s.io/client-go/tools/cache"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned"
"github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/repository"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
)
@ -46,10 +48,11 @@ type Restorer interface {
}
type restorer struct {
ctx context.Context
repoManager *repositoryManager
repoEnsurer *repositoryEnsurer
pvcClient corev1client.PersistentVolumeClaimsGetter
ctx context.Context
repoLocker *repository.RepoLocker
repoEnsurer *repository.RepositoryEnsurer
veleroClient clientset.Interface
pvcClient corev1client.PersistentVolumeClaimsGetter
resultsLock sync.Mutex
results map[string]chan *velerov1api.PodVolumeRestore
@ -57,17 +60,19 @@ type restorer struct {
func newRestorer(
ctx context.Context,
rm *repositoryManager,
repoEnsurer *repositoryEnsurer,
repoLocker *repository.RepoLocker,
repoEnsurer *repository.RepositoryEnsurer,
podVolumeRestoreInformer cache.SharedIndexInformer,
veleroClient clientset.Interface,
pvcClient corev1client.PersistentVolumeClaimsGetter,
log logrus.FieldLogger,
) *restorer {
r := &restorer{
ctx: ctx,
repoManager: rm,
repoEnsurer: repoEnsurer,
pvcClient: pvcClient,
ctx: ctx,
repoLocker: repoLocker,
repoEnsurer: repoEnsurer,
veleroClient: veleroClient,
pvcClient: pvcClient,
results: make(map[string]chan *velerov1api.PodVolumeRestore),
}
@ -108,8 +113,8 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error {
// get a single non-exclusive lock since we'll wait for all individual
// restores to be complete before releasing it.
r.repoManager.repoLocker.Lock(repo.Name)
defer r.repoManager.repoLocker.Unlock(repo.Name)
r.repoLocker.Lock(repo.Name)
defer r.repoLocker.Unlock(repo.Name)
resultsChan := make(chan *velerov1api.PodVolumeRestore)
@ -139,10 +144,10 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error {
}
}
}
// TODO: Remove the hard-coded uploader type before v1.10 FC
volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, snapshot, repo.Spec.ResticIdentifier, "restic", pvc)
volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, snapshot, repo.Spec.ResticIdentifier, pvc)
if err := errorOnly(r.repoManager.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(context.TODO(), volumeRestore, metav1.CreateOptions{})); err != nil {
if err := errorOnly(r.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(context.TODO(), volumeRestore, metav1.CreateOptions{})); err != nil {
errs = append(errs, errors.WithStack(err))
continue
}
@ -169,7 +174,7 @@ ForEachVolume:
return errs
}
func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeRestore {
func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier, uploaderType string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeRestore {
pvr := &velerov1api.PodVolumeRestore{
ObjectMeta: metav1.ObjectMeta{
Namespace: restore.Namespace,

View File

@ -0,0 +1,85 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podvolume
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned"
velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1"
"github.com/vmware-tanzu/velero/pkg/repository"
)
// RestorerFactory can construct pod volumes restorers.
type RestorerFactory interface {
// NewRestorer returns a pod volumes restorer for use during a single Velero restore.
NewRestorer(context.Context, *velerov1api.Restore) (Restorer, error)
}
func NewRestorerFactory(repoLocker *repository.RepoLocker,
repoEnsurer *repository.RepositoryEnsurer,
veleroClient clientset.Interface,
pvcClient corev1client.PersistentVolumeClaimsGetter,
repoInformerSynced cache.InformerSynced,
log logrus.FieldLogger) RestorerFactory {
return &restorerFactory{
repoLocker: repoLocker,
repoEnsurer: repoEnsurer,
veleroClient: veleroClient,
pvcClient: pvcClient,
repoInformerSynced: repoInformerSynced,
log: log,
}
}
type restorerFactory struct {
repoLocker *repository.RepoLocker
repoEnsurer *repository.RepositoryEnsurer
veleroClient clientset.Interface
pvcClient corev1client.PersistentVolumeClaimsGetter
repoInformerSynced cache.InformerSynced
log logrus.FieldLogger
}
func (rf *restorerFactory) NewRestorer(ctx context.Context, restore *velerov1api.Restore) (Restorer, error) {
informer := velerov1informers.NewFilteredPodVolumeRestoreInformer(
rf.veleroClient,
restore.Namespace,
0,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
func(opts *metav1.ListOptions) {
opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.RestoreUIDLabel, restore.UID)
},
)
r := newRestorer(ctx, rf.repoLocker, rf.repoEnsurer, informer, rf.veleroClient, rf.pvcClient, rf.log)
go informer.Run(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced, rf.repoInformerSynced) {
return nil, errors.New("timed out waiting for cache to sync")
}
return r, nil
}
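// Illustrative usage (a sketch, not part of this file; the surrounding wiring
// is an assumption): the restore controller builds the factory once and
// creates a restorer per restore, e.g.:
//
//	factory := NewRestorerFactory(repoLocker, repoEnsurer, veleroClient,
//		pvcClient, repoInformerSynced, log)
//	restorer, err := factory.NewRestorer(ctx, restore)
//	if err != nil {
//		return err
//	}
//	// restorer.RestorePodVolumes(...) is then called with the RestoreData for each pod.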

198 pkg/podvolume/util.go Normal file

View File

@ -0,0 +1,198 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podvolume
import (
"strings"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
const (
// PVCNameAnnotation is the key for the annotation added to
// pod volume backups when they're for a PVC.
PVCNameAnnotation = "velero.io/pvc-name"
// Deprecated.
//
// TODO(2.0): remove
podAnnotationPrefix = "snapshot.velero.io/"
// VolumesToBackupAnnotation is the annotation on a pod whose mounted volumes
// need to be backed up using restic.
VolumesToBackupAnnotation = "backup.velero.io/backup-volumes"
// VolumesToExcludeAnnotation is the annotation on a pod whose mounted volumes
// should be excluded from restic backup.
VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes"
)
// GetVolumeBackupsForPod returns a map of volume name -> snapshot ID
// for the PodVolumeBackups that exist for the provided pod.
func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod *corev1api.Pod, sourcePodNs string) map[string]string {
volumes := make(map[string]string)
for _, pvb := range podVolumeBackups {
if !isPVBMatchPod(pvb, pod.GetName(), sourcePodNs) {
continue
}
// skip PVBs without a snapshot ID since there's nothing
// to restore (they could be failed, or for empty volumes).
if pvb.Status.SnapshotID == "" {
continue
}
// If the volume came from a projected or DownwardAPI source, skip its restore.
// This allows backups affected by https://github.com/vmware-tanzu/velero/issues/3863
// or https://github.com/vmware-tanzu/velero/issues/4053 to be restored successfully.
if volumeHasNonRestorableSource(pvb.Spec.Volume, pod.Spec.Volumes) {
continue
}
volumes[pvb.Spec.Volume] = pvb.Status.SnapshotID
}
if len(volumes) > 0 {
return volumes
}
return getPodSnapshotAnnotations(pod)
}
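// Illustrative usage (assumed caller, not part of this file): during a restore
// the snapshot ID for each volume of the source pod can be looked up like this:
//
//	snapshots := GetVolumeBackupsForPod(podVolumeBackups, pod, sourcePodNamespace)
//	for volumeName, snapshotID := range snapshots {
//		// create a PodVolumeRestore for volumeName using snapshotID
//	}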
func isPVBMatchPod(pvb *velerov1api.PodVolumeBackup, podName string, namespace string) bool {
return podName == pvb.Spec.Pod.Name && namespace == pvb.Spec.Pod.Namespace
}
// volumeHasNonRestorableSource checks if the given volume exists in the list of podVolumes
// and returns true if the volume's source is not restorable. This is true for volumes with
// a Projected or DownwardAPI source.
func volumeHasNonRestorableSource(volumeName string, podVolumes []corev1api.Volume) bool {
var volume corev1api.Volume
for _, v := range podVolumes {
if v.Name == volumeName {
volume = v
break
}
}
return volume.Projected != nil || volume.DownwardAPI != nil
}
// getPodSnapshotAnnotations returns a map of volume name -> snapshot ID
// for all snapshots of this pod.
// TODO(2.0): remove
// Deprecated: we will stop using pod annotations to record restic snapshot IDs
// after they're taken, so we won't need to check these annotations any more.
func getPodSnapshotAnnotations(obj metav1.Object) map[string]string {
var res map[string]string
insertSafe := func(k, v string) {
if res == nil {
res = make(map[string]string)
}
res[k] = v
}
for k, v := range obj.GetAnnotations() {
if strings.HasPrefix(k, podAnnotationPrefix) {
insertSafe(k[len(podAnnotationPrefix):], v)
}
}
return res
}
// GetVolumesToBackup returns a list of volume names to back up for
// the provided pod.
// Deprecated: Use GetPodVolumesUsingRestic instead.
func GetVolumesToBackup(obj metav1.Object) []string {
annotations := obj.GetAnnotations()
if annotations == nil {
return nil
}
backupsValue := annotations[VolumesToBackupAnnotation]
if backupsValue == "" {
return nil
}
return strings.Split(backupsValue, ",")
}
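// Illustrative example (not part of this file): a pod annotated with
// "backup.velero.io/backup-volumes: data,logs" yields []string{"data", "logs"}.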
func getVolumesToExclude(obj metav1.Object) []string {
annotations := obj.GetAnnotations()
if annotations == nil {
return nil
}
return strings.Split(annotations[VolumesToExcludeAnnotation], ",")
}
func contains(list []string, k string) bool {
for _, i := range list {
if i == k {
return true
}
}
return false
}
// GetPodVolumesUsingRestic returns a list of volume names to back up for the provided pod.
func GetPodVolumesUsingRestic(pod *corev1api.Pod, defaultVolumesToRestic bool) []string {
if !defaultVolumesToRestic {
return GetVolumesToBackup(pod)
}
volsToExclude := getVolumesToExclude(pod)
podVolumes := []string{}
for _, pv := range pod.Spec.Volumes {
// cannot back up hostPath volumes as they are not mounted into /var/lib/kubelet/pods
// and are therefore not accessible to the restic daemon set.
if pv.HostPath != nil {
continue
}
// don't back up volumes mounting secrets. Secrets will be backed up separately.
if pv.Secret != nil {
continue
}
// don't back up volumes mounting config maps. Config maps will be backed up separately.
if pv.ConfigMap != nil {
continue
}
// don't back up volumes mounted as projected volumes; all data in those comes from kube state.
if pv.Projected != nil {
continue
}
// don't back up DownwardAPI volumes; all data in those comes from kube state.
if pv.DownwardAPI != nil {
continue
}
// don't back up volumes that are included in the exclude list.
if contains(volsToExclude, pv.Name) {
continue
}
// don't include volumes that mount the default service account token.
if strings.HasPrefix(pv.Name, "default-token") {
continue
}
podVolumes = append(podVolumes, pv.Name)
}
return podVolumes
}
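// Illustrative example (not part of this file): with defaultVolumesToRestic
// enabled and a pod annotated with
// "backup.velero.io/backup-volumes-excludes: cache", the call
//
//	volumes := GetPodVolumesUsingRestic(pod, true)
//
// returns every pod volume except host paths, secrets, config maps,
// projected/DownwardAPI sources, default-token mounts, and "cache".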

563 pkg/podvolume/util_test.go Normal file
View File

@ -0,0 +1,563 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podvolume
import (
"sort"
"testing"
"github.com/stretchr/testify/assert"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/builder"
)
func TestGetVolumeBackupsForPod(t *testing.T) {
tests := []struct {
name string
podVolumeBackups []*velerov1api.PodVolumeBackup
podVolumes []corev1api.Volume
podAnnotations map[string]string
podName string
sourcePodNs string
expected map[string]string
}{
{
name: "nil annotations results in no volume backups returned",
podAnnotations: nil,
expected: nil,
},
{
name: "empty annotations results in no volume backups returned",
podAnnotations: make(map[string]string),
expected: nil,
},
{
name: "pod annotations with no snapshot annotation prefix results in no volume backups returned",
podAnnotations: map[string]string{"foo": "bar"},
expected: nil,
},
{
name: "pod annotation with only snapshot annotation prefix, results in volume backup with empty volume key",
podAnnotations: map[string]string{podAnnotationPrefix: "snapshotID"},
expected: map[string]string{"": "snapshotID"},
},
{
name: "pod annotation with snapshot annotation prefix results in volume backup with volume name and snapshot ID",
podAnnotations: map[string]string{podAnnotationPrefix + "volume": "snapshotID"},
expected: map[string]string{"volume": "snapshotID"},
},
{
name: "only pod annotations with snapshot annotation prefix are considered",
podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "volume1": "snapshot1", podAnnotationPrefix + "volume2": "snapshot2"},
expected: map[string]string{"volume1": "snapshot1", "volume2": "snapshot2"},
},
{
name: "pod annotations are not considered if PVBs are provided",
podVolumeBackups: []*velerov1api.PodVolumeBackup{
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
},
podName: "TestPod",
sourcePodNs: "TestNS",
podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"},
expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"},
},
{
name: "volume backups are returned even if no pod annotations are present",
podVolumeBackups: []*velerov1api.PodVolumeBackup{
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
},
podName: "TestPod",
sourcePodNs: "TestNS",
expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"},
},
{
name: "only volumes from PVBs with snapshot IDs are returned",
podVolumeBackups: []*velerov1api.PodVolumeBackup{
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestPod").PodNamespace("TestNS").Volume("pvbtest3-foo").Result(),
builder.ForPodVolumeBackup("velero", "pvb-4").PodName("TestPod").PodNamespace("TestNS").Volume("pvbtest4-abc").Result(),
},
podName: "TestPod",
sourcePodNs: "TestNS",
expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"},
},
{
name: "only volumes from PVBs for the given pod are returned",
podVolumeBackups: []*velerov1api.PodVolumeBackup{
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestAnotherPod").SnapshotID("snapshot3").Volume("pvbtest3-xyz").Result(),
},
podName: "TestPod",
sourcePodNs: "TestNS",
expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"},
},
{
name: "only volumes from PVBs which match the pod name and source pod namespace are returned",
podVolumeBackups: []*velerov1api.PodVolumeBackup{
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestAnotherPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestPod").PodNamespace("TestAnotherNS").SnapshotID("snapshot3").Volume("pvbtest3-xyz").Result(),
},
podName: "TestPod",
sourcePodNs: "TestNS",
expected: map[string]string{"pvbtest1-foo": "snapshot1"},
},
{
name: "volumes from PVBs that correspond to a pod volume from a projected source are not returned",
podVolumeBackups: []*velerov1api.PodVolumeBackup{
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvb-non-projected").Result(),
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvb-projected").Result(),
},
podVolumes: []corev1api.Volume{
{
Name: "pvb-non-projected",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "pvb-projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{},
},
},
},
podName: "TestPod",
sourcePodNs: "TestNS",
expected: map[string]string{"pvb-non-projected": "snapshot1"},
},
{
name: "volumes from PVBs that correspond to a pod volume from a DownwardAPI source are not returned",
podVolumeBackups: []*velerov1api.PodVolumeBackup{
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvb-non-downwardapi").Result(),
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvb-downwardapi").Result(),
},
podVolumes: []corev1api.Volume{
{
Name: "pvb-non-downwardapi",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "pvb-downwardapi",
VolumeSource: corev1api.VolumeSource{
DownwardAPI: &corev1api.DownwardAPIVolumeSource{},
},
},
},
podName: "TestPod",
sourcePodNs: "TestNS",
expected: map[string]string{"pvb-non-downwardapi": "snapshot1"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
pod := &corev1api.Pod{}
pod.Annotations = test.podAnnotations
pod.Name = test.podName
pod.Spec.Volumes = test.podVolumes
res := GetVolumeBackupsForPod(test.podVolumeBackups, pod, test.sourcePodNs)
assert.Equal(t, test.expected, res)
})
}
}
func TestVolumeHasNonRestorableSource(t *testing.T) {
testCases := []struct {
name string
volumeName string
podVolumes []corev1api.Volume
expected bool
}{
{
name: "volume name not in list of volumes",
volumeName: "missing-volume",
podVolumes: []corev1api.Volume{
{
Name: "restorable",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{},
},
},
{
Name: "downwardapi",
VolumeSource: corev1api.VolumeSource{
DownwardAPI: &corev1api.DownwardAPIVolumeSource{},
},
},
},
expected: false,
},
{
name: "volume name in list of volumes but not projected or DownwardAPI",
volumeName: "restorable",
podVolumes: []corev1api.Volume{
{
Name: "restorable",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{},
},
},
{
Name: "downwardapi",
VolumeSource: corev1api.VolumeSource{
DownwardAPI: &corev1api.DownwardAPIVolumeSource{},
},
},
},
expected: false,
},
{
name: "volume name in list of volumes and projected",
volumeName: "projected",
podVolumes: []corev1api.Volume{
{
Name: "restorable",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{},
},
},
{
Name: "downwardapi",
VolumeSource: corev1api.VolumeSource{
DownwardAPI: &corev1api.DownwardAPIVolumeSource{},
},
},
},
expected: true,
},
{
name: "volume name in list of volumes and is a DownwardAPI volume",
volumeName: "downwardapi",
podVolumes: []corev1api.Volume{
{
Name: "restorable",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{},
},
},
{
Name: "downwardapi",
VolumeSource: corev1api.VolumeSource{
DownwardAPI: &corev1api.DownwardAPIVolumeSource{},
},
},
},
expected: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
actual := volumeHasNonRestorableSource(tc.volumeName, tc.podVolumes)
assert.Equal(t, tc.expected, actual)
})
}
}
func TestGetVolumesToBackup(t *testing.T) {
tests := []struct {
name string
annotations map[string]string
expected []string
}{
{
name: "nil annotations",
annotations: nil,
expected: nil,
},
{
name: "no volumes to backup",
annotations: map[string]string{"foo": "bar"},
expected: nil,
},
{
name: "one volume to backup",
annotations: map[string]string{"foo": "bar", VolumesToBackupAnnotation: "volume-1"},
expected: []string{"volume-1"},
},
{
name: "multiple volumes to backup",
annotations: map[string]string{"foo": "bar", VolumesToBackupAnnotation: "volume-1,volume-2,volume-3"},
expected: []string{"volume-1", "volume-2", "volume-3"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
pod := &corev1api.Pod{}
pod.Annotations = test.annotations
res := GetVolumesToBackup(pod)
// sort to ensure a stable comparison of the slices
sort.Strings(test.expected)
sort.Strings(res)
assert.Equal(t, test.expected, res)
})
}
}
func TestGetPodVolumesUsingRestic(t *testing.T) {
testCases := []struct {
name string
pod *corev1api.Pod
expected []string
defaultVolumesToRestic bool
}{
{
name: "should get PVs from VolumesToBackupAnnotation when defaultVolumesToRestic is false",
defaultVolumesToRestic: false,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToBackupAnnotation: "resticPV1,resticPV2,resticPV3",
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should get all pod volumes when defaultVolumesToRestic is true and no PVs are excluded",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should get all pod volumes except ones excluded when defaultVolumesToRestic is true",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
/// Excluded from restic through annotation
{Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should exclude default service account token from restic backup",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
// Excluded from restic because the volume mounts the default service account token
{Name: "default-token-5xq45"},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should exclude host path volumes from restic backups",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
/// Excluded from restic through annotation
{Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"},
// Excluded from restic because hostpath
{Name: "hostPath1", VolumeSource: corev1api.VolumeSource{HostPath: &corev1api.HostPathVolumeSource{Path: "/hostpathVol"}}},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should exclude volumes mounting secrets",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
/// Excluded from restic through annotation
{Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"},
// Excluded from restic because it mounts a Secret
{Name: "superSecret", VolumeSource: corev1api.VolumeSource{Secret: &corev1api.SecretVolumeSource{SecretName: "super-secret"}}},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should exclude volumes mounting config maps",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
// Restic Volumes
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
/// Excluded from restic through annotation
{Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"},
// Excluded from restic because it mounts a ConfigMap
{Name: "appConfig", VolumeSource: corev1api.VolumeSource{ConfigMap: &corev1api.ConfigMapVolumeSource{LocalObjectReference: corev1api.LocalObjectReference{Name: "app-config"}}}},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should exclude projected volumes",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{
Sources: []corev1api.VolumeProjection{{
Secret: &corev1api.SecretProjection{
LocalObjectReference: corev1api.LocalObjectReference{},
Items: nil,
Optional: nil,
},
DownwardAPI: nil,
ConfigMap: nil,
ServiceAccountToken: nil,
}},
DefaultMode: nil,
},
},
},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should exclude DownwardAPI volumes",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
{
Name: "downwardAPI",
VolumeSource: corev1api.VolumeSource{
DownwardAPI: &corev1api.DownwardAPIVolumeSource{
Items: []corev1api.DownwardAPIVolumeFile{
{
Path: "labels",
FieldRef: &corev1api.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.labels",
},
},
},
},
},
},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
actual := GetPodVolumesUsingRestic(tc.pod, tc.defaultVolumesToRestic)
sort.Strings(tc.expected)
sort.Strings(actual)
assert.Equal(t, tc.expected, actual)
})
}
}

View File

@ -0,0 +1,99 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"context"
"os"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/pkg/errors"
)
const (
// AWS specific environment variable
awsProfileEnvVar = "AWS_PROFILE"
awsProfileKey = "profile"
awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
)
// GetS3ResticEnvVars gets the environment variables that restic
// relies on (AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE) based on
// info in the provided object storage location config map.
func GetS3ResticEnvVars(config map[string]string) (map[string]string, error) {
result := make(map[string]string)
if credentialsFile, ok := config[CredentialsFileKey]; ok {
result[awsCredentialsFileEnvVar] = credentialsFile
}
if profile, ok := config[awsProfileKey]; ok {
result[awsProfileEnvVar] = profile
}
return result, nil
}
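// Illustrative example (a sketch, not part of this file; the file path is
// hypothetical): for a BSL config carrying a credentials file and a profile,
// the returned map is handed to the uploader process as environment variables:
//
//	envVars, _ := GetS3ResticEnvVars(map[string]string{
//		"credentialsFile": "/credentials/cloud",
//		"profile":         "velero",
//	})
//	// envVars["AWS_SHARED_CREDENTIALS_FILE"] == "/credentials/cloud"
//	// envVars["AWS_PROFILE"] == "velero"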
// GetS3Credentials gets the S3 credential values from the provided config, falling
// back to the system's environment variables for the credentials file location.
func GetS3Credentials(config map[string]string) (credentials.Value, error) {
credentialsFile := config[CredentialsFileKey]
if credentialsFile == "" {
credentialsFile = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
}
if credentialsFile == "" {
return credentials.Value{}, errors.New("missing credential file")
}
creds := credentials.NewSharedCredentials(credentialsFile, "")
credValue, err := creds.Get()
if err != nil {
return credValue, err
}
return credValue, nil
}
// GetAWSBucketRegion returns the AWS region that a bucket is in, or an error
// if the region cannot be determined.
func GetAWSBucketRegion(bucket string) (string, error) {
var region string
sess, err := session.NewSession()
if err != nil {
return "", errors.WithStack(err)
}
for _, partition := range endpoints.DefaultPartitions() {
for regionHint := range partition.Regions() {
region, _ = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint)
// we only need to try a single region hint per partition, so break after the first
break
}
if region != "" {
return region, nil
}
}
return "", errors.New("unable to determine bucket's region")
}
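// Illustrative usage (not part of this file; the bucket name, error handling,
// and the config map are assumptions): callers typically resolve the region
// from the bucket name when the BSL config doesn't specify one:
//
//	region, err := GetAWSBucketRegion("my-velero-bucket")
//	if err != nil {
//		return errors.Wrap(err, "unable to determine bucket region")
//	}
//	config["region"] = region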

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
package config
import (
"testing"
@ -55,7 +55,7 @@ func TestGetS3ResticEnvVars(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
actual, err := getS3ResticEnvVars(tc.config)
actual, err := GetS3ResticEnvVars(tc.config)
require.NoError(t, err)

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
package config
import (
"context"
@ -37,6 +37,7 @@ const (
storageAccountConfigKey = "storageAccount"
storageAccountKeyEnvVarConfigKey = "storageAccountKeyEnvVar"
subscriptionIDConfigKey = "subscriptionId"
storageDomainConfigKey = "storageDomain"
)
// getSubscriptionID gets the subscription ID from the 'config' map if it contains
@ -131,10 +132,10 @@ func mapLookup(data map[string]string) func(string) string {
}
}
// getAzureResticEnvVars gets the environment variables that restic
// GetAzureResticEnvVars gets the environment variables that restic
// relies on (AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY) based
// on info in the provided object storage location config map.
func getAzureResticEnvVars(config map[string]string) (map[string]string, error) {
func GetAzureResticEnvVars(config map[string]string) (map[string]string, error) {
storageAccountKey, _, err := getStorageAccountKey(config)
if err != nil {
return nil, err
@ -158,7 +159,7 @@ func credentialsFileFromEnv() string {
// selectCredentialsFile selects the Azure credentials file to use, retrieving it
// from the given config or falling back to retrieving it from the environment.
func selectCredentialsFile(config map[string]string) string {
if credentialsFile, ok := config[credentialsFileKey]; ok {
if credentialsFile, ok := config[CredentialsFileKey]; ok {
return credentialsFile
}
@ -208,3 +209,22 @@ func getRequiredValues(getValue func(string) string, keys ...string) (map[string
return results, nil
}
// GetAzureStorageDomain gets the Azure storage domain required by an Azure blob connection.
// If the provided config doesn't have the value, it is read from the system's environment variables.
func GetAzureStorageDomain(config map[string]string) string {
if domain, exist := config[storageDomainConfigKey]; exist {
return domain
} else {
return os.Getenv(cloudNameEnvVar)
}
}
func GetAzureCredentials(config map[string]string) (string, string, error) {
storageAccountKey, _, err := getStorageAccountKey(config)
if err != nil {
return "", "", err
}
return config[storageAccountConfigKey], storageAccountKey, nil
}
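// Illustrative usage (a sketch, not part of this file; bslConfig is an assumed
// BSL config map): the unified repository provider can resolve the storage
// domain and the account credentials from the same map, e.g.:
//
//	domain := GetAzureStorageDomain(bslConfig)          // falls back to the cloud-name env var
//	account, key, err := GetAzureCredentials(bslConfig) // storage account name and key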

Some files were not shown because too many files have changed in this diff.