Merge branch 'main' into update_golang_version_new

Signed-off-by: Xun Jiang/Bruce Jiang <59276555+blackpiglet@users.noreply.github.com>
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
pull/6039/head
Xun Jiang/Bruce Jiang 2023-04-04 12:02:57 +08:00 committed by Xun Jiang
commit 42ec72146d
10 changed files with 149 additions and 8 deletions

View File

@@ -0,0 +1 @@
+Fixed backup deletion bug related to async operations

View File

@@ -0,0 +1 @@
+Restore Services before Clusters

View File

@@ -33,8 +33,7 @@ fi
 # get code-generation tools (for now keep in GOPATH since they're not fully modules-compatible yet)
 mkdir -p ${GOPATH}/src/k8s.io
 pushd ${GOPATH}/src/k8s.io
-git clone -b v0.22.2 https://github.com/kubernetes/code-generator
+git config --global advice.detachedHead false
+git clone -b v0.25.6 https://github.com/kubernetes/code-generator
 popd
 ${GOPATH}/src/k8s.io/code-generator/generate-groups.sh \

View File

@@ -323,11 +323,11 @@ func (ib *itemBackupper) executeActions(
             continue
         }
         log.Info("Executing custom action")
+        actionName := action.Name()
-        if act, err := ib.getMatchAction(obj, groupResource, action.Name()); err != nil {
+        if act, err := ib.getMatchAction(obj, groupResource, actionName); err != nil {
             return nil, itemFiles, errors.WithStack(err)
         } else if act != nil && act.Type == resourcepolicies.Skip {
-            log.Infof("skip snapshot of pvc %s/%s bound pv for the matched resource policies", namespace, name)
+            log.Infof("Skip executing Backup Item Action: %s of resource %s: %s/%s for the matched resource policies", actionName, groupResource, namespace, name)
             continue
         }

View File

@@ -543,6 +543,7 @@ var defaultRestorePriorities = restore.Priorities{
         // in the backup.
         "replicasets.apps",
         "clusterclasses.cluster.x-k8s.io",
+        "services",
     },
     LowPriorities: []string{
         "clusterbootstraps.run.tanzu.vmware.com",
@@ -805,6 +806,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
         clock.RealClock{},
         backupper,
         newPluginManager,
+        backupTracker,
         backupStoreGetter,
         s.logger,
         s.metrics,

View File

@@ -267,7 +267,12 @@ func (b *backupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
     }
     b.backupTracker.Add(request.Namespace, request.Name)
-    defer b.backupTracker.Delete(request.Namespace, request.Name)
+    defer func() {
+        switch request.Status.Phase {
+        case velerov1api.BackupPhaseCompleted, velerov1api.BackupPhasePartiallyFailed, velerov1api.BackupPhaseFailed, velerov1api.BackupPhaseFailedValidation:
+            b.backupTracker.Delete(request.Namespace, request.Name)
+        }
+    }()
 
     log.Debug("Running backup")

View File

@@ -44,6 +44,7 @@ type backupFinalizerReconciler struct {
     clock clocks.WithTickerAndDelayedExecution
     backupper pkgbackup.Backupper
     newPluginManager func(logrus.FieldLogger) clientmgmt.Manager
+    backupTracker BackupTracker
     metrics *metrics.ServerMetrics
     backupStoreGetter persistence.ObjectBackupStoreGetter
     log logrus.FieldLogger
@@ -55,6 +56,7 @@ func NewBackupFinalizerReconciler(
     clock clocks.WithTickerAndDelayedExecution,
     backupper pkgbackup.Backupper,
     newPluginManager func(logrus.FieldLogger) clientmgmt.Manager,
+    backupTracker BackupTracker,
     backupStoreGetter persistence.ObjectBackupStoreGetter,
     log logrus.FieldLogger,
     metrics *metrics.ServerMetrics,
@@ -64,6 +66,7 @@ func NewBackupFinalizerReconciler(
         clock: clock,
         backupper: backupper,
         newPluginManager: newPluginManager,
+        backupTracker: backupTracker,
         backupStoreGetter: backupStoreGetter,
         log: log,
         metrics: metrics,
@@ -102,6 +105,10 @@ func (r *backupFinalizerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
     original := backup.DeepCopy()
     defer func() {
+        switch backup.Status.Phase {
+        case velerov1api.BackupPhaseCompleted, velerov1api.BackupPhasePartiallyFailed, velerov1api.BackupPhaseFailed, velerov1api.BackupPhaseFailedValidation:
+            r.backupTracker.Delete(backup.Namespace, backup.Name)
+        }
         // Always attempt to Patch the backup object and status after each reconciliation.
         if err := r.client.Patch(ctx, backup, kbclient.MergeFrom(original)); err != nil {
             log.WithError(err).Error("Error updating backup")

View File

@@ -52,6 +52,7 @@ func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeClock *testcl
         fakeClock,
         backupper,
         func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
+        NewBackupTracker(),
         NewFakeSingleObjectBackupStoreGetter(backupStore),
         logrus.StandardLogger(),
         metrics.NewServerMetrics(),

View File

@@ -37,6 +37,11 @@ spec:
   # asynchronous BackupItemAction operations
   # The default value is 1 hour.
   itemOperationTimeout: 1h
+  # resourcePolicy specifies the referenced resource policies that backup should follow
+  # optional
+  resourcePolicy:
+    kind: configmap
+    name: resource-policy-configmap
   # Array of namespaces to include in the backup. If unspecified, all namespaces are included.
   # Optional.
   includedNamespaces:

View File

@@ -3,9 +3,11 @@ title: "Resource filtering"
 layout: docs
 ---
 
-*Filter objects by namespace, type, or labels.*
+*Filter objects by namespace, type, labels, or resource policies.*
 
-This page describes how to use the include and exclude flags with the `velero backup` and `velero restore` commands. By default Velero includes all objects in a backup or restore when no filtering options are used.
+This page describes how to filter resources for backup and restore.
+Users can use the include and exclude flags with the `velero backup` and `velero restore` commands, and can also use resource policies to control what gets backed up.
+By default, Velero includes all objects in a backup or restore when no filtering options are used.
 
 ## Includes
 
@@ -201,3 +203,121 @@ Kubernetes namespace resources to exclude from the backup, formatted as resource
 ```bash
 velero backup create <backup-name> --exclude-namespaced-resources="*"
 ```
## Resource policies
Velero provides resource policies to filter resources during backup or restore. Currently, resource policies only support skipping the backup of volumes.
**Creating resource policies**
Using resource policies to skip backing up volumes takes two steps:
1. Create the resource policies configmap
Users need to create a configmap in the Velero install namespace from a YAML file that defines the resource policies. The command looks like the following:
```bash
kubectl create cm <configmap-name> --from-file <yaml-file> -n velero
```
2. Create a backup that references the defined resource policies
Users create a backup with the `--resource-policies-configmap` flag, which makes the backup reference the defined resource policies. The command looks like the following:
```bash
velero backup create --resource-policies-configmap <configmap-name>
```
This flag can also be combined with the other include and exclude filters described above, as in the example below.
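For instance, an illustrative command (the backup name and namespace are placeholders; `--include-namespaces` is one of the include filters described earlier on this page):
```bash
# skip volumes matched by the policies in the configmap, while limiting the
# backup to a single namespace via an include filter
velero backup create nginx-backup \
    --include-namespaces nginx-example \
    --resource-policies-configmap resource-policy-configmap
```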
**YAML template**
Velero currently supports only volume resource policies; other kinds of resource policies may be added in the future. The policies YAML config file looks like this:
- YAML template:
```yaml
# currently only supports v1 version
version: v1
volumePolicies:
# each policy consists of a list of conditions and an action
# multiple policies can be defined, but if a resource matches the first policy, the later ones are ignored
# each key in the object is one condition, and one policy will apply to resources that meet ALL conditions
# NOTE: capacity and storageClass only apply to [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes); pod [Volumes](https://kubernetes.io/docs/concepts/storage/volumes) do not support them.
- conditions:
    # capacity condition matches the volumes whose capacity falls into the range
    capacity: "10,100Gi"
    # pv matches specific csi driver
    csi:
      driver: aws.ebs.csi.driver
    # pv matches one of the storage class list
    storageClass:
      - gp2
      - standard
  action:
    type: skip
- conditions:
    capacity: "0,100Gi"
    # nfs volume source with specific server and path (nfs could be empty or only config server or path)
    nfs:
      server: 192.168.200.90
      path: /mnt/data
  action:
    type: skip
- conditions:
    nfs:
      server: 192.168.200.90
  action:
    type: skip
- conditions:
    # nfs could be empty, which matches any nfs volume source
    nfs: {}
  action:
    type: skip
- conditions:
    # csi could be empty, which matches any csi volume source
    csi: {}
  action:
    type: skip
```
**Supported conditions**
Currently, Velero supports the volume attributes listed below:
- capacity: matching volumes have a capacity that falls within this `capacity` range. The capacity value should be the lower value and the upper value joined by a comma; the unit of each value can be `Ti`, `Gi`, `Mi`, `Ki`, etc., which are standard storage units in Kubernetes. The supported combinations are:
  - "0,5Gi" or "0Gi,5Gi", which matches a capacity or size from 0 to 5Gi, including 0 and 5Gi
  - ",5Gi", which is equivalent to "0,5Gi"
  - "5Gi,", which matches a capacity or size larger than 5Gi, including 5Gi
  - "5Gi", which is not supported and will fail validation of the configuration
- storageClass: matching volumes have the specified `storageClass`, such as `gp2` or `ebs-sc` in EKS
- volume sources: matching volumes use the specified volume source. Currently, the `nfs` and `csi` backend volume sources are supported
The supported conditions and their formats are listed below:
- capacity
  ```yaml
  # match volumes with a size between 10Gi and 100Gi
  capacity: "10Gi,100Gi"
  ```
- storageClass
  ```yaml
  # match volumes with the storage class gp2 or ebs-sc
  storageClass:
    - gp2
    - ebs-sc
  ```
- volume sources (currently only the formats and attributes below are supported)
  1. Specify the volume source name; the name can be `nfs`, `rbd`, `iscsi`, `csi`, etc., but Velero currently only supports `nfs` and `csi`.
     ```yaml
     # match any volume with an nfs volume source
     nfs: {}
     # match any volume with a csi volume source
     csi: {}
     ```
  2. Specify details for the related volume source (currently only the csi driver filter and the nfs server or path filters are supported).
     ```yaml
     # match volumes with a csi volume source using the aws.efs.csi.driver
     csi:
       driver: aws.efs.csi.driver
     # match volumes with an nfs volume source using the server and path below
     nfs:
       server: 192.168.200.90
       path: /mnt/nfs
     ```
Volumes provisioned through [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes) support all of the attributes above, while pod [Volumes](https://kubernetes.io/docs/concepts/storage/volumes) can only be filtered by volume source.
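As an illustration of that difference, the pod below (names are placeholders; the server and path reuse the values from the examples above) uses an inline `nfs` volume, so only a volume-source condition such as `nfs` can match it; `capacity` and `storageClass` conditions would not apply to it:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nfs-client          # placeholder name
spec:
  containers:
    - name: app
      image: nginx
      volumeMounts:
        - name: data
          mountPath: /mnt/data
  volumes:
    # inline pod volume: matched only by volume-source conditions (e.g. nfs),
    # not by capacity or storageClass
    - name: data
      nfs:
        server: 192.168.200.90
        path: /mnt/data
```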
**Resource policies rules**
- Velero already has many include and exclude filters. Resource policies are the final filters, applied after the other include and exclude filters in the backup workflow. So if another filter, such as the opt-in approach, selects a pod volume for backup but a resource policy says to skip backing up that same volume, the volume will not be backed up, because resource policies are the last filters applied.
- If multiple volume resource policies are defined and a volume matches more than one of them, only the first matched policy is respected, as in the sketch below.
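For example, in the illustrative policies below (using only the `skip` action, since that is all that is currently supported), a 50Gi `gp2` volume meets the conditions of both policies, but only the first matched policy is applied and the second is ignored:
```yaml
version: v1
volumePolicies:
# a 50Gi gp2 volume matches this policy first, so this is the policy applied
- conditions:
    storageClass:
      - gp2
  action:
    type: skip
# the same volume also meets this capacity condition, but this policy is
# ignored because the volume already matched the first policy
- conditions:
    capacity: "0,100Gi"
  action:
    type: skip
```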