Merge pull request #8109 from shubham-pampattiwar/backup-pvc-config-support

Add support for backup PVC configuration

commit 4e781d4009

@@ -0,0 +1 @@
+Add support for backup PVC configuration
@@ -292,7 +292,13 @@ func (s *nodeAgentServer) run() {
 	if s.dataPathConfigs != nil && len(s.dataPathConfigs.LoadAffinity) > 0 {
 		loadAffinity = s.dataPathConfigs.LoadAffinity[0]
 	}
-	dataUploadReconciler := controller.NewDataUploadReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.csiSnapshotClient.SnapshotV1(), s.dataPathMgr, loadAffinity, repoEnsurer, clock.RealClock{}, credentialGetter, s.nodeName, s.fileSystem, s.config.dataMoverPrepareTimeout, s.logger, s.metrics)
+
+	var backupPVCConfig map[string]nodeagent.BackupPVC
+	if s.dataPathConfigs != nil && s.dataPathConfigs.BackupPVCConfig != nil {
+		backupPVCConfig = s.dataPathConfigs.BackupPVCConfig
+	}
+
+	dataUploadReconciler := controller.NewDataUploadReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.csiSnapshotClient.SnapshotV1(), s.dataPathMgr, loadAffinity, backupPVCConfig, repoEnsurer, clock.RealClock{}, credentialGetter, s.nodeName, s.fileSystem, s.config.dataMoverPrepareTimeout, s.logger, s.metrics)
 	if err = dataUploadReconciler.SetupWithManager(s.mgr); err != nil {
 		s.logger.WithError(err).Fatal("Unable to create the data upload controller")
 	}
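A note on the nil case: when no node-agent data path ConfigMap is supplied, backupPVCConfig stays a nil map and is passed through to the reconciler as-is. That is safe because indexing a nil map in Go never panics; it returns the zero value, so the exposer simply falls back to the source PVC's settings. A minimal standalone illustration (the storage class name is hypothetical):

package main

import "fmt"

func main() {
	// backupPVCConfig remains nil when the node-agent has no data path ConfigMap.
	var backupPVCConfig map[string]struct {
		StorageClass string
		ReadOnly     bool
	}

	// Indexing a nil map yields the zero value and exists == false,
	// so downstream code keeps the source PVC's storage class.
	value, exists := backupPVCConfig["any-storage-class"]
	fmt.Println(value, exists) // { false} false
}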
@@ -78,12 +78,13 @@ type DataUploadReconciler struct {
 	snapshotExposerList map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer
 	dataPathMgr         *datapath.Manager
 	loadAffinity        *nodeagent.LoadAffinity
+	backupPVCConfig     map[string]nodeagent.BackupPVC
 	preparingTimeout    time.Duration
 	metrics             *metrics.ServerMetrics
 }
 
 func NewDataUploadReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, csiSnapshotClient snapshotter.SnapshotV1Interface,
-	dataPathMgr *datapath.Manager, loadAffinity *nodeagent.LoadAffinity, repoEnsurer *repository.Ensurer, clock clocks.WithTickerAndDelayedExecution,
+	dataPathMgr *datapath.Manager, loadAffinity *nodeagent.LoadAffinity, backupPVCConfig map[string]nodeagent.BackupPVC, repoEnsurer *repository.Ensurer, clock clocks.WithTickerAndDelayedExecution,
 	cred *credentials.CredentialGetter, nodeName string, fs filesystem.Interface, preparingTimeout time.Duration, log logrus.FieldLogger, metrics *metrics.ServerMetrics) *DataUploadReconciler {
 	return &DataUploadReconciler{
 		client: client,

@@ -99,6 +100,7 @@ func NewDataUploadReconciler(client client.Client, mgr manager.Manager, kubeClie
 		snapshotExposerList: map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer{velerov2alpha1api.SnapshotTypeCSI: exposer.NewCSISnapshotExposer(kubeClient, csiSnapshotClient, log)},
 		dataPathMgr:         dataPathMgr,
 		loadAffinity:        loadAffinity,
+		backupPVCConfig:     backupPVCConfig,
 		preparingTimeout:    preparingTimeout,
 		metrics:             metrics,
 	}
@@ -788,6 +790,7 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
 			ExposeTimeout: r.preparingTimeout,
 			VolumeSize:    pvc.Spec.Resources.Requests[corev1.ResourceStorage],
 			Affinity:      r.loadAffinity,
+			BackupPVCConfig: r.backupPVCConfig,
 		}, nil
 	}
 	return nil, nil
@@ -22,6 +22,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/vmware-tanzu/velero/pkg/nodeagent"
+
 	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
 	snapshotFake "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/fake"
 	"github.com/pkg/errors"

@@ -245,7 +247,7 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci
 	if err != nil {
 		return nil, err
 	}
-	return NewDataUploadReconciler(fakeClient, nil, fakeKubeClient, fakeSnapshotClient.SnapshotV1(), dataPathMgr, nil, nil,
+	return NewDataUploadReconciler(fakeClient, nil, fakeKubeClient, fakeSnapshotClient.SnapshotV1(), dataPathMgr, nil, map[string]nodeagent.BackupPVC{}, nil,
 		testclocks.NewFakeClock(now), &credentials.CredentialGetter{FromFile: credentialFileStore}, "test-node", fakeFS, time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics()), nil
 }
@@ -67,6 +67,9 @@ type CSISnapshotExposeParam struct {
 
 	// Affinity specifies the node affinity of the backup pod
 	Affinity *nodeagent.LoadAffinity
+
+	// BackupPVCConfig is the config for backupPVC (intermediate PVC) of snapshot data movement
+	BackupPVCConfig map[string]nodeagent.BackupPVC
 }
 
 // CSISnapshotExposeWaitParam define the input param for WaitExposed of CSI snapshots
@@ -163,7 +166,17 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obje
 		curLog.WithField("vs name", volumeSnapshot.Name).Warnf("The snapshot doesn't contain a valid restore size, use source volume's size %v", volumeSize)
 	}
 
-	backupPVC, err := e.createBackupPVC(ctx, ownerObject, backupVS.Name, csiExposeParam.StorageClass, csiExposeParam.AccessMode, volumeSize)
+	// check if there is a mapping for source pvc storage class in backupPVC config
+	// if the mapping exists then use the values(storage class, readOnly accessMode)
+	// for backupPVC (intermediate PVC in snapshot data movement) object creation
+	backupPVCStorageClass := csiExposeParam.StorageClass
+	backupPVCReadOnly := false
+	if value, exists := csiExposeParam.BackupPVCConfig[csiExposeParam.StorageClass]; exists {
+		backupPVCStorageClass = value.StorageClass
+		backupPVCReadOnly = value.ReadOnly
+	}
+
+	backupPVC, err := e.createBackupPVC(ctx, ownerObject, backupVS.Name, backupPVCStorageClass, csiExposeParam.AccessMode, volumeSize, backupPVCReadOnly)
 	if err != nil {
 		return errors.Wrap(err, "error to create backup pvc")
 	}
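The lookup above keys the map by the source PVC's storage class and overrides both the storage class and the access mode only when a mapping exists; otherwise the backupPVC keeps the source storage class and stays read-write. A minimal standalone sketch of that decision logic, with a mirrored BackupPVC type and hypothetical storage class names:

package main

import "fmt"

// BackupPVC mirrors the nodeagent.BackupPVC type introduced in this PR.
type BackupPVC struct {
	StorageClass string
	ReadOnly     bool
}

func main() {
	// Hypothetical mapping: source storage class -> backupPVC overrides.
	cfg := map[string]BackupPVC{
		"source-sc": {StorageClass: "backup-sc", ReadOnly: true},
	}

	// Same shape as the lookup in Expose: override only when a mapping exists.
	storageClass, readOnly := "source-sc", false
	if value, exists := cfg[storageClass]; exists {
		storageClass = value.StorageClass
		readOnly = value.ReadOnly
	}
	fmt.Println(storageClass, readOnly) // prints: backup-sc true
}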
@@ -347,7 +360,7 @@ func (e *csiSnapshotExposer) createBackupVSC(ctx context.Context, ownerObject co
 	return e.csiSnapshotClient.VolumeSnapshotContents().Create(ctx, vsc, metav1.CreateOptions{})
 }
 
-func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject corev1.ObjectReference, backupVS, storageClass, accessMode string, resource resource.Quantity) (*corev1.PersistentVolumeClaim, error) {
+func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject corev1.ObjectReference, backupVS, storageClass, accessMode string, resource resource.Quantity, readOnly bool) (*corev1.PersistentVolumeClaim, error) {
 	backupPVCName := ownerObject.Name
 
 	volumeMode, err := getVolumeModeByAccessMode(accessMode)
@@ -355,6 +368,12 @@ func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject co
 		return nil, err
 	}
 
+	pvcAccessMode := corev1.ReadWriteOnce
+
+	if readOnly {
+		pvcAccessMode = corev1.ReadOnlyMany
+	}
+
 	dataSource := &corev1.TypedLocalObjectReference{
 		APIGroup: &snapshotv1api.SchemeGroupVersion.Group,
 		Kind:     "VolumeSnapshot",
@@ -377,7 +396,7 @@ func (e *csiSnapshotExposer) createBackupPVC(ctx context.Context, ownerObject co
 		},
 		Spec: corev1.PersistentVolumeClaimSpec{
 			AccessModes: []corev1.PersistentVolumeAccessMode{
-				corev1.ReadWriteOnce,
+				pvcAccessMode,
 			},
 			StorageClassName: &storageClass,
 			VolumeMode:       &volumeMode,
@@ -18,10 +18,13 @@ package exposer
 
 import (
 	"context"
+	"fmt"
 	"reflect"
 	"testing"
 	"time"
 
+	"k8s.io/utils/pointer"
+
 	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
 	snapshotFake "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/fake"
 	"github.com/pkg/errors"
@@ -821,3 +824,147 @@ func TestToSystemAffinity(t *testing.T) {
 		})
 	}
 }
+
+func Test_csiSnapshotExposer_createBackupPVC(t *testing.T) {
+	backup := &velerov1.Backup{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: velerov1.SchemeGroupVersion.String(),
+			Kind:       "Backup",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: velerov1.DefaultNamespace,
+			Name:      "fake-backup",
+			UID:       "fake-uid",
+		},
+	}
+
+	dataSource := &corev1.TypedLocalObjectReference{
+		APIGroup: &snapshotv1api.SchemeGroupVersion.Group,
+		Kind:     "VolumeSnapshot",
+		Name:     "fake-snapshot",
+	}
+	volumeMode := corev1.PersistentVolumeFilesystem
+
+	backupPVC := corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: velerov1.DefaultNamespace,
+			Name:      "fake-backup",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: backup.APIVersion,
+					Kind:       backup.Kind,
+					Name:       backup.Name,
+					UID:        backup.UID,
+					Controller: pointer.BoolPtr(true),
+				},
+			},
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			AccessModes: []corev1.PersistentVolumeAccessMode{
+				corev1.ReadWriteOnce,
+			},
+			VolumeMode:       &volumeMode,
+			DataSource:       dataSource,
+			DataSourceRef:    nil,
+			StorageClassName: pointer.String("fake-storage-class"),
+			Resources: corev1.VolumeResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceStorage: resource.MustParse("1Gi"),
+				},
+			},
+		},
+	}
+
+	backupPVCReadOnly := corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: velerov1.DefaultNamespace,
+			Name:      "fake-backup",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: backup.APIVersion,
+					Kind:       backup.Kind,
+					Name:       backup.Name,
+					UID:        backup.UID,
+					Controller: pointer.BoolPtr(true),
+				},
+			},
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			AccessModes: []corev1.PersistentVolumeAccessMode{
+				corev1.ReadOnlyMany,
+			},
+			VolumeMode:       &volumeMode,
+			DataSource:       dataSource,
+			DataSourceRef:    nil,
+			StorageClassName: pointer.String("fake-storage-class"),
+			Resources: corev1.VolumeResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceStorage: resource.MustParse("1Gi"),
+				},
+			},
+		},
+	}
+
+	tests := []struct {
+		name              string
+		ownerBackup       *velerov1.Backup
+		backupVS          string
+		storageClass      string
+		accessMode        string
+		resource          resource.Quantity
+		readOnly          bool
+		kubeClientObj     []runtime.Object
+		snapshotClientObj []runtime.Object
+		want              *corev1.PersistentVolumeClaim
+		wantErr           assert.ErrorAssertionFunc
+	}{
+		{
+			name:         "backupPVC gets created successfully with parameters from source PVC",
+			ownerBackup:  backup,
+			backupVS:     "fake-snapshot",
+			storageClass: "fake-storage-class",
+			accessMode:   AccessModeFileSystem,
+			resource:     resource.MustParse("1Gi"),
+			readOnly:     false,
+			want:         &backupPVC,
+			wantErr:      assert.NoError,
+		},
+		{
+			name:         "backupPVC gets created successfully with parameters from source PVC but accessMode from backupPVC Config as read only",
+			ownerBackup:  backup,
+			backupVS:     "fake-snapshot",
+			storageClass: "fake-storage-class",
+			accessMode:   AccessModeFileSystem,
+			resource:     resource.MustParse("1Gi"),
+			readOnly:     true,
+			want:         &backupPVCReadOnly,
+			wantErr:      assert.NoError,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			fakeKubeClient := fake.NewSimpleClientset(tt.kubeClientObj...)
+			fakeSnapshotClient := snapshotFake.NewSimpleClientset(tt.snapshotClientObj...)
+			e := &csiSnapshotExposer{
+				kubeClient:        fakeKubeClient,
+				csiSnapshotClient: fakeSnapshotClient.SnapshotV1(),
+				log:               velerotest.NewLogger(),
+			}
+			var ownerObject corev1.ObjectReference
+			if tt.ownerBackup != nil {
+				ownerObject = corev1.ObjectReference{
+					Kind:       tt.ownerBackup.Kind,
+					Namespace:  tt.ownerBackup.Namespace,
+					Name:       tt.ownerBackup.Name,
+					UID:        tt.ownerBackup.UID,
+					APIVersion: tt.ownerBackup.APIVersion,
+				}
+			}
+			got, err := e.createBackupPVC(context.Background(), ownerObject, tt.backupVS, tt.storageClass, tt.accessMode, tt.resource, tt.readOnly)
+			if !tt.wantErr(t, err, fmt.Sprintf("createBackupPVC(%v, %v, %v, %v, %v, %v)", ownerObject, tt.backupVS, tt.storageClass, tt.accessMode, tt.resource, tt.readOnly)) {
+				return
+			}
+			assert.Equalf(t, tt.want, got, "createBackupPVC(%v, %v, %v, %v, %v, %v)", ownerObject, tt.backupVS, tt.storageClass, tt.accessMode, tt.resource, tt.readOnly)
+		})
+	}
+}
@@ -63,12 +63,23 @@ type RuledConfigs struct {
 	Number int `json:"number"`
 }
 
+type BackupPVC struct {
+	// StorageClass is the name of storage class to be used by the backupPVC
+	StorageClass string `json:"storageClass,omitempty"`
+
+	// ReadOnly sets the backupPVC's access mode as read only
+	ReadOnly bool `json:"readOnly,omitempty"`
+}
+
 type Configs struct {
 	// LoadConcurrency is the config for data path load concurrency per node.
 	LoadConcurrency *LoadConcurrency `json:"loadConcurrency,omitempty"`
 
 	// LoadAffinity is the config for data path load affinity.
 	LoadAffinity []*LoadAffinity `json:"loadAffinity,omitempty"`
+
+	// BackupPVCConfig is the config for backupPVC (intermediate PVC) of snapshot data movement
+	BackupPVCConfig map[string]BackupPVC `json:"backupPVC,omitempty"`
 }
 
 // IsRunning checks if the node agent daemonset is running properly. If not, return the error found
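Given the json tags above, the backupPVC mapping is supplied through the node-agent's data path configuration, keyed by the source PVC's storage class. A hypothetical sketch of that config data (both storage class names are placeholders, not values from this PR):

{
    "backupPVC": {
        "source-storage-class": {
            "storageClass": "backup-storage-class",
            "readOnly": true
        }
    }
}

With an entry like this, a DataUpload whose source PVC uses source-storage-class gets its intermediate backupPVC provisioned from backup-storage-class with ReadOnlyMany access; storage classes without an entry keep the source storage class and ReadWriteOnce.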