Merge pull request #9015 from Lyndon-Li/vgdp-ms-pvb-controller

VGDP MS PVB controller
Wenkai Yin(尹文开) 2025-06-16 13:10:44 +08:00 committed by GitHub
commit 7d8a36a6e0
12 changed files with 1630 additions and 607 deletions


@@ -0,0 +1 @@
Fix issue #8958, add VGDP MS PVB controller


@@ -15,38 +15,41 @@ spec:
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Pod Volume Backup status such as New/InProgress
- description: PodVolumeBackup status such as New/InProgress
jsonPath: .status.phase
name: Status
type: string
- description: Time when this backup was started
- description: Time duration since this PodVolumeBackup was started
jsonPath: .status.startTimestamp
name: Created
name: Started
type: date
- description: Namespace of the pod containing the volume to be backed up
jsonPath: .spec.pod.namespace
name: Namespace
type: string
- description: Name of the pod containing the volume to be backed up
jsonPath: .spec.pod.name
name: Pod
type: string
- description: Name of the volume to be backed up
jsonPath: .spec.volume
name: Volume
type: string
- description: The type of the uploader to handle data transfer
jsonPath: .spec.uploaderType
name: Uploader Type
type: string
- description: Completed bytes
format: int64
jsonPath: .status.progress.bytesDone
name: Bytes Done
type: integer
- description: Total bytes
format: int64
jsonPath: .status.progress.totalBytes
name: Total Bytes
type: integer
- description: Name of the Backup Storage Location where this backup should be
stored
jsonPath: .spec.backupStorageLocation
name: Storage Location
type: string
- jsonPath: .metadata.creationTimestamp
- description: Time duration since this PodVolumeBackup was created
jsonPath: .metadata.creationTimestamp
name: Age
type: date
- description: Name of the node where the PodVolumeBackup is processed
jsonPath: .status.node
name: Node
type: string
- description: The type of the uploader to handle data transfer
jsonPath: .spec.uploaderType
name: Uploader
type: string
name: v1
schema:
openAPIV3Schema:
@@ -170,6 +173,13 @@ spec:
status:
description: PodVolumeBackupStatus is the current status of a PodVolumeBackup.
properties:
acceptedTimestamp:
description: |-
AcceptedTimestamp records the time the pod volume backup is accepted and waiting to be prepared.
The server's time is used for AcceptedTimestamp
format: date-time
nullable: true
type: string
completionTimestamp:
description: |-
CompletionTimestamp records the time a backup was completed.
@@ -190,7 +200,11 @@ spec:
description: Phase is the current state of the PodVolumeBackup.
enum:
- New
- Accepted
- Prepared
- InProgress
- Canceling
- Canceled
- Completed
- Failed
type: string

File diff suppressed because one or more lines are too long


@@ -105,6 +105,9 @@ const (
// defaultVGSLabelKey is the default label key used to group PVCs under a VolumeGroupSnapshot
DefaultVGSLabelKey = "velero.io/volume-group"
// PVBLabel is the label key used to identify the pvb for pvb pod
PVBLabel = "velero.io/pod-volume-backup"
// PVRLabel is the label key used to identify the pvr for pvr pod
PVRLabel = "velero.io/pod-volume-restore"
)
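
These two label keys let the node-agent tie a VGDP micro-service pod back to the PodVolumeBackup or PodVolumeRestore it serves. Purely as an illustration (not code from this PR), a reconciler could look up the backup pod for a given PVB by selecting on PVBLabel; the sketch below assumes the constants are exported from the velerov1api package and that the pod carries the PVB name as the label value.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// findPVBPod is an illustrative sketch: it lists pods labeled under PVBLabel
// with the PVB's name and returns the first match, or nil if none exists yet.
func findPVBPod(ctx context.Context, c client.Client, namespace, pvbName string) (*corev1.Pod, error) {
	pods := &corev1.PodList{}
	if err := c.List(ctx, pods,
		client.InNamespace(namespace),
		client.MatchingLabels{velerov1api.PVBLabel: pvbName}, // assumes the pod is labeled with the PVB name
	); err != nil {
		return nil, err
	}
	if len(pods.Items) == 0 {
		return nil, nil
	}
	return &pods.Items[0], nil
}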


@@ -64,12 +64,16 @@ type PodVolumeBackupSpec struct {
}
// PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup.
// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed
// +kubebuilder:validation:Enum=New;Accepted;Prepared;InProgress;Canceling;Canceled;Completed;Failed
type PodVolumeBackupPhase string
const (
PodVolumeBackupPhaseNew PodVolumeBackupPhase = "New"
PodVolumeBackupPhaseAccepted PodVolumeBackupPhase = "Accepted"
PodVolumeBackupPhasePrepared PodVolumeBackupPhase = "Prepared"
PodVolumeBackupPhaseInProgress PodVolumeBackupPhase = "InProgress"
PodVolumeBackupPhaseCanceling PodVolumeBackupPhase = "Canceling"
PodVolumeBackupPhaseCanceled PodVolumeBackupPhase = "Canceled"
PodVolumeBackupPhaseCompleted PodVolumeBackupPhase = "Completed"
PodVolumeBackupPhaseFailed PodVolumeBackupPhase = "Failed"
)
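
The phase set grows from the old New/InProgress/Completed/Failed lifecycle to the Accepted/Prepared/InProgress/Canceling/Canceled flow already used by the DataUpload and DataDownload CRs. As a hypothetical helper (not part of this change), callers could treat Completed, Failed, and Canceled as the terminal phases:

package example

import (
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// isPVBTerminal reports whether a PodVolumeBackup has reached a phase from
// which it will no longer progress; every other phase is still in flight.
func isPVBTerminal(phase velerov1api.PodVolumeBackupPhase) bool {
	switch phase {
	case velerov1api.PodVolumeBackupPhaseCompleted,
		velerov1api.PodVolumeBackupPhaseFailed,
		velerov1api.PodVolumeBackupPhaseCanceled:
		return true
	default:
		return false
	}
}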
@@ -113,20 +117,27 @@ type PodVolumeBackupStatus struct {
// about the backup operation.
// +optional
Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
// AcceptedTimestamp records the time the pod volume backup is accepted and waiting to be prepared.
// The server's time is used for AcceptedTimestamp
// +optional
// +nullable
AcceptedTimestamp *metav1.Time `json:"acceptedTimestamp,omitempty"`
}
// TODO(2.0) After converting all resources to use the runtime-controller client,
// the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Backup status such as New/InProgress"
// +kubebuilder:printcolumn:name="Created",type="date",JSONPath=".status.startTimestamp",description="Time when this backup was started"
// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be backed up"
// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be backed up"
// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be backed up"
// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer"
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="PodVolumeBackup status such as New/InProgress"
// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this PodVolumeBackup was started"
// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this PodVolumeBackup was created"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the PodVolumeBackup is processed"
// +kubebuilder:printcolumn:name="Uploader",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer"
// +kubebuilder:object:root=true
// +kubebuilder:object:generate=true


@@ -1043,6 +1043,10 @@ func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) {
*out = (*in).DeepCopy()
}
out.Progress = in.Progress
if in.AcceptedTimestamp != nil {
in, out := &in.AcceptedTimestamp, &out.AcceptedTimestamp
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupStatus.


@@ -119,3 +119,33 @@ func (b *PodVolumeBackupBuilder) Annotations(annotations map[string]string) *Pod
b.object.Annotations = annotations
return b
}
// Cancel sets the PodVolumeBackup's Cancel.
func (b *PodVolumeBackupBuilder) Cancel(cancel bool) *PodVolumeBackupBuilder {
b.object.Spec.Cancel = cancel
return b
}
// AcceptedTimestamp sets the PodVolumeBackup's AcceptedTimestamp.
func (b *PodVolumeBackupBuilder) AcceptedTimestamp(acceptedTimestamp *metav1.Time) *PodVolumeBackupBuilder {
b.object.Status.AcceptedTimestamp = acceptedTimestamp
return b
}
// Finalizers sets the PodVolumeBackup's Finalizers.
func (b *PodVolumeBackupBuilder) Finalizers(finalizers []string) *PodVolumeBackupBuilder {
b.object.Finalizers = finalizers
return b
}
// Message sets the PodVolumeBackup's Message.
func (b *PodVolumeBackupBuilder) Message(msg string) *PodVolumeBackupBuilder {
b.object.Status.Message = msg
return b
}
// OwnerReference sets the PodVolumeBackup's OwnerReference.
func (b *PodVolumeBackupBuilder) OwnerReference(ref metav1.OwnerReference) *PodVolumeBackupBuilder {
b.object.OwnerReferences = append(b.object.OwnerReferences, ref)
return b
}
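
For context, here is a hedged example of how these setters might be chained when building a test fixture. It assumes the pre-existing ForPodVolumeBackup constructor plus the Phase and Result helpers in the same builder package; the namespace and finalizer strings are placeholders.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/builder"
)

// newCanceledPVB sketches a canceled PodVolumeBackup fixture using the new setters.
func newCanceledPVB() *velerov1api.PodVolumeBackup {
	return builder.ForPodVolumeBackup("velero", "pvb-1").
		Phase(velerov1api.PodVolumeBackupPhaseCanceled).
		Cancel(true).
		Message("canceled by user").
		AcceptedTimestamp(&metav1.Time{Time: time.Now()}).
		Finalizers([]string{"example.io/placeholder-finalizer"}). // placeholder finalizer name
		Result()
}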


@@ -48,7 +48,6 @@ import (
snapshotv1client "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned"
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
"github.com/vmware-tanzu/velero/pkg/buildinfo"
@@ -60,7 +59,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/datapath"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
"github.com/vmware-tanzu/velero/pkg/repository"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/logging"
@@ -282,30 +280,6 @@ func (s *nodeAgentServer) run() {
s.logger.Info("Starting controllers")
credentialFileStore, err := credentials.NewNamespacedFileStore(
s.mgr.GetClient(),
s.namespace,
credentials.DefaultStoreDirectory(),
filesystem.NewFileSystem(),
)
if err != nil {
s.logger.Fatalf("Failed to create credentials file store: %v", err)
}
credSecretStore, err := credentials.NewNamespacedSecretStore(s.mgr.GetClient(), s.namespace)
if err != nil {
s.logger.Fatalf("Failed to create secret file store: %v", err)
}
credentialGetter := &credentials.CredentialGetter{FromFile: credentialFileStore, FromSecret: credSecretStore}
repoEnsurer := repository.NewEnsurer(s.mgr.GetClient(), s.logger, s.config.resourceTimeout)
pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.kubeClient, s.dataPathMgr, repoEnsurer,
credentialGetter, s.nodeName, s.mgr.GetScheme(), s.metrics, s.logger)
if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
}
var loadAffinity *kube.LoadAffinity
if s.dataPathConfigs != nil && len(s.dataPathConfigs.LoadAffinity) > 0 {
loadAffinity = s.dataPathConfigs.LoadAffinity[0]
@@ -328,7 +302,12 @@ }
}
}
if err = controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger).SetupWithManager(s.mgr); err != nil {
pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger)
if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
}
if err := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger).SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
}
@@ -347,7 +326,7 @@ func (s *nodeAgentServer) run() {
s.logger,
s.metrics,
)
if err = dataUploadReconciler.SetupWithManager(s.mgr); err != nil {
if err := dataUploadReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the data upload controller")
}
@@ -358,7 +337,7 @@ }
}
dataDownloadReconciler := controller.NewDataDownloadReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, restorePVCConfig, podResources, s.nodeName, s.config.dataMoverPrepareTimeout, s.logger, s.metrics)
if err = dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {
if err := dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {
s.logger.WithError(err).Fatal("Unable to create the data download controller")
}


@@ -312,30 +312,29 @@ func (f *fakeSnapshotExposer) DiagnoseExpose(context.Context, corev1api.ObjectRe
func (f *fakeSnapshotExposer) CleanUp(context.Context, corev1api.ObjectReference, string, string) {
}
type fakeDataUploadFSBR struct {
du *velerov2alpha1api.DataUpload
type fakeFSBR struct {
kubeClient kbclient.Client
clock clock.WithTickerAndDelayedExecution
initErr error
startErr error
}
func (f *fakeDataUploadFSBR) Init(ctx context.Context, param any) error {
func (f *fakeFSBR) Init(ctx context.Context, param any) error {
return f.initErr
}
func (f *fakeDataUploadFSBR) StartBackup(source datapath.AccessPoint, uploaderConfigs map[string]string, param any) error {
func (f *fakeFSBR) StartBackup(source datapath.AccessPoint, uploaderConfigs map[string]string, param any) error {
return f.startErr
}
func (f *fakeDataUploadFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs map[string]string) error {
func (f *fakeFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs map[string]string) error {
return nil
}
func (b *fakeDataUploadFSBR) Cancel() {
func (b *fakeFSBR) Cancel() {
}
func (b *fakeDataUploadFSBR) Close(ctx context.Context) {
func (b *fakeFSBR) Close(ctx context.Context) {
}
func TestReconcile(t *testing.T) {
@@ -651,8 +650,7 @@ func TestReconcile(t *testing.T) {
}
datapath.MicroServiceBRWatcherCreator = func(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, string, string, string, string, datapath.Callbacks, logrus.FieldLogger) datapath.AsyncBR {
return &fakeDataUploadFSBR{
du: test.du,
return &fakeFSBR{
kubeClient: r.client,
clock: r.Clock,
initErr: test.fsBRInitErr,

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -169,7 +169,8 @@ func newBackupper(
}
if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseCompleted &&
pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseFailed {
pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseFailed &&
pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseCanceled {
return
}
@@ -179,7 +180,8 @@ func newBackupper(
existPVB, ok := existObj.(*velerov1api.PodVolumeBackup)
// the PVB in the indexer is already in final status, no need to call WaitGroup.Done()
if ok && (existPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted ||
existPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed) {
existPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed ||
existPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseCanceled) {
statusChangedToFinal = false
}
}
@@ -428,7 +430,7 @@ func (b *backupper) WaitAllPodVolumesProcessed(log logrus.FieldLogger) []*velero
continue
}
podVolumeBackups = append(podVolumeBackups, pvb)
if pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed {
if pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed || pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseCanceled {
log.Errorf("pod volume backup failed: %s", pvb.Status.Message)
}
}