Merge pull request #9015 from Lyndon-Li/vgdp-ms-pvb-controller
VGDP MS PVB controller

commit 7d8a36a6e0
@@ -0,0 +1 @@
+Fix issue #8958, add VGDP MS PVB controller
@@ -15,38 +15,41 @@ spec:
   scope: Namespaced
   versions:
   - additionalPrinterColumns:
-    - description: Pod Volume Backup status such as New/InProgress
+    - description: PodVolumeBackup status such as New/InProgress
       jsonPath: .status.phase
       name: Status
       type: string
-    - description: Time when this backup was started
+    - description: Time duration since this PodVolumeBackup was started
       jsonPath: .status.startTimestamp
-      name: Created
+      name: Started
       type: date
-    - description: Namespace of the pod containing the volume to be backed up
-      jsonPath: .spec.pod.namespace
-      name: Namespace
-      type: string
-    - description: Name of the pod containing the volume to be backed up
-      jsonPath: .spec.pod.name
-      name: Pod
-      type: string
-    - description: Name of the volume to be backed up
-      jsonPath: .spec.volume
-      name: Volume
-      type: string
-    - description: The type of the uploader to handle data transfer
-      jsonPath: .spec.uploaderType
-      name: Uploader Type
-      type: string
+    - description: Completed bytes
+      format: int64
+      jsonPath: .status.progress.bytesDone
+      name: Bytes Done
+      type: integer
+    - description: Total bytes
+      format: int64
+      jsonPath: .status.progress.totalBytes
+      name: Total Bytes
+      type: integer
     - description: Name of the Backup Storage Location where this backup should be
         stored
      jsonPath: .spec.backupStorageLocation
       name: Storage Location
       type: string
-    - jsonPath: .metadata.creationTimestamp
+    - description: Time duration since this PodVolumeBackup was created
+      jsonPath: .metadata.creationTimestamp
       name: Age
       type: date
+    - description: Name of the node where the PodVolumeBackup is processed
+      jsonPath: .status.node
+      name: Node
+      type: string
+    - description: The type of the uploader to handle data transfer
+      jsonPath: .spec.uploaderType
+      name: Uploader
+      type: string
     name: v1
     schema:
       openAPIV3Schema:
@@ -170,6 +173,13 @@ spec:
             status:
               description: PodVolumeBackupStatus is the current status of a PodVolumeBackup.
               properties:
+                acceptedTimestamp:
+                  description: |-
+                    AcceptedTimestamp records the time the pod volume backup is to be prepared.
+                    The server's time is used for AcceptedTimestamp
+                  format: date-time
+                  nullable: true
+                  type: string
                 completionTimestamp:
                   description: |-
                     CompletionTimestamp records the time a backup was completed.
@@ -190,7 +200,11 @@ spec:
                   description: Phase is the current state of the PodVolumeBackup.
                   enum:
                   - New
+                  - Accepted
+                  - Prepared
                   - InProgress
+                  - Canceling
+                  - Canceled
                   - Completed
                   - Failed
                   type: string

File diff suppressed because one or more lines are too long
@@ -105,6 +105,9 @@ const (
     // defaultVGSLabelKey is the default label key used to group PVCs under a VolumeGroupSnapshot
     DefaultVGSLabelKey = "velero.io/volume-group"

+    // PVBLabel is the label key used to identify the pvb for pvb pod
+    PVBLabel = "velero.io/pod-volume-backup"
+
     // PVRLabel is the label key used to identify the pvb for pvr pod
     PVRLabel = "velero.io/pod-volume-restore"
 )
@@ -64,12 +64,16 @@ type PodVolumeBackupSpec struct {
 }

 // PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup.
-// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed
+// +kubebuilder:validation:Enum=New;Accepted;Prepared;InProgress;Canceling;Canceled;Completed;Failed
 type PodVolumeBackupPhase string

 const (
     PodVolumeBackupPhaseNew        PodVolumeBackupPhase = "New"
+    PodVolumeBackupPhaseAccepted   PodVolumeBackupPhase = "Accepted"
+    PodVolumeBackupPhasePrepared   PodVolumeBackupPhase = "Prepared"
     PodVolumeBackupPhaseInProgress PodVolumeBackupPhase = "InProgress"
+    PodVolumeBackupPhaseCanceling  PodVolumeBackupPhase = "Canceling"
+    PodVolumeBackupPhaseCanceled   PodVolumeBackupPhase = "Canceled"
     PodVolumeBackupPhaseCompleted  PodVolumeBackupPhase = "Completed"
     PodVolumeBackupPhaseFailed     PodVolumeBackupPhase = "Failed"
 )
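The enum above extends the PVB lifecycle from four phases to eight, so there are now three terminal states (Completed, Failed, Canceled) instead of two. A minimal, hypothetical helper sketching how callers might test for a terminal phase under the new lifecycle (this PR does not add such a function; the name is illustrative):

package velerov1api

// IsPVBTerminalPhase is an illustrative helper: Completed, Failed, and Canceled
// end the PodVolumeBackup lifecycle, while New, Accepted, Prepared, InProgress,
// and Canceling do not.
func IsPVBTerminalPhase(phase PodVolumeBackupPhase) bool {
    switch phase {
    case PodVolumeBackupPhaseCompleted, PodVolumeBackupPhaseFailed, PodVolumeBackupPhaseCanceled:
        return true
    default:
        return false
    }
}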
@@ -113,20 +117,27 @@ type PodVolumeBackupStatus struct {
     // about the backup operation.
     // +optional
     Progress shared.DataMoveOperationProgress `json:"progress,omitempty"`
+
+    // AcceptedTimestamp records the time the pod volume backup is to be prepared.
+    // The server's time is used for AcceptedTimestamp
+    // +optional
+    // +nullable
+    AcceptedTimestamp *metav1.Time `json:"acceptedTimestamp,omitempty"`
 }

 // TODO(2.0) After converting all resources to use the runttime-controller client,
 // the genclient and k8s:deepcopy markers will no longer be needed and should be removed.
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Backup status such as New/InProgress"
-// +kubebuilder:printcolumn:name="Created",type="date",JSONPath=".status.startTimestamp",description="Time when this backup was started"
-// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be backed up"
-// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be backed up"
-// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be backed up"
-// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer"
+// +kubebuilder:storageversion
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="PodVolumeBackup status such as New/InProgress"
+// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this PodVolumeBackup was started"
+// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes"
+// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes"
 // +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored"
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this PodVolumeBackup was created"
+// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the PodVolumeBackup is processed"
+// +kubebuilder:printcolumn:name="Uploader",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer"
 // +kubebuilder:object:root=true
 // +kubebuilder:object:generate=true
@@ -1043,6 +1043,10 @@ func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) {
         *out = (*in).DeepCopy()
     }
     out.Progress = in.Progress
+    if in.AcceptedTimestamp != nil {
+        in, out := &in.AcceptedTimestamp, &out.AcceptedTimestamp
+        *out = (*in).DeepCopy()
+    }
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupStatus.
@@ -119,3 +119,33 @@ func (b *PodVolumeBackupBuilder) Annotations(annotations map[string]string) *Pod
     b.object.Annotations = annotations
     return b
 }
+
+// Cancel sets the PodVolumeBackup's Cancel.
+func (b *PodVolumeBackupBuilder) Cancel(cancel bool) *PodVolumeBackupBuilder {
+    b.object.Spec.Cancel = cancel
+    return b
+}
+
+// AcceptedTimestamp sets the PodVolumeBackup's AcceptedTimestamp.
+func (b *PodVolumeBackupBuilder) AcceptedTimestamp(acceptedTimestamp *metav1.Time) *PodVolumeBackupBuilder {
+    b.object.Status.AcceptedTimestamp = acceptedTimestamp
+    return b
+}
+
+// Finalizers sets the PodVolumeBackup's Finalizers.
+func (b *PodVolumeBackupBuilder) Finalizers(finalizers []string) *PodVolumeBackupBuilder {
+    b.object.Finalizers = finalizers
+    return b
+}
+
+// Message sets the PodVolumeBackup's Message.
+func (b *PodVolumeBackupBuilder) Message(msg string) *PodVolumeBackupBuilder {
+    b.object.Status.Message = msg
+    return b
+}
+
+// OwnerReference sets the PodVolumeBackup's OwnerReference.
+func (b *PodVolumeBackupBuilder) OwnerReference(ref metav1.OwnerReference) *PodVolumeBackupBuilder {
+    b.object.OwnerReferences = append(b.object.OwnerReferences, ref)
+    return b
+}
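For context, a minimal sketch of how the new setters might be chained when building test fixtures. It assumes the builder package's existing ForPodVolumeBackup and Result helpers; the namespace, name, message, and finalizer values are made up for illustration:

package builder_test

import (
    "testing"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/vmware-tanzu/velero/pkg/builder"
)

func TestPodVolumeBackupFixture(t *testing.T) {
    now := metav1.Now()

    // Chain the setters added in this PR on top of the existing fluent builder.
    pvb := builder.ForPodVolumeBackup("velero", "pvb-1").
        Cancel(true).
        AcceptedTimestamp(&now).
        Message("canceled by user").
        Finalizers([]string{"example.io/finalizer"}). // placeholder finalizer
        Result()

    if !pvb.Spec.Cancel || pvb.Status.AcceptedTimestamp == nil {
        t.Fatalf("unexpected fixture: %+v", pvb)
    }
}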
@@ -48,7 +48,6 @@ import (

     snapshotv1client "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned"

-    "github.com/vmware-tanzu/velero/internal/credentials"
     velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
     velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
     "github.com/vmware-tanzu/velero/pkg/buildinfo"
@@ -60,7 +59,6 @@ import (
     "github.com/vmware-tanzu/velero/pkg/datapath"
     "github.com/vmware-tanzu/velero/pkg/metrics"
     "github.com/vmware-tanzu/velero/pkg/nodeagent"
-    "github.com/vmware-tanzu/velero/pkg/repository"
     "github.com/vmware-tanzu/velero/pkg/util/filesystem"
     "github.com/vmware-tanzu/velero/pkg/util/kube"
     "github.com/vmware-tanzu/velero/pkg/util/logging"
@@ -282,30 +280,6 @@ func (s *nodeAgentServer) run() {
     s.logger.Info("Starting controllers")

-    credentialFileStore, err := credentials.NewNamespacedFileStore(
-        s.mgr.GetClient(),
-        s.namespace,
-        credentials.DefaultStoreDirectory(),
-        filesystem.NewFileSystem(),
-    )
-    if err != nil {
-        s.logger.Fatalf("Failed to create credentials file store: %v", err)
-    }
-
-    credSecretStore, err := credentials.NewNamespacedSecretStore(s.mgr.GetClient(), s.namespace)
-    if err != nil {
-        s.logger.Fatalf("Failed to create secret file store: %v", err)
-    }
-
-    credentialGetter := &credentials.CredentialGetter{FromFile: credentialFileStore, FromSecret: credSecretStore}
-    repoEnsurer := repository.NewEnsurer(s.mgr.GetClient(), s.logger, s.config.resourceTimeout)
-    pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.kubeClient, s.dataPathMgr, repoEnsurer,
-        credentialGetter, s.nodeName, s.mgr.GetScheme(), s.metrics, s.logger)
-
-    if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
-        s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
-    }
-
     var loadAffinity *kube.LoadAffinity
     if s.dataPathConfigs != nil && len(s.dataPathConfigs.LoadAffinity) > 0 {
         loadAffinity = s.dataPathConfigs.LoadAffinity[0]
@@ -328,7 +302,12 @@ func (s *nodeAgentServer) run() {
         }
     }

-    if err = controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger).SetupWithManager(s.mgr); err != nil {
+    pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.metrics, s.logger)
+    if err := pvbReconciler.SetupWithManager(s.mgr); err != nil {
+        s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerPodVolumeBackup)
+    }
+
+    if err := controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.nodeName, s.config.dataMoverPrepareTimeout, s.config.resourceTimeout, podResources, s.logger).SetupWithManager(s.mgr); err != nil {
         s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
     }
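The new PodVolumeBackup reconciler is wired up the same way as the other node-agent controllers: construct it, then hand it to the controller-runtime manager. Reduced to a minimal, generic sketch (the type name and body below are illustrative stand-ins, not the PR's actual reconciler):

package controller

import (
    "context"

    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"

    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// exampleReconciler only shows the registration pattern used in nodeAgentServer.run().
type exampleReconciler struct {
    client client.Client
}

func (r *exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    // Fetch the PodVolumeBackup named by req and drive it through the phases
    // Accepted -> Prepared -> InProgress -> Completed/Failed/Canceled.
    return ctrl.Result{}, nil
}

func (r *exampleReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&velerov1api.PodVolumeBackup{}).
        Complete(r)
}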
@@ -347,7 +326,7 @@ func (s *nodeAgentServer) run() {
         s.logger,
         s.metrics,
     )
-    if err = dataUploadReconciler.SetupWithManager(s.mgr); err != nil {
+    if err := dataUploadReconciler.SetupWithManager(s.mgr); err != nil {
         s.logger.WithError(err).Fatal("Unable to create the data upload controller")
     }
|
@ -358,7 +337,7 @@ func (s *nodeAgentServer) run() {
|
||||||
}
|
}
|
||||||
|
|
||||||
dataDownloadReconciler := controller.NewDataDownloadReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, restorePVCConfig, podResources, s.nodeName, s.config.dataMoverPrepareTimeout, s.logger, s.metrics)
|
dataDownloadReconciler := controller.NewDataDownloadReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, restorePVCConfig, podResources, s.nodeName, s.config.dataMoverPrepareTimeout, s.logger, s.metrics)
|
||||||
if err = dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {
|
if err := dataDownloadReconciler.SetupWithManager(s.mgr); err != nil {
|
||||||
s.logger.WithError(err).Fatal("Unable to create the data download controller")
|
s.logger.WithError(err).Fatal("Unable to create the data download controller")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
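The changes from "if err =" to "if err :=" in the last two hunks are scoping fixes: each setup call now declares its own error inside the if statement, so nothing is shared with, or left over for, later registrations. A self-contained illustration of the idiom (setupA and setupB are hypothetical stand-ins for the reconciler setup calls):

package main

import (
    "errors"
    "log"
)

func setupA() error { return nil }
func setupB() error { return errors.New("example failure") }

func main() {
    // Each err below is scoped to its own if statement, so one registration's
    // error can never be mistaken for another's.
    if err := setupA(); err != nil {
        log.Fatalf("unable to set up A: %v", err)
    }
    if err := setupB(); err != nil {
        log.Fatalf("unable to set up B: %v", err)
    }
}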
@@ -312,30 +312,29 @@ func (f *fakeSnapshotExposer) DiagnoseExpose(context.Context, corev1api.ObjectRe
 func (f *fakeSnapshotExposer) CleanUp(context.Context, corev1api.ObjectReference, string, string) {
 }

-type fakeDataUploadFSBR struct {
-    du         *velerov2alpha1api.DataUpload
+type fakeFSBR struct {
     kubeClient kbclient.Client
     clock      clock.WithTickerAndDelayedExecution
     initErr    error
     startErr   error
 }

-func (f *fakeDataUploadFSBR) Init(ctx context.Context, param any) error {
+func (f *fakeFSBR) Init(ctx context.Context, param any) error {
     return f.initErr
 }

-func (f *fakeDataUploadFSBR) StartBackup(source datapath.AccessPoint, uploaderConfigs map[string]string, param any) error {
+func (f *fakeFSBR) StartBackup(source datapath.AccessPoint, uploaderConfigs map[string]string, param any) error {
     return f.startErr
 }

-func (f *fakeDataUploadFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs map[string]string) error {
+func (f *fakeFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs map[string]string) error {
     return nil
 }

-func (b *fakeDataUploadFSBR) Cancel() {
+func (b *fakeFSBR) Cancel() {
 }

-func (b *fakeDataUploadFSBR) Close(ctx context.Context) {
+func (b *fakeFSBR) Close(ctx context.Context) {
 }

 func TestReconcile(t *testing.T) {
@@ -651,8 +650,7 @@ func TestReconcile(t *testing.T) {
             }

             datapath.MicroServiceBRWatcherCreator = func(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, string, string, string, string, datapath.Callbacks, logrus.FieldLogger) datapath.AsyncBR {
-                return &fakeDataUploadFSBR{
-                    du:         test.du,
+                return &fakeFSBR{
                     kubeClient: r.client,
                     clock:      r.Clock,
                     initErr:    test.fsBRInitErr,

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -169,7 +169,8 @@ func newBackupper(
             }

             if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseCompleted &&
-                pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseFailed {
+                pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseFailed &&
+                pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseCanceled {
                 return
             }
@@ -179,7 +180,8 @@ func newBackupper(
             existPVB, ok := existObj.(*velerov1api.PodVolumeBackup)
             // the PVB in the indexer is already in final status, no need to call WaitGroup.Done()
             if ok && (existPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted ||
-                existPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed) {
+                existPVB.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed ||
+                pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseCanceled) {
                 statusChangedToFinal = false
             }
         }
@@ -428,7 +430,7 @@ func (b *backupper) WaitAllPodVolumesProcessed(log logrus.FieldLogger) []*velero
                 continue
             }
             podVolumeBackups = append(podVolumeBackups, pvb)
-            if pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed {
+            if pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed || pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseCanceled {
                 log.Errorf("pod volume backup failed: %s", pvb.Status.Message)
             }
         }