record PodVolumeBackup/Restore start and completion timestamps (#1609)
* record PodVolumeBackup start and completion timestamps

  adds startTimestamp and completionTimestamp fields to the PodVolumeBackup status spec

  Signed-off-by: Adnan Abdulhussein <aadnan@vmware.com>

* record PodVolumeRestore start and completion timestamps

  Signed-off-by: Adnan Abdulhussein <aadnan@vmware.com>
parent 022099a62e
commit 2156124dfc
@@ -0,0 +1 @@
+add startTimestamp and completionTimestamp to PodVolumeBackup and PodVolumeRestore status fields
@@ -68,6 +68,18 @@ type PodVolumeBackupStatus struct {
     // Message is a message about the pod volume backup's status.
     Message string `json:"message"`
+
+    // StartTimestamp records the time a backup was started.
+    // Separate from CreationTimestamp, since that value changes
+    // on restores.
+    // The server's time is used for StartTimestamps
+    StartTimestamp metav1.Time `json:"startTimestamp"`
+
+    // CompletionTimestamp records the time a backup was completed.
+    // Completion time is recorded even on failed backups.
+    // Completion time is recorded before uploading the backup object.
+    // The server's time is used for CompletionTimestamps
+    CompletionTimestamp metav1.Time `json:"completionTimestamp"`
 }

 // +genclient
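For context, a minimal sketch of how a consumer could derive a backup duration from the two new status fields. The type below is a trimmed, hypothetical stand-in for the real PodVolumeBackupStatus in the hunk above, reduced to just the added fields; metav1.Time embeds time.Time, so ordinary time arithmetic applies.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podVolumeBackupStatus is a trimmed, hypothetical stand-in for the real
// PodVolumeBackupStatus above, reduced to just the two new fields.
type podVolumeBackupStatus struct {
	StartTimestamp      metav1.Time `json:"startTimestamp"`
	CompletionTimestamp metav1.Time `json:"completionTimestamp"`
}

func main() {
	status := podVolumeBackupStatus{
		StartTimestamp:      metav1.NewTime(time.Now().Add(-90 * time.Second)),
		CompletionTimestamp: metav1.NewTime(time.Now()),
	}
	// metav1.Time embeds time.Time, so standard duration arithmetic applies.
	fmt.Println(status.CompletionTimestamp.Sub(status.StartTimestamp.Time)) // ~1m30s
}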
@@ -57,6 +57,15 @@ type PodVolumeRestoreStatus struct {
     // Message is a message about the pod volume restore's status.
     Message string `json:"message"`
+
+    // StartTimestamp records the time a restore was started.
+    // The server's time is used for StartTimestamps
+    StartTimestamp metav1.Time `json:"startTimestamp"`
+
+    // CompletionTimestamp records the time a restore was completed.
+    // Completion time is recorded even on failed restores.
+    // The server's time is used for CompletionTimestamps
+    CompletionTimestamp metav1.Time `json:"completionTimestamp"`
 }

 // +genclient
@@ -638,7 +638,7 @@ func (in *PodVolumeBackup) DeepCopyInto(out *PodVolumeBackup) {
     out.TypeMeta = in.TypeMeta
     in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
     in.Spec.DeepCopyInto(&out.Spec)
-    out.Status = in.Status
+    in.Status.DeepCopyInto(&out.Status)
     return
 }

@@ -720,6 +720,8 @@ func (in *PodVolumeBackupSpec) DeepCopy() *PodVolumeBackupSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) {
     *out = *in
+    in.StartTimestamp.DeepCopyInto(&out.StartTimestamp)
+    in.CompletionTimestamp.DeepCopyInto(&out.CompletionTimestamp)
     return
 }

@@ -739,7 +741,7 @@ func (in *PodVolumeRestore) DeepCopyInto(out *PodVolumeRestore) {
     out.TypeMeta = in.TypeMeta
     in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
     out.Spec = in.Spec
-    out.Status = in.Status
+    in.Status.DeepCopyInto(&out.Status)
     return
 }

@@ -814,6 +816,8 @@ func (in *PodVolumeRestoreSpec) DeepCopy() *PodVolumeRestoreSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) {
     *out = *in
+    in.StartTimestamp.DeepCopyInto(&out.StartTimestamp)
+    in.CompletionTimestamp.DeepCopyInto(&out.CompletionTimestamp)
     return
 }
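A side note on the generated-code change above: the status copy switches from a plain struct assignment to DeepCopyInto, presumably because the status structs now contain metav1.Time fields, which provide their own DeepCopyInto that the generator calls field by field. A minimal sketch of that method in isolation, assuming only k8s.io/apimachinery:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// metav1.Time provides its own DeepCopyInto; the regenerated status copies
	// above call it per field instead of relying on struct assignment.
	in := metav1.Now()
	var out metav1.Time
	in.DeepCopyInto(&out)
	fmt.Println(out.Equal(&in)) // true: the copy preserves the same instant
}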
@@ -28,6 +28,7 @@ import (
     "github.com/sirupsen/logrus"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/clock"
     corev1informers "k8s.io/client-go/informers/core/v1"
     corev1listers "k8s.io/client-go/listers/core/v1"
     "k8s.io/client-go/tools/cache"
@@ -55,6 +56,7 @@ type podVolumeBackupController struct {

     processBackupFunc func(*velerov1api.PodVolumeBackup) error
     fileSystem        filesystem.Interface
+    clock             clock.Clock
 }

 // NewPodVolumeBackupController creates a new pod volume backup controller.
@@ -79,6 +81,7 @@ func NewPodVolumeBackupController(
         nodeName: nodeName,

         fileSystem: filesystem.NewFileSystem(),
+        clock:      &clock.RealClock{},
     }

     c.syncHandler = c.processQueueItem
@@ -173,9 +176,12 @@ func (c *podVolumeBackupController) processBackup(req *velerov1api.PodVolumeBack
     var err error

     // update status to InProgress
-    req, err = c.patchPodVolumeBackup(req, updatePhaseFunc(velerov1api.PodVolumeBackupPhaseInProgress))
+    req, err = c.patchPodVolumeBackup(req, func(r *velerov1api.PodVolumeBackup) {
+        r.Status.Phase = velerov1api.PodVolumeBackupPhaseInProgress
+        r.Status.StartTimestamp.Time = c.clock.Now()
+    })
     if err != nil {
-        log.WithError(err).Error("Error setting phase to InProgress")
+        log.WithError(err).Error("Error setting PodVolumeBackup StartTimestamp and phase to InProgress")
         return errors.WithStack(err)
     }

@@ -253,12 +259,13 @@
         r.Status.Path = path
         r.Status.Phase = velerov1api.PodVolumeBackupPhaseCompleted
         r.Status.SnapshotID = snapshotID
+        r.Status.CompletionTimestamp.Time = c.clock.Now()
         if emptySnapshot {
             r.Status.Message = "volume was empty so no snapshot was taken"
         }
     })
     if err != nil {
-        log.WithError(err).Error("Error setting phase to Completed")
+        log.WithError(err).Error("Error setting PodVolumeBackup phase to Completed")
         return err
     }

@@ -300,19 +307,14 @@ func (c *podVolumeBackupController) fail(req *velerov1api.PodVolumeBackup, msg s
     if _, err := c.patchPodVolumeBackup(req, func(r *velerov1api.PodVolumeBackup) {
         r.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed
         r.Status.Message = msg
+        r.Status.CompletionTimestamp.Time = c.clock.Now()
     }); err != nil {
-        log.WithError(err).Error("Error setting phase to Failed")
+        log.WithError(err).Error("Error setting PodVolumeBackup phase to Failed")
         return err
     }
     return nil
 }
-
-func updatePhaseFunc(phase velerov1api.PodVolumeBackupPhase) func(r *velerov1api.PodVolumeBackup) {
-    return func(r *velerov1api.PodVolumeBackup) {
-        r.Status.Phase = phase
-    }
-}

 func singlePathMatch(path string) (string, error) {
     matches, err := filepath.Glob(path)
     if err != nil {
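The clock field added above exists so the timestamps don't have to come from time.Now() directly; a test can substitute a fake clock. A minimal sketch of that substitution, using a hypothetical recordStart helper rather than the controller's actual patch path:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/clock"
)

// recordStart is a hypothetical helper mirroring what the controller does when
// it patches the backup to InProgress: stamp a metav1.Time from an injected
// clock.Clock instead of calling time.Now() directly.
func recordStart(c clock.Clock) metav1.Time {
	return metav1.Time{Time: c.Now()}
}

func main() {
	// Production wiring uses &clock.RealClock{}; a fake clock pins the value
	// so a test can assert the exact StartTimestamp written to status.
	fake := clock.NewFakeClock(time.Date(2019, 7, 1, 0, 0, 0, 0, time.UTC))
	fmt.Println(recordStart(fake))               // deterministic timestamp
	fmt.Println(recordStart(&clock.RealClock{})) // wall-clock timestamp
}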
@@ -31,6 +31,7 @@ import (
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/clock"
     corev1informers "k8s.io/client-go/informers/core/v1"
     corev1listers "k8s.io/client-go/listers/core/v1"
     "k8s.io/client-go/tools/cache"
@@ -59,6 +60,7 @@ type podVolumeRestoreController struct {

     processRestoreFunc func(*velerov1api.PodVolumeRestore) error
     fileSystem         filesystem.Interface
+    clock              clock.Clock
 }

 // NewPodVolumeRestoreController creates a new pod volume restore controller.
@@ -83,6 +85,7 @@ func NewPodVolumeRestoreController(
         nodeName: nodeName,

         fileSystem: filesystem.NewFileSystem(),
+        clock:      &clock.RealClock{},
     }

     c.syncHandler = c.processQueueItem
@@ -258,9 +261,12 @@ func (c *podVolumeRestoreController) processRestore(req *velerov1api.PodVolumeRe
     var err error

     // update status to InProgress
-    req, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(velerov1api.PodVolumeRestorePhaseInProgress))
+    req, err = c.patchPodVolumeRestore(req, func(r *velerov1api.PodVolumeRestore) {
+        r.Status.Phase = velerov1api.PodVolumeRestorePhaseInProgress
+        r.Status.StartTimestamp.Time = c.clock.Now()
+    })
     if err != nil {
-        log.WithError(err).Error("Error setting phase to InProgress")
+        log.WithError(err).Error("Error setting PodVolumeRestore startTimestamp and phase to InProgress")
         return errors.WithStack(err)
     }

@@ -291,8 +297,11 @@
     }

     // update status to Completed
-    if _, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(velerov1api.PodVolumeRestorePhaseCompleted)); err != nil {
-        log.WithError(err).Error("Error setting phase to Completed")
+    if _, err = c.patchPodVolumeRestore(req, func(r *velerov1api.PodVolumeRestore) {
+        r.Status.Phase = velerov1api.PodVolumeRestorePhaseCompleted
+        r.Status.CompletionTimestamp.Time = c.clock.Now()
+    }); err != nil {
+        log.WithError(err).Error("Error setting PodVolumeRestore completionTimestamp and phase to Completed")
         return err
     }

@@ -397,15 +406,10 @@ func (c *podVolumeRestoreController) failRestore(req *velerov1api.PodVolumeResto
     if _, err := c.patchPodVolumeRestore(req, func(pvr *velerov1api.PodVolumeRestore) {
         pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed
         pvr.Status.Message = msg
+        pvr.Status.CompletionTimestamp.Time = c.clock.Now()
     }); err != nil {
-        log.WithError(err).Error("Error setting phase to Failed")
+        log.WithError(err).Error("Error setting PodVolumeRestore phase to Failed")
         return err
     }
     return nil
 }
-
-func updatePodVolumeRestorePhaseFunc(phase velerov1api.PodVolumeRestorePhase) func(r *velerov1api.PodVolumeRestore) {
-    return func(r *velerov1api.PodVolumeRestore) {
-        r.Status.Phase = phase
-    }
-}
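Similarly for the restore controller, a test could advance a fake clock between the InProgress and Completed patches so StartTimestamp and CompletionTimestamp come out distinct but still deterministic. A minimal sketch, independent of the controller wiring:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	// Start from a fixed instant, record the "start", advance the fake clock,
	// then record the "completion" the way the restore controller would via
	// c.clock.Now() in its two patch closures.
	fake := clock.NewFakeClock(time.Date(2019, 7, 1, 0, 0, 0, 0, time.UTC))
	start := fake.Now()
	fake.Step(42 * time.Second) // pretend the restic restore took 42 seconds
	completion := fake.Now()
	fmt.Println(completion.Sub(start)) // 42s, deterministically
}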