/*
Copyright The Velero Contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
import (
"context"
2018-02-28 01:35:35 +00:00
"fmt"
2020-07-22 19:07:52 +00:00
"time"
2018-02-28 01:35:35 +00:00
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
2022-01-15 00:24:59 +00:00
corev1 "k8s.io/api/core/v1"
2018-02-28 01:35:35 +00:00
apierrors "k8s.io/apimachinery/pkg/api/errors"
2019-10-14 16:20:28 +00:00
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2022-08-30 07:52:11 +00:00
"k8s.io/apimachinery/pkg/labels"
2022-01-15 00:24:59 +00:00
"k8s.io/apimachinery/pkg/runtime"
2019-06-28 15:58:02 +00:00
"k8s.io/apimachinery/pkg/util/clock"
2022-01-15 00:24:59 +00:00
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
2018-02-28 01:35:35 +00:00
Use Credential from BSL for restic commands (#3489)
* Use Credential from BSL for restic commands
This change introduces support for restic to make use of per-BSL
credentials. It makes use of the `credentials.FileStore` introduced in
PR #3442 to write the BSL credentials to disk. To support per-BSL
credentials for restic, the environment for the restic commands needs to
be modified for each provider to ensure that the credentials are
provided via the correct provider specific environment variables.
This change introduces a new function `restic.CmdEnv` to check the BSL
provider and create the correct mapping of environment variables for
each provider.
Previously, AWS and GCP could rely on the environment variables in the
Velero deployments to obtain the credentials file, but now these
environment variables need to be set with the path to the serialized
credentials file if a credential is set on the BSL.
For Azure, the credentials file in the environment was loaded and parsed
to set the environment variables for restic. Now, we check if the BSL
has a credential, and if it does, load and parse that file instead.
This change also introduces a few other small improvements. Now that we
are fetching the BSL to check for the `Credential` field, we can use the
BSL directly to get the `CACert` which means that we can remove the
`GetCACert` function. Also, now that we have a way to serialize secrets
to disk, we can use the `credentials.FileStore` to get a temp file for
the restic repo password and remove the `restic.TempCredentialsFile`
function.
Signed-off-by: Bridget McErlean <bmcerlean@vmware.com>
* Add documentation for per-BSL credentials
Signed-off-by: Bridget McErlean <bmcerlean@vmware.com>
* Address review feedback
Signed-off-by: Bridget McErlean <bmcerlean@vmware.com>
* Address review comments
Signed-off-by: Bridget McErlean <bmcerlean@vmware.com>
2021-03-11 18:10:51 +00:00
"github.com/vmware-tanzu/velero/internal/credentials"
2019-09-30 21:26:56 +00:00
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
2022-08-30 07:52:11 +00:00
"github.com/vmware-tanzu/velero/pkg/label"
2020-07-22 19:07:52 +00:00
"github.com/vmware-tanzu/velero/pkg/metrics"
2022-08-04 07:20:02 +00:00
repokey "github.com/vmware-tanzu/velero/pkg/repository/keys"
2022-08-30 07:52:11 +00:00
"github.com/vmware-tanzu/velero/pkg/repository/util"
2022-08-19 06:45:28 +00:00
"github.com/vmware-tanzu/velero/pkg/uploader"
2022-08-15 10:34:08 +00:00
"github.com/vmware-tanzu/velero/pkg/uploader/provider"
2019-09-30 21:26:56 +00:00
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
2018-02-28 01:35:35 +00:00
)

// NewUploaderProviderFunc is exposed as a package-level variable so that unit
// tests can mock the uploader provider constructor.
var NewUploaderProviderFunc = provider.NewUploaderProvider

// PodVolumeBackupReconciler reconciles a PodVolumeBackup object
type PodVolumeBackupReconciler struct {
	Scheme           *runtime.Scheme
	Client           client.Client
	Clock            clock.Clock
	Metrics          *metrics.ServerMetrics
	CredentialGetter *credentials.CredentialGetter
	NodeName         string
	FileSystem       filesystem.Interface
	Log              logrus.FieldLogger
}
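
// BackupProgressUpdater reports uploader progress by patching the status of the
// associated PodVolumeBackup as the backup proceeds.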
type BackupProgressUpdater struct {
	PodVolumeBackup *velerov1api.PodVolumeBackup
	Log             logrus.FieldLogger
	Ctx             context.Context
	Cli             client.Client
}

// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups/status,verbs=get;update;patch
func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.Log.WithFields(logrus.Fields{
		"controller":      "podvolumebackup",
		"podvolumebackup": req.NamespacedName,
	})

	var pvb velerov1api.PodVolumeBackup
	if err := r.Client.Get(ctx, req.NamespacedName, &pvb); err != nil {
		if apierrors.IsNotFound(err) {
			log.Debug("Unable to find PodVolumeBackup")
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, errors.Wrap(err, "getting PodVolumeBackup")
	}

	if len(pvb.OwnerReferences) == 1 {
		log = log.WithField(
			"backup",
			fmt.Sprintf("%s/%s", req.Namespace, pvb.OwnerReferences[0].Name),
		)
	}

	log.Info("PodVolumeBackup starting")

	// Only process items for this node.
	if pvb.Spec.Node != r.NodeName {
		return ctrl.Result{}, nil
	}

	switch pvb.Status.Phase {
	case "", velerov1api.PodVolumeBackupPhaseNew:
		// Only process new items.
	default:
		log.Debug("PodVolumeBackup is not new, not processing")
		return ctrl.Result{}, nil
	}

	r.Metrics.RegisterPodVolumeBackupEnqueue(r.NodeName)

	// Update status to InProgress.
	original := pvb.DeepCopy()
	pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseInProgress
	pvb.Status.StartTimestamp = &metav1.Time{Time: r.Clock.Now()}
	if err := r.Client.Patch(ctx, &pvb, client.MergeFrom(original)); err != nil {
		log.WithError(err).Error("error updating PodVolumeBackup status")
		return ctrl.Result{}, err
	}
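
	// Fetch the pod that mounts the volume so the volume's directory on the host can be resolved.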
	var pod corev1.Pod
	podNamespacedName := client.ObjectKey{
		Namespace: pvb.Spec.Pod.Namespace,
		Name:      pvb.Spec.Pod.Name,
	}
	if err := r.Client.Get(ctx, podNamespacedName, &pod); err != nil {
		return r.updateStatusToFailed(ctx, &pvb, err, fmt.Sprintf("getting pod %s/%s", pvb.Spec.Pod.Namespace, pvb.Spec.Pod.Name), log)
	}

	volDir, err := kube.GetVolumeDirectory(ctx, log, &pod, pvb.Spec.Volume, r.Client)
	if err != nil {
		return r.updateStatusToFailed(ctx, &pvb, err, "getting volume directory name", log)
	}
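
	// Resolve the unique path for the volume on the host by globbing under the /host_pods
	// mount; the wildcard matches the volume plugin directory.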
	pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(pvb.Spec.Pod.UID), volDir)
	log.WithField("pathGlob", pathGlob).Debug("Looking for path matching glob")

	path, err := kube.SinglePathMatch(pathGlob, r.FileSystem, log)
	if err != nil {
		return r.updateStatusToFailed(ctx, &pvb, err, "identifying unique volume path on host", log)
	}
	log.WithField("path", path).Debugf("Found path matching glob")
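
	// Fetch the BackupStorageLocation referenced by this PodVolumeBackup; it carries the
	// object-storage configuration (and any per-location credential) used by the uploader.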
	backupLocation := &velerov1api.BackupStorageLocation{}
	if err := r.Client.Get(context.Background(), client.ObjectKey{
		Namespace: pvb.Namespace,
		Name:      pvb.Spec.BackupStorageLocation,
	}, backupLocation); err != nil {
		return ctrl.Result{}, errors.Wrap(err, "error getting backup storage location")
	}
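
	// Find the BackupRepository associated with this storage location by label selector.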
	selector := labels.SelectorFromSet(
		map[string]string{
			//TODO
			//velerov1api.VolumeNamespaceLabel: label.GetValidName(volumeNamespace),
			velerov1api.StorageLocationLabel: label.GetValidName(pvb.Spec.BackupStorageLocation),
			//velerov1api.RepositoryTypeLabel: label.GetValidName(repositoryType),
		},
	)
	backupRepo, err := util.GetBackupRepositoryByLabel(ctx, r.Client, pvb.Namespace, selector)
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err, "error getting backup repository")
	}
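
	// Construct the uploader provider for the PVB's uploader type (e.g. restic or kopia),
	// wiring in the storage location, backup repository, and credential getter.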
	var uploaderProv provider.Provider
	uploaderProv, err = NewUploaderProviderFunc(ctx, r.Client, pvb.Spec.UploaderType, pvb.Spec.RepoIdentifier,
		backupLocation, &backupRepo, r.CredentialGetter, repokey.RepoKeySelector(), log)
	if err != nil {
		return r.updateStatusToFailed(ctx, &pvb, err, "error creating uploader", log)
	}

	// If this PVB is for a PVC, look for the most recent completed PodVolumeBackup for
	// that PVC and use its snapshot ID as the parent snapshot for this backup. Without
	// this, if the pod using the PVC (and therefore the directory path under /host_pods/)
	// has changed since the PVC's last backup, the uploader would not be able to identify
	// a suitable parent snapshot and would have to do a full rescan of the contents of
	// the PVC.
	var parentSnapshotID string
	if pvcUID, ok := pvb.Labels[velerov1api.PVCUIDLabel]; ok {
		parentSnapshotID = r.getParentSnapshot(ctx, log, pvb.Namespace, pvcUID, pvb.Spec.BackupStorageLocation)
		if parentSnapshotID == "" {
			log.Info("No parent snapshot found for PVC, this backup will not be based on a parent snapshot")
		} else {
			log.WithField("parentSnapshotID", parentSnapshotID).Info("Basing this backup on parent snapshot")
		}
	}

	defer func() {
		if err := uploaderProv.Close(ctx); err != nil {
			log.Errorf("failed to close uploader provider with error %v", err)
		}
	}()
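
	// Run the backup of the volume path, reporting progress to the PVB's status via the
	// progress updater; a parent snapshot, if found, lets the uploader avoid a full rescan.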
	snapshotID, emptySnapshot, err := uploaderProv.RunBackup(ctx, path, pvb.Spec.Tags, parentSnapshotID, r.NewBackupProgressUpdater(&pvb, log, ctx))
	if err != nil {
		return r.updateStatusToFailed(ctx, &pvb, err, fmt.Sprintf("running backup, stderr=%v", err), log)
	}

	// Update status to Completed with path & snapshot ID.
	original = pvb.DeepCopy()
	pvb.Status.Path = path
	pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseCompleted
	pvb.Status.SnapshotID = snapshotID
	pvb.Status.CompletionTimestamp = &metav1.Time{Time: r.Clock.Now()}
	if emptySnapshot {
		pvb.Status.Message = "volume was empty so no snapshot was taken"
	}
	if err = r.Client.Patch(ctx, &pvb, client.MergeFrom(original)); err != nil {
		log.WithError(err).Error("error updating PodVolumeBackup status")
		return ctrl.Result{}, err
	}
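
	// Record how long the backup took and mark the PVB dequeued in the node's metrics.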
	latencyDuration := pvb.Status.CompletionTimestamp.Time.Sub(pvb.Status.StartTimestamp.Time)
	latencySeconds := float64(latencyDuration / time.Second)
	backupName := fmt.Sprintf("%s/%s", req.Namespace, pvb.OwnerReferences[0].Name)
	generateOpName := fmt.Sprintf("%s-%s-%s-%s-%s-backup", pvb.Name, backupRepo.Name, pvb.Spec.BackupStorageLocation, pvb.Namespace, pvb.Spec.UploaderType)
	r.Metrics.ObserveResticOpLatency(r.NodeName, req.Name, generateOpName, backupName, latencySeconds)
	r.Metrics.RegisterResticOpLatencyGauge(r.NodeName, req.Name, generateOpName, backupName, latencySeconds)
	r.Metrics.RegisterPodVolumeBackupDequeue(r.NodeName)

	log.Info("PodVolumeBackup completed")
	return ctrl.Result{}, nil
}

// SetupWithManager registers the PVB controller.
func (r *PodVolumeBackupReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&velerov1api.PodVolumeBackup{}).
		Complete(r)
}

// getParentSnapshot finds the most recent completed PodVolumeBackup for the
// specified PVC and returns its Restic snapshot ID. Any errors encountered are
// logged but not returned since they do not prevent a backup from proceeding.
func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log logrus.FieldLogger, pvbNamespace, pvcUID, bsl string) string {
	log = log.WithField("pvcUID", pvcUID)
	log.Infof("Looking for most recent completed PodVolumeBackup for this PVC")
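
	// List all PodVolumeBackups in the namespace that carry this PVC's UID label.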
	listOpts := &client.ListOptions{
		Namespace: pvbNamespace,
	}
	matchingLabels := client.MatchingLabels(map[string]string{velerov1api.PVCUIDLabel: pvcUID})
	matchingLabels.ApplyToList(listOpts)

	var pvbList velerov1api.PodVolumeBackupList
	if err := r.Client.List(ctx, &pvbList, listOpts); err != nil {
		log.WithError(errors.WithStack(err)).Error("getting list of podvolumebackups for this PVC")
	}

	// Go through all the podvolumebackups for the PVC and look for the most
	// recent completed one to use as the parent.
	var mostRecentPVB velerov1api.PodVolumeBackup
	for _, pvb := range pvbList.Items {
		if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseCompleted {
			continue
		}

		if bsl != pvb.Spec.BackupStorageLocation {
			// Check that the backup storage location matches the one this backup is using,
			// in order to support backups to multiple backup locations. Without this check,
			// a backup of the volume to a second location could fail, because the parent
			// snapshot ID found here is only valid for the first backup location; likewise,
			// a backup to the second location must not reuse the first location's parent
			// snapshot ID.
			continue
		}

		if mostRecentPVB.Status == (velerov1api.PodVolumeBackupStatus{}) || pvb.Status.StartTimestamp.After(mostRecentPVB.Status.StartTimestamp.Time) {
			mostRecentPVB = pvb
		}
	}

	if mostRecentPVB.Status == (velerov1api.PodVolumeBackupStatus{}) {
		log.Info("No completed PodVolumeBackup found for PVC")
		return ""
	}

	log.WithFields(map[string]interface{}{
		"parentPodVolumeBackup": mostRecentPVB.Name,
		"parentSnapshotID":      mostRecentPVB.Status.SnapshotID,
	}).Info("Found most recent completed PodVolumeBackup for PVC")

	return mostRecentPVB.Status.SnapshotID
}
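
// updateStatusToFailed patches the PodVolumeBackup to the Failed phase, recording the
// wrapped error message and the completion timestamp.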
func (r *PodVolumeBackupReconciler) updateStatusToFailed(ctx context.Context, pvb *velerov1api.PodVolumeBackup, err error, msg string, log logrus.FieldLogger) (ctrl.Result, error) {
	original := pvb.DeepCopy()
	pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed
	pvb.Status.Message = errors.WithMessage(err, msg).Error()
	pvb.Status.CompletionTimestamp = &metav1.Time{Time: r.Clock.Now()}

	if err = r.Client.Patch(ctx, pvb, client.MergeFrom(original)); err != nil {
		log.WithError(err).Error("error updating PodVolumeBackup status")
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
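
// NewBackupProgressUpdater returns a BackupProgressUpdater bound to the given
// PodVolumeBackup, logger, context, and the reconciler's client.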
func (r *PodVolumeBackupReconciler) NewBackupProgressUpdater(pvb *velerov1api.PodVolumeBackup, log logrus.FieldLogger, ctx context.Context) *BackupProgressUpdater {
	return &BackupProgressUpdater{pvb, log, ctx, r.Client}
}

// UpdateProgress implements the uploader's ProgressUpdater interface; it updates the
// PodVolumeBackup's progress status with the total and completed byte counts.
func (b *BackupProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) {
	original := b.PodVolumeBackup.DeepCopy()
	b.PodVolumeBackup.Status.Progress = velerov1api.PodVolumeOperationProgress{TotalBytes: p.TotalBytes, BytesDone: p.BytesDone}
	if b.Cli == nil {
		b.Log.Errorf("failed to update backup pod %s volume %s progress: uninitialized client", b.PodVolumeBackup.Spec.Pod.Name, b.PodVolumeBackup.Spec.Volume)
		return
	}
	if err := b.Cli.Patch(b.Ctx, b.PodVolumeBackup, client.MergeFrom(original)); err != nil {
		b.Log.Errorf("failed to update backup pod %s volume %s progress: %v", b.PodVolumeBackup.Spec.Pod.Name, b.PodVolumeBackup.Spec.Volume, err)
	}
}