/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"bytes"
2018-05-13 13:28:09 +00:00
"compress/gzip"
2017-12-11 22:10:52 +00:00
"encoding/json"
2017-08-02 17:27:17 +00:00
"fmt"
2017-12-19 17:16:39 +00:00
"io"
2017-08-02 17:27:17 +00:00
"io/ioutil"
"os"
"time"
2018-05-14 21:34:24 +00:00
jsonpatch "github.com/evanphx/json-patch"
2017-09-14 21:27:31 +00:00
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
2017-08-02 17:27:17 +00:00
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2017-12-11 22:10:52 +00:00
"k8s.io/apimachinery/pkg/types"
2017-08-02 17:27:17 +00:00
"k8s.io/apimachinery/pkg/util/clock"
2017-12-19 17:16:39 +00:00
kerrors "k8s.io/apimachinery/pkg/util/errors"
2017-08-02 17:27:17 +00:00
"k8s.io/client-go/tools/cache"
api "github.com/heptio/ark/pkg/apis/ark/v1"
2018-09-26 22:18:45 +00:00
pkgbackup "github.com/heptio/ark/pkg/backup"
2017-10-25 16:42:03 +00:00
arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
2017-08-02 17:27:17 +00:00
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
2018-06-06 21:35:06 +00:00
"github.com/heptio/ark/pkg/metrics"
2018-08-20 18:47:16 +00:00
"github.com/heptio/ark/pkg/persistence"
2017-11-15 02:35:02 +00:00
"github.com/heptio/ark/pkg/plugin"
2017-08-02 17:27:17 +00:00
"github.com/heptio/ark/pkg/util/collections"
"github.com/heptio/ark/pkg/util/encode"
2017-09-14 21:27:31 +00:00
kubeutil "github.com/heptio/ark/pkg/util/kube"
2018-05-13 13:28:09 +00:00
"github.com/heptio/ark/pkg/util/logging"
2018-10-22 16:37:30 +00:00
"github.com/heptio/ark/pkg/volume"
2017-08-02 17:27:17 +00:00
)
const backupVersion = 1
// backupController processes Backup custom resources: it watches for new
// backups, validates them, runs the backup via the backupper, and persists
// the results to a backup storage location.
type backupController struct {
	*genericController

	backupper              pkgbackup.Backupper
	lister                 listers.BackupLister
	client                 arkv1client.BackupsGetter
	clock                  clock.Clock
	backupLogLevel         logrus.Level
	newPluginManager       func(logrus.FieldLogger) plugin.Manager
	backupTracker          BackupTracker
	backupLocationLister   listers.BackupStorageLocationLister
	defaultBackupLocation  string // used when a backup doesn't specify .spec.storageLocation
	snapshotLocationLister listers.VolumeSnapshotLocationLister

	// defaultSnapshotLocations maps provider name -> that provider's default
	// VolumeSnapshotLocation, applied when a backup doesn't name one.
	defaultSnapshotLocations map[string]*api.VolumeSnapshotLocation

	metrics *metrics.ServerMetrics
	// newBackupStore is a factory for BackupStores; NewBackupController sets
	// it to persistence.NewObjectBackupStore (kept as a field, presumably so
	// tests can substitute a fake — confirm with test code).
	newBackupStore func(*api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error)
}
func NewBackupController (
backupInformer informers . BackupInformer ,
client arkv1client . BackupsGetter ,
2018-09-26 22:18:45 +00:00
backupper pkgbackup . Backupper ,
2017-12-11 22:10:52 +00:00
logger logrus . FieldLogger ,
2018-08-29 19:52:09 +00:00
backupLogLevel logrus . Level ,
2018-08-25 19:53:56 +00:00
newPluginManager func ( logrus . FieldLogger ) plugin . Manager ,
2018-04-06 17:08:39 +00:00
backupTracker BackupTracker ,
2018-08-16 22:41:59 +00:00
backupLocationInformer informers . BackupStorageLocationInformer ,
defaultBackupLocation string ,
2018-09-25 14:51:28 +00:00
volumeSnapshotLocationInformer informers . VolumeSnapshotLocationInformer ,
2018-09-26 22:18:45 +00:00
defaultSnapshotLocations map [ string ] * api . VolumeSnapshotLocation ,
2018-06-06 21:35:06 +00:00
metrics * metrics . ServerMetrics ,
2017-08-02 17:27:17 +00:00
) Interface {
c := & backupController {
2018-09-25 14:51:28 +00:00
genericController : newGenericController ( "backup" , logger ) ,
backupper : backupper ,
lister : backupInformer . Lister ( ) ,
client : client ,
clock : & clock . RealClock { } ,
backupLogLevel : backupLogLevel ,
newPluginManager : newPluginManager ,
backupTracker : backupTracker ,
backupLocationLister : backupLocationInformer . Lister ( ) ,
defaultBackupLocation : defaultBackupLocation ,
snapshotLocationLister : volumeSnapshotLocationInformer . Lister ( ) ,
defaultSnapshotLocations : defaultSnapshotLocations ,
metrics : metrics ,
2018-08-20 23:29:54 +00:00
newBackupStore : persistence . NewObjectBackupStore ,
2017-08-02 17:27:17 +00:00
}
c . syncHandler = c . processBackup
2018-08-29 19:52:09 +00:00
c . cacheSyncWaiters = append ( c . cacheSyncWaiters ,
backupInformer . Informer ( ) . HasSynced ,
backupLocationInformer . Informer ( ) . HasSynced ,
2018-09-25 14:51:28 +00:00
volumeSnapshotLocationInformer . Informer ( ) . HasSynced ,
2018-08-29 19:52:09 +00:00
)
2017-08-02 17:27:17 +00:00
backupInformer . Informer ( ) . AddEventHandler (
cache . ResourceEventHandlerFuncs {
AddFunc : func ( obj interface { } ) {
backup := obj . ( * api . Backup )
switch backup . Status . Phase {
case "" , api . BackupPhaseNew :
// only process new backups
default :
2017-09-14 21:27:31 +00:00
c . logger . WithFields ( logrus . Fields {
"backup" : kubeutil . NamespaceAndName ( backup ) ,
"phase" : backup . Status . Phase ,
} ) . Debug ( "Backup is not new, skipping" )
2017-08-02 17:27:17 +00:00
return
}
key , err := cache . MetaNamespaceKeyFunc ( backup )
if err != nil {
2017-09-14 21:27:31 +00:00
c . logger . WithError ( err ) . WithField ( "backup" , backup ) . Error ( "Error creating queue key, item not added to queue" )
2017-08-02 17:27:17 +00:00
return
}
c . queue . Add ( key )
} ,
} ,
)
return c
}
2018-08-29 19:52:09 +00:00
// processBackup is the controller's sync handler. Given a queue key, it
// fetches the Backup, validates and prepares a backup request, marks the
// backup InProgress (or FailedValidation), runs the backup, and patches the
// final status back to the API server. Returning nil indicates the key has
// been fully processed and should not be retried.
func (c *backupController) processBackup(key string) error {
	log := c.logger.WithField("key", key)

	log.Debug("Running processBackup")
	ns, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return errors.Wrap(err, "error splitting queue key")
	}

	log.Debug("Getting backup")
	original, err := c.lister.Backups(ns).Get(name)
	if err != nil {
		return errors.Wrap(err, "error getting backup")
	}

	// Double-check we have the correct phase. In the unlikely event that multiple controller
	// instances are running, it's possible for controller A to succeed in changing the phase to
	// InProgress, while controller B's attempt to patch the phase fails. When controller B
	// reprocesses the same backup, it will either show up as New (informer hasn't seen the update
	// yet) or as InProgress. In the former case, the patch attempt will fail again, until the
	// informer sees the update. In the latter case, after the informer has seen the update to
	// InProgress, we still need this check so we can return nil to indicate we've finished processing
	// this key (even though it was a no-op).
	switch original.Status.Phase {
	case "", api.BackupPhaseNew:
		// only process new backups
	default:
		return nil
	}

	// prepareBackupRequest deep-copies the backup, applies defaults, and
	// collects any validation errors into request.Status.ValidationErrors.
	log.Debug("Preparing backup request")
	request := c.prepareBackupRequest(original)

	if len(request.Status.ValidationErrors) > 0 {
		request.Status.Phase = api.BackupPhaseFailedValidation
	} else {
		request.Status.Phase = api.BackupPhaseInProgress
	}

	// update status
	updatedBackup, err := patchBackup(original, request.Backup, c.client)
	if err != nil {
		return errors.Wrapf(err, "error updating Backup status to %s", request.Status.Phase)
	}
	// store ref to just-updated item for creating patch
	original = updatedBackup
	request.Backup = updatedBackup.DeepCopy()

	// A backup that failed validation is done — its phase was already
	// patched above, so there's nothing more to run.
	if request.Status.Phase == api.BackupPhaseFailedValidation {
		return nil
	}

	// Register this backup as in-progress for the duration of the run; the
	// deferred Delete removes it no matter how runBackup exits.
	c.backupTracker.Add(request.Namespace, request.Name)
	defer c.backupTracker.Delete(request.Namespace, request.Name)

	log.Debug("Running backup")
	// execution & upload of backup
	backupScheduleName := request.GetLabels()["ark-schedule"]
	c.metrics.RegisterBackupAttempt(backupScheduleName)

	if err := c.runBackup(request); err != nil {
		// Note: a failed backup is logged and recorded in metrics, but nil is
		// still returned below so the key isn't requeued.
		log.WithError(err).Error("backup failed")
		request.Status.Phase = api.BackupPhaseFailed
		c.metrics.RegisterBackupFailed(backupScheduleName)
	} else {
		c.metrics.RegisterBackupSuccess(backupScheduleName)
	}

	log.Debug("Updating backup's final status")
	if _, err := patchBackup(original, request.Backup, c.client); err != nil {
		log.WithError(err).Error("error updating backup's final status")
	}

	return nil
}
2017-12-11 22:10:52 +00:00
func patchBackup ( original , updated * api . Backup , client arkv1client . BackupsGetter ) ( * api . Backup , error ) {
origBytes , err := json . Marshal ( original )
if err != nil {
return nil , errors . Wrap ( err , "error marshalling original backup" )
}
updatedBytes , err := json . Marshal ( updated )
if err != nil {
return nil , errors . Wrap ( err , "error marshalling updated backup" )
}
2018-05-14 21:34:24 +00:00
patchBytes , err := jsonpatch . CreateMergePatch ( origBytes , updatedBytes )
2017-12-11 22:10:52 +00:00
if err != nil {
2018-05-14 21:34:24 +00:00
return nil , errors . Wrap ( err , "error creating json merge patch for backup" )
2017-12-11 22:10:52 +00:00
}
2017-12-22 14:43:44 +00:00
res , err := client . Backups ( original . Namespace ) . Patch ( original . Name , types . MergePatchType , patchBytes )
2017-12-11 22:10:52 +00:00
if err != nil {
return nil , errors . Wrap ( err , "error patching backup" )
}
return res , nil
}
2018-09-26 22:18:45 +00:00
func ( c * backupController ) prepareBackupRequest ( backup * api . Backup ) * pkgbackup . Request {
request := & pkgbackup . Request {
Backup : backup . DeepCopy ( ) , // don't modify items in the cache
2017-08-02 17:27:17 +00:00
}
2018-09-26 22:18:45 +00:00
// set backup version
request . Status . Version = backupVersion
// calculate expiration
if request . Spec . TTL . Duration > 0 {
request . Status . Expiration = metav1 . NewTime ( c . clock . Now ( ) . Add ( request . Spec . TTL . Duration ) )
2017-08-02 17:27:17 +00:00
}
2018-09-26 22:18:45 +00:00
// default storage location if not specified
if request . Spec . StorageLocation == "" {
request . Spec . StorageLocation = c . defaultBackupLocation
2018-08-16 22:41:59 +00:00
}
2018-08-21 23:52:49 +00:00
// add the storage location as a label for easy filtering later.
2018-09-26 22:18:45 +00:00
if request . Labels == nil {
request . Labels = make ( map [ string ] string )
2018-08-21 23:52:49 +00:00
}
2018-09-26 22:18:45 +00:00
request . Labels [ api . StorageLocationLabel ] = request . Spec . StorageLocation
2018-08-21 23:52:49 +00:00
2018-09-26 22:18:45 +00:00
// validate the included/excluded resources and namespaces
for _ , err := range collections . ValidateIncludesExcludes ( request . Spec . IncludedResources , request . Spec . ExcludedResources ) {
request . Status . ValidationErrors = append ( request . Status . ValidationErrors , fmt . Sprintf ( "Invalid included/excluded resource lists: %v" , err ) )
2018-08-16 22:41:59 +00:00
}
2018-09-26 22:18:45 +00:00
for _ , err := range collections . ValidateIncludesExcludes ( request . Spec . IncludedNamespaces , request . Spec . ExcludedNamespaces ) {
request . Status . ValidationErrors = append ( request . Status . ValidationErrors , fmt . Sprintf ( "Invalid included/excluded namespace lists: %v" , err ) )
}
// validate the storage location, and store the BackupStorageLocation API obj on the request
if storageLocation , err := c . backupLocationLister . BackupStorageLocations ( request . Namespace ) . Get ( request . Spec . StorageLocation ) ; err != nil {
request . Status . ValidationErrors = append ( request . Status . ValidationErrors , fmt . Sprintf ( "Error getting backup storage location: %v" , err ) )
} else {
request . StorageLocation = storageLocation
}
// validate and get the backup's VolumeSnapshotLocations, and store the
// VolumeSnapshotLocation API objs on the request
if locs , errs := c . validateAndGetSnapshotLocations ( request . Backup ) ; len ( errs ) > 0 {
request . Status . ValidationErrors = append ( request . Status . ValidationErrors , errs ... )
} else {
request . Spec . VolumeSnapshotLocations = nil
for _ , loc := range locs {
request . Spec . VolumeSnapshotLocations = append ( request . Spec . VolumeSnapshotLocations , loc . Name )
request . SnapshotLocations = append ( request . SnapshotLocations , loc )
}
}
return request
2017-08-02 17:27:17 +00:00
}
2018-09-26 22:18:45 +00:00
// validateAndGetSnapshotLocations gets a collection of VolumeSnapshotLocation objects that
// this backup will use (returned as a map of provider name -> VSL), and ensures:
// - each location name in .spec.volumeSnapshotLocations exists as a location
// - exactly 1 location per provider
// - a given provider's default location name is added to .spec.volumeSnapshotLocations if one
// is not explicitly specified for the provider
func ( c * backupController ) validateAndGetSnapshotLocations ( backup * api . Backup ) ( map [ string ] * api . VolumeSnapshotLocation , [ ] string ) {
errors := [ ] string { }
providerLocations := make ( map [ string ] * api . VolumeSnapshotLocation )
for _ , locationName := range backup . Spec . VolumeSnapshotLocations {
2018-09-25 14:51:28 +00:00
// validate each locationName exists as a VolumeSnapshotLocation
2018-09-26 22:18:45 +00:00
location , err := c . snapshotLocationLister . VolumeSnapshotLocations ( backup . Namespace ) . Get ( locationName )
2018-09-25 14:51:28 +00:00
if err != nil {
errors = append ( errors , fmt . Sprintf ( "error getting volume snapshot location named %s: %v" , locationName , err ) )
continue
}
2018-09-26 22:18:45 +00:00
// ensure we end up with exactly 1 location *per provider*
if providerLocation , ok := providerLocations [ location . Spec . Provider ] ; ok {
2018-09-25 14:51:28 +00:00
// if > 1 location name per provider as in ["aws-us-east-1" | "aws-us-west-1"] (same provider, multiple names)
2018-09-26 22:18:45 +00:00
if providerLocation . Name != locationName {
errors = append ( errors , fmt . Sprintf ( "more than one VolumeSnapshotLocation name specified for provider %s: %s; unexpected name was %s" , location . Spec . Provider , locationName , providerLocation . Name ) )
2018-09-25 14:51:28 +00:00
continue
}
} else {
// keep track of all valid existing locations, per provider
2018-09-26 22:18:45 +00:00
providerLocations [ location . Spec . Provider ] = location
2018-09-25 14:51:28 +00:00
}
}
if len ( errors ) > 0 {
2018-09-26 22:18:45 +00:00
return nil , errors
2018-09-25 14:51:28 +00:00
}
2018-09-26 22:18:45 +00:00
for provider , defaultLocation := range c . defaultSnapshotLocations {
2018-09-25 14:51:28 +00:00
// if a location name for a given provider does not already exist, add the provider's default
2018-09-26 22:18:45 +00:00
if _ , ok := providerLocations [ provider ] ; ! ok {
providerLocations [ provider ] = defaultLocation
2018-09-25 14:51:28 +00:00
}
}
2018-09-26 22:18:45 +00:00
return providerLocations , nil
2018-09-25 14:51:28 +00:00
}
2018-09-26 22:18:45 +00:00
// runBackup executes the backup and persists its artifacts (metadata JSON,
// contents tarball, gzipped log, and volume snapshot list) to the backup's
// storage location. It mutates backup.Status in place (phase, timestamps,
// snapshot counts); the caller patches the final status back to the API
// server. The returned error aggregates the backup error (if any) with any
// persistence/metrics errors.
func (c *backupController) runBackup(backup *pkgbackup.Request) error {
	log := c.logger.WithField("backup", kubeutil.NamespaceAndName(backup))
	log.Info("Starting backup")
	backup.Status.StartTimestamp.Time = c.clock.Now()

	// The backup log is captured in a gzipped temp file that is uploaded
	// alongside the backup contents at the end.
	logFile, err := ioutil.TempFile("", "")
	if err != nil {
		return errors.Wrap(err, "error creating temp file for backup log")
	}
	gzippedLogFile := gzip.NewWriter(logFile)
	// Assuming we successfully uploaded the log file, this will have already been closed below. It is safe to call
	// close multiple times. If we get an error closing this, there's not really anything we can do about it.
	defer gzippedLogFile.Close()
	defer closeAndRemoveFile(logFile, c.logger)

	// Log the backup to both a backup log file and to stdout. This will help see what happened if the upload of the
	// backup log failed for whatever reason.
	logger := logging.DefaultLogger(c.backupLogLevel)
	logger.Out = io.MultiWriter(os.Stdout, gzippedLogFile)
	// Note: log is rebound to the tee'd logger from here on; the earlier
	// c.logger-based entries above are not in the uploaded log file.
	log = logger.WithField("backup", kubeutil.NamespaceAndName(backup))

	log.Info("Starting backup")

	backupFile, err := ioutil.TempFile("", "")
	if err != nil {
		return errors.Wrap(err, "error creating temp file for backup")
	}
	defer closeAndRemoveFile(backupFile, log)

	pluginManager := c.newPluginManager(log)
	defer pluginManager.CleanupClients()

	actions, err := pluginManager.GetBackupItemActions()
	if err != nil {
		return err
	}

	backupStore, err := c.newBackupStore(backup.StorageLocation, pluginManager, log)
	if err != nil {
		return err
	}

	var errs []error

	// Do the actual backup
	if err := c.backupper.Backup(log, backup, backupFile, actions, pluginManager); err != nil {
		errs = append(errs, err)
		backup.Status.Phase = api.BackupPhaseFailed
	} else {
		backup.Status.Phase = api.BackupPhaseCompleted
	}

	// Close the gzip writer now so the log data is fully flushed to logFile
	// before it is uploaded below; the deferred Close above is then a
	// harmless repeat (see the comment there).
	if err := gzippedLogFile.Close(); err != nil {
		c.logger.WithError(err).Error("error closing gzippedLogFile")
	}

	// Mark completion timestamp before serializing and uploading.
	// Otherwise, the JSON file in object storage has a CompletionTimestamp of 'null'.
	backup.Status.CompletionTimestamp.Time = c.clock.Now()

	// Tally volume snapshot results for the backup's status.
	backup.Status.VolumeSnapshotsAttempted = len(backup.VolumeSnapshots)
	for _, snap := range backup.VolumeSnapshots {
		if snap.Status.Phase == volume.SnapshotPhaseCompleted {
			backup.Status.VolumeSnapshotsCompleted++
		}
	}

	errs = append(errs, persistBackup(backup, backupFile, logFile, backupStore, c.logger)...)
	errs = append(errs, recordBackupMetrics(backup.Backup, backupFile, c.metrics))

	log.Info("Backup completed")

	return kerrors.NewAggregate(errs)
}
func recordBackupMetrics ( backup * api . Backup , backupFile * os . File , serverMetrics * metrics . ServerMetrics ) error {
backupScheduleName := backup . GetLabels ( ) [ "ark-schedule" ]
2017-08-02 17:27:17 +00:00
2018-06-06 21:35:06 +00:00
var backupSizeBytes int64
2018-10-12 17:55:02 +00:00
var err error
2018-06-06 21:35:06 +00:00
if backupFileStat , err := backupFile . Stat ( ) ; err != nil {
2018-10-12 17:55:02 +00:00
err = errors . Wrap ( err , "error getting file info" )
2018-06-06 21:35:06 +00:00
} else {
backupSizeBytes = backupFileStat . Size ( )
}
2018-10-12 17:55:02 +00:00
serverMetrics . SetBackupTarballSizeBytesGauge ( backupScheduleName , backupSizeBytes )
2018-06-06 21:35:06 +00:00
2018-10-12 17:55:02 +00:00
backupDuration := backup . Status . CompletionTimestamp . Time . Sub ( backup . Status . StartTimestamp . Time )
backupDurationSeconds := float64 ( backupDuration / time . Second )
serverMetrics . RegisterBackupDuration ( backupScheduleName , backupDurationSeconds )
2018-05-13 13:28:09 +00:00
2018-10-12 17:55:02 +00:00
return err
}
func persistBackup ( backup * pkgbackup . Request , backupContents , backupLog * os . File , backupStore persistence . BackupStore , log logrus . FieldLogger ) [ ] error {
errs := [ ] error { }
backupJSON := new ( bytes . Buffer )
if err := encode . EncodeTo ( backup . Backup , "json" , backupJSON ) ; err != nil {
errs = append ( errs , errors . Wrap ( err , "error encoding backup" ) )
2017-08-02 17:27:17 +00:00
}
2018-10-12 17:55:02 +00:00
volumeSnapshots := new ( bytes . Buffer )
gzw := gzip . NewWriter ( volumeSnapshots )
defer gzw . Close ( )
2018-06-06 21:35:06 +00:00
2018-10-12 17:55:02 +00:00
if err := json . NewEncoder ( gzw ) . Encode ( backup . VolumeSnapshots ) ; err != nil {
errs = append ( errs , errors . Wrap ( err , "error encoding list of volume snapshots" ) )
}
2018-10-15 20:22:00 +00:00
if err := gzw . Close ( ) ; err != nil {
errs = append ( errs , errors . Wrap ( err , "error closing gzip writer" ) )
}
2018-06-20 18:08:07 +00:00
2018-10-12 17:55:02 +00:00
if len ( errs ) > 0 {
// Don't upload the JSON files or backup tarball if encoding to json fails.
backupJSON = nil
backupContents = nil
volumeSnapshots = nil
}
2017-12-19 17:16:39 +00:00
2018-10-12 17:55:02 +00:00
if err := backupStore . PutBackup ( backup . Name , backupJSON , backupContents , backupLog , volumeSnapshots ) ; err != nil {
errs = append ( errs , err )
}
return errs
2017-12-19 17:16:39 +00:00
}
func closeAndRemoveFile ( file * os . File , log logrus . FieldLogger ) {
if err := file . Close ( ) ; err != nil {
log . WithError ( err ) . WithField ( "file" , file . Name ( ) ) . Error ( "error closing file" )
2017-08-11 15:05:56 +00:00
}
2017-12-19 17:16:39 +00:00
if err := os . Remove ( file . Name ( ) ) ; err != nil {
log . WithError ( err ) . WithField ( "file" , file . Name ( ) ) . Error ( "error removing file" )
2017-08-02 17:27:17 +00:00
}
}