Merge branch 'main' into update_enabled_runtime_controllers

pull/5241/head
Xun Jiang/Bruce Jiang 2022-08-29 20:06:31 +08:00 committed by GitHub
commit eaf9fab711
55 changed files with 4562 additions and 763 deletions


@ -40,7 +40,9 @@ We have integrated our [solution with Velero][11] to provide our customers with
Kyma [integrates with Velero][41] to effortlessly back up and restore Kyma clusters with all their resources. Velero capabilities allow Kyma users to define and run manual and scheduled backups in order to successfully handle a disaster-recovery scenario.
**[Red Hat][50]**
Red Hat has developed the [Cluster Application Migration Tool][51] which uses [Velero and Restic][52] to drive the migration of applications between OpenShift clusters.
Red Hat has developed 2 operators for the OpenShift platform:
- [Migration Toolkit for Containers][51] (Crane): This operator uses [Velero and Restic][52] to drive the migration of applications between OpenShift clusters.
- [OADP (OpenShift API for Data Protection) Operator][53]: This operator sets up and installs Velero on the OpenShift platform, allowing users to back up and restore applications.
**[Dell EMC][70]**
For Kubernetes environments, [PowerProtect Data Manager][71] leverages the Container Storage Interface (CSI) framework to take snapshots that back up the persistent data, i.e. the data that the application creates, e.g. databases. [Dell EMC leverages Velero][72] to back up the namespace configuration files (also known as namespace metadata) for enterprise-grade data protection.
@ -89,6 +91,7 @@ If you would like to add your logo to a future `Adopters of Velero` section on [
[50]: https://redhat.com
[51]: https://github.com/fusor/mig-operator
[52]: https://github.com/fusor/mig-operator/blob/master/docs/usage/2.md
[53]: https://github.com/openshift/oadp-operator
[60]: https://banzaicloud.com
[61]: https://banzaicloud.com/products/pipeline/


@ -0,0 +1 @@
Refactor GCController with kubebuilder


@ -0,0 +1 @@
check vsc null pointer


@ -0,0 +1 @@
Uploader Implementation: Kopia backup and restore


@ -0,0 +1,2 @@
Add changes for Kopia Integration: Kopia Lib - method implementation
Add changes to write Kopia Repository logs to Velero log


@ -0,0 +1 @@
Remove reference to non-existent doc


@ -93,126 +93,140 @@ Velero by default uses the Unified Repository for all kinds of data movement, it
## The Unified Repository Interface
Below are the definitions of the Unified Repository Interface. All the functions are synchronization functions.
```
///BackupRepoService is used to initialize, open or maintain a backup repository
// BackupRepoService is used to initialize, open or maintain a backup repository
type BackupRepoService interface {
///Create a backup repository or connect to an existing backup repository
///repoOption: option to the backup repository and the underlying backup storage
///createNew: indicates whether to create a new or connect to an existing backup repository
///result: the backup repository specific output that could be used to open the backup repository later
Init(ctx context.Context, repoOption RepoOptions, createNew bool) error
///Open an backup repository that has been created/connected
///repoOption: options to open the backup repository and the underlying storage
Open(ctx context.Context, repoOption RepoOptions) (BackupRepo, error)
///Periodically called to maintain the backup repository to eliminate redundant data and improve performance
///repoOption: options to maintain the backup repository
Maintain(ctx context.Context, repoOption RepoOptions) error
// Init creates a backup repository or connects to an existing backup repository.
// repoOption: option to the backup repository and the underlying backup storage.
// createNew: indicates whether to create a new or connect to an existing backup repository.
Init(ctx context.Context, repoOption RepoOptions, createNew bool) error
// Open opens a backup repository that has been created/connected.
// repoOption: options to open the backup repository and the underlying storage.
Open(ctx context.Context, repoOption RepoOptions) (BackupRepo, error)
// Maintain is periodically called to maintain the backup repository to eliminate redundant data.
// repoOption: options to maintain the backup repository.
Maintain(ctx context.Context, repoOption RepoOptions) error
// DefaultMaintenanceFrequency returns the default frequency of maintenance; callers refer to this
// frequency to maintain the backup repository to get the best maintenance performance
DefaultMaintenanceFrequency() time.Duration
}
///BackupRepo provides the access to the backup repository
// BackupRepo provides the access to the backup repository
type BackupRepo interface {
///Open an existing object for read
///id: the object's unified identifier
OpenObject(ctx context.Context, id ID) (ObjectReader, error)
///Get a manifest data
GetManifest(ctx context.Context, id ID, mani *RepoManifest) error
///Get one or more manifest data that match the given labels
FindManifests(ctx context.Context, filter ManifestFilter) ([]*ManifestEntryMetadata, error)
///Create a new object and return the object's writer interface
///return: A unified identifier of the object on success
NewObjectWriter(ctx context.Context, opt ObjectWriteOptions) ObjectWriter
///Save a manifest object
PutManifest(ctx context.Context, mani RepoManifest) (ID, error)
///Delete a manifest object
DeleteManifest(ctx context.Context, id ID) error
///Flush all the backup repository data
Flush(ctx context.Context) error
///Get the local time of the backup repository. It may be different from the time of the caller
Time() time.Time
///Close the backup repository
Close(ctx context.Context) error
}
// OpenObject opens an existing object for read.
// id: the object's unified identifier.
OpenObject(ctx context.Context, id ID) (ObjectReader, error)
// GetManifest gets a manifest data from the backup repository.
GetManifest(ctx context.Context, id ID, mani *RepoManifest) error
// FindManifests gets one or more manifest data that match the given labels
FindManifests(ctx context.Context, filter ManifestFilter) ([]*ManifestEntryMetadata, error)
// NewObjectWriter creates a new object and returns the object's writer interface.
// return: A unified identifier of the object on success.
NewObjectWriter(ctx context.Context, opt ObjectWriteOptions) ObjectWriter
// PutManifest saves a manifest object into the backup repository.
PutManifest(ctx context.Context, mani RepoManifest) (ID, error)
// DeleteManifest deletes a manifest object from the backup repository.
DeleteManifest(ctx context.Context, id ID) error
// Flush flushes all the backup repository data
Flush(ctx context.Context) error
// Time returns the local time of the backup repository. It may be different from the time of the caller
Time() time.Time
// Close closes the backup repository
Close(ctx context.Context) error
type ObjectReader interface {
io.ReadCloser
io.Seeker
///Length returns the logical size of the object
Length() int64
io.ReadCloser
io.Seeker
// Length returns the logical size of the object
Length() int64
}
type ObjectWriter interface {
io.WriteCloser
///For some cases, i.e. block incremental, the object is not written sequentially
io.Seeker
// Periodically called to preserve the state of data written to the repo so far
// Return a unified identifier that represent the current state
// An empty ID could be returned on success if the backup repository doesn't support this
Checkpoint() (ID, error)
io.WriteCloser
///Wait for the completion of the object write
///Result returns the object's unified identifier after the write completes
Result() (ID, error)
}
// Seeker is used in the cases that the object is not written sequentially
io.Seeker
// Checkpoint is periodically called to preserve the state of data written to the repo so far.
// Checkpoint returns a unified identifier that represents the current state.
// An empty ID could be returned on success if the backup repository doesn't support this.
Checkpoint() (ID, error)
// Result waits for the completion of the object write.
// Result returns the object's unified identifier after the write completes.
Result() (ID, error)
}
```
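For illustration, here is a minimal, hypothetical sketch of how a caller could drive this interface end to end, using the data structures and constants listed in the next block. The `newKopiaBackupRepoService` constructor, the option values, the object path, and the close/`Result` ordering are assumptions made for the example only, not part of this design.
```
// writeOneObject is a hypothetical caller of the Unified Repository Interface.
// It assumes the interfaces above plus an assumed constructor newKopiaBackupRepoService().
func writeOneObject(ctx context.Context, payload []byte) error {
	var svc BackupRepoService = newKopiaBackupRepoService() // assumed constructor

	opt := RepoOptions{
		StorageType:  "s3",               // back the repository with S3-compatible storage
		RepoPassword: "example-password", // example only
	}

	// Connect to an existing backup repository (createNew == false), then open it.
	if err := svc.Init(ctx, opt, false); err != nil {
		return err
	}
	repo, err := svc.Open(ctx, opt)
	if err != nil {
		return err
	}
	defer repo.Close(ctx)

	// Create an object, write the payload, and retrieve its unified identifier.
	writer := repo.NewObjectWriter(ctx, ObjectWriteOptions{
		FullPath:   "/ns-1/pod-1/volume-1", // illustrative logical path
		DataType:   ObjectDataTypeData,
		AccessMode: ObjectDataAccessModeFile,
		BackupMode: ObjectDataBackupModeFull,
	})
	defer writer.Close()

	if _, err := writer.Write(payload); err != nil {
		return err
	}
	id, err := writer.Result()
	if err != nil {
		return err
	}

	// Record the object in a manifest and flush the repository data.
	if _, err := repo.PutManifest(ctx, RepoManifest{Payload: map[string]ID{"object": id}}); err != nil {
		return err
	}
	return repo.Flush(ctx)
}
```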
Some data structures & constants used by the interfaces:
```
type RepoOptions struct {
///A repository specific string to identify a backup storage, i.e., "s3", "filesystem"
StorageType string
///Backup repository password, if any
RepoPassword string
///A custom path to save the repository's configuration, if any
ConfigFilePath string
///Other repository specific options
GeneralOptions map[string]string
///Storage specific options
StorageOptions map[string]string
// StorageType is a repository specific string to identify a backup storage, i.e., "s3", "filesystem"
StorageType string
// RepoPassword is the backup repository's password, if any
RepoPassword string
// ConfigFilePath is a custom path to save the repository's configuration, if any
ConfigFilePath string
// GeneralOptions takes other repository specific options
GeneralOptions map[string]string
// StorageOptions takes storage specific options
StorageOptions map[string]string
// Description is a description of the backup repository/backup repository operation.
// It is for logging/debugging purpose only and doesn't control any behavior of the backup repository.
Description string
}
///ObjectWriteOptions defines the options when creating an object for write
// ObjectWriteOptions defines the options when creating an object for write
type ObjectWriteOptions struct {
FullPath string ///Full logical path of the object
Description string ///A description of the object, could be empty
Prefix ID ///A prefix of the name used to save the object
AccessMode int ///OBJECT_DATA_ACCESS_*
BackupMode int ///OBJECT_DATA_BACKUP_*
FullPath string // Full logical path of the object
DataType int // OBJECT_DATA_TYPE_*
Description string // A description of the object, could be empty
Prefix ID // A prefix of the name used to save the object
AccessMode int // OBJECT_DATA_ACCESS_*
BackupMode int // OBJECT_DATA_BACKUP_*
}
const (
///Below consts defines the access mode when creating an object for write
OBJECT_DATA_ACCESS_MODE_UNKNOWN int = 0
OBJECT_DATA_ACCESS_MODE_FILE int = 1
OBJECT_DATA_ACCESS_MODE_BLOCK int = 2
// Below consts describe the data type of one object.
// Metadata: This type describes how the data is organized.
// For a file system backup, the Metadata describes a Dir or File.
// For a block backup, the Metadata describes a Disk and its incremental link.
ObjectDataTypeUnknown int = 0
ObjectDataTypeMetadata int = 1
ObjectDataTypeData int = 2
OBJECT_DATA_BACKUP_MODE_UNKNOWN int = 0
OBJECT_DATA_BACKUP_MODE_FULL int = 1
OBJECT_DATA_BACKUP_MODE_INC int = 2
// Below consts define the access mode when creating an object for write
ObjectDataAccessModeUnknown int = 0
ObjectDataAccessModeFile int = 1
ObjectDataAccessModeBlock int = 2
ObjectDataBackupModeUnknown int = 0
ObjectDataBackupModeFull int = 1
ObjectDataBackupModeInc int = 2
)
///ManifestEntryMetadata is the metadata describing one manifest data
// ManifestEntryMetadata is the metadata describing one manifest data
type ManifestEntryMetadata struct {
ID ID ///The ID of the manifest data
Length int32 ///The data size of the manifest data
Labels map[string]string ///Labels saved together with the manifest data
ModTime time.Time ///Modified time of the manifest data
ID ID // The ID of the manifest data
Length int32 // The data size of the manifest data
Labels map[string]string // Labels saved together with the manifest data
ModTime time.Time // Modified time of the manifest data
}
type RepoManifest struct {
Payload interface{} ///The user data of manifest
Metadata *ManifestEntryMetadata ///The metadata data of manifest
Payload interface{} // The user data of manifest
Metadata *ManifestEntryMetadata // The metadata data of manifest
}
type ManifestFilter struct {

go.mod

@ -76,6 +76,7 @@ require (
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/go-logr/zapr v0.4.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect

go.sum

@ -323,6 +323,7 @@ github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm
github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@ -552,6 +553,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=


@ -36,7 +36,7 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
"github.com/vmware-tanzu/velero/pkg/podexec"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/podvolume"
"github.com/vmware-tanzu/velero/pkg/util/collections"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
@ -126,7 +126,7 @@ func (i *InitContainerRestoreHookHandler) HandleRestoreHooks(
// restored data to be consumed by the application container(s).
// So if there is a "restic-wait" init container already on the pod at index 0, we'll preserve that and run
// it before running any other init container.
if len(pod.Spec.InitContainers) > 0 && pod.Spec.InitContainers[0].Name == restic.InitContainer {
if len(pod.Spec.InitContainers) > 0 && pod.Spec.InitContainers[0].Name == podvolume.InitContainer {
initContainers = append(initContainers, pod.Spec.InitContainers[0])
pod.Spec.InitContainers = pod.Spec.InitContainers[1:]
}


@ -28,7 +28,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/client"
"github.com/vmware-tanzu/velero/pkg/cmd"
"github.com/vmware-tanzu/velero/pkg/cmd/util/output"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/label"
)
func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
@ -69,7 +69,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
first := true
for _, restore := range restores.Items {
opts := restic.NewPodVolumeRestoreListOptions(restore.Name)
opts := newPodVolumeRestoreListOptions(restore.Name)
podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(f.Namespace()).List(context.TODO(), opts)
if err != nil {
fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err)
@ -94,3 +94,11 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
return c
}
// newPodVolumeRestoreListOptions creates a ListOptions with a label selector configured to
// find PodVolumeRestores for the restore identified by name.
func newPodVolumeRestoreListOptions(name string) metav1.ListOptions {
return metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", velerov1api.RestoreNameLabel, label.GetValidName(name)),
}
}


@ -112,6 +112,9 @@ const (
// defaultCredentialsDirectory is the path on disk where credential
// files will be written to
defaultCredentialsDirectory = "/tmp/credentials"
// daemonSet is the name of the Velero restic daemonset.
daemonSet = "restic"
)
type serverConfig struct {
@ -529,7 +532,7 @@ var defaultRestorePriorities = []string{
func (s *server) initRestic() error {
// warn if restic daemonset does not exist
if _, err := s.kubeClient.AppsV1().DaemonSets(s.namespace).Get(s.ctx, restic.DaemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) {
if _, err := s.kubeClient.AppsV1().DaemonSets(s.namespace).Get(s.ctx, daemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) {
s.logger.Warn("Velero restic daemonset not found; restic backups/restores will not work until it's created")
} else if err != nil {
s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of velero restic daemonset")
@ -674,22 +677,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
}
gcControllerRunInfo := func() controllerRunInfo {
gcController := controller.NewGCController(
s.logger,
s.sharedInformerFactory.Velero().V1().Backups(),
s.sharedInformerFactory.Velero().V1().DeleteBackupRequests().Lister(),
s.veleroClient.VeleroV1(),
s.mgr.GetClient(),
s.config.garbageCollectionFrequency,
)
return controllerRunInfo{
controller: gcController,
numWorkers: defaultControllerWorkers,
}
}
restoreControllerRunInfo := func() controllerRunInfo {
restorer, err := restore.NewKubernetesRestorer(
s.veleroClient.VeleroV1(),
@ -731,10 +718,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
enabledControllers := map[string]func() controllerRunInfo{
controller.BackupSync: backupSyncControllerRunInfo,
controller.Backup: backupControllerRunInfo,
controller.GarbageCollection: gcControllerRunInfo,
controller.Restore: restoreControllerRunInfo,
controller.BackupSync: backupSyncControllerRunInfo,
controller.Backup: backupControllerRunInfo,
controller.Restore: restoreControllerRunInfo,
}
// Note: all runtime type controllers that can be disabled are grouped separately, below:
enabledRuntimeControllers := map[string]struct{}{
@ -858,6 +844,13 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
}
if _, ok := enabledRuntimeControllers[controller.GarbageCollection]; ok {
r := controller.NewGCReconciler(s.logger, s.mgr.GetClient())
if err := r.SetupWithManager(s.mgr); err != nil {
s.logger.Fatal(err, "unable to create controller", "controller", controller.GarbageCollection)
}
}
// TODO(2.0): presuming all controllers and resources are converted to runtime-controller
// by v2.0, the block from this line up to and including `s.mgr.Start()` will be
// deprecated, since the manager auto-starts all the caches. Until then, we need to start the


@ -486,7 +486,7 @@ func (v *volumesByPod) Add(namespace, name, volume, phase string, progress veler
key := fmt.Sprintf("%s/%s", namespace, name)
// append backup progress percentage if backup is in progress
if phase == "In Progress" && progress != (velerov1api.PodVolumeOperationProgress{}) {
if phase == "In Progress" && progress.TotalBytes != 0 {
volume = fmt.Sprintf("%s (%.2f%%)", volume, float64(progress.BytesDone)/float64(progress.TotalBytes)*100)
}


@ -951,6 +951,10 @@ func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []*snapshotv1api
if vs.Status.BoundVolumeSnapshotContentName != nil &&
len(*vs.Status.BoundVolumeSnapshotContentName) > 0 {
vsc = vscMap[*vs.Status.BoundVolumeSnapshotContentName]
if vsc == nil {
logger.Errorf("Cannot find %s in the vscMap", *vs.Status.BoundVolumeSnapshotContentName)
return
}
if vsc.Spec.DeletionPolicy == snapshotv1api.VolumeSnapshotContentDelete {
modifyVSCFlag = true
}


@ -41,7 +41,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
"github.com/vmware-tanzu/velero/pkg/repository"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
@ -440,7 +439,7 @@ func (r *backupDeletionReconciler) deleteResticSnapshots(ctx context.Context, ba
return nil
}
snapshots, err := restic.GetSnapshotsInBackup(ctx, backup, r.Client)
snapshots, err := getSnapshotsInBackup(ctx, backup, r.Client)
if err != nil {
return []error{err}
}
@ -491,3 +490,33 @@ func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *vele
}
return backup, nil
}
// getSnapshotsInBackup returns a list of all restic snapshot ids associated with
// a given Velero backup.
func getSnapshotsInBackup(ctx context.Context, backup *velerov1api.Backup, kbClient client.Client) ([]repository.SnapshotIdentifier, error) {
podVolumeBackups := &velerov1api.PodVolumeBackupList{}
options := &client.ListOptions{
LabelSelector: labels.Set(map[string]string{
velerov1api.BackupNameLabel: label.GetValidName(backup.Name),
}).AsSelector(),
}
err := kbClient.List(ctx, podVolumeBackups, options)
if err != nil {
return nil, errors.WithStack(err)
}
var res []repository.SnapshotIdentifier
for _, item := range podVolumeBackups.Items {
if item.Status.SnapshotID == "" {
continue
}
res = append(res, repository.SnapshotIdentifier{
VolumeNamespace: item.Spec.Pod.Namespace,
BackupStorageLocation: backup.Spec.StorageLocation,
SnapshotID: item.Status.SnapshotID,
})
}
return res, nil
}


@ -19,6 +19,7 @@ package controller
import (
"bytes"
"fmt"
"sort"
"time"
"context"
@ -32,6 +33,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
corev1api "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -52,6 +54,7 @@ import (
persistencemocks "github.com/vmware-tanzu/velero/pkg/persistence/mocks"
"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
pluginmocks "github.com/vmware-tanzu/velero/pkg/plugin/mocks"
"github.com/vmware-tanzu/velero/pkg/repository"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
)
@ -692,3 +695,172 @@ func TestBackupDeletionControllerReconcile(t *testing.T) {
})
}
func TestGetSnapshotsInBackup(t *testing.T) {
tests := []struct {
name string
podVolumeBackups []velerov1api.PodVolumeBackup
expected []repository.SnapshotIdentifier
longBackupNameEnabled bool
}{
{
name: "no pod volume backups",
podVolumeBackups: nil,
expected: nil,
},
{
name: "no pod volume backups with matching label",
podVolumeBackups: []velerov1api.PodVolumeBackup{
{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"},
},
},
expected: nil,
},
{
name: "some pod volume backups with matching label",
podVolumeBackups: []velerov1api.PodVolumeBackup{
{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-3"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-4"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: ""},
},
},
expected: []repository.SnapshotIdentifier{
{
VolumeNamespace: "ns-1",
SnapshotID: "snap-3",
},
{
VolumeNamespace: "ns-1",
SnapshotID: "snap-4",
},
},
},
{
name: "some pod volume backups with matching label and backup name greater than 63 chars",
longBackupNameEnabled: true,
podVolumeBackups: []velerov1api.PodVolumeBackup{
{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "the-really-long-backup-name-that-is-much-more-than-63-cha6ca4bc"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-3"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-4"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: ""},
},
},
expected: []repository.SnapshotIdentifier{
{
VolumeNamespace: "ns-1",
SnapshotID: "snap-3",
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
clientBuilder = velerotest.NewFakeControllerRuntimeClientBuilder(t)
veleroBackup = &velerov1api.Backup{}
)
veleroBackup.Name = "backup-1"
if test.longBackupNameEnabled {
veleroBackup.Name = "the-really-long-backup-name-that-is-much-more-than-63-characters"
}
clientBuilder.WithLists(&velerov1api.PodVolumeBackupList{
Items: test.podVolumeBackups,
})
res, err := getSnapshotsInBackup(context.TODO(), veleroBackup, clientBuilder.Build())
assert.NoError(t, err)
// sort to ensure a stable comparison of the slices
less := func(snapshots []repository.SnapshotIdentifier) func(i, j int) bool {
return func(i, j int) bool {
if snapshots[i].VolumeNamespace == snapshots[j].VolumeNamespace {
return snapshots[i].SnapshotID < snapshots[j].SnapshotID
}
return snapshots[i].VolumeNamespace < snapshots[j].VolumeNamespace
}
}
sort.Slice(test.expected, less(test.expected))
sort.Slice(res, less(res))
assert.Equal(t, test.expected, res)
})
}
}


@ -23,19 +23,16 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/cache"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
pkgbackup "github.com/vmware-tanzu/velero/pkg/backup"
velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1"
velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1"
velerov1listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1"
"github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
const (
@ -46,100 +43,77 @@ const (
gcFailureBSLReadOnly = "BSLReadOnly"
)
// gcController creates DeleteBackupRequests for expired backups.
type gcController struct {
*genericController
backupLister velerov1listers.BackupLister
deleteBackupRequestLister velerov1listers.DeleteBackupRequestLister
deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter
kbClient client.Client
frequency time.Duration
clock clock.Clock
// gcReconciler creates DeleteBackupRequests for expired backups.
type gcReconciler struct {
client.Client
logger logrus.FieldLogger
clock clock.Clock
}
// NewGCController constructs a new gcController.
func NewGCController(
// NewGCReconciler constructs a new gcReconciler.
func NewGCReconciler(
logger logrus.FieldLogger,
backupInformer velerov1informers.BackupInformer,
deleteBackupRequestLister velerov1listers.DeleteBackupRequestLister,
deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter,
kbClient client.Client,
frequency time.Duration,
) Interface {
c := &gcController{
genericController: newGenericController(GarbageCollection, logger),
clock: clock.RealClock{},
backupLister: backupInformer.Lister(),
deleteBackupRequestLister: deleteBackupRequestLister,
deleteBackupRequestClient: deleteBackupRequestClient,
kbClient: kbClient,
}
c.syncHandler = c.processQueueItem
c.resyncPeriod = frequency
if c.resyncPeriod <= 0 {
c.resyncPeriod = defaultGCFrequency
}
logger.Infof("Garbage collection frequency: %s", c.resyncPeriod.String())
c.resyncFunc = c.enqueueAllBackups
backupInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.enqueue,
UpdateFunc: func(_, obj interface{}) { c.enqueue(obj) },
},
)
return c
}
// enqueueAllBackups lists all backups from cache and enqueues all of them so we can check each one
// for expiration.
func (c *gcController) enqueueAllBackups() {
c.logger.Debug("gcController.enqueueAllBackups")
backups, err := c.backupLister.List(labels.Everything())
if err != nil {
c.logger.WithError(errors.WithStack(err)).Error("error listing backups")
return
}
for _, backup := range backups {
c.enqueue(backup)
client client.Client,
) *gcReconciler {
return &gcReconciler{
Client: client,
logger: logger,
clock: clock.RealClock{},
}
}
func (c *gcController) processQueueItem(key string) error {
log := c.logger.WithField("backup", key)
// GCController only watches CreateEvents, ensuring every new backup is taken care of.
// Other events are filtered out to reduce the number of reconcile calls. In particular, UpdateEvents must be filtered because we removed
// the backup status as a sub-resource of backup in v1.9, so every change to it is treated as an UpdateEvent and would trigger a reconcile call.
func (c *gcReconciler) SetupWithManager(mgr ctrl.Manager) error {
s := kube.NewPeriodicalEnqueueSource(c.logger, mgr.GetClient(), &velerov1api.BackupList{}, defaultGCFrequency)
return ctrl.NewControllerManagedBy(mgr).
For(&velerov1api.Backup{}).
WithEventFilter(predicate.Funcs{
UpdateFunc: func(ue event.UpdateEvent) bool {
return false
},
DeleteFunc: func(de event.DeleteEvent) bool {
return false
},
GenericFunc: func(ge event.GenericEvent) bool {
return false
},
}).
Watches(s, nil).
Complete(c)
}
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return errors.Wrap(err, "error splitting queue key")
}
// +kubebuilder:rbac:groups=velero.io,resources=backups,verbs=get;list;watch;update
// +kubebuilder:rbac:groups=velero.io,resources=backups/status,verbs=get
// +kubebuilder:rbac:groups=velero.io,resources=deletebackuprequests,verbs=get;list;watch;create;
// +kubebuilder:rbac:groups=velero.io,resources=deletebackuprequests/status,verbs=get
// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations,verbs=get
func (c *gcReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := c.logger.WithField("gc backup", req.String())
log.Debug("gcController getting backup")
backup, err := c.backupLister.Backups(ns).Get(name)
if apierrors.IsNotFound(err) {
log.Debug("Unable to find backup")
return nil
}
if err != nil {
return errors.Wrap(err, "error getting backup")
backup := &velerov1api.Backup{}
if err := c.Get(ctx, req.NamespacedName, backup); err != nil {
if apierrors.IsNotFound(err) {
log.WithError(err).Error("backup not found")
return ctrl.Result{}, nil
}
return ctrl.Result{}, errors.Wrapf(err, "error getting backup %s", req.String())
}
log.Debugf("backup: %v", backup)
log = c.logger.WithFields(
logrus.Fields{
"backup": key,
"backup": req.String(),
"expiration": backup.Status.Expiration,
},
)
now := c.clock.Now()
if backup.Status.Expiration == nil || backup.Status.Expiration.After(now) {
log.Debug("Backup has not expired yet, skipping")
return nil
return ctrl.Result{}, nil
}
log.Info("Backup has expired")
@ -149,8 +123,8 @@ func (c *gcController) processQueueItem(key string) error {
}
loc := &velerov1api.BackupStorageLocation{}
if err := c.kbClient.Get(context.Background(), client.ObjectKey{
Namespace: ns,
if err := c.Get(ctx, client.ObjectKey{
Namespace: req.Namespace,
Name: backup.Spec.StorageLocation,
}, loc); err != nil {
if apierrors.IsNotFound(err) {
@ -159,53 +133,56 @@ func (c *gcController) processQueueItem(key string) error {
} else {
backup.Labels[garbageCollectionFailure] = gcFailureBSLCannotGet
}
if err := c.kbClient.Update(context.Background(), backup); err != nil {
if err := c.Update(ctx, backup); err != nil {
log.WithError(err).Error("error updating backup labels")
}
return errors.Wrap(err, "error getting backup storage location")
return ctrl.Result{}, errors.Wrap(err, "error getting backup storage location")
}
if loc.Spec.AccessMode == velerov1api.BackupStorageLocationAccessModeReadOnly {
log.Infof("Backup cannot be garbage-collected because backup storage location %s is currently in read-only mode", loc.Name)
backup.Labels[garbageCollectionFailure] = gcFailureBSLReadOnly
if err := c.kbClient.Update(context.Background(), backup); err != nil {
if err := c.Update(ctx, backup); err != nil {
log.WithError(err).Error("error updating backup labels")
}
return nil
return ctrl.Result{}, nil
}
// remove gc fail error label after this point
delete(backup.Labels, garbageCollectionFailure)
if err := c.kbClient.Update(context.Background(), backup); err != nil {
if err := c.Update(ctx, backup); err != nil {
log.WithError(err).Error("error updating backup labels")
}
selector := labels.SelectorFromSet(labels.Set(map[string]string{
selector := client.MatchingLabels{
velerov1api.BackupNameLabel: label.GetValidName(backup.Name),
velerov1api.BackupUIDLabel: string(backup.UID),
}))
dbrs, err := c.deleteBackupRequestLister.DeleteBackupRequests(ns).List(selector)
if err != nil {
return errors.Wrap(err, "error listing existing DeleteBackupRequests for backup")
}
dbrs := &velerov1api.DeleteBackupRequestList{}
if err := c.List(ctx, dbrs, selector); err != nil {
log.WithError(err).Error("error listing DeleteBackupRequests")
return ctrl.Result{}, errors.Wrap(err, "error listing existing DeleteBackupRequests for backup")
}
log.Debugf("length of dbrs:%d", len(dbrs.Items))
// if there's an existing unprocessed deletion request for this backup, don't create
// another one
for _, dbr := range dbrs {
for _, dbr := range dbrs.Items {
switch dbr.Status.Phase {
case "", velerov1api.DeleteBackupRequestPhaseNew, velerov1api.DeleteBackupRequestPhaseInProgress:
log.Info("Backup already has a pending deletion request")
return nil
return ctrl.Result{}, nil
}
}
log.Info("Creating a new deletion request")
req := pkgbackup.NewDeleteBackupRequest(backup.Name, string(backup.UID))
if _, err = c.deleteBackupRequestClient.DeleteBackupRequests(ns).Create(context.TODO(), req, metav1.CreateOptions{}); err != nil {
return errors.Wrap(err, "error creating DeleteBackupRequest")
ndbr := pkgbackup.NewDeleteBackupRequest(backup.Name, string(backup.UID))
ndbr.SetNamespace(backup.Namespace)
if err := c.Create(ctx, ndbr); err != nil {
log.WithError(err).Error("error creating DeleteBackupRequests")
return ctrl.Result{}, errors.Wrap(err, "error creating DeleteBackupRequest")
}
return nil
return ctrl.Result{}, nil
}


@ -18,152 +18,41 @@ package controller
import (
"context"
"fmt"
"sort"
"testing"
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/watch"
core "k8s.io/client-go/testing"
ctrl "sigs.k8s.io/controller-runtime"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/builder"
"github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake"
informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
func TestGCControllerEnqueueAllBackups(t *testing.T) {
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
controller = NewGCController(
velerotest.NewLogger(),
sharedInformers.Velero().V1().Backups(),
sharedInformers.Velero().V1().DeleteBackupRequests().Lister(),
client.VeleroV1(),
nil,
defaultGCFrequency,
).(*gcController)
)
keys := make(chan string)
controller.syncHandler = func(key string) error {
keys <- key
return nil
}
var expected []string
for i := 0; i < 3; i++ {
backup := builder.ForBackup(velerov1api.DefaultNamespace, fmt.Sprintf("backup-%d", i)).Result()
sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup)
expected = append(expected, kube.NamespaceAndName(backup))
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
go controller.Run(ctx, 1)
var received []string
Loop:
for {
select {
case <-ctx.Done():
t.Fatal("test timed out")
case key := <-keys:
received = append(received, key)
if len(received) == len(expected) {
break Loop
}
}
}
sort.Strings(expected)
sort.Strings(received)
assert.Equal(t, expected, received)
}
func TestGCControllerHasUpdateFunc(t *testing.T) {
backup := defaultBackup().Result()
expected := kube.NamespaceAndName(backup)
client := fake.NewSimpleClientset(backup)
fakeWatch := watch.NewFake()
defer fakeWatch.Stop()
client.PrependWatchReactor("backups", core.DefaultWatchReactor(fakeWatch, nil))
sharedInformers := informers.NewSharedInformerFactory(client, 0)
controller := NewGCController(
func mockGCReconciler(fakeClient kbclient.Client, fakeClock *clock.FakeClock) *gcReconciler {
gcr := NewGCReconciler(
velerotest.NewLogger(),
sharedInformers.Velero().V1().Backups(),
sharedInformers.Velero().V1().DeleteBackupRequests().Lister(),
client.VeleroV1(),
nil,
defaultGCFrequency,
).(*gcController)
keys := make(chan string)
controller.syncHandler = func(key string) error {
keys <- key
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
go sharedInformers.Start(ctx.Done())
go controller.Run(ctx, 1)
// wait for the AddFunc
select {
case <-ctx.Done():
t.Fatal("test timed out waiting for AddFunc")
case key := <-keys:
assert.Equal(t, expected, key)
}
backup.Status.Version = 1234
fakeWatch.Add(backup)
// wait for the UpdateFunc
select {
case <-ctx.Done():
t.Fatal("test timed out waiting for UpdateFunc")
case key := <-keys:
assert.Equal(t, expected, key)
}
fakeClient,
)
gcr.clock = fakeClock
return gcr
}
func TestGCControllerProcessQueueItem(t *testing.T) {
func TestGCReconcile(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now())
defaultBackupLocation := builder.ForBackupStorageLocation("velero", "default").Result()
defaultBackupLocation := builder.ForBackupStorageLocation(velerov1api.DefaultNamespace, "default").Result()
tests := []struct {
name string
backup *velerov1api.Backup
deleteBackupRequests []*velerov1api.DeleteBackupRequest
backupLocation *velerov1api.BackupStorageLocation
expectDeletion bool
createDeleteBackupRequestError bool
expectError bool
name string
backup *velerov1api.Backup
deleteBackupRequests []*velerov1api.DeleteBackupRequest
backupLocation *velerov1api.BackupStorageLocation
expectError bool
}{
{
name: "can't find backup - no error",
@ -172,25 +61,21 @@ func TestGCControllerProcessQueueItem(t *testing.T) {
name: "unexpired backup is not deleted",
backup: defaultBackup().Expiration(fakeClock.Now().Add(time.Minute)).StorageLocation("default").Result(),
backupLocation: defaultBackupLocation,
expectDeletion: false,
},
{
name: "expired backup in read-only storage location is not deleted",
backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Minute)).StorageLocation("read-only").Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "read-only").AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Result(),
expectDeletion: false,
},
{
name: "expired backup in read-write storage location is deleted",
backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Minute)).StorageLocation("read-write").Result(),
backupLocation: builder.ForBackupStorageLocation("velero", "read-write").AccessMode(velerov1api.BackupStorageLocationAccessModeReadWrite).Result(),
expectDeletion: true,
},
{
name: "expired backup with no pending deletion requests is deleted",
backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Second)).StorageLocation("default").Result(),
backupLocation: defaultBackupLocation,
expectDeletion: true,
},
{
name: "expired backup with a pending deletion request is not deleted",
@ -211,7 +96,6 @@ func TestGCControllerProcessQueueItem(t *testing.T) {
},
},
},
expectDeletion: false,
},
{
name: "expired backup with only processed deletion requests is deleted",
@ -232,72 +116,31 @@ func TestGCControllerProcessQueueItem(t *testing.T) {
},
},
},
expectDeletion: true,
},
{
name: "create DeleteBackupRequest error returns an error",
backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Second)).StorageLocation("default").Result(),
backupLocation: defaultBackupLocation,
expectDeletion: true,
createDeleteBackupRequestError: true,
expectError: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
)
var fakeClient kbclient.Client
if test.backupLocation != nil {
fakeClient = velerotest.NewFakeControllerRuntimeClient(t, test.backupLocation)
} else {
fakeClient = velerotest.NewFakeControllerRuntimeClient(t)
if test.backup == nil {
return
}
controller := NewGCController(
velerotest.NewLogger(),
sharedInformers.Velero().V1().Backups(),
sharedInformers.Velero().V1().DeleteBackupRequests().Lister(),
client.VeleroV1(),
fakeClient,
defaultGCFrequency,
).(*gcController)
controller.clock = fakeClock
initObjs := []runtime.Object{}
initObjs = append(initObjs, test.backup)
var key string
if test.backup != nil {
key = kube.NamespaceAndName(test.backup)
sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(test.backup)
if test.backupLocation != nil {
initObjs = append(initObjs, test.backupLocation)
}
for _, dbr := range test.deleteBackupRequests {
sharedInformers.Velero().V1().DeleteBackupRequests().Informer().GetStore().Add(dbr)
initObjs = append(initObjs, dbr)
}
if test.createDeleteBackupRequestError {
client.PrependReactor("create", "deletebackuprequests", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, errors.New("foo")
})
}
err := controller.processQueueItem(key)
fakeClient := velerotest.NewFakeControllerRuntimeClient(t, initObjs...)
reconciler := mockGCReconciler(fakeClient, fakeClock)
_, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{Namespace: test.backup.Namespace, Name: test.backup.Name}})
gotErr := err != nil
assert.Equal(t, test.expectError, gotErr)
if test.expectDeletion {
require.Len(t, client.Actions(), 1)
createAction, ok := client.Actions()[0].(core.CreateAction)
require.True(t, ok)
assert.Equal(t, "deletebackuprequests", createAction.GetResource().Resource)
} else {
assert.Len(t, client.Actions(), 0)
}
})
}
}


@ -38,6 +38,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/metrics"
repokey "github.com/vmware-tanzu/velero/pkg/repository/keys"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
@ -61,6 +62,13 @@ type PodVolumeBackupReconciler struct {
Log logrus.FieldLogger
}
type BackupProgressUpdater struct {
PodVolumeBackup *velerov1api.PodVolumeBackup
Log logrus.FieldLogger
Ctx context.Context
Cli client.Client
}
// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups/status,verbs=get;update;patch
func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
@ -364,3 +372,20 @@ func (r *PodVolumeBackupReconciler) buildResticCommand(ctx context.Context, log
return cmd, nil
}
func (r *PodVolumeBackupReconciler) NewBackupProgressUpdater(pvb *velerov1api.PodVolumeBackup, log logrus.FieldLogger, ctx context.Context) *BackupProgressUpdater {
return &BackupProgressUpdater{pvb, log, ctx, r.Client}
}
// UpdateProgress implements the ProgressUpdater interface to update the PodVolumeBackup progress status
func (b *BackupProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) {
original := b.PodVolumeBackup.DeepCopy()
b.PodVolumeBackup.Status.Progress = velerov1api.PodVolumeOperationProgress{TotalBytes: p.TotalBytes, BytesDone: p.BytesDone}
if b.Cli == nil {
b.Log.Errorf("failed to update backup pod %s volume %s progress with uninitailize client", b.PodVolumeBackup.Spec.Pod.Name, b.PodVolumeBackup.Spec.Volume)
return
}
if err := b.Cli.Patch(b.Ctx, b.PodVolumeBackup, client.MergeFrom(original)); err != nil {
b.Log.Errorf("update backup pod %s volume %s progress with %v", b.PodVolumeBackup.Spec.Pod.Name, b.PodVolumeBackup.Spec.Volume, err)
}
}


@ -39,8 +39,10 @@ import (
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/podvolume"
repokey "github.com/vmware-tanzu/velero/pkg/repository/keys"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
@ -64,6 +66,13 @@ type PodVolumeRestoreReconciler struct {
clock clock.Clock
}
type RestoreProgressUpdater struct {
PodVolumeRestore *velerov1api.PodVolumeRestore
Log logrus.FieldLogger
Ctx context.Context
Cli client.Client
}
// +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=velero.io,resources=podvolumerestores/status,verbs=get;update;patch
// +kubebuilder:rbac:groups="",resources=pods,verbs=get
@ -98,7 +107,7 @@ func (c *PodVolumeRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Req
resticInitContainerIndex := getResticInitContainerIndex(pod)
if resticInitContainerIndex > 0 {
log.Warnf(`Init containers before the %s container may cause issues
if they interfere with volumes being restored: %s index %d`, restic.InitContainer, restic.InitContainer, resticInitContainerIndex)
if they interfere with volumes being restored: %s index %d`, podvolume.InitContainer, podvolume.InitContainer, resticInitContainerIndex)
}
log.Info("Restore starting")
@ -208,7 +217,7 @@ func isResticInitContainerRunning(pod *corev1api.Pod) bool {
func getResticInitContainerIndex(pod *corev1api.Pod) int {
// Restic wait container can be anywhere in the list of init containers so locate it.
for i, initContainer := range pod.Spec.InitContainers {
if initContainer.Name == restic.InitContainer {
if initContainer.Name == podvolume.InitContainer {
return i
}
}
@ -329,3 +338,20 @@ func (c *PodVolumeRestoreReconciler) updateRestoreProgressFunc(req *velerov1api.
}
}
}
func (r *PodVolumeRestoreReconciler) NewRestoreProgressUpdater(pvr *velerov1api.PodVolumeRestore, log logrus.FieldLogger, ctx context.Context) *RestoreProgressUpdater {
return &RestoreProgressUpdater{pvr, log, ctx, r.Client}
}
// UpdateProgress implements the ProgressUpdater interface to update the PodVolumeRestore progress status
func (r *RestoreProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) {
original := r.PodVolumeRestore.DeepCopy()
r.PodVolumeRestore.Status.Progress = velerov1api.PodVolumeOperationProgress{TotalBytes: p.TotalBytes, BytesDone: p.BytesDone}
if r.Cli == nil {
r.Log.Errorf("failed to update restore pod %s volume %s progress with uninitailize client", r.PodVolumeRestore.Spec.Pod.Name, r.PodVolumeRestore.Spec.Volume)
return
}
if err := r.Cli.Patch(r.Ctx, r.PodVolumeRestore, client.MergeFrom(original)); err != nil {
r.Log.Errorf("update restore pod %s volume %s progress with %v", r.PodVolumeRestore.Spec.Pod.Name, r.PodVolumeRestore.Spec.Volume, err)
}
}


@ -31,7 +31,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/podvolume"
"github.com/vmware-tanzu/velero/pkg/test"
)
@ -120,7 +120,7 @@ func TestShouldProcess(t *testing.T) {
NodeName: controllerNode,
InitContainers: []corev1api.Container{
{
Name: restic.InitContainer,
Name: podvolume.InitContainer,
},
},
},
@ -160,7 +160,7 @@ func TestShouldProcess(t *testing.T) {
NodeName: controllerNode,
InitContainers: []corev1api.Container{
{
Name: restic.InitContainer,
Name: podvolume.InitContainer,
},
},
},
@ -260,7 +260,7 @@ func TestIsResticContainerRunning(t *testing.T) {
Name: "non-restic-init",
},
{
Name: restic.InitContainer,
Name: podvolume.InitContainer,
},
},
},
@ -291,7 +291,7 @@ func TestIsResticContainerRunning(t *testing.T) {
Spec: corev1api.PodSpec{
InitContainers: []corev1api.Container{
{
Name: restic.InitContainer,
Name: podvolume.InitContainer,
},
{
Name: "non-restic-init",
@ -323,7 +323,7 @@ func TestIsResticContainerRunning(t *testing.T) {
Spec: corev1api.PodSpec{
InitContainers: []corev1api.Container{
{
Name: restic.InitContainer,
Name: podvolume.InitContainer,
},
{
Name: "non-restic-init",
@ -357,7 +357,7 @@ func TestIsResticContainerRunning(t *testing.T) {
Spec: corev1api.PodSpec{
InitContainers: []corev1api.Container{
{
Name: restic.InitContainer,
Name: podvolume.InitContainer,
},
},
},
@ -422,7 +422,7 @@ func TestGetResticInitContainerIndex(t *testing.T) {
Name: "non-restic-init",
},
{
Name: restic.InitContainer,
Name: podvolume.InitContainer,
},
},
},
@ -439,7 +439,7 @@ func TestGetResticInitContainerIndex(t *testing.T) {
Spec: corev1api.PodSpec{
InitContainers: []corev1api.Container{
{
Name: restic.InitContainer,
Name: podvolume.InitContainer,
},
{
Name: "non-restic-init",
@ -459,7 +459,7 @@ func TestGetResticInitContainerIndex(t *testing.T) {
Spec: corev1api.PodSpec{
InitContainers: []corev1api.Container{
{
Name: restic.InitContainer,
Name: podvolume.InitContainer,
},
{
Name: "non-restic-init",


@ -42,6 +42,10 @@ const (
// VolumesToExcludeAnnotation is the annotation on a pod whose mounted volumes
// should be excluded from restic backup.
VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes"
// InitContainer is the name of the init container added
// to workload pods to help with restores.
InitContainer = "restic-wait"
)
// GetVolumeBackupsForPod returns a map, of volume name -> snapshot id,


@ -27,10 +27,24 @@ import (
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/repository/provider"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
)
// SnapshotIdentifier uniquely identifies a restic snapshot
// taken by Velero.
type SnapshotIdentifier struct {
// VolumeNamespace is the namespace of the pod/volume that
// the restic snapshot is for.
VolumeNamespace string
// BackupStorageLocation is the backup's storage location
// name.
BackupStorageLocation string
// SnapshotID is the short ID of the restic snapshot.
SnapshotID string
}
// Manager manages backup repositories.
type Manager interface {
// InitRepo initializes a repo with the specified name and identifier.
@ -50,7 +64,7 @@ type Manager interface {
// Forget removes a snapshot from the list of
// available snapshots in a repo.
Forget(context.Context, restic.SnapshotIdentifier) error
Forget(context.Context, SnapshotIdentifier) error
}
type manager struct {
@ -147,7 +161,7 @@ func (m *manager) UnlockRepo(repo *velerov1api.BackupRepository) error {
return prd.EnsureUnlockRepo(context.Background(), param)
}
func (m *manager) Forget(ctx context.Context, snapshot restic.SnapshotIdentifier) error {
func (m *manager) Forget(ctx context.Context, snapshot SnapshotIdentifier) error {
repo, err := m.repoEnsurer.EnsureRepo(ctx, m.namespace, snapshot.VolumeNamespace, snapshot.BackupStorageLocation)
if err != nil {
return err


@ -20,10 +20,10 @@ import (
context "context"
mock "github.com/stretchr/testify/mock"
restic "github.com/vmware-tanzu/velero/pkg/restic"
v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/podvolume"
"github.com/vmware-tanzu/velero/pkg/repository"
)
// RepositoryManager is an autogenerated mock type for the RepositoryManager type
@ -46,11 +46,11 @@ func (_m *RepositoryManager) ConnectToRepo(repo *v1.BackupRepository) error {
}
// Forget provides a mock function with given fields: _a0, _a1
func (_m *RepositoryManager) Forget(_a0 context.Context, _a1 restic.SnapshotIdentifier) error {
func (_m *RepositoryManager) Forget(_a0 context.Context, _a1 repository.SnapshotIdentifier) error {
ret := _m.Called(_a0, _a1)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, restic.SnapshotIdentifier) error); ok {
if rf, ok := ret.Get(0).(func(context.Context, repository.SnapshotIdentifier) error); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Error(0)


@ -0,0 +1,349 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mocks
import (
context "context"
index "github.com/kopia/kopia/repo/content/index"
manifest "github.com/kopia/kopia/repo/manifest"
mock "github.com/stretchr/testify/mock"
object "github.com/kopia/kopia/repo/object"
repo "github.com/kopia/kopia/repo"
time "time"
)
// RepositoryWriter is an autogenerated mock type for the RepositoryWriter type
type RepositoryWriter struct {
mock.Mock
}
// ClientOptions provides a mock function with given fields:
func (_m *RepositoryWriter) ClientOptions() repo.ClientOptions {
ret := _m.Called()
var r0 repo.ClientOptions
if rf, ok := ret.Get(0).(func() repo.ClientOptions); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(repo.ClientOptions)
}
return r0
}
// Close provides a mock function with given fields: ctx
func (_m *RepositoryWriter) Close(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// ContentInfo provides a mock function with given fields: ctx, contentID
func (_m *RepositoryWriter) ContentInfo(ctx context.Context, contentID index.ID) (index.Info, error) {
ret := _m.Called(ctx, contentID)
var r0 index.Info
if rf, ok := ret.Get(0).(func(context.Context, index.ID) index.Info); ok {
r0 = rf(ctx, contentID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(index.Info)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, index.ID) error); ok {
r1 = rf(ctx, contentID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteManifest provides a mock function with given fields: ctx, id
func (_m *RepositoryWriter) DeleteManifest(ctx context.Context, id manifest.ID) error {
ret := _m.Called(ctx, id)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, manifest.ID) error); ok {
r0 = rf(ctx, id)
} else {
r0 = ret.Error(0)
}
return r0
}
// FindManifests provides a mock function with given fields: ctx, labels
func (_m *RepositoryWriter) FindManifests(ctx context.Context, labels map[string]string) ([]*manifest.EntryMetadata, error) {
ret := _m.Called(ctx, labels)
var r0 []*manifest.EntryMetadata
if rf, ok := ret.Get(0).(func(context.Context, map[string]string) []*manifest.EntryMetadata); ok {
r0 = rf(ctx, labels)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*manifest.EntryMetadata)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, map[string]string) error); ok {
r1 = rf(ctx, labels)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Flush provides a mock function with given fields: ctx
func (_m *RepositoryWriter) Flush(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetManifest provides a mock function with given fields: ctx, id, data
func (_m *RepositoryWriter) GetManifest(ctx context.Context, id manifest.ID, data interface{}) (*manifest.EntryMetadata, error) {
ret := _m.Called(ctx, id, data)
var r0 *manifest.EntryMetadata
if rf, ok := ret.Get(0).(func(context.Context, manifest.ID, interface{}) *manifest.EntryMetadata); ok {
r0 = rf(ctx, id, data)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*manifest.EntryMetadata)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, manifest.ID, interface{}) error); ok {
r1 = rf(ctx, id, data)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewObjectWriter provides a mock function with given fields: ctx, opt
func (_m *RepositoryWriter) NewObjectWriter(ctx context.Context, opt object.WriterOptions) object.Writer {
ret := _m.Called(ctx, opt)
var r0 object.Writer
if rf, ok := ret.Get(0).(func(context.Context, object.WriterOptions) object.Writer); ok {
r0 = rf(ctx, opt)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(object.Writer)
}
}
return r0
}
// NewWriter provides a mock function with given fields: ctx, opt
func (_m *RepositoryWriter) NewWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.RepositoryWriter, error) {
ret := _m.Called(ctx, opt)
var r0 context.Context
if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok {
r0 = rf(ctx, opt)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(context.Context)
}
}
var r1 repo.RepositoryWriter
if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.RepositoryWriter); ok {
r1 = rf(ctx, opt)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(repo.RepositoryWriter)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok {
r2 = rf(ctx, opt)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// OpenObject provides a mock function with given fields: ctx, id
func (_m *RepositoryWriter) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) {
ret := _m.Called(ctx, id)
var r0 object.Reader
if rf, ok := ret.Get(0).(func(context.Context, object.ID) object.Reader); ok {
r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(object.Reader)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok {
r1 = rf(ctx, id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// PrefetchContents provides a mock function with given fields: ctx, contentIDs, hint
func (_m *RepositoryWriter) PrefetchContents(ctx context.Context, contentIDs []index.ID, hint string) []index.ID {
ret := _m.Called(ctx, contentIDs, hint)
var r0 []index.ID
if rf, ok := ret.Get(0).(func(context.Context, []index.ID, string) []index.ID); ok {
r0 = rf(ctx, contentIDs, hint)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]index.ID)
}
}
return r0
}
// PrefetchObjects provides a mock function with given fields: ctx, objectIDs, hint
func (_m *RepositoryWriter) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]index.ID, error) {
ret := _m.Called(ctx, objectIDs, hint)
var r0 []index.ID
if rf, ok := ret.Get(0).(func(context.Context, []object.ID, string) []index.ID); ok {
r0 = rf(ctx, objectIDs, hint)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]index.ID)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, []object.ID, string) error); ok {
r1 = rf(ctx, objectIDs, hint)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// PutManifest provides a mock function with given fields: ctx, labels, payload
func (_m *RepositoryWriter) PutManifest(ctx context.Context, labels map[string]string, payload interface{}) (manifest.ID, error) {
ret := _m.Called(ctx, labels, payload)
var r0 manifest.ID
if rf, ok := ret.Get(0).(func(context.Context, map[string]string, interface{}) manifest.ID); ok {
r0 = rf(ctx, labels, payload)
} else {
r0 = ret.Get(0).(manifest.ID)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, map[string]string, interface{}) error); ok {
r1 = rf(ctx, labels, payload)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Refresh provides a mock function with given fields: ctx
func (_m *RepositoryWriter) Refresh(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// Time provides a mock function with given fields:
func (_m *RepositoryWriter) Time() time.Time {
ret := _m.Called()
var r0 time.Time
if rf, ok := ret.Get(0).(func() time.Time); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Time)
}
return r0
}
// UpdateDescription provides a mock function with given fields: d
func (_m *RepositoryWriter) UpdateDescription(d string) {
_m.Called(d)
}
// VerifyObject provides a mock function with given fields: ctx, id
func (_m *RepositoryWriter) VerifyObject(ctx context.Context, id object.ID) ([]index.ID, error) {
ret := _m.Called(ctx, id)
var r0 []index.ID
if rf, ok := ret.Get(0).(func(context.Context, object.ID) []index.ID); ok {
r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]index.ID)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok {
r1 = rf(ctx, id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}

View File

@ -18,6 +18,7 @@ package provider
import (
"context"
"time"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
@ -30,27 +31,27 @@ type RepoParam struct {
// Provider defines the methods to manipulate a backup repository
type Provider interface {
//InitRepo is to initialize a repository from a new storage place
// InitRepo is to initialize a repository from a new storage place
InitRepo(ctx context.Context, param RepoParam) error
//ConnectToRepo is to establish the connection to a
//storage place that a repository is already initialized
// ConnectToRepo is to establish the connection to a
// storage place that a repository is already initialized
ConnectToRepo(ctx context.Context, param RepoParam) error
//PrepareRepo is a combination of InitRepo and ConnectToRepo,
//it may do initializing + connecting, connecting only if the repository
//is already initialized, or do nothing if the repository is already connected
// PrepareRepo is a combination of InitRepo and ConnectToRepo,
// it may do initializing + connecting, connecting only if the repository
// is already initialized, or do nothing if the repository is already connected
PrepareRepo(ctx context.Context, param RepoParam) error
//PruneRepo does a full prune/maintenance of the repository
// PruneRepo does a full prune/maintenance of the repository
PruneRepo(ctx context.Context, param RepoParam) error
//PruneRepoQuick does a quick prune/maintenance of the repository if available
PruneRepoQuick(ctx context.Context, param RepoParam) error
//EnsureUnlockRepo esures to remove any stale file locks in the storage
// EnsureUnlockRepo ensures that any stale file locks in the storage are removed
EnsureUnlockRepo(ctx context.Context, param RepoParam) error
//Forget is to delete a snapshot from the repository
// Forget is to delete a snapshot from the repository
Forget(ctx context.Context, snapshotID string, param RepoParam) error
// DefaultMaintenanceFrequency returns the default frequency to run maintenance
DefaultMaintenanceFrequency(ctx context.Context, param RepoParam) time.Duration
}
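
For orientation, below is a minimal sketch of how a caller could drive the refactored Provider interface; the helper name, its arguments, and its placement in the provider package are assumptions for illustration, while the method names and signatures come from the interface above.
```
package provider

import (
	"context"
	"time"
)

// maintainIfDue is a hypothetical helper, not part of this change set; it only
// illustrates the intended call order of the Provider methods declared above.
func maintainIfDue(ctx context.Context, prov Provider, param RepoParam, lastMaintain time.Time) error {
	// Make sure the repository is initialized and connected before any other call.
	if err := prov.PrepareRepo(ctx, param); err != nil {
		return err
	}
	// Run a prune/maintenance pass only once the provider's default frequency has elapsed.
	if time.Since(lastMaintain) >= prov.DefaultMaintenanceFrequency(ctx, param) {
		return prov.PruneRepo(ctx, param)
	}
	return nil
}
```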

View File

@ -18,6 +18,7 @@ package provider
import (
"context"
"time"
"github.com/sirupsen/logrus"
@ -55,11 +56,6 @@ func (r *resticRepositoryProvider) PruneRepo(ctx context.Context, param RepoPara
return r.svc.PruneRepo(param.BackupLocation, param.BackupRepo)
}
func (r *resticRepositoryProvider) PruneRepoQuick(ctx context.Context, param RepoParam) error {
// restic doesn't support this operation
return nil
}
func (r *resticRepositoryProvider) EnsureUnlockRepo(ctx context.Context, param RepoParam) error {
return r.svc.UnlockRepo(param.BackupLocation, param.BackupRepo)
}
@ -67,3 +63,7 @@ func (r *resticRepositoryProvider) EnsureUnlockRepo(ctx context.Context, param R
func (r *resticRepositoryProvider) Forget(ctx context.Context, snapshotID string, param RepoParam) error {
return r.svc.Forget(param.BackupLocation, param.BackupRepo, snapshotID)
}
func (r *resticRepositoryProvider) DefaultMaintenanceFrequency(ctx context.Context, param RepoParam) time.Duration {
return r.svc.DefaultMaintenanceFrequency()
}

View File

@ -21,6 +21,7 @@ import (
"fmt"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -59,9 +60,8 @@ var funcTable = localFuncTable{
}
const (
repoOpDescFullMaintain = "full maintenance"
repoOpDescQuickMaintain = "quick maintenance"
repoOpDescForget = "forget"
repoOpDescMaintain = "repo maintenance"
repoOpDescForget = "forget"
repoConnectDesc = "unified repo"
)
@ -70,7 +70,7 @@ const (
func NewUnifiedRepoProvider(
credentialGetter credentials.CredentialGetter,
log logrus.FieldLogger,
) (Provider, error) {
) Provider {
repo := unifiedRepoProvider{
credentialGetter: credentialGetter,
log: log,
@ -78,22 +78,21 @@ func NewUnifiedRepoProvider(
repo.repoService = createRepoService(log)
log.Debug("Finished create unified repo service")
return &repo, nil
return &repo
}
func (urp *unifiedRepoProvider) InitRepo(ctx context.Context, param RepoParam) error {
log := urp.log.WithFields(logrus.Fields{
"BSL name": param.BackupLocation.Name,
"BSL UID": param.BackupLocation.UID,
"BSL name": param.BackupLocation.Name,
"repo name": param.BackupRepo.Name,
"repo UID": param.BackupRepo.UID,
})
log.Debug("Start to init repo")
repoOption, err := udmrepo.NewRepoOptions(
udmrepo.WithPassword(urp, param),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)),
udmrepo.WithGenOptions(
map[string]string{
udmrepo.GenOptionOwnerName: udmrepo.GetRepoUser(),
@ -120,15 +119,16 @@ func (urp *unifiedRepoProvider) InitRepo(ctx context.Context, param RepoParam) e
func (urp *unifiedRepoProvider) ConnectToRepo(ctx context.Context, param RepoParam) error {
log := urp.log.WithFields(logrus.Fields{
"BSL name": param.BackupLocation.Name,
"BSL UID": param.BackupLocation.UID,
"BSL name": param.BackupLocation.Name,
"repo name": param.BackupRepo.Name,
"repo UID": param.BackupRepo.UID,
})
log.Debug("Start to connect repo")
repoOption, err := udmrepo.NewRepoOptions(
udmrepo.WithPassword(urp, param),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)),
udmrepo.WithGenOptions(
map[string]string{
udmrepo.GenOptionOwnerName: udmrepo.GetRepoUser(),
@ -155,15 +155,16 @@ func (urp *unifiedRepoProvider) ConnectToRepo(ctx context.Context, param RepoPar
func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam) error {
log := urp.log.WithFields(logrus.Fields{
"BSL name": param.BackupLocation.Name,
"BSL UID": param.BackupLocation.UID,
"BSL name": param.BackupLocation.Name,
"repo name": param.BackupRepo.Name,
"repo UID": param.BackupRepo.UID,
})
log.Debug("Start to prepare repo")
repoOption, err := udmrepo.NewRepoOptions(
udmrepo.WithPassword(urp, param),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)),
udmrepo.WithGenOptions(
map[string]string{
udmrepo.GenOptionOwnerName: udmrepo.GetRepoUser(),
@ -196,21 +197,17 @@ func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam
func (urp *unifiedRepoProvider) PruneRepo(ctx context.Context, param RepoParam) error {
log := urp.log.WithFields(logrus.Fields{
"BSL name": param.BackupLocation.Name,
"BSL UID": param.BackupLocation.UID,
"BSL name": param.BackupLocation.Name,
"repo name": param.BackupRepo.Name,
"repo UID": param.BackupRepo.UID,
})
log.Debug("Start to prune repo")
repoOption, err := udmrepo.NewRepoOptions(
udmrepo.WithPassword(urp, param),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)),
udmrepo.WithGenOptions(
map[string]string{
udmrepo.GenOptionMaintainMode: udmrepo.GenOptionMaintainFull,
},
),
udmrepo.WithDescription(repoOpDescFullMaintain),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)),
udmrepo.WithDescription(repoOpDescMaintain),
)
if err != nil {
@ -227,39 +224,6 @@ func (urp *unifiedRepoProvider) PruneRepo(ctx context.Context, param RepoParam)
return nil
}
func (urp *unifiedRepoProvider) PruneRepoQuick(ctx context.Context, param RepoParam) error {
log := urp.log.WithFields(logrus.Fields{
"BSL name": param.BackupLocation.Name,
"BSL UID": param.BackupLocation.UID,
})
log.Debug("Start to prune repo quick")
repoOption, err := udmrepo.NewRepoOptions(
udmrepo.WithPassword(urp, param),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)),
udmrepo.WithGenOptions(
map[string]string{
udmrepo.GenOptionMaintainMode: udmrepo.GenOptionMaintainQuick,
},
),
udmrepo.WithDescription(repoOpDescQuickMaintain),
)
if err != nil {
return errors.Wrap(err, "error to get repo options")
}
err = urp.repoService.Maintain(ctx, *repoOption)
if err != nil {
return errors.Wrap(err, "error to prune backup repo quick")
}
log.Debug("Prune repo quick complete")
return nil
}
func (urp *unifiedRepoProvider) EnsureUnlockRepo(ctx context.Context, param RepoParam) error {
return nil
}
@ -267,7 +231,8 @@ func (urp *unifiedRepoProvider) EnsureUnlockRepo(ctx context.Context, param Repo
func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, param RepoParam) error {
log := urp.log.WithFields(logrus.Fields{
"BSL name": param.BackupLocation.Name,
"BSL UID": param.BackupLocation.UID,
"repo name": param.BackupRepo.Name,
"repo UID": param.BackupRepo.UID,
"snapshotID": snapshotID,
})
@ -275,7 +240,7 @@ func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, p
repoOption, err := udmrepo.NewRepoOptions(
udmrepo.WithPassword(urp, param),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupLocation.UID)),
udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)),
udmrepo.WithDescription(repoOpDescForget),
)
@ -305,6 +270,10 @@ func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, p
return nil
}
func (urp *unifiedRepoProvider) DefaultMaintenanceFrequency(ctx context.Context, param RepoParam) time.Duration {
return urp.repoService.DefaultMaintenanceFrequency()
}
func (urp *unifiedRepoProvider) GetPassword(param interface{}) (string, error) {
repoParam, ok := param.(RepoParam)
if !ok {

View File

@ -775,6 +775,7 @@ func TestForget(t *testing.T) {
err := urp.Forget(context.Background(), "", RepoParam{
BackupLocation: &velerov1api.BackupStorageLocation{},
BackupRepo: &velerov1api.BackupRepository{},
})
if tc.expectedErr == "" {

View File

@ -18,6 +18,7 @@ package restic
import (
"os"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -71,6 +72,10 @@ func (r *RepositoryService) Forget(bsl *velerov1api.BackupStorageLocation, repo
return r.exec(restic.ForgetCommand(repo.Spec.ResticIdentifier, snapshotID), bsl)
}
func (r *RepositoryService) DefaultMaintenanceFrequency() time.Duration {
return restic.DefaultMaintenanceFrequency
}
func (r *RepositoryService) exec(cmd *restic.Command, bsl *velerov1api.BackupStorageLocation) error {
file, err := r.credentialsFileStore.Path(repokey.RepoKeySelector())
if err != nil {

View File

@ -0,0 +1,542 @@
// Code generated by mockery v2.14.0. DO NOT EDIT.
package mocks
import (
blob "github.com/kopia/kopia/repo/blob"
content "github.com/kopia/kopia/repo/content"
context "context"
index "github.com/kopia/kopia/repo/content/index"
manifest "github.com/kopia/kopia/repo/manifest"
mock "github.com/stretchr/testify/mock"
object "github.com/kopia/kopia/repo/object"
repo "github.com/kopia/kopia/repo"
throttling "github.com/kopia/kopia/repo/blob/throttling"
time "time"
)
// DirectRepository is an autogenerated mock type for the DirectRepository type
type DirectRepository struct {
mock.Mock
}
// AlsoLogToContentLog provides a mock function with given fields: ctx
func (_m *DirectRepository) AlsoLogToContentLog(ctx context.Context) context.Context {
ret := _m.Called(ctx)
var r0 context.Context
if rf, ok := ret.Get(0).(func(context.Context) context.Context); ok {
r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(context.Context)
}
}
return r0
}
// BlobCfg provides a mock function with given fields:
func (_m *DirectRepository) BlobCfg() content.BlobCfgBlob {
ret := _m.Called()
var r0 content.BlobCfgBlob
if rf, ok := ret.Get(0).(func() content.BlobCfgBlob); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(content.BlobCfgBlob)
}
return r0
}
// BlobReader provides a mock function with given fields:
func (_m *DirectRepository) BlobReader() blob.Reader {
ret := _m.Called()
var r0 blob.Reader
if rf, ok := ret.Get(0).(func() blob.Reader); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(blob.Reader)
}
}
return r0
}
// BlobVolume provides a mock function with given fields:
func (_m *DirectRepository) BlobVolume() blob.Volume {
ret := _m.Called()
var r0 blob.Volume
if rf, ok := ret.Get(0).(func() blob.Volume); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(blob.Volume)
}
}
return r0
}
// ClientOptions provides a mock function with given fields:
func (_m *DirectRepository) ClientOptions() repo.ClientOptions {
ret := _m.Called()
var r0 repo.ClientOptions
if rf, ok := ret.Get(0).(func() repo.ClientOptions); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(repo.ClientOptions)
}
return r0
}
// Close provides a mock function with given fields: ctx
func (_m *DirectRepository) Close(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// ConfigFilename provides a mock function with given fields:
func (_m *DirectRepository) ConfigFilename() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// ContentInfo provides a mock function with given fields: ctx, contentID
func (_m *DirectRepository) ContentInfo(ctx context.Context, contentID index.ID) (index.Info, error) {
ret := _m.Called(ctx, contentID)
var r0 index.Info
if rf, ok := ret.Get(0).(func(context.Context, index.ID) index.Info); ok {
r0 = rf(ctx, contentID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(index.Info)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, index.ID) error); ok {
r1 = rf(ctx, contentID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ContentReader provides a mock function with given fields:
func (_m *DirectRepository) ContentReader() content.Reader {
ret := _m.Called()
var r0 content.Reader
if rf, ok := ret.Get(0).(func() content.Reader); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(content.Reader)
}
}
return r0
}
// Crypter provides a mock function with given fields:
func (_m *DirectRepository) Crypter() *content.Crypter {
ret := _m.Called()
var r0 *content.Crypter
if rf, ok := ret.Get(0).(func() *content.Crypter); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*content.Crypter)
}
}
return r0
}
// DeriveKey provides a mock function with given fields: purpose, keyLength
func (_m *DirectRepository) DeriveKey(purpose []byte, keyLength int) []byte {
ret := _m.Called(purpose, keyLength)
var r0 []byte
if rf, ok := ret.Get(0).(func([]byte, int) []byte); ok {
r0 = rf(purpose, keyLength)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
return r0
}
// DisableIndexRefresh provides a mock function with given fields:
func (_m *DirectRepository) DisableIndexRefresh() {
_m.Called()
}
// FindManifests provides a mock function with given fields: ctx, labels
func (_m *DirectRepository) FindManifests(ctx context.Context, labels map[string]string) ([]*manifest.EntryMetadata, error) {
ret := _m.Called(ctx, labels)
var r0 []*manifest.EntryMetadata
if rf, ok := ret.Get(0).(func(context.Context, map[string]string) []*manifest.EntryMetadata); ok {
r0 = rf(ctx, labels)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*manifest.EntryMetadata)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, map[string]string) error); ok {
r1 = rf(ctx, labels)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetManifest provides a mock function with given fields: ctx, id, data
func (_m *DirectRepository) GetManifest(ctx context.Context, id manifest.ID, data interface{}) (*manifest.EntryMetadata, error) {
ret := _m.Called(ctx, id, data)
var r0 *manifest.EntryMetadata
if rf, ok := ret.Get(0).(func(context.Context, manifest.ID, interface{}) *manifest.EntryMetadata); ok {
r0 = rf(ctx, id, data)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*manifest.EntryMetadata)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, manifest.ID, interface{}) error); ok {
r1 = rf(ctx, id, data)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IndexBlobs provides a mock function with given fields: ctx, includeInactive
func (_m *DirectRepository) IndexBlobs(ctx context.Context, includeInactive bool) ([]content.IndexBlobInfo, error) {
ret := _m.Called(ctx, includeInactive)
var r0 []content.IndexBlobInfo
if rf, ok := ret.Get(0).(func(context.Context, bool) []content.IndexBlobInfo); ok {
r0 = rf(ctx, includeInactive)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]content.IndexBlobInfo)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok {
r1 = rf(ctx, includeInactive)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewDirectWriter provides a mock function with given fields: ctx, opt
func (_m *DirectRepository) NewDirectWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.DirectRepositoryWriter, error) {
ret := _m.Called(ctx, opt)
var r0 context.Context
if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok {
r0 = rf(ctx, opt)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(context.Context)
}
}
var r1 repo.DirectRepositoryWriter
if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.DirectRepositoryWriter); ok {
r1 = rf(ctx, opt)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(repo.DirectRepositoryWriter)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok {
r2 = rf(ctx, opt)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// NewWriter provides a mock function with given fields: ctx, opt
func (_m *DirectRepository) NewWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.RepositoryWriter, error) {
ret := _m.Called(ctx, opt)
var r0 context.Context
if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok {
r0 = rf(ctx, opt)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(context.Context)
}
}
var r1 repo.RepositoryWriter
if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.RepositoryWriter); ok {
r1 = rf(ctx, opt)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(repo.RepositoryWriter)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok {
r2 = rf(ctx, opt)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// ObjectFormat provides a mock function with given fields:
func (_m *DirectRepository) ObjectFormat() object.Format {
ret := _m.Called()
var r0 object.Format
if rf, ok := ret.Get(0).(func() object.Format); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(object.Format)
}
return r0
}
// OpenObject provides a mock function with given fields: ctx, id
func (_m *DirectRepository) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) {
ret := _m.Called(ctx, id)
var r0 object.Reader
if rf, ok := ret.Get(0).(func(context.Context, object.ID) object.Reader); ok {
r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(object.Reader)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok {
r1 = rf(ctx, id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// PrefetchContents provides a mock function with given fields: ctx, contentIDs, hint
func (_m *DirectRepository) PrefetchContents(ctx context.Context, contentIDs []index.ID, hint string) []index.ID {
ret := _m.Called(ctx, contentIDs, hint)
var r0 []index.ID
if rf, ok := ret.Get(0).(func(context.Context, []index.ID, string) []index.ID); ok {
r0 = rf(ctx, contentIDs, hint)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]index.ID)
}
}
return r0
}
// PrefetchObjects provides a mock function with given fields: ctx, objectIDs, hint
func (_m *DirectRepository) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]index.ID, error) {
ret := _m.Called(ctx, objectIDs, hint)
var r0 []index.ID
if rf, ok := ret.Get(0).(func(context.Context, []object.ID, string) []index.ID); ok {
r0 = rf(ctx, objectIDs, hint)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]index.ID)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, []object.ID, string) error); ok {
r1 = rf(ctx, objectIDs, hint)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Refresh provides a mock function with given fields: ctx
func (_m *DirectRepository) Refresh(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// Throttler provides a mock function with given fields:
func (_m *DirectRepository) Throttler() throttling.SettableThrottler {
ret := _m.Called()
var r0 throttling.SettableThrottler
if rf, ok := ret.Get(0).(func() throttling.SettableThrottler); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(throttling.SettableThrottler)
}
}
return r0
}
// Time provides a mock function with given fields:
func (_m *DirectRepository) Time() time.Time {
ret := _m.Called()
var r0 time.Time
if rf, ok := ret.Get(0).(func() time.Time); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Time)
}
return r0
}
// Token provides a mock function with given fields: password
func (_m *DirectRepository) Token(password string) (string, error) {
ret := _m.Called(password)
var r0 string
if rf, ok := ret.Get(0).(func(string) string); ok {
r0 = rf(password)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(password)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UniqueID provides a mock function with given fields:
func (_m *DirectRepository) UniqueID() []byte {
ret := _m.Called()
var r0 []byte
if rf, ok := ret.Get(0).(func() []byte); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
return r0
}
// UpdateDescription provides a mock function with given fields: d
func (_m *DirectRepository) UpdateDescription(d string) {
_m.Called(d)
}
// VerifyObject provides a mock function with given fields: ctx, id
func (_m *DirectRepository) VerifyObject(ctx context.Context, id object.ID) ([]index.ID, error) {
ret := _m.Called(ctx, id)
var r0 []index.ID
if rf, ok := ret.Get(0).(func(context.Context, object.ID) []index.ID); ok {
r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]index.ID)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok {
r1 = rf(ctx, id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type mockConstructorTestingTNewDirectRepository interface {
mock.TestingT
Cleanup(func())
}
// NewDirectRepository creates a new instance of DirectRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewDirectRepository(t mockConstructorTestingTNewDirectRepository) *DirectRepository {
mock := &DirectRepository{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
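
As a hedged usage note: a test would typically consume this generated mock through the standard testify expectation API provided by the embedded mock.Mock; the test name and stubbed value below are illustrative only.
```
package mocks

import (
	"testing"
	"time"
)

// TestDirectRepositoryTime is a hypothetical example, not part of the change
// set: stub a method, call it, and let the constructor's cleanup hook assert
// that the expectation was met.
func TestDirectRepositoryTime(t *testing.T) {
	repoMock := NewDirectRepository(t)
	fixed := time.Date(2022, 8, 29, 0, 0, 0, 0, time.UTC)

	// Declare the expected call and its canned return value.
	repoMock.On("Time").Return(fixed)

	if got := repoMock.Time(); !got.Equal(fixed) {
		t.Fatalf("expected %v, got %v", fixed, got)
	}
}
```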

View File

@ -0,0 +1,718 @@
// Code generated by mockery v2.14.0. DO NOT EDIT.
package mocks
import (
blob "github.com/kopia/kopia/repo/blob"
content "github.com/kopia/kopia/repo/content"
context "context"
index "github.com/kopia/kopia/repo/content/index"
manifest "github.com/kopia/kopia/repo/manifest"
mock "github.com/stretchr/testify/mock"
object "github.com/kopia/kopia/repo/object"
repo "github.com/kopia/kopia/repo"
throttling "github.com/kopia/kopia/repo/blob/throttling"
time "time"
)
// DirectRepositoryWriter is an autogenerated mock type for the DirectRepositoryWriter type
type DirectRepositoryWriter struct {
mock.Mock
}
// AlsoLogToContentLog provides a mock function with given fields: ctx
func (_m *DirectRepositoryWriter) AlsoLogToContentLog(ctx context.Context) context.Context {
ret := _m.Called(ctx)
var r0 context.Context
if rf, ok := ret.Get(0).(func(context.Context) context.Context); ok {
r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(context.Context)
}
}
return r0
}
// BlobCfg provides a mock function with given fields:
func (_m *DirectRepositoryWriter) BlobCfg() content.BlobCfgBlob {
ret := _m.Called()
var r0 content.BlobCfgBlob
if rf, ok := ret.Get(0).(func() content.BlobCfgBlob); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(content.BlobCfgBlob)
}
return r0
}
// BlobReader provides a mock function with given fields:
func (_m *DirectRepositoryWriter) BlobReader() blob.Reader {
ret := _m.Called()
var r0 blob.Reader
if rf, ok := ret.Get(0).(func() blob.Reader); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(blob.Reader)
}
}
return r0
}
// BlobStorage provides a mock function with given fields:
func (_m *DirectRepositoryWriter) BlobStorage() blob.Storage {
ret := _m.Called()
var r0 blob.Storage
if rf, ok := ret.Get(0).(func() blob.Storage); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(blob.Storage)
}
}
return r0
}
// BlobVolume provides a mock function with given fields:
func (_m *DirectRepositoryWriter) BlobVolume() blob.Volume {
ret := _m.Called()
var r0 blob.Volume
if rf, ok := ret.Get(0).(func() blob.Volume); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(blob.Volume)
}
}
return r0
}
// ChangePassword provides a mock function with given fields: ctx, newPassword
func (_m *DirectRepositoryWriter) ChangePassword(ctx context.Context, newPassword string) error {
ret := _m.Called(ctx, newPassword)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, newPassword)
} else {
r0 = ret.Error(0)
}
return r0
}
// ClientOptions provides a mock function with given fields:
func (_m *DirectRepositoryWriter) ClientOptions() repo.ClientOptions {
ret := _m.Called()
var r0 repo.ClientOptions
if rf, ok := ret.Get(0).(func() repo.ClientOptions); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(repo.ClientOptions)
}
return r0
}
// Close provides a mock function with given fields: ctx
func (_m *DirectRepositoryWriter) Close(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// CommitUpgrade provides a mock function with given fields: ctx
func (_m *DirectRepositoryWriter) CommitUpgrade(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// ConfigFilename provides a mock function with given fields:
func (_m *DirectRepositoryWriter) ConfigFilename() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// ContentInfo provides a mock function with given fields: ctx, contentID
func (_m *DirectRepositoryWriter) ContentInfo(ctx context.Context, contentID index.ID) (index.Info, error) {
ret := _m.Called(ctx, contentID)
var r0 index.Info
if rf, ok := ret.Get(0).(func(context.Context, index.ID) index.Info); ok {
r0 = rf(ctx, contentID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(index.Info)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, index.ID) error); ok {
r1 = rf(ctx, contentID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ContentManager provides a mock function with given fields:
func (_m *DirectRepositoryWriter) ContentManager() *content.WriteManager {
ret := _m.Called()
var r0 *content.WriteManager
if rf, ok := ret.Get(0).(func() *content.WriteManager); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*content.WriteManager)
}
}
return r0
}
// ContentReader provides a mock function with given fields:
func (_m *DirectRepositoryWriter) ContentReader() content.Reader {
ret := _m.Called()
var r0 content.Reader
if rf, ok := ret.Get(0).(func() content.Reader); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(content.Reader)
}
}
return r0
}
// Crypter provides a mock function with given fields:
func (_m *DirectRepositoryWriter) Crypter() *content.Crypter {
ret := _m.Called()
var r0 *content.Crypter
if rf, ok := ret.Get(0).(func() *content.Crypter); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*content.Crypter)
}
}
return r0
}
// DeleteManifest provides a mock function with given fields: ctx, id
func (_m *DirectRepositoryWriter) DeleteManifest(ctx context.Context, id manifest.ID) error {
ret := _m.Called(ctx, id)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, manifest.ID) error); ok {
r0 = rf(ctx, id)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeriveKey provides a mock function with given fields: purpose, keyLength
func (_m *DirectRepositoryWriter) DeriveKey(purpose []byte, keyLength int) []byte {
ret := _m.Called(purpose, keyLength)
var r0 []byte
if rf, ok := ret.Get(0).(func([]byte, int) []byte); ok {
r0 = rf(purpose, keyLength)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
return r0
}
// DisableIndexRefresh provides a mock function with given fields:
func (_m *DirectRepositoryWriter) DisableIndexRefresh() {
_m.Called()
}
// FindManifests provides a mock function with given fields: ctx, labels
func (_m *DirectRepositoryWriter) FindManifests(ctx context.Context, labels map[string]string) ([]*manifest.EntryMetadata, error) {
ret := _m.Called(ctx, labels)
var r0 []*manifest.EntryMetadata
if rf, ok := ret.Get(0).(func(context.Context, map[string]string) []*manifest.EntryMetadata); ok {
r0 = rf(ctx, labels)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*manifest.EntryMetadata)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, map[string]string) error); ok {
r1 = rf(ctx, labels)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Flush provides a mock function with given fields: ctx
func (_m *DirectRepositoryWriter) Flush(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetManifest provides a mock function with given fields: ctx, id, data
func (_m *DirectRepositoryWriter) GetManifest(ctx context.Context, id manifest.ID, data interface{}) (*manifest.EntryMetadata, error) {
ret := _m.Called(ctx, id, data)
var r0 *manifest.EntryMetadata
if rf, ok := ret.Get(0).(func(context.Context, manifest.ID, interface{}) *manifest.EntryMetadata); ok {
r0 = rf(ctx, id, data)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*manifest.EntryMetadata)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, manifest.ID, interface{}) error); ok {
r1 = rf(ctx, id, data)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IndexBlobs provides a mock function with given fields: ctx, includeInactive
func (_m *DirectRepositoryWriter) IndexBlobs(ctx context.Context, includeInactive bool) ([]content.IndexBlobInfo, error) {
ret := _m.Called(ctx, includeInactive)
var r0 []content.IndexBlobInfo
if rf, ok := ret.Get(0).(func(context.Context, bool) []content.IndexBlobInfo); ok {
r0 = rf(ctx, includeInactive)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]content.IndexBlobInfo)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok {
r1 = rf(ctx, includeInactive)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewDirectWriter provides a mock function with given fields: ctx, opt
func (_m *DirectRepositoryWriter) NewDirectWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.DirectRepositoryWriter, error) {
ret := _m.Called(ctx, opt)
var r0 context.Context
if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok {
r0 = rf(ctx, opt)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(context.Context)
}
}
var r1 repo.DirectRepositoryWriter
if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.DirectRepositoryWriter); ok {
r1 = rf(ctx, opt)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(repo.DirectRepositoryWriter)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok {
r2 = rf(ctx, opt)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// NewObjectWriter provides a mock function with given fields: ctx, opt
func (_m *DirectRepositoryWriter) NewObjectWriter(ctx context.Context, opt object.WriterOptions) object.Writer {
ret := _m.Called(ctx, opt)
var r0 object.Writer
if rf, ok := ret.Get(0).(func(context.Context, object.WriterOptions) object.Writer); ok {
r0 = rf(ctx, opt)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(object.Writer)
}
}
return r0
}
// NewWriter provides a mock function with given fields: ctx, opt
func (_m *DirectRepositoryWriter) NewWriter(ctx context.Context, opt repo.WriteSessionOptions) (context.Context, repo.RepositoryWriter, error) {
ret := _m.Called(ctx, opt)
var r0 context.Context
if rf, ok := ret.Get(0).(func(context.Context, repo.WriteSessionOptions) context.Context); ok {
r0 = rf(ctx, opt)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(context.Context)
}
}
var r1 repo.RepositoryWriter
if rf, ok := ret.Get(1).(func(context.Context, repo.WriteSessionOptions) repo.RepositoryWriter); ok {
r1 = rf(ctx, opt)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(repo.RepositoryWriter)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(context.Context, repo.WriteSessionOptions) error); ok {
r2 = rf(ctx, opt)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// ObjectFormat provides a mock function with given fields:
func (_m *DirectRepositoryWriter) ObjectFormat() object.Format {
ret := _m.Called()
var r0 object.Format
if rf, ok := ret.Get(0).(func() object.Format); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(object.Format)
}
return r0
}
// OpenObject provides a mock function with given fields: ctx, id
func (_m *DirectRepositoryWriter) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) {
ret := _m.Called(ctx, id)
var r0 object.Reader
if rf, ok := ret.Get(0).(func(context.Context, object.ID) object.Reader); ok {
r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(object.Reader)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok {
r1 = rf(ctx, id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// PrefetchContents provides a mock function with given fields: ctx, contentIDs, hint
func (_m *DirectRepositoryWriter) PrefetchContents(ctx context.Context, contentIDs []index.ID, hint string) []index.ID {
ret := _m.Called(ctx, contentIDs, hint)
var r0 []index.ID
if rf, ok := ret.Get(0).(func(context.Context, []index.ID, string) []index.ID); ok {
r0 = rf(ctx, contentIDs, hint)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]index.ID)
}
}
return r0
}
// PrefetchObjects provides a mock function with given fields: ctx, objectIDs, hint
func (_m *DirectRepositoryWriter) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]index.ID, error) {
ret := _m.Called(ctx, objectIDs, hint)
var r0 []index.ID
if rf, ok := ret.Get(0).(func(context.Context, []object.ID, string) []index.ID); ok {
r0 = rf(ctx, objectIDs, hint)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]index.ID)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, []object.ID, string) error); ok {
r1 = rf(ctx, objectIDs, hint)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// PutManifest provides a mock function with given fields: ctx, labels, payload
func (_m *DirectRepositoryWriter) PutManifest(ctx context.Context, labels map[string]string, payload interface{}) (manifest.ID, error) {
ret := _m.Called(ctx, labels, payload)
var r0 manifest.ID
if rf, ok := ret.Get(0).(func(context.Context, map[string]string, interface{}) manifest.ID); ok {
r0 = rf(ctx, labels, payload)
} else {
r0 = ret.Get(0).(manifest.ID)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, map[string]string, interface{}) error); ok {
r1 = rf(ctx, labels, payload)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Refresh provides a mock function with given fields: ctx
func (_m *DirectRepositoryWriter) Refresh(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// RollbackUpgrade provides a mock function with given fields: ctx
func (_m *DirectRepositoryWriter) RollbackUpgrade(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// SetParameters provides a mock function with given fields: ctx, m, blobcfg
func (_m *DirectRepositoryWriter) SetParameters(ctx context.Context, m content.MutableParameters, blobcfg content.BlobCfgBlob) error {
ret := _m.Called(ctx, m, blobcfg)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, content.MutableParameters, content.BlobCfgBlob) error); ok {
r0 = rf(ctx, m, blobcfg)
} else {
r0 = ret.Error(0)
}
return r0
}
// SetUpgradeLockIntent provides a mock function with given fields: ctx, l
func (_m *DirectRepositoryWriter) SetUpgradeLockIntent(ctx context.Context, l content.UpgradeLock) (*content.UpgradeLock, error) {
ret := _m.Called(ctx, l)
var r0 *content.UpgradeLock
if rf, ok := ret.Get(0).(func(context.Context, content.UpgradeLock) *content.UpgradeLock); ok {
r0 = rf(ctx, l)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*content.UpgradeLock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, content.UpgradeLock) error); ok {
r1 = rf(ctx, l)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Throttler provides a mock function with given fields:
func (_m *DirectRepositoryWriter) Throttler() throttling.SettableThrottler {
ret := _m.Called()
var r0 throttling.SettableThrottler
if rf, ok := ret.Get(0).(func() throttling.SettableThrottler); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(throttling.SettableThrottler)
}
}
return r0
}
// Time provides a mock function with given fields:
func (_m *DirectRepositoryWriter) Time() time.Time {
ret := _m.Called()
var r0 time.Time
if rf, ok := ret.Get(0).(func() time.Time); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Time)
}
return r0
}
// Token provides a mock function with given fields: password
func (_m *DirectRepositoryWriter) Token(password string) (string, error) {
ret := _m.Called(password)
var r0 string
if rf, ok := ret.Get(0).(func(string) string); ok {
r0 = rf(password)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(password)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UniqueID provides a mock function with given fields:
func (_m *DirectRepositoryWriter) UniqueID() []byte {
ret := _m.Called()
var r0 []byte
if rf, ok := ret.Get(0).(func() []byte); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
return r0
}
// UpdateDescription provides a mock function with given fields: d
func (_m *DirectRepositoryWriter) UpdateDescription(d string) {
_m.Called(d)
}
// VerifyObject provides a mock function with given fields: ctx, id
func (_m *DirectRepositoryWriter) VerifyObject(ctx context.Context, id object.ID) ([]index.ID, error) {
ret := _m.Called(ctx, id)
var r0 []index.ID
if rf, ok := ret.Get(0).(func(context.Context, object.ID) []index.ID); ok {
r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]index.ID)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, object.ID) error); ok {
r1 = rf(ctx, id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type mockConstructorTestingTNewDirectRepositoryWriter interface {
mock.TestingT
Cleanup(func())
}
// NewDirectRepositoryWriter creates a new instance of DirectRepositoryWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewDirectRepositoryWriter(t mockConstructorTestingTNewDirectRepositoryWriter) *DirectRepositoryWriter {
mock := &DirectRepositoryWriter{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -0,0 +1,587 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kopialib
import (
"context"
"os"
"strings"
"sync/atomic"
"time"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/compression"
"github.com/kopia/kopia/repo/content/index"
"github.com/kopia/kopia/repo/maintenance"
"github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/snapshot/snapshotmaintenance"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
"github.com/vmware-tanzu/velero/pkg/util/logging"
)
type kopiaRepoService struct {
logger logrus.FieldLogger
}
type kopiaRepository struct {
rawRepo repo.Repository
rawWriter repo.RepositoryWriter
description string
uploaded int64
openTime time.Time
throttle logThrottle
logger logrus.FieldLogger
}
type kopiaMaintenance struct {
mode maintenance.Mode
startTime time.Time
uploaded int64
throttle logThrottle
logger logrus.FieldLogger
}
type logThrottle struct {
lastTime int64
interval time.Duration
}
type kopiaObjectReader struct {
rawReader object.Reader
}
type kopiaObjectWriter struct {
rawWriter object.Writer
}
const (
defaultLogInterval = time.Duration(time.Second * 10)
defaultMaintainCheckPeriod = time.Hour
overwriteFullMaintainInterval = time.Duration(0)
overwriteQuickMaintainInterval = time.Duration(0)
)
var kopiaRepoOpen = repo.Open
// NewKopiaRepoService creates an instance of BackupRepoService implemented by Kopia
func NewKopiaRepoService(logger logrus.FieldLogger) udmrepo.BackupRepoService {
ks := &kopiaRepoService{
logger: logger,
}
return ks
}
func (ks *kopiaRepoService) Init(ctx context.Context, repoOption udmrepo.RepoOptions, createNew bool) error {
repoCtx := logging.SetupKopiaLog(ctx, ks.logger)
if createNew {
if err := CreateBackupRepo(repoCtx, repoOption); err != nil {
return err
}
return writeInitParameters(repoCtx, repoOption, ks.logger)
} else {
return ConnectBackupRepo(repoCtx, repoOption)
}
}
func (ks *kopiaRepoService) Open(ctx context.Context, repoOption udmrepo.RepoOptions) (udmrepo.BackupRepo, error) {
repoConfig := repoOption.ConfigFilePath
if repoConfig == "" {
return nil, errors.New("invalid config file path")
}
if _, err := os.Stat(repoConfig); os.IsNotExist(err) {
return nil, errors.Wrapf(err, "repo config %s doesn't exist", repoConfig)
}
repoCtx := logging.SetupKopiaLog(ctx, ks.logger)
r, err := openKopiaRepo(repoCtx, repoConfig, repoOption.RepoPassword)
if err != nil {
return nil, err
}
kr := kopiaRepository{
rawRepo: r,
openTime: time.Now(),
description: repoOption.Description,
throttle: logThrottle{
interval: defaultLogInterval,
},
logger: ks.logger,
}
_, kr.rawWriter, err = r.NewWriter(repoCtx, repo.WriteSessionOptions{
Purpose: repoOption.Description,
OnUpload: kr.updateProgress,
})
if err != nil {
if e := r.Close(repoCtx); e != nil {
ks.logger.WithError(e).Error("Failed to close raw repository on error")
}
return nil, errors.Wrap(err, "error to create repo writer")
}
return &kr, nil
}
func (ks *kopiaRepoService) Maintain(ctx context.Context, repoOption udmrepo.RepoOptions) error {
repoConfig := repoOption.ConfigFilePath
if repoConfig == "" {
return errors.New("invalid config file path")
}
if _, err := os.Stat(repoConfig); os.IsNotExist(err) {
return errors.Wrapf(err, "repo config %s doesn't exist", repoConfig)
}
repoCtx := logging.SetupKopiaLog(ctx, ks.logger)
r, err := openKopiaRepo(repoCtx, repoConfig, repoOption.RepoPassword)
if err != nil {
return err
}
defer func() {
c := r.Close(repoCtx)
if c != nil {
ks.logger.WithError(c).Error("Failed to close repo")
}
}()
km := kopiaMaintenance{
mode: maintenance.ModeAuto,
startTime: time.Now(),
throttle: logThrottle{
interval: defaultLogInterval,
},
logger: ks.logger,
}
if mode, exist := repoOption.GeneralOptions[udmrepo.GenOptionMaintainMode]; exist {
if strings.EqualFold(mode, udmrepo.GenOptionMaintainFull) {
km.mode = maintenance.ModeFull
} else if strings.EqualFold(mode, udmrepo.GenOptionMaintainQuick) {
km.mode = maintenance.ModeQuick
}
}
err = repo.DirectWriteSession(repoCtx, r.(repo.DirectRepository), repo.WriteSessionOptions{
Purpose: "UdmRepoMaintenance",
OnUpload: km.maintainProgress,
}, func(ctx context.Context, dw repo.DirectRepositoryWriter) error {
return km.runMaintenance(ctx, dw)
})
if err != nil {
return errors.Wrap(err, "error to maintain repo")
}
return nil
}
func (ks *kopiaRepoService) DefaultMaintenanceFrequency() time.Duration {
return defaultMaintainCheckPeriod
}
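
Init, Open and Maintain form the complete provider-facing lifecycle of the Kopia repository service. The sketch below shows a typical sequence; it assumes Flush and Close are exposed on the udmrepo.BackupRepo interface, as the implementation above suggests, and the function itself is illustrative rather than part of the change.
```
package kopialib

import (
	"context"

	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
)

// exampleLifecycle is illustrative only: connect to an existing repository,
// open it, flush and close the write session, then run auto maintenance.
func exampleLifecycle(ctx context.Context, opt udmrepo.RepoOptions) error {
	svc := NewKopiaRepoService(logrus.New())

	// createNew == false: connect to an already-initialized repository.
	if err := svc.Init(ctx, opt, false); err != nil {
		return err
	}

	r, err := svc.Open(ctx, opt)
	if err != nil {
		return err
	}
	if err := r.Flush(ctx); err != nil {
		_ = r.Close(ctx)
		return err
	}
	if err := r.Close(ctx); err != nil {
		return err
	}

	// Maintenance opens its own connection; it is normally scheduled at
	// svc.DefaultMaintenanceFrequency() intervals.
	return svc.Maintain(ctx, opt)
}
```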
func (km *kopiaMaintenance) runMaintenance(ctx context.Context, rep repo.DirectRepositoryWriter) error {
err := snapshotmaintenance.Run(logging.SetupKopiaLog(ctx, km.logger), rep, km.mode, false, maintenance.SafetyFull)
if err != nil {
return errors.Wrapf(err, "error to run maintenance under mode %s", km.mode)
}
return nil
}
// maintainProgress is called when the repository writes a piece of blob data to the storage during the maintenance
func (km *kopiaMaintenance) maintainProgress(uploaded int64) {
total := atomic.AddInt64(&km.uploaded, uploaded)
if km.throttle.shouldLog() {
km.logger.WithFields(
logrus.Fields{
"Start Time": km.startTime.Format(time.RFC3339Nano),
"Current": time.Now().Format(time.RFC3339Nano),
},
).Debugf("Repo maintenance uploaded %d bytes.", total)
}
}
func (kr *kopiaRepository) OpenObject(ctx context.Context, id udmrepo.ID) (udmrepo.ObjectReader, error) {
if kr.rawRepo == nil {
return nil, errors.New("repo is closed or not open")
}
reader, err := kr.rawRepo.OpenObject(logging.SetupKopiaLog(ctx, kr.logger), object.ID(id))
if err != nil {
return nil, errors.Wrap(err, "error to open object")
}
return &kopiaObjectReader{
rawReader: reader,
}, nil
}
func (kr *kopiaRepository) GetManifest(ctx context.Context, id udmrepo.ID, mani *udmrepo.RepoManifest) error {
if kr.rawRepo == nil {
return errors.New("repo is closed or not open")
}
metadata, err := kr.rawRepo.GetManifest(logging.SetupKopiaLog(ctx, kr.logger), manifest.ID(id), mani.Payload)
if err != nil {
return errors.Wrap(err, "error to get manifest")
}
mani.Metadata = getManifestEntryFromKopia(metadata)
return nil
}
func (kr *kopiaRepository) FindManifests(ctx context.Context, filter udmrepo.ManifestFilter) ([]*udmrepo.ManifestEntryMetadata, error) {
if kr.rawRepo == nil {
return nil, errors.New("repo is closed or not open")
}
metadata, err := kr.rawRepo.FindManifests(logging.SetupKopiaLog(ctx, kr.logger), filter.Labels)
if err != nil {
return nil, errors.Wrap(err, "error to find manifests")
}
return getManifestEntriesFromKopia(metadata), nil
}
func (kr *kopiaRepository) Time() time.Time {
if kr.rawRepo == nil {
return time.Time{}
}
return kr.rawRepo.Time()
}
func (kr *kopiaRepository) Close(ctx context.Context) error {
if kr.rawWriter != nil {
err := kr.rawWriter.Close(logging.SetupKopiaLog(ctx, kr.logger))
if err != nil {
return errors.Wrap(err, "error to close repo writer")
}
kr.rawWriter = nil
}
if kr.rawRepo != nil {
err := kr.rawRepo.Close(logging.SetupKopiaLog(ctx, kr.logger))
if err != nil {
return errors.Wrap(err, "error to close repo")
}
kr.rawRepo = nil
}
return nil
}
func (kr *kopiaRepository) NewObjectWriter(ctx context.Context, opt udmrepo.ObjectWriteOptions) udmrepo.ObjectWriter {
if kr.rawWriter == nil {
return nil
}
writer := kr.rawWriter.NewObjectWriter(logging.SetupKopiaLog(ctx, kr.logger), object.WriterOptions{
Description: opt.Description,
Prefix: index.ID(opt.Prefix),
AsyncWrites: getAsyncWrites(),
Compressor: getCompressorForObject(opt),
})
if writer == nil {
return nil
}
return &kopiaObjectWriter{
rawWriter: writer,
}
}
func (kr *kopiaRepository) PutManifest(ctx context.Context, manifest udmrepo.RepoManifest) (udmrepo.ID, error) {
if kr.rawWriter == nil {
return "", errors.New("repo writer is closed or not open")
}
id, err := kr.rawWriter.PutManifest(logging.SetupKopiaLog(ctx, kr.logger), manifest.Metadata.Labels, manifest.Payload)
if err != nil {
return "", errors.Wrap(err, "error to put manifest")
}
return udmrepo.ID(id), nil
}
func (kr *kopiaRepository) DeleteManifest(ctx context.Context, id udmrepo.ID) error {
if kr.rawWriter == nil {
return errors.New("repo writer is closed or not open")
}
err := kr.rawWriter.DeleteManifest(logging.SetupKopiaLog(ctx, kr.logger), manifest.ID(id))
if err != nil {
return errors.Wrap(err, "error to delete manifest")
}
return nil
}
func (kr *kopiaRepository) Flush(ctx context.Context) error {
if kr.rawWriter == nil {
return errors.New("repo writer is closed or not open")
}
err := kr.rawWriter.Flush(logging.SetupKopiaLog(ctx, kr.logger))
if err != nil {
return errors.Wrap(err, "error to flush repo")
}
return nil
}
// updateProgress is called when the repository writes a piece of blob data to the storage during data write
func (kr *kopiaRepository) updateProgress(uploaded int64) {
total := atomic.AddInt64(&kr.uploaded, uploaded)
if kr.throttle.shouldLog() {
kr.logger.WithFields(
logrus.Fields{
"Description": kr.description,
"Open Time": kr.openTime.Format(time.RFC3339Nano),
"Current": time.Now().Format(time.RFC3339Nano),
},
).Debugf("Repo uploaded %d bytes.", total)
}
}
func (kor *kopiaObjectReader) Read(p []byte) (int, error) {
if kor.rawReader == nil {
return 0, errors.New("object reader is closed or not open")
}
n, err := kor.rawReader.Read(p)
if err != nil {
return 0, errors.Wrap(err, "error to read object")
}
return n, nil
}
func (kor *kopiaObjectReader) Seek(offset int64, whence int) (int64, error) {
if kor.rawReader == nil {
return -1, errors.New("object reader is closed or not open")
}
p, err := kor.rawReader.Seek(offset, whence)
if err != nil {
return -1, errors.Wrap(err, "error to seek object")
}
return p, nil
}
func (kor *kopiaObjectReader) Close() error {
if kor.rawReader == nil {
return nil
}
err := kor.rawReader.Close()
if err != nil {
return errors.Wrap(err, "error to close object reader")
}
kor.rawReader = nil
return nil
}
func (kor *kopiaObjectReader) Length() int64 {
if kor.rawReader == nil {
return -1
}
return kor.rawReader.Length()
}
func (kow *kopiaObjectWriter) Write(p []byte) (int, error) {
if kow.rawWriter == nil {
return 0, errors.New("object writer is closed or not open")
}
n, err := kow.rawWriter.Write(p)
if err != nil {
return 0, errors.Wrap(err, "error to write object")
}
return n, nil
}
func (kow *kopiaObjectWriter) Seek(offset int64, whence int) (int64, error) {
return -1, errors.New("not supported")
}
func (kow *kopiaObjectWriter) Checkpoint() (udmrepo.ID, error) {
if kow.rawWriter == nil {
return udmrepo.ID(""), errors.New("object writer is closed or not open")
}
id, err := kow.rawWriter.Checkpoint()
if err != nil {
return udmrepo.ID(""), errors.Wrap(err, "error to checkpoint object")
}
return udmrepo.ID(id), nil
}
func (kow *kopiaObjectWriter) Result() (udmrepo.ID, error) {
if kow.rawWriter == nil {
return udmrepo.ID(""), errors.New("object writer is closed or not open")
}
id, err := kow.rawWriter.Result()
if err != nil {
return udmrepo.ID(""), errors.Wrap(err, "error to wait object")
}
return udmrepo.ID(id), nil
}
func (kow *kopiaObjectWriter) Close() error {
if kow.rawWriter == nil {
return nil
}
err := kow.rawWriter.Close()
if err != nil {
return errors.Wrap(err, "error to close object writer")
}
kow.rawWriter = nil
return nil
}
// getAsyncWrites returns the number of async writes; async writes are not supported at present, so it always returns 0
func getAsyncWrites() int {
return 0
}
// getCompressorForObject returns the compressor for an object; compression is not supported at present, so it always returns an empty name
func getCompressorForObject(opt udmrepo.ObjectWriteOptions) compression.Name {
return ""
}
func getManifestEntryFromKopia(kMani *manifest.EntryMetadata) *udmrepo.ManifestEntryMetadata {
return &udmrepo.ManifestEntryMetadata{
ID: udmrepo.ID(kMani.ID),
Labels: kMani.Labels,
Length: int32(kMani.Length),
ModTime: kMani.ModTime,
}
}
func getManifestEntriesFromKopia(kMani []*manifest.EntryMetadata) []*udmrepo.ManifestEntryMetadata {
var ret []*udmrepo.ManifestEntryMetadata
for _, entry := range kMani {
ret = append(ret, &udmrepo.ManifestEntryMetadata{
ID: udmrepo.ID(entry.ID),
Labels: entry.Labels,
Length: int32(entry.Length),
ModTime: entry.ModTime,
})
}
return ret
}
func (lt *logThrottle) shouldLog() bool {
nextOutputTime := atomic.LoadInt64((*int64)(&lt.lastTime))
if nowNano := time.Now().UnixNano(); nowNano > nextOutputTime {
if atomic.CompareAndSwapInt64((*int64)(&lt.lastTime), nextOutputTime, nowNano+lt.interval.Nanoseconds()) {
return true
}
}
return false
}
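The logThrottle above rate-limits progress logs with a lock-free compare-and-swap on the next allowed output time. Below is a minimal, self-contained sketch of the same idea, using standalone hypothetical types rather than the Velero code itself:

```
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// throttle allows at most one output per interval, even under concurrency.
type throttle struct {
	next     int64         // UnixNano timestamp after which output is allowed again
	interval time.Duration // minimum gap between outputs
}

// shouldOutput returns true only for the caller that wins the CompareAndSwap,
// mirroring the shouldLog logic above.
func (t *throttle) shouldOutput() bool {
	next := atomic.LoadInt64(&t.next)
	if now := time.Now().UnixNano(); now > next {
		return atomic.CompareAndSwapInt64(&t.next, next, now+t.interval.Nanoseconds())
	}
	return false
}

func main() {
	t := &throttle{interval: time.Second}
	for i := 0; i < 5; i++ {
		if t.shouldOutput() {
			fmt.Println("output allowed at iteration", i) // prints only for i == 0
		}
	}
}
```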
func openKopiaRepo(ctx context.Context, configFile string, password string) (repo.Repository, error) {
r, err := kopiaRepoOpen(ctx, configFile, password, &repo.Options{})
if os.IsNotExist(err) {
return nil, errors.Wrap(err, "error to open repo, repo doesn't exist")
}
if err != nil {
return nil, errors.Wrap(err, "error to open repo")
}
return r, nil
}
func writeInitParameters(ctx context.Context, repoOption udmrepo.RepoOptions, logger logrus.FieldLogger) error {
r, err := openKopiaRepo(ctx, repoOption.ConfigFilePath, repoOption.RepoPassword)
if err != nil {
return err
}
defer func() {
c := r.Close(ctx)
if c != nil {
logger.WithError(c).Error("Failed to close repo")
}
}()
err = repo.WriteSession(ctx, r, repo.WriteSessionOptions{
Purpose: "set init parameters",
}, func(ctx context.Context, w repo.RepositoryWriter) error {
p := maintenance.DefaultParams()
if overwriteFullMaintainInterval != time.Duration(0) {
logger.Infof("Full maintenance interval change from %v to %v", p.FullCycle.Interval, overwriteFullMaintainInterval)
p.FullCycle.Interval = overwriteFullMaintainInterval
}
if overwriteQuickMaintainInterval != time.Duration(0) {
logger.Infof("Quick maintenance interval change from %v to %v", p.QuickCycle.Interval, overwriteQuickMaintainInterval)
p.QuickCycle.Interval = overwriteQuickMaintainInterval
}
p.Owner = r.ClientOptions().UsernameAtHost()
if err := maintenance.SetParams(ctx, w, &p); err != nil {
return errors.Wrap(err, "error to set maintenance params")
}
return nil
})
if err != nil {
return errors.Wrap(err, "error to init write repo parameters")
}
return nil
}
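For orientation, here is a hedged sketch of how a caller might drive this kopialib-backed service through the generic udmrepo interfaces. It assumes the repository was already initialized via Init, and the config file path and password below are hypothetical placeholders:

```
package example

import (
	"context"

	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo/service"
)

// writeOneObject opens the backup repository, writes a single object and
// flushes it. Option values are illustrative only.
func writeOneObject(ctx context.Context) error {
	repoSvc := service.Create(logrus.New()) // kopialib-backed BackupRepoService

	bkRepo, err := repoSvc.Open(ctx, udmrepo.RepoOptions{
		ConfigFilePath: "/tmp/velero-repos/repo-uid.config", // hypothetical path
		RepoPassword:   "static-passw0rd",                   // hypothetical password
		Description:    "example open",
	})
	if err != nil {
		return err
	}
	defer bkRepo.Close(ctx)

	writer := bkRepo.NewObjectWriter(ctx, udmrepo.ObjectWriteOptions{Description: "example object"})
	if writer == nil {
		return nil // repo writer not available
	}
	defer writer.Close()

	if _, err := writer.Write([]byte("hello")); err != nil {
		return err
	}
	if _, err := writer.Result(); err != nil { // Result returns the stored object ID
		return err
	}
	return bkRepo.Flush(ctx) // persist pending writes
}
```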

View File

@ -0,0 +1,406 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kopialib
import (
"context"
"os"
"testing"
"time"
"github.com/kopia/kopia/repo"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
repomocks "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib/backend/mocks"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
)
func TestOpen(t *testing.T) {
var directRpo *repomocks.DirectRepository
testCases := []struct {
name string
repoOptions udmrepo.RepoOptions
returnRepo *repomocks.DirectRepository
repoOpen func(context.Context, string, string, *repo.Options) (repo.Repository, error)
newWriterError error
expectedErr string
expected *kopiaRepository
}{
{
name: "invalid config file",
expectedErr: "invalid config file path",
},
{
name: "config file doesn't exist",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "fake-file",
},
expectedErr: "repo config fake-file doesn't exist: stat fake-file: no such file or directory",
},
{
name: "repo open fail, repo not exist",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return nil, os.ErrNotExist
},
expectedErr: "error to open repo, repo doesn't exist: file does not exist",
},
{
name: "repo open fail, other error",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return nil, errors.New("fake-repo-open-error")
},
expectedErr: "error to open repo: fake-repo-open-error",
},
{
name: "create repository writer fail",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return directRpo, nil
},
returnRepo: new(repomocks.DirectRepository),
newWriterError: errors.New("fake-new-writer-error"),
expectedErr: "error to create repo writer: fake-new-writer-error",
},
{
name: "create repository success",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
Description: "fake-description",
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return directRpo, nil
},
returnRepo: new(repomocks.DirectRepository),
expected: &kopiaRepository{
description: "fake-description",
throttle: logThrottle{
interval: defaultLogInterval,
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
logger := velerotest.NewLogger()
service := kopiaRepoService{
logger: logger,
}
if tc.repoOpen != nil {
kopiaRepoOpen = tc.repoOpen
}
if tc.returnRepo != nil {
directRpo = tc.returnRepo
}
if tc.returnRepo != nil {
tc.returnRepo.On("NewWriter", mock.Anything, mock.Anything).Return(nil, nil, tc.newWriterError)
tc.returnRepo.On("Close", mock.Anything).Return(nil)
}
repo, err := service.Open(context.Background(), tc.repoOptions)
if repo != nil {
require.Equal(t, tc.expected.description, repo.(*kopiaRepository).description)
require.Equal(t, tc.expected.throttle.interval, repo.(*kopiaRepository).throttle.interval)
require.Equal(t, repo.(*kopiaRepository).logger, logger)
}
if tc.expectedErr == "" {
assert.NoError(t, err)
} else {
assert.EqualError(t, err, tc.expectedErr)
}
})
}
}
func TestMaintain(t *testing.T) {
var directRpo *repomocks.DirectRepository
testCases := []struct {
name string
repoOptions udmrepo.RepoOptions
returnRepo *repomocks.DirectRepository
returnRepoWriter *repomocks.DirectRepositoryWriter
repoOpen func(context.Context, string, string, *repo.Options) (repo.Repository, error)
newRepoWriterError error
findManifestError error
expectedErr string
}{
{
name: "invalid config file",
expectedErr: "invalid config file path",
},
{
name: "config file doesn't exist",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "fake-file",
},
expectedErr: "repo config fake-file doesn't exist: stat fake-file: no such file or directory",
},
{
name: "repo open fail, repo not exist",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
GeneralOptions: map[string]string{},
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return nil, os.ErrNotExist
},
expectedErr: "error to open repo, repo doesn't exist: file does not exist",
},
{
name: "repo open fail, other error",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
GeneralOptions: map[string]string{},
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return nil, errors.New("fake-repo-open-error")
},
expectedErr: "error to open repo: fake-repo-open-error",
},
{
name: "write session fail",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
GeneralOptions: map[string]string{},
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return directRpo, nil
},
returnRepo: new(repomocks.DirectRepository),
newRepoWriterError: errors.New("fake-new-direct-writer-error"),
expectedErr: "error to maintain repo: unable to create direct writer: fake-new-direct-writer-error",
},
{
name: "maintain fail",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
GeneralOptions: map[string]string{},
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return directRpo, nil
},
returnRepo: new(repomocks.DirectRepository),
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
findManifestError: errors.New("fake-find-manifest-error"),
expectedErr: "error to maintain repo: error to run maintenance under mode auto: unable to get maintenance params: error looking for maintenance manifest: fake-find-manifest-error",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
logger := velerotest.NewLogger()
ctx := context.Background()
service := kopiaRepoService{
logger: logger,
}
if tc.repoOpen != nil {
kopiaRepoOpen = tc.repoOpen
}
if tc.returnRepo != nil {
directRpo = tc.returnRepo
}
if tc.returnRepo != nil {
tc.returnRepo.On("NewDirectWriter", mock.Anything, mock.Anything).Return(ctx, tc.returnRepoWriter, tc.newRepoWriterError)
tc.returnRepo.On("Close", mock.Anything).Return(nil)
}
if tc.returnRepoWriter != nil {
tc.returnRepoWriter.On("DisableIndexRefresh").Return()
tc.returnRepoWriter.On("AlsoLogToContentLog", mock.Anything).Return(nil)
tc.returnRepoWriter.On("Close", mock.Anything).Return(nil)
tc.returnRepoWriter.On("FindManifests", mock.Anything, mock.Anything).Return(nil, tc.findManifestError)
}
err := service.Maintain(ctx, tc.repoOptions)
if tc.expectedErr == "" {
assert.NoError(t, err)
} else {
assert.EqualError(t, err, tc.expectedErr)
}
})
}
}
func TestWriteInitParameters(t *testing.T) {
var directRpo *repomocks.DirectRepository
testCases := []struct {
name string
repoOptions udmrepo.RepoOptions
returnRepo *repomocks.DirectRepository
returnRepoWriter *repomocks.DirectRepositoryWriter
repoOpen func(context.Context, string, string, *repo.Options) (repo.Repository, error)
newRepoWriterError error
findManifestError error
expectedErr string
}{
{
name: "repo open fail, repo not exist",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
GeneralOptions: map[string]string{},
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return nil, os.ErrNotExist
},
expectedErr: "error to open repo, repo doesn't exist: file does not exist",
},
{
name: "repo open fail, other error",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
GeneralOptions: map[string]string{},
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return nil, errors.New("fake-repo-open-error")
},
expectedErr: "error to open repo: fake-repo-open-error",
},
{
name: "write session fail",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
GeneralOptions: map[string]string{},
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return directRpo, nil
},
returnRepo: new(repomocks.DirectRepository),
newRepoWriterError: errors.New("fake-new-writer-error"),
expectedErr: "error to init write repo parameters: unable to create writer: fake-new-writer-error",
},
{
name: "set repo param fail",
repoOptions: udmrepo.RepoOptions{
ConfigFilePath: "/tmp",
GeneralOptions: map[string]string{},
},
repoOpen: func(context.Context, string, string, *repo.Options) (repo.Repository, error) {
return directRpo, nil
},
returnRepo: new(repomocks.DirectRepository),
returnRepoWriter: new(repomocks.DirectRepositoryWriter),
findManifestError: errors.New("fake-find-manifest-error"),
expectedErr: "error to init write repo parameters: error to set maintenance params: error looking for maintenance manifest: fake-find-manifest-error",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
logger := velerotest.NewLogger()
ctx := context.Background()
if tc.repoOpen != nil {
kopiaRepoOpen = tc.repoOpen
}
if tc.returnRepo != nil {
directRpo = tc.returnRepo
}
if tc.returnRepo != nil {
tc.returnRepo.On("NewWriter", mock.Anything, mock.Anything).Return(ctx, tc.returnRepoWriter, tc.newRepoWriterError)
tc.returnRepo.On("ClientOptions").Return(repo.ClientOptions{})
tc.returnRepo.On("Close", mock.Anything).Return(nil)
}
if tc.returnRepoWriter != nil {
tc.returnRepoWriter.On("Close", mock.Anything).Return(nil)
tc.returnRepoWriter.On("FindManifests", mock.Anything, mock.Anything).Return(nil, tc.findManifestError)
}
err := writeInitParameters(ctx, tc.repoOptions, logger)
if tc.expectedErr == "" {
assert.NoError(t, err)
} else {
assert.EqualError(t, err, tc.expectedErr)
}
})
}
}
func TestShouldLog(t *testing.T) {
testCases := []struct {
name string
lastTime int64
interval time.Duration
retValue bool
}{
{
name: "first time",
retValue: true,
},
{
name: "not run",
lastTime: time.Now().Add(time.Hour).UnixNano(),
interval: time.Second * 10,
},
{
name: "not first time, run",
lastTime: time.Now().Add(-time.Hour).UnixNano(),
interval: time.Second * 10,
retValue: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
lt := logThrottle{
lastTime: tc.lastTime,
interval: tc.interval,
}
before := lt.lastTime
nw := time.Now()
s := lt.shouldLog()
require.Equal(t, s, tc.retValue)
if s {
require.GreaterOrEqual(t, lt.lastTime-nw.UnixNano(), lt.interval)
} else {
require.Equal(t, lt.lastTime, before)
}
})
}
}

View File

@ -4,8 +4,10 @@ package mocks
import (
context "context"
time "time"
mock "github.com/stretchr/testify/mock"
udmrepo "github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
)
@ -14,6 +16,20 @@ type BackupRepoService struct {
mock.Mock
}
// DefaultMaintenanceFrequency provides a mock function with given fields:
func (_m *BackupRepoService) DefaultMaintenanceFrequency() time.Duration {
ret := _m.Called()
var r0 time.Duration
if rf, ok := ret.Get(0).(func() time.Duration); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Duration)
}
return r0
}
// Init provides a mock function with given fields: ctx, repoOption, createNew
func (_m *BackupRepoService) Init(ctx context.Context, repoOption udmrepo.RepoOptions, createNew bool) error {
ret := _m.Called(ctx, repoOption, createNew)

View File

@ -84,6 +84,10 @@ type BackupRepoService interface {
// Maintain is periodically called to maintain the backup repository to eliminate redundant data.
// repoOption: options to maintain the backup repository.
Maintain(ctx context.Context, repoOption RepoOptions) error
// DefaultMaintenanceFrequency returns the default maintenance frequency; callers refer to this
// frequency when maintaining the backup repository to get the best maintenance performance
DefaultMaintenanceFrequency() time.Duration
}
// BackupRepo provides the access to the backup repository
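The new DefaultMaintenanceFrequency lets callers ask the backend how often maintenance should run. A hedged sketch of a caller-side maintenance loop built only on the two interface methods shown here (the real Velero repository manager schedules this differently):

```
package example

import (
	"context"
	"time"

	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
)

// maintainLoop runs repository maintenance at the frequency suggested by the
// backend until the context is cancelled.
func maintainLoop(ctx context.Context, svc udmrepo.BackupRepoService, opt udmrepo.RepoOptions) error {
	freq := svc.DefaultMaintenanceFrequency()
	if freq <= 0 {
		freq = 7 * 24 * time.Hour // hypothetical fallback if the backend reports none
	}
	ticker := time.NewTicker(freq)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			if err := svc.Maintain(ctx, opt); err != nil {
				return err
			}
		}
	}
}
```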

View File

@ -20,10 +20,10 @@ import (
"github.com/sirupsen/logrus"
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo/kopialib"
)
// Create creates an instance of BackupRepoService
func Create(logger logrus.FieldLogger) udmrepo.BackupRepoService {
///TODO: create from kopiaLib
return nil
return kopialib.NewKopiaRepoService(logger)
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package restic
import (
"context"
"fmt"
"os"
"strconv"
@ -25,24 +24,14 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/label"
repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
)
const (
// DaemonSet is the name of the Velero restic daemonset.
DaemonSet = "restic"
// InitContainer is the name of the init container added
// to workload pods to help with restores.
InitContainer = "restic-wait"
// DefaultMaintenanceFrequency is the default time interval
// at which restic prune is run.
@ -61,51 +50,6 @@ const (
resticInsecureTLSFlag = "--insecure-tls"
)
// SnapshotIdentifier uniquely identifies a restic snapshot
// taken by Velero.
type SnapshotIdentifier struct {
// VolumeNamespace is the namespace of the pod/volume that
// the restic snapshot is for.
VolumeNamespace string
// BackupStorageLocation is the backup's storage location
// name.
BackupStorageLocation string
// SnapshotID is the short ID of the restic snapshot.
SnapshotID string
}
// GetSnapshotsInBackup returns a list of all restic snapshot ids associated with
// a given Velero backup.
func GetSnapshotsInBackup(ctx context.Context, backup *velerov1api.Backup, kbClient client.Client) ([]SnapshotIdentifier, error) {
podVolumeBackups := &velerov1api.PodVolumeBackupList{}
options := &client.ListOptions{
LabelSelector: labels.Set(map[string]string{
velerov1api.BackupNameLabel: label.GetValidName(backup.Name),
}).AsSelector(),
}
err := kbClient.List(ctx, podVolumeBackups, options)
if err != nil {
return nil, errors.WithStack(err)
}
var res []SnapshotIdentifier
for _, item := range podVolumeBackups.Items {
if item.Status.SnapshotID == "" {
continue
}
res = append(res, SnapshotIdentifier{
VolumeNamespace: item.Spec.Pod.Namespace,
BackupStorageLocation: backup.Spec.StorageLocation,
SnapshotID: item.Status.SnapshotID,
})
}
return res, nil
}
// TempCACertFile creates a temp file containing a CA bundle
// and returns its path. The caller should generally call os.Remove()
// to remove the file when done with it.
@ -131,14 +75,6 @@ func TempCACertFile(caCert []byte, bsl string, fs filesystem.Interface) (string,
return name, nil
}
// NewPodVolumeRestoreListOptions creates a ListOptions with a label selector configured to
// find PodVolumeRestores for the restore identified by name.
func NewPodVolumeRestoreListOptions(name string) metav1.ListOptions {
return metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", velerov1api.RestoreNameLabel, label.GetValidName(name)),
}
}
// CmdEnv returns a list of environment variables (in the format var=val) that
// should be used when running a restic command for a particular backend provider.
// This list is the current environment, plus any provider-specific variables restic needs.

View File

@ -17,190 +17,17 @@ limitations under the License.
package restic
import (
"context"
"os"
"sort"
"testing"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
velerotest "github.com/vmware-tanzu/velero/pkg/test"
)
func TestGetSnapshotsInBackup(t *testing.T) {
tests := []struct {
name string
podVolumeBackups []velerov1api.PodVolumeBackup
expected []SnapshotIdentifier
longBackupNameEnabled bool
}{
{
name: "no pod volume backups",
podVolumeBackups: nil,
expected: nil,
},
{
name: "no pod volume backups with matching label",
podVolumeBackups: []velerov1api.PodVolumeBackup{
{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"},
},
},
expected: nil,
},
{
name: "some pod volume backups with matching label",
podVolumeBackups: []velerov1api.PodVolumeBackup{
{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-3"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-4"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: ""},
},
},
expected: []SnapshotIdentifier{
{
VolumeNamespace: "ns-1",
SnapshotID: "snap-3",
},
{
VolumeNamespace: "ns-1",
SnapshotID: "snap-4",
},
},
},
{
name: "some pod volume backups with matching label and backup name greater than 63 chars",
longBackupNameEnabled: true,
podVolumeBackups: []velerov1api.PodVolumeBackup{
{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "the-really-long-backup-name-that-is-much-more-than-63-cha6ca4bc"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-3"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-4"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}},
Spec: velerov1api.PodVolumeBackupSpec{
Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-2"},
},
Status: velerov1api.PodVolumeBackupStatus{SnapshotID: ""},
},
},
expected: []SnapshotIdentifier{
{
VolumeNamespace: "ns-1",
SnapshotID: "snap-3",
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
clientBuilder = velerotest.NewFakeControllerRuntimeClientBuilder(t)
veleroBackup = &velerov1api.Backup{}
)
veleroBackup.Name = "backup-1"
if test.longBackupNameEnabled {
veleroBackup.Name = "the-really-long-backup-name-that-is-much-more-than-63-characters"
}
clientBuilder.WithLists(&velerov1api.PodVolumeBackupList{
Items: test.podVolumeBackups,
})
res, err := GetSnapshotsInBackup(context.TODO(), veleroBackup, clientBuilder.Build())
assert.NoError(t, err)
// sort to ensure good compare of slices
less := func(snapshots []SnapshotIdentifier) func(i, j int) bool {
return func(i, j int) bool {
if snapshots[i].VolumeNamespace == snapshots[j].VolumeNamespace {
return snapshots[i].SnapshotID < snapshots[j].SnapshotID
}
return snapshots[i].VolumeNamespace < snapshots[j].VolumeNamespace
}
}
sort.Slice(test.expected, less(test.expected))
sort.Slice(res, less(res))
assert.Equal(t, test.expected, res)
})
}
}
func TestTempCACertFile(t *testing.T) {
var (
fs = velerotest.NewFakeFileSystem()

View File

@ -37,7 +37,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/plugin/framework"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
"github.com/vmware-tanzu/velero/pkg/podvolume"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
@ -161,7 +160,7 @@ func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInpu
initContainerBuilder.Command(getCommand(log, config))
initContainer := *initContainerBuilder.Result()
if len(pod.Spec.InitContainers) == 0 || pod.Spec.InitContainers[0].Name != restic.InitContainer {
if len(pod.Spec.InitContainers) == 0 || pod.Spec.InitContainers[0].Name != podvolume.InitContainer {
pod.Spec.InitContainers = append([]corev1.Container{initContainer}, pod.Spec.InitContainers...)
} else {
pod.Spec.InitContainers[0] = initContainer
@ -290,7 +289,7 @@ func getPluginConfig(kind framework.PluginKind, name string, client corev1client
}
func newResticInitContainerBuilder(image, restoreUID string) *builder.ContainerBuilder {
return builder.ForContainer(restic.InitContainer, image).
return builder.ForContainer(podvolume.InitContainer, image).
Args(restoreUID).
Env([]*corev1.EnvVar{
{

View File

@ -60,9 +60,9 @@ type KopiaProgress struct {
estimatedFileCount int32 // +checklocksignore the total count of files to be processed
estimatedTotalBytes int64 // +checklocksignore the total size of files to be processed
// +checkatomic
processedBytes int64 // which statistic all bytes has been processed currently
outputThrottle Throttle // which control the frequency of update progress
UpFunc func(uploader.UploaderProgress) //which called by UpdateProgress func, it is used to update pvb or pvr status
processedBytes int64 // total bytes processed so far
outputThrottle Throttle // controls the frequency of progress updates
Updater uploader.ProgressUpdater // kopia progress calls the ProgressUpdater interface; the third party implements it to perform the actual progress update
}
// UploadedBytes records the total bytes uploaded so far
@ -90,13 +90,10 @@ func (p *KopiaProgress) EstimatedDataSize(fileCount int, totalBytes int64) {
p.UpdateProgress()
}
//UpdateProgress which called by UpdateProgress func, it is used to update pvb or pvr status
// UpdateProgress calls the Updater's UpdateProgress interface so the third-party implementation can update the progress
func (p *KopiaProgress) UpdateProgress() {
if p.outputThrottle.ShouldOutput() {
p.UpFunc(uploader.UploaderProgress{
TotalBytes: atomic.LoadInt64(&p.estimatedTotalBytes),
BytesDone: atomic.LoadInt64(&p.processedBytes),
})
p.Updater.UpdateProgress(&uploader.UploaderProgress{TotalBytes: p.estimatedTotalBytes, BytesDone: p.processedBytes})
}
}
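The Updater field decouples kopia's progress callbacks from Velero's PVB/PVR status updates. Below is a hedged sketch of a minimal third-party ProgressUpdater and how it could be wired into KopiaProgress; the interface shape is inferred from the call above, and the logging implementation and interval are illustrative only:

```
package example

import (
	"time"

	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/uploader"
	"github.com/vmware-tanzu/velero/pkg/uploader/kopia"
)

// logProgressUpdater is a toy ProgressUpdater that just logs the numbers.
type logProgressUpdater struct {
	log logrus.FieldLogger
}

func (u *logProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) {
	u.log.Debugf("progress: %d/%d bytes", p.BytesDone, p.TotalBytes)
}

// wireProgress shows how the updater plugs into KopiaProgress, mirroring the
// kopia uploader provider later in this change.
func wireProgress(log logrus.FieldLogger) *kopia.KopiaProgress {
	progress := new(kopia.KopiaProgress)
	progress.InitThrottle(10 * time.Second) // hypothetical reporting interval
	progress.Updater = &logProgressUpdater{log: log}
	return progress
}
```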

View File

@ -0,0 +1,287 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kopia
import (
"context"
"math"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/kopia/kopia/fs"
"github.com/kopia/kopia/fs/localfs"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot"
"github.com/kopia/kopia/snapshot/policy"
"github.com/kopia/kopia/snapshot/restore"
"github.com/kopia/kopia/snapshot/snapshotfs"
"github.com/pkg/errors"
)
// The following functions are declared as variables mainly to make testing more convenient
var treeForSourceFunc = policy.TreeForSource
var applyRetentionPolicyFunc = policy.ApplyRetentionPolicy
var setPolicyFunc = policy.SetPolicy
var saveSnapshotFunc = snapshot.SaveSnapshot
var loadSnapshotFunc = snapshot.LoadSnapshot
// SnapshotUploader is an interface for the Upload operation; it exists mainly so unit tests can override the uploader
type SnapshotUploader interface {
Upload(
ctx context.Context,
source fs.Entry,
policyTree *policy.Tree,
sourceInfo snapshot.SourceInfo,
previousManifests ...*snapshot.Manifest,
) (*snapshot.Manifest, error)
}
func newOptionalInt(b policy.OptionalInt) *policy.OptionalInt {
return &b
}
// setupDefaultPolicy sets the default policy for kopia
func setupDefaultPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snapshot.SourceInfo) error {
return setPolicyFunc(ctx, rep, sourceInfo, &policy.Policy{
RetentionPolicy: policy.RetentionPolicy{
KeepLatest: newOptionalInt(math.MaxInt32),
},
CompressionPolicy: policy.CompressionPolicy{
CompressorName: "none",
},
UploadPolicy: policy.UploadPolicy{
MaxParallelFileReads: newOptionalInt(policy.OptionalInt(runtime.NumCPU())),
},
SchedulingPolicy: policy.SchedulingPolicy{
Manual: true,
},
})
}
// Backup backs up the specified sourcePath and updates progress
func Backup(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath string,
parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, error) {
if fsUploader == nil {
return nil, errors.New("get empty kopia uploader")
}
dir, err := filepath.Abs(sourcePath)
if err != nil {
return nil, errors.Wrapf(err, "Invalid source path '%s'", sourcePath)
}
sourceInfo := snapshot.SourceInfo{
UserName: udmrepo.GetRepoUser(),
Host: udmrepo.GetRepoDomain(),
Path: filepath.Clean(dir),
}
rootDir, err := getLocalFSEntry(sourceInfo.Path)
if err != nil {
return nil, errors.Wrap(err, "Unable to get local filesystem entry")
}
snapID, snapshotSize, err := SnapshotSource(ctx, repoWriter, fsUploader, sourceInfo, rootDir, parentSnapshot, log, "Kopia Uploader")
if err != nil {
return nil, err
}
snapshotInfo := &uploader.SnapshotInfo{
ID: snapID,
Size: snapshotSize,
}
return snapshotInfo, nil
}
func getLocalFSEntry(path0 string) (fs.Entry, error) {
path, err := resolveSymlink(path0)
if err != nil {
return nil, errors.Wrap(err, "resolveSymlink")
}
e, err := localfs.NewEntry(path)
if err != nil {
return nil, errors.Wrap(err, "can't get local fs entry")
}
return e, nil
}
//resolveSymlink returns the path name after the evaluation of any symbolic links
func resolveSymlink(path string) (string, error) {
st, err := os.Lstat(path)
if err != nil {
return "", errors.Wrap(err, "stat")
}
if (st.Mode() & os.ModeSymlink) == 0 {
return path, nil
}
return filepath.EvalSymlinks(path)
}
// SnapshotSource sets up the snapshot policy, uploads the snapshot, and updates progress
func SnapshotSource(
ctx context.Context,
rep repo.RepositoryWriter,
u SnapshotUploader,
sourceInfo snapshot.SourceInfo,
rootDir fs.Entry,
parentSnapshot string,
log logrus.FieldLogger,
description string,
) (string, int64, error) {
log.Info("Start to snapshot...")
snapshotStartTime := time.Now()
var previous []*snapshot.Manifest
if parentSnapshot != "" {
mani, err := loadSnapshotFunc(ctx, rep, manifest.ID(parentSnapshot))
if err != nil {
return "", 0, errors.Wrapf(err, "Failed to load previous snapshot %v from kopia", parentSnapshot)
}
previous = append(previous, mani)
} else {
pre, err := findPreviousSnapshotManifest(ctx, rep, sourceInfo, nil)
if err != nil {
return "", 0, errors.Wrapf(err, "Failed to find previous kopia snapshot manifests for si %v", sourceInfo)
}
previous = pre
}
var manifest *snapshot.Manifest
if err := setupDefaultPolicy(ctx, rep, sourceInfo); err != nil {
return "", 0, errors.Wrapf(err, "unable to set policy for si %v", sourceInfo)
}
policyTree, err := treeForSourceFunc(ctx, rep, sourceInfo)
if err != nil {
return "", 0, errors.Wrapf(err, "unable to create policy getter for si %v", sourceInfo)
}
manifest, err = u.Upload(ctx, rootDir, policyTree, sourceInfo, previous...)
if err != nil {
return "", 0, errors.Wrapf(err, "Failed to upload the kopia snapshot for si %v", sourceInfo)
}
manifest.Description = description
if _, err = saveSnapshotFunc(ctx, rep, manifest); err != nil {
return "", 0, errors.Wrapf(err, "Failed to save kopia manifest %v", manifest.ID)
}
_, err = applyRetentionPolicyFunc(ctx, rep, sourceInfo, true)
if err != nil {
return "", 0, errors.Wrapf(err, "Failed to apply kopia retention policy for si %v", sourceInfo)
}
if err = rep.Flush(ctx); err != nil {
return "", 0, errors.Wrapf(err, "Failed to flush kopia repository")
}
log.Infof("Created snapshot with root %v and ID %v in %v", manifest.RootObjectID(), manifest.ID, time.Since(snapshotStartTime).Truncate(time.Second))
return reportSnapshotStatus(manifest)
}
func reportSnapshotStatus(manifest *snapshot.Manifest) (string, int64, error) {
manifestID := manifest.ID
snapSize := manifest.Stats.TotalFileSize
var errs []string
if ds := manifest.RootEntry.DirSummary; ds != nil {
for _, ent := range ds.FailedEntries {
errs = append(errs, ent.Error)
}
}
if len(errs) != 0 {
return "", 0, errors.New(strings.Join(errs, "\n"))
}
return string(manifestID), snapSize, nil
}
// findPreviousSnapshotManifest returns the list of previous snapshots for a given source, including
// the most recent complete snapshot that is no later than noLaterThan.
func findPreviousSnapshotManifest(ctx context.Context, rep repo.Repository, sourceInfo snapshot.SourceInfo, noLaterThan *time.Time) ([]*snapshot.Manifest, error) {
man, err := snapshot.ListSnapshots(ctx, rep, sourceInfo)
if err != nil {
return nil, err
}
var previousComplete *snapshot.Manifest
var result []*snapshot.Manifest
for _, p := range man {
if noLaterThan != nil && p.StartTime.After(*noLaterThan) {
continue
}
if p.IncompleteReason == "" && (previousComplete == nil || p.StartTime.After(previousComplete.StartTime)) {
previousComplete = p
}
}
if previousComplete != nil {
result = append(result, previousComplete)
}
return result, nil
}
// Restore restores the data identified by snapshotID to the dest path and updates progress
func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *KopiaProgress, snapshotID, dest string, log logrus.FieldLogger, cancelCh chan struct{}) (int64, int32, error) {
log.Info("Start to restore...")
rootEntry, err := snapshotfs.FilesystemEntryFromIDWithPath(ctx, rep, snapshotID, false)
if err != nil {
return 0, 0, errors.Wrapf(err, "Unable to get filesystem entry for snapshot %v", snapshotID)
}
path, err := filepath.Abs(dest)
if err != nil {
return 0, 0, errors.Wrapf(err, "Unable to resolve path %v", dest)
}
output := &restore.FilesystemOutput{
TargetPath: path,
OverwriteDirectories: true,
OverwriteFiles: true,
OverwriteSymlinks: true,
IgnorePermissionErrors: true,
}
stat, err := restore.Entry(ctx, rep, output, rootEntry, restore.Options{
Parallel: runtime.NumCPU(),
RestoreDirEntryAtDepth: math.MaxInt32,
Cancel: cancelCh,
ProgressCallback: func(ctx context.Context, stats restore.Stats) {
progress.ProgressBytes(stats.RestoredTotalFileSize, stats.EnqueuedTotalFileSize)
},
})
if err != nil {
return 0, 0, errors.Wrapf(err, "Failed to copy snapshot data to the target")
}
return stat.RestoredTotalFileSize, stat.RestoredFileCount, nil
}
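A hedged sketch of how Backup and Restore can be driven against an already-open udmrepo.BackupRepo, mirroring the wiring in the kopia uploader provider later in this change; the paths, interval, and updater are placeholders:

```
package example

import (
	"context"
	"time"

	"github.com/kopia/kopia/snapshot/snapshotfs"
	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
	"github.com/vmware-tanzu/velero/pkg/uploader"
	"github.com/vmware-tanzu/velero/pkg/uploader/kopia"
)

// backupAndRestore takes a full backup of a source path and restores it to a target path.
func backupAndRestore(ctx context.Context, bkRepo udmrepo.BackupRepo, updater uploader.ProgressUpdater, log logrus.FieldLogger) error {
	repoWriter := kopia.NewShimRepo(bkRepo) // adapts BackupRepo to kopia's repo.RepositoryWriter
	fsUploader := snapshotfs.NewUploader(repoWriter)

	snap, err := kopia.Backup(ctx, fsUploader, repoWriter, "/data/src", "" /* no parent snapshot */, log)
	if err != nil {
		return err
	}

	progress := new(kopia.KopiaProgress)
	progress.InitThrottle(10 * time.Second) // hypothetical reporting interval
	progress.Updater = updater

	cancel := make(chan struct{})
	defer close(cancel)

	_, _, err = kopia.Restore(ctx, repoWriter, progress, snap.ID, "/data/dest", log, cancel)
	return err
}
```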

View File

@ -0,0 +1,198 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kopia
import (
"context"
"testing"
"github.com/kopia/kopia/snapshot"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
repomocks "github.com/vmware-tanzu/velero/pkg/repository/mocks"
uploadermocks "github.com/vmware-tanzu/velero/pkg/uploader/mocks"
)
type snapshotMocks struct {
policyMock *uploadermocks.Policy
snapshotMock *uploadermocks.Snapshot
uploaderMock *uploadermocks.Uploader
repoWriterMock *repomocks.RepositoryWriter
}
type mockArgs struct {
methodName string
returns []interface{}
}
func InjectSnapshotFuncs() *snapshotMocks {
s := &snapshotMocks{
policyMock: &uploadermocks.Policy{},
snapshotMock: &uploadermocks.Snapshot{},
uploderMock: &uploadermocks.Uploader{},
repoWriterMock: &repomocks.RepositoryWriter{},
}
setPolicyFunc = s.policyMock.SetPolicy
treeForSourceFunc = s.policyMock.TreeForSource
applyRetentionPolicyFunc = s.policyMock.ApplyRetentionPolicy
loadSnapshotFunc = s.snapshotMock.LoadSnapshot
saveSnapshotFunc = s.snapshotMock.SaveSnapshot
return s
}
func MockFuncs(s *snapshotMocks, args []mockArgs) {
s.snapshotMock.On("LoadSnapshot", mock.Anything, mock.Anything, mock.Anything).Return(args[0].returns...)
s.snapshotMock.On("SaveSnapshot", mock.Anything, mock.Anything, mock.Anything).Return(args[1].returns...)
s.policyMock.On("TreeForSource", mock.Anything, mock.Anything, mock.Anything).Return(args[2].returns...)
s.policyMock.On("ApplyRetentionPolicy", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(args[3].returns...)
s.policyMock.On("SetPolicy", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(args[4].returns...)
s.uploderMock.On("Upload", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(args[5].returns...)
s.repoWriterMock.On("Flush", mock.Anything).Return(args[6].returns...)
}
func TestSnapshotSource(t *testing.T) {
ctx := context.TODO()
sourceInfo := snapshot.SourceInfo{
UserName: "testUserName",
Host: "testHost",
Path: "/var",
}
rootDir, err := getLocalFSEntry(sourceInfo.Path)
assert.NoError(t, err)
log := logrus.New()
manifest := &snapshot.Manifest{
ID: "test",
RootEntry: &snapshot.DirEntry{},
}
testCases := []struct {
name string
args []mockArgs
notError bool
}{
{
name: "regular test",
args: []mockArgs{
{methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}},
{methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}},
{methodName: "TreeForSource", returns: []interface{}{nil, nil}},
{methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}},
{methodName: "SetPolicy", returns: []interface{}{nil}},
{methodName: "Upload", returns: []interface{}{manifest, nil}},
{methodName: "Flush", returns: []interface{}{nil}},
},
notError: true,
},
{
name: "failed to load snapshot",
args: []mockArgs{
{methodName: "LoadSnapshot", returns: []interface{}{manifest, errors.New("failed to load snapshot")}},
{methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}},
{methodName: "TreeForSource", returns: []interface{}{nil, nil}},
{methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}},
{methodName: "SetPolicy", returns: []interface{}{nil}},
{methodName: "Upload", returns: []interface{}{manifest, nil}},
{methodName: "Flush", returns: []interface{}{nil}},
},
notError: false,
},
{
name: "failed to save snapshot",
args: []mockArgs{
{methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}},
{methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, errors.New("failed to save snapshot")}},
{methodName: "TreeForSource", returns: []interface{}{nil, nil}},
{methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}},
{methodName: "SetPolicy", returns: []interface{}{nil}},
{methodName: "Upload", returns: []interface{}{manifest, nil}},
{methodName: "Flush", returns: []interface{}{nil}},
},
notError: false,
},
{
name: "failed to apply policy",
args: []mockArgs{
{methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}},
{methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}},
{methodName: "TreeForSource", returns: []interface{}{nil, nil}},
{methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, errors.New("failed to save snapshot")}},
{methodName: "SetPolicy", returns: []interface{}{nil}},
{methodName: "Upload", returns: []interface{}{manifest, nil}},
{methodName: "Flush", returns: []interface{}{nil}},
},
notError: false,
},
{
name: "failed to set policy",
args: []mockArgs{
{methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}},
{methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}},
{methodName: "TreeForSource", returns: []interface{}{nil, nil}},
{methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}},
{methodName: "SetPolicy", returns: []interface{}{errors.New("failed to set policy")}},
{methodName: "Upload", returns: []interface{}{manifest, nil}},
{methodName: "Flush", returns: []interface{}{nil}},
},
notError: false,
},
{
name: "failed to upload snapshot",
args: []mockArgs{
{methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}},
{methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}},
{methodName: "TreeForSource", returns: []interface{}{nil, nil}},
{methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}},
{methodName: "SetPolicy", returns: []interface{}{nil}},
{methodName: "Upload", returns: []interface{}{manifest, errors.New("failed to upload snapshot")}},
{methodName: "Flush", returns: []interface{}{nil}},
},
notError: false,
},
{
name: "failed to flush repo",
args: []mockArgs{
{methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}},
{methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, errors.New("failed to save snapshot")}},
{methodName: "TreeForSource", returns: []interface{}{nil, nil}},
{methodName: "ApplyRetentionPolicy", returns: []interface{}{nil, nil}},
{methodName: "SetPolicy", returns: []interface{}{nil}},
{methodName: "Upload", returns: []interface{}{manifest, nil}},
{methodName: "Flush", returns: []interface{}{errors.New("failed to flush repo")}},
},
notError: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
s := InjectSnapshotFuncs()
MockFuncs(s, tc.args)
_, _, err = SnapshotSource(ctx, s.repoWriterMock, s.uploaderMock, sourceInfo, rootDir, "/", log, "TestSnapshotSource")
if tc.notError {
assert.NoError(t, err)
} else {
assert.Error(t, err)
}
})
}
}

View File

@ -0,0 +1,92 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mocks
import (
"context"
"github.com/kopia/kopia/snapshot/policy"
"github.com/stretchr/testify/mock"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/snapshot"
)
// Policy is an autogenerated mock type for the policy functions (TreeForSource, ApplyRetentionPolicy, SetPolicy)
type Policy struct {
mock.Mock
}
// TreeForSource provides a mock function with given fields: ctx, rep, si
func (_m *Policy) TreeForSource(ctx context.Context, rep repo.Repository, si snapshot.SourceInfo) (*policy.Tree, error) {
ret := _m.Called(ctx, rep, si)
var r0 *policy.Tree
if rf, ok := ret.Get(0).(func(context.Context, repo.Repository, snapshot.SourceInfo) *policy.Tree); ok {
r0 = rf(ctx, rep, si)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*policy.Tree)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, repo.Repository, snapshot.SourceInfo) error); ok {
r1 = rf(ctx, rep, si)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ApplyRetentionPolicy provides a mock function with given fields: ctx, rep, sourceInfo, reallyDelete
func (_m *Policy) ApplyRetentionPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snapshot.SourceInfo, reallyDelete bool) ([]*snapshot.Manifest, error) {
ret := _m.Called(ctx, rep, sourceInfo, reallyDelete)
var r0 []*snapshot.Manifest
if rf, ok := ret.Get(0).(func(context.Context, repo.RepositoryWriter, snapshot.SourceInfo, bool) []*snapshot.Manifest); ok {
r0 = rf(ctx, rep, sourceInfo, reallyDelete)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*snapshot.Manifest)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, repo.RepositoryWriter, snapshot.SourceInfo, bool) error); ok {
r1 = rf(ctx, rep, sourceInfo, reallyDelete)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
func (_m *Policy) SetPolicy(ctx context.Context, rep repo.RepositoryWriter, si snapshot.SourceInfo, pol *policy.Policy) error {
ret := _m.Called(ctx, rep, si, pol)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, repo.RepositoryWriter, snapshot.SourceInfo, *policy.Policy) error); ok {
r0 = rf(ctx, rep, si, pol)
} else {
r0 = ret.Error(0)
}
return r0
}

View File

@ -0,0 +1,42 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
)
// shimRepository is an autogenerated mock type for the shimRepository type
type ShimRepository struct {
mock.Mock
}
// Flush provides a mock function with given fields: ctx
func (_m *ShimRepository) Flush(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}

View File

@ -0,0 +1,76 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mocks
import (
"context"
"github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot"
"github.com/stretchr/testify/mock"
"github.com/kopia/kopia/repo"
)
// snapshot is an autogenerated mock type for the snapshot type
type Snapshot struct {
mock.Mock
}
// LoadSnapshot provides a mock function with given fields: ctx, rep, manifestID
func (_m *Snapshot) LoadSnapshot(ctx context.Context, rep repo.Repository, manifestID manifest.ID) (*snapshot.Manifest, error) {
ret := _m.Called(ctx, rep, manifestID)
var r0 *snapshot.Manifest
if rf, ok := ret.Get(0).(func(context.Context, repo.Repository, manifest.ID) *snapshot.Manifest); ok {
r0 = rf(ctx, rep, manifestID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*snapshot.Manifest)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, repo.Repository, manifest.ID) error); ok {
r1 = rf(ctx, rep, manifestID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SaveSnapshot provides a mock function with given fields: ctx, rep, man
func (_m *Snapshot) SaveSnapshot(ctx context.Context, rep repo.RepositoryWriter, man *snapshot.Manifest) (manifest.ID, error) {
ret := _m.Called(ctx, rep, man)
var r0 manifest.ID
if rf, ok := ret.Get(0).(func(context.Context, repo.RepositoryWriter, *snapshot.Manifest) manifest.ID); ok {
r0 = rf(ctx, rep, man)
} else {
r0 = ret.Get(0).(manifest.ID)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, repo.RepositoryWriter, *snapshot.Manifest) error); ok {
r1 = rf(ctx, rep, man)
} else {
r1 = ret.Error(1)
}
return r0, r1
}

View File

@ -0,0 +1,63 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mocks
import (
"context"
"github.com/kopia/kopia/fs"
"github.com/stretchr/testify/mock"
"github.com/kopia/kopia/snapshot/policy"
"github.com/kopia/kopia/snapshot"
)
// Uploader is an autogenerated mock type for the Upload type
type Uploader struct {
mock.Mock
}
// Upload provides a mock function with given fields: ctx, source, policyTree, sourceInfo, previousManifests
func (_m *Uploader) Upload(ctx context.Context, source fs.Entry, policyTree *policy.Tree, sourceInfo snapshot.SourceInfo, previousManifests ...*snapshot.Manifest) (*snapshot.Manifest, error) {
_va := make([]interface{}, len(previousManifests))
for _i := range previousManifests {
_va[_i] = previousManifests[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, source, policyTree, sourceInfo)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *snapshot.Manifest
if rf, ok := ret.Get(0).(func(context.Context, fs.Entry, *policy.Tree, snapshot.SourceInfo, ...*snapshot.Manifest) *snapshot.Manifest); ok {
r0 = rf(ctx, source, policyTree, sourceInfo, previousManifests...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*snapshot.Manifest)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, fs.Entry, *policy.Tree, snapshot.SourceInfo, ...*snapshot.Manifest) error); ok {
r1 = rf(ctx, source, policyTree, sourceInfo, previousManifests...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}

View File

@ -0,0 +1,208 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"context"
"fmt"
"strings"
"github.com/kopia/kopia/snapshot/snapshotfs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/uploader/kopia"
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
repokeys "github.com/vmware-tanzu/velero/pkg/repository/keys"
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo/service"
)
// BackupFunc and RestoreFunc are declared as variables mainly to make testing more convenient
var BackupFunc = kopia.Backup
var RestoreFunc = kopia.Restore
// kopiaProvider holds the information needed by the kopia uploader provider
type kopiaProvider struct {
bkRepo udmrepo.BackupRepo
credGetter *credentials.CredentialGetter
log logrus.FieldLogger
}
// NewKopiaUploaderProvider initializes the provider by opening the backup repository
func NewKopiaUploaderProvider(
ctx context.Context,
credGetter *credentials.CredentialGetter,
backupRepo *velerov1api.BackupRepository,
log logrus.FieldLogger,
) (Provider, error) {
kp := &kopiaProvider{
log: log,
credGetter: credGetter,
}
// repoUID is used to generate the kopia repository config file under a unique directory path
repoUID := string(backupRepo.GetUID())
repoOpt, err := udmrepo.NewRepoOptions(
udmrepo.WithPassword(kp, ""),
udmrepo.WithConfigFile("", repoUID),
udmrepo.WithDescription("Initial kopia uploader provider"),
)
if err != nil {
return nil, errors.Wrapf(err, "error to get repo options")
}
repoSvc := service.Create(log)
log.WithField("repoUID", repoUID).Info("Opening backup repo")
kp.bkRepo, err = repoSvc.Open(ctx, *repoOpt)
if err != nil {
return nil, errors.Wrapf(err, "Failed to find kopia repository")
}
return kp, nil
}
// CheckContext watches the context and the finish channel: if the context times out or is canceled, it cancels the in-flight backup or restore; once the operation finishes, it returns
func (kp *kopiaProvider) CheckContext(ctx context.Context, finishChan chan struct{}, restoreChan chan struct{}, uploader *snapshotfs.Uploader) {
select {
case <-finishChan:
kp.log.Infof("Action finished")
return
case <-ctx.Done():
if uploader != nil {
uploader.Cancel()
kp.log.Infof("Backup is been canceled")
}
if restoreChan != nil {
close(restoreChan)
kp.log.Infof("Restore is been canceled")
}
return
}
}
func (kp *kopiaProvider) Close(ctx context.Context) {
kp.bkRepo.Close(ctx)
}
// RunBackup backs up the specified path and updates backup progress
func (kp *kopiaProvider) RunBackup(
ctx context.Context,
path string,
tags map[string]string,
parentSnapshot string,
updater uploader.ProgressUpdater) (string, error) {
if updater == nil {
return "", errors.New("Need to initial backup progress updater first")
}
log := kp.log.WithFields(logrus.Fields{
"path": path,
"parentSnapshot": parentSnapshot,
})
repoWriter := kopia.NewShimRepo(kp.bkRepo)
kpUploader := snapshotfs.NewUploader(repoWriter)
progress := new(kopia.KopiaProgress)
progress.InitThrottle(backupProgressCheckInterval)
progress.Updater = updater
kpUploader.Progress = progress
quit := make(chan struct{})
log.Info("Starting backup")
go kp.CheckContext(ctx, quit, nil, kpUploader)
defer func() {
close(quit)
}()
snapshotInfo, err := BackupFunc(ctx, kpUploader, repoWriter, path, parentSnapshot, log)
if err != nil {
return "", errors.Wrapf(err, "Failed to run kopia backup")
} else if snapshotInfo == nil {
return "", fmt.Errorf("failed to get kopia backup snapshot info for path %v", path)
}
// ensure that the reported TotalBytes equals BytesDone when the backup finishes
updater.UpdateProgress(
&uploader.UploaderProgress{
TotalBytes: snapshotInfo.Size,
BytesDone: snapshotInfo.Size,
},
)
log.Debugf("Kopia backup finished, snapshot ID %s, backup size %d", snapshotInfo.ID, snapshotInfo.Size)
return snapshotInfo.ID, nil
}
func (kp *kopiaProvider) GetPassword(param interface{}) (string, error) {
if kp.credGetter.FromSecret == nil {
return "", errors.New("invalid credentials interface")
}
rawPass, err := kp.credGetter.FromSecret.Get(repokeys.RepoKeySelector())
if err != nil {
return "", errors.Wrap(err, "error to get password")
}
return strings.TrimSpace(rawPass), nil
}
// RunRestore restores the specified snapshot to the volume path and updates restore progress
func (kp *kopiaProvider) RunRestore(
ctx context.Context,
snapshotID string,
volumePath string,
updater uploader.ProgressUpdater) error {
log := kp.log.WithFields(logrus.Fields{
"snapshotID": snapshotID,
"volumePath": volumePath,
})
repoWriter := kopia.NewShimRepo(kp.bkRepo)
progress := new(kopia.KopiaProgress)
progress.InitThrottle(restoreProgressCheckInterval)
progress.Updater = updater
restoreCancel := make(chan struct{})
quit := make(chan struct{})
log.Info("Starting restore")
go kp.CheckContext(ctx, quit, restoreCancel, nil)
defer func() {
if restoreCancel != nil {
close(restoreCancel)
}
close(quit)
}()
size, fileCount, err := RestoreFunc(ctx, repoWriter, progress, snapshotID, volumePath, log, restoreCancel)
if err != nil {
return errors.Wrapf(err, "Failed to run kopia restore")
}
// Update progress so that TotalBytes equals BytesDone once the restore has finished.
updater.UpdateProgress(&uploader.UploaderProgress{
TotalBytes: size,
BytesDone: size,
})
output := fmt.Sprintf("Kopia restore finished, restore size %d, file count %d", size, fileCount)
log.Info(output)
return nil
}
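
For context on the test seam used below: `RunBackup` and `RunRestore` delegate the actual Kopia work to package-level `BackupFunc`/`RestoreFunc` hooks that the unit tests replace with stubs. Their declarations are outside this hunk; the following is only a minimal sketch, assuming they live in the same `provider` package and inferring the names and types from the calls above and from the test stubs.

```
package provider

import (
	"context"

	"github.com/kopia/kopia/repo"
	"github.com/kopia/kopia/snapshot/snapshotfs"
	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/uploader"
	"github.com/vmware-tanzu/velero/pkg/uploader/kopia"
)

// Hypothetical sketch only: names and signatures are inferred from the calls in
// RunBackup/RunRestore and from the test hooks; the real declarations (and the
// default values they are assigned, presumably the snapshot helpers in
// pkg/uploader/kopia) are not part of this hunk.
var (
	BackupFunc func(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter,
		sourcePath, parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, error)

	RestoreFunc func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.KopiaProgress,
		snapshotID, dest string, log logrus.FieldLogger, cancelCh chan struct{}) (int64, int32, error)
)
```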

View File

@ -0,0 +1,118 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"context"
"testing"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/snapshot/snapshotfs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/controller"
"github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/uploader/kopia"
)
func TestRunBackup(t *testing.T) {
var kp kopiaProvider
kp.log = logrus.New()
updater := controller.BackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewFakeClientWithScheme(scheme.Scheme)}
testCases := []struct {
name string
hookBackupFunc func(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath, parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, error)
notError bool
}{
{
name: "success to backup",
hookBackupFunc: func(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath, parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, error) {
return &uploader.SnapshotInfo{}, nil
},
notError: true,
},
{
name: "get error to backup",
hookBackupFunc: func(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath, parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, error) {
return &uploader.SnapshotInfo{}, errors.New("failed to backup")
},
notError: false,
},
{
name: "got empty snapshot",
hookBackupFunc: func(ctx context.Context, fsUploader *snapshotfs.Uploader, repoWriter repo.RepositoryWriter, sourcePath, parentSnapshot string, log logrus.FieldLogger) (*uploader.SnapshotInfo, error) {
return nil, nil
},
notError: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
BackupFunc = tc.hookBackupFunc
_, err := kp.RunBackup(context.Background(), "var", nil, "", &updater)
if tc.notError {
assert.NoError(t, err)
} else {
assert.Error(t, err)
}
})
}
}
func TestRunRestore(t *testing.T) {
var kp kopiaProvider
kp.log = logrus.New()
updater := controller.RestoreProgressUpdater{PodVolumeRestore: &velerov1api.PodVolumeRestore{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewFakeClientWithScheme(scheme.Scheme)}
testCases := []struct {
name string
hookRestoreFunc func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.KopiaProgress, snapshotID, dest string, log logrus.FieldLogger, cancelCh chan struct{}) (int64, int32, error)
notError bool
}{
{
name: "normal restore",
hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.KopiaProgress, snapshotID, dest string, log logrus.FieldLogger, cancelCh chan struct{}) (int64, int32, error) {
return 0, 0, nil
},
notError: true,
},
{
name: "failed to restore",
hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.KopiaProgress, snapshotID, dest string, log logrus.FieldLogger, cancelCh chan struct{}) (int64, int32, error) {
return 0, 0, errors.New("failed to restore")
},
notError: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
RestoreFunc = tc.hookRestoreFunc
err := kp.RunRestore(context.Background(), "", "/var", &updater)
if tc.notError {
assert.NoError(t, err)
} else {
assert.Error(t, err)
}
})
}
}

View File

@ -18,27 +18,54 @@ package provider
import (
"context"
"time"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/uploader"
)
const restoreProgressCheckInterval = 10 * time.Second
const backupProgressCheckInterval = 10 * time.Second
// Provider is designed to back up or restore one pod volume
type Provider interface {
// RunBackup backs up one specific volume and returns the snapshot ID or an error
// updater is used to report backup progress; it is implemented by the caller
RunBackup(
ctx context.Context,
path string,
tags map[string]string,
parentSnapshot string,
updater uploader.ProgressUpdater) (string, error)
// RunRestore restores one specific volume from the given snapshot ID and returns an error
// updater is used to report restore progress; it is implemented by the caller
RunRestore(
ctx context.Context,
snapshotID string,
volumePath string,
updater uploader.ProgressUpdater) error
// Close closes the related repository
Close(ctx context.Context)
}
// NewUploaderProvider initializes the provider for the specified uploaderType
func NewUploaderProvider(
ctx context.Context,
uploaderType string,
repoIdentifier string,
bsl *velerov1api.BackupStorageLocation,
backupRepo *velerov1api.BackupRepository,
credGetter *credentials.CredentialGetter,
repoKeySelector *v1.SecretKeySelector,
log logrus.FieldLogger,
) (Provider, error) {
if uploaderType == uploader.KopiaType {
return NewKopiaUploaderProvider(ctx, credGetter, backupRepo, log)
}
return NewResticUploaderProvider(repoIdentifier, bsl, credGetter, repoKeySelector, log)
}
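
As a usage illustration of the factory above (not code from this change): a caller might build a provider for the configured uploader type, run a backup, and close the repository roughly as follows. The `example` package name, the `backupOneVolume` helper, and the `pkg/uploader/provider` import path are assumptions made for this sketch.

```
package example

import (
	"context"

	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"

	"github.com/vmware-tanzu/velero/internal/credentials"
	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/uploader"
	"github.com/vmware-tanzu/velero/pkg/uploader/provider"
)

// backupOneVolume is a hypothetical helper showing the intended call sequence:
// pick the provider for the configured uploader type, run the backup, close it.
func backupOneVolume(
	ctx context.Context,
	uploaderType, repoIdentifier, volumePath, parentSnapshot string,
	bsl *velerov1api.BackupStorageLocation,
	backupRepo *velerov1api.BackupRepository,
	credGetter *credentials.CredentialGetter,
	repoKeySelector *v1.SecretKeySelector,
	updater uploader.ProgressUpdater,
	log logrus.FieldLogger,
) (string, error) {
	prov, err := provider.NewUploaderProvider(ctx, uploaderType, repoIdentifier, bsl, backupRepo, credGetter, repoKeySelector, log)
	if err != nil {
		return "", err
	}
	// Close releases the underlying repository once the backup is done.
	defer prov.Close(ctx)

	// An empty parentSnapshot means no incremental base; tags are optional.
	return prov.RunBackup(ctx, volumePath, map[string]string{"volume": "data"}, parentSnapshot, updater)
}
```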

View File

@ -0,0 +1,34 @@
/*
Copyright The Velero Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"github.com/vmware-tanzu/velero/internal/credentials"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
func NewResticUploaderProvider(
repoIdentifier string,
bsl *velerov1api.BackupStorageLocation,
credGetter *credentials.CredentialGetter,
repoKeySelector *v1.SecretKeySelector,
log logrus.FieldLogger,
) (Provider, error) {
return nil, nil // TODO: implement the Restic uploader provider
}

View File

@ -22,10 +22,8 @@ import (
)
const (
ResticType = "restic"
KopiaType = "kopia"
VeleroBackup = "backup"
VeleroRestore = "restore"
ResticType = "restic"
KopiaType = "kopia"
)
// ValidateUploaderType validates if the input param is a valid uploader type.
@ -43,7 +41,13 @@ type SnapshotInfo struct {
Size int64 `json:"Size"`
}
// UploaderProgress records the progress of an upload: total bytes and bytes done
type UploaderProgress struct {
TotalBytes int64 `json:"totalBytes,omitempty"`
BytesDone int64 `json:"doneBytes,omitempty"`
}
// ProgressUpdater defines a generic interface for updating progress
type ProgressUpdater interface {
UpdateProgress(p *UploaderProgress)
}

View File

@ -0,0 +1,90 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"context"
"github.com/kopia/kopia/repo/logging"
"github.com/sirupsen/logrus"
)
type kopiaLog struct {
module string
logger logrus.FieldLogger
}
// SetupKopiaLog sets the Kopia log handler on the given context; Kopia modules
// call the logger in the context to write logs
func SetupKopiaLog(ctx context.Context, logger logrus.FieldLogger) context.Context {
return logging.WithLogger(ctx, func(module string) logging.Logger {
return &kopiaLog{
module: module,
logger: logger,
}
})
}
func (kl *kopiaLog) Debugf(msg string, args ...interface{}) {
logger := kl.logger.WithField("logSource", kl.getLogSource())
logger.Debugf(msg, args...)
}
func (kl *kopiaLog) Debugw(msg string, keyValuePairs ...interface{}) {
logger := kl.logger.WithField("logSource", kl.getLogSource())
logger.WithFields(getLogFields(keyValuePairs...)).Debug(msg)
}
func (kl *kopiaLog) Infof(msg string, args ...interface{}) {
logger := kl.logger.WithField("logSource", kl.getLogSource())
logger.Infof(msg, args...)
}
func (kl *kopiaLog) Warnf(msg string, args ...interface{}) {
logger := kl.logger.WithField("logSource", kl.getLogSource())
logger.Warnf(msg, args...)
}
// Kopia generates error logs for some normal or non-critical cases, so its
// error logs are downgraded to warning logs so that they don't affect
// Velero's workflow.
func (kl *kopiaLog) Errorf(msg string, args ...interface{}) {
logger := kl.logger.WithFields(logrus.Fields{
"logSource": kl.getLogSource(),
"sublevel": "error",
})
logger.Warnf(msg, args...)
}
func (kl *kopiaLog) getLogSource() string {
return "kopia/" + kl.module
}
func getLogFields(keyValuePairs ...interface{}) map[string]interface{} {
m := map[string]interface{}{}
for i := 0; i+1 < len(keyValuePairs); i += 2 {
s, ok := keyValuePairs[i].(string)
if !ok {
s = "non-string-key"
}
m[s] = keyValuePairs[i+1]
}
return m
}
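
A brief usage sketch, written as if it lived in this same package (the `exampleKopiaLogging` function and its field values are illustrative only): wrap the context once with `SetupKopiaLog`, after which any Kopia call made with that context logs through `kopiaLog`, and key/value pairs passed to `Debugw` surface as logrus fields.

```
// exampleKopiaLogging is a hypothetical illustration, not part of this change.
func exampleKopiaLogging() {
	logger := logrus.New()

	// Wrap the context once; any Kopia API invoked with ctx now logs through
	// kopiaLog with logSource=kopia/<module>, and Kopia error logs are
	// downgraded to Velero warnings.
	ctx := SetupKopiaLog(context.Background(), logger)
	_ = ctx

	// Key/value pairs passed to Debugw become logrus fields via getLogFields.
	kl := &kopiaLog{module: "repo/content", logger: logger}
	kl.Debugw("content flushed", "contentID", "abc123", "bytes", 1024)
}
```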

View File

@ -0,0 +1,86 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestGetLogFields(t *testing.T) {
testCases := []struct {
name string
pairs []interface{}
expected map[string]interface{}
}{
{
name: "normal",
pairs: []interface{}{
"fake-key1",
"fake-value1",
"fake-key2",
10,
"fake-key3",
struct{ v int }{v: 10},
},
expected: map[string]interface{}{
"fake-key1": "fake-value1",
"fake-key2": 10,
"fake-key3": struct{ v int }{v: 10},
},
},
{
name: "non string key",
pairs: []interface{}{
"fake-key1",
"fake-value1",
10,
10,
"fake-key3",
struct{ v int }{v: 10},
},
expected: map[string]interface{}{
"fake-key1": "fake-value1",
"non-string-key": 10,
"fake-key3": struct{ v int }{v: 10},
},
},
{
name: "missing value",
pairs: []interface{}{
"fake-key1",
"fake-value1",
"fake-key2",
10,
"fake-key3",
},
expected: map[string]interface{}{
"fake-key1": "fake-value1",
"fake-key2": 10,
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
m := getLogFields(tc.pairs...)
require.Equal(t, tc.expected, m)
})
}
}

View File

@ -52,9 +52,6 @@ spec.
This example walks you through using both pre and post hooks for freezing a file system. Freezing the
file system is useful to ensure that all pending disk I/O operations have completed prior to taking a snapshot.
This example uses [examples/nginx-app/with-pv.yaml][2]. Follow the [steps for your provider][3] to
setup this example.
### Annotations
The Velero [example/nginx-app/with-pv.yaml][2] serves as an example of adding the pre and post hook annotations directly
@ -108,4 +105,3 @@ Note that the container must support the shell command you use.
[1]: api-types/backup.md
[2]: https://github.com/vmware-tanzu/velero/blob/main/examples/nginx-app/with-pv.yaml
[3]: cloud-common.md