parent 0afaa70e9b
commit 1a237d3e4c
@@ -479,6 +479,7 @@ spec:
type: string
uploaderConfig:
description: UploaderConfig specifies the configuration for the uploader.
nullable: true
properties:
parallelFilesUpload:
description: ParallelFilesUpload is the number of files parallel
@@ -126,6 +126,7 @@ spec:
type: string
description: UploaderSettings are a map of key-value pairs that should
be applied to the uploader configuration.
nullable: true
type: object
uploaderType:
description: UploaderType is the type of the uploader to handle the
@@ -124,6 +124,7 @@ spec:
type: string
description: UploaderSettings are a map of key-value pairs that should
be applied to the uploader configuration.
nullable: true
type: object
uploaderType:
description: UploaderType is the type of the uploader to handle the
@@ -420,10 +420,12 @@ spec:
type: string
uploaderConfig:
description: UploaderConfig specifies the configuration for the restore.
nullable: true
properties:
writeSparseFiles:
description: WriteSparseFiles is a flag to indicate whether write
files sparsely or not.
nullable: true
type: boolean
type: object
required:
@@ -517,6 +517,7 @@ spec:
uploaderConfig:
description: UploaderConfig specifies the configuration for the
uploader.
nullable: true
properties:
parallelFilesUpload:
description: ParallelFilesUpload is the number of files parallel
File diff suppressed because one or more lines are too long
@@ -178,10 +178,11 @@ type BackupSpec struct {

// UploaderConfig specifies the configuration for the uploader.
// +optional
UploaderConfigForBackup *UploaderConfigForBackup `json:"uploaderConfig,omitempty"`
// +nullable
UploaderConfig *UploaderConfigForBackup `json:"uploaderConfig,omitempty"`
}

// UploaderConfigForBackup defines the configuration for the backup.
// UploaderConfigForBackup defines the configuration for the uploader when doing backup.
type UploaderConfigForBackup struct {
// ParallelFilesUpload is the number of files parallel uploads to perform when using the uploader.
// +optional
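A minimal usage sketch for the renamed field, assuming only the velerov1api import path and the types shown in this hunk:

package main

import (
	"fmt"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

func main() {
	// The struct field is now Spec.UploaderConfig; the JSON key "uploaderConfig" is unchanged.
	spec := velerov1api.BackupSpec{
		UploaderConfig: &velerov1api.UploaderConfigForBackup{
			ParallelFilesUpload: 10,
		},
	}
	fmt.Println(spec.UploaderConfig.ParallelFilesUpload)
}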
@@ -55,7 +55,8 @@ type PodVolumeBackupSpec struct {
// UploaderSettings are a map of key-value pairs that should be applied to the
// uploader configuration.
// +optional
UploaderSettings *map[string]string `json:"uploaderSettings,omitempty"`
// +nullable
UploaderSettings map[string]string `json:"uploaderSettings,omitempty"`
}

// PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup.
@@ -52,7 +52,8 @@ type PodVolumeRestoreSpec struct {
// UploaderSettings are a map of key-value pairs that should be applied to the
// uploader configuration.
// +optional
UploaderSettings *map[string]string `json:"uploaderSettings,omitempty"`
// +nullable
UploaderSettings map[string]string `json:"uploaderSettings,omitempty"`
}

// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.
@@ -126,14 +126,16 @@ type RestoreSpec struct {

// UploaderConfig specifies the configuration for the restore.
// +optional
UploaderConfigForRestore *UploaderConfigForRestore `json:"uploaderConfig,omitempty"`
// +nullable
UploaderConfig *UploaderConfigForRestore `json:"uploaderConfig,omitempty"`
}

// UploaderConfigForRestore defines the configuration for the restore.
type UploaderConfigForRestore struct {
// WriteSparseFiles is a flag to indicate whether write files sparsely or not.
// +optional
WriteSparseFiles bool `json:"writeSparseFiles,omitempty"`
// +nullable
WriteSparseFiles *bool `json:"writeSparseFiles,omitempty"`
}

// RestoreHooks contains custom behaviors that should be executed during or post restore.
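A minimal sketch of setting and reading the now pointer-typed WriteSparseFiles, assuming the velerov1api and boolptr packages that appear elsewhere in this diff:

package main

import (
	"fmt"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/util/boolptr"
)

func main() {
	sparse := true
	cfg := &velerov1api.UploaderConfigForRestore{
		// With *bool, "unset" and "explicitly false" are distinguishable.
		WriteSparseFiles: &sparse,
	}
	// Consumers read the value through the nil-safe helper, as the describe code in this diff does.
	fmt.Println(boolptr.IsSetToTrue(cfg.WriteSparseFiles))
}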
@@ -381,8 +381,8 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
*out = new(bool)
**out = **in
}
if in.UploaderConfigForBackup != nil {
in, out := &in.UploaderConfigForBackup, &out.UploaderConfigForBackup
if in.UploaderConfig != nil {
in, out := &in.UploaderConfig, &out.UploaderConfig
*out = new(UploaderConfigForBackup)
**out = **in
}
@@ -978,13 +978,9 @@ func (in *PodVolumeBackupSpec) DeepCopyInto(out *PodVolumeBackupSpec) {
}
if in.UploaderSettings != nil {
in, out := &in.UploaderSettings, &out.UploaderSettings
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
@@ -1088,13 +1084,9 @@ func (in *PodVolumeRestoreSpec) DeepCopyInto(out *PodVolumeRestoreSpec) {
out.Pod = in.Pod
if in.UploaderSettings != nil {
in, out := &in.UploaderSettings, &out.UploaderSettings
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
@@ -1374,10 +1366,10 @@ func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) {
*out = new(corev1.TypedLocalObjectReference)
(*in).DeepCopyInto(*out)
}
if in.UploaderConfigForRestore != nil {
in, out := &in.UploaderConfigForRestore, &out.UploaderConfigForRestore
if in.UploaderConfig != nil {
in, out := &in.UploaderConfig, &out.UploaderConfig
*out = new(UploaderConfigForRestore)
**out = **in
(*in).DeepCopyInto(*out)
}
}
@@ -1694,6 +1686,11 @@ func (in *UploaderConfigForBackup) DeepCopy() *UploaderConfigForBackup {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UploaderConfigForRestore) DeepCopyInto(out *UploaderConfigForRestore) {
*out = *in
if in.WriteSparseFiles != nil {
in, out := &in.WriteSparseFiles, &out.WriteSparseFiles
*out = new(bool)
**out = **in
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploaderConfigForRestore.
@@ -51,7 +51,7 @@ type DataUploadSpec struct {
// DataMoverConfig is for data-mover-specific configuration fields.
// +optional
// +nullable
DataMoverConfig *map[string]string `json:"dataMoverConfig,omitempty"`
DataMoverConfig map[string]string `json:"dataMoverConfig,omitempty"`

// Cancel indicates request to cancel the ongoing DataUpload. It can be set
// when the DataUpload is in InProgress phase
@@ -226,13 +226,9 @@ func (in *DataUploadSpec) DeepCopyInto(out *DataUploadSpec) {
}
if in.DataMoverConfig != nil {
in, out := &in.DataMoverConfig, &out.DataMoverConfig
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.OperationTimeout = in.OperationTimeout
@@ -302,10 +302,10 @@ func (b *BackupBuilder) DataMover(name string) *BackupBuilder {

// ParallelFilesUpload sets the Backup's uploader parallel uploads
func (b *BackupBuilder) ParallelFilesUpload(parallel int) *BackupBuilder {
if b.object.Spec.UploaderConfigForBackup == nil {
b.object.Spec.UploaderConfigForBackup = &velerov1api.UploaderConfigForBackup{}
if b.object.Spec.UploaderConfig == nil {
b.object.Spec.UploaderConfig = &velerov1api.UploaderConfigForBackup{}
}
b.object.Spec.UploaderConfigForBackup.ParallelFilesUpload = parallel
b.object.Spec.UploaderConfig.ParallelFilesUpload = parallel
return b
}
@@ -103,7 +103,7 @@ func (d *DataUploadBuilder) OperationTimeout(timeout metav1.Duration) *DataUploa
}

// DataMoverConfig sets the DataUpload's DataMoverConfig.
func (d *DataUploadBuilder) DataMoverConfig(config *map[string]string) *DataUploadBuilder {
func (d *DataUploadBuilder) DataMoverConfig(config map[string]string) *DataUploadBuilder {
d.object.Spec.DataMoverConfig = config
return d
}
@@ -174,6 +174,6 @@ func (b *RestoreBuilder) ItemOperationTimeout(timeout time.Duration) *RestoreBui

// WriteSparseFiles sets the Restore's uploader write sparse files
func (b *RestoreBuilder) WriteSparseFiles(val bool) *RestoreBuilder {
b.object.Spec.UploaderConfigForRestore.WriteSparseFiles = val
b.object.Spec.UploaderConfig.WriteSparseFiles = &val
return b
}
@@ -149,8 +149,8 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {

flags.StringVar(&o.ResourceModifierConfigMap, "resource-modifier-configmap", "", "Reference to the resource modifier configmap that restore will use")

f = flags.VarPF(&o.WriteSparseFiles, "write-sparse-files", "", "Whether to write sparse files when restoring volumes")
f.NoOptDefVal = cmd.FALSE
f = flags.VarPF(&o.WriteSparseFiles, "write-sparse-files", "", "Whether to write sparse files during restoring volumes")
f.NoOptDefVal = cmd.TRUE
}

func (o *CreateOptions) Complete(args []string, f client.Factory) error {
@@ -323,8 +323,8 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
ItemOperationTimeout: metav1.Duration{
Duration: o.ItemOperationTimeout,
},
UploaderConfigForRestore: &api.UploaderConfigForRestore{
WriteSparseFiles: boolptr.IsSetToTrue(o.WriteSparseFiles.Value),
UploaderConfig: &api.UploaderConfigForRestore{
WriteSparseFiles: o.WriteSparseFiles.Value,
},
},
}
@@ -84,7 +84,7 @@ func TestCreateCommand(t *testing.T) {
includeClusterResources := "true"
allowPartiallyFailed := "true"
itemOperationTimeout := "10m0s"
writeSparseFiles := "false"
writeSparseFiles := "true"

flags := new(pflag.FlagSet)
o := NewCreateOptions()
@@ -1,4 +1,3 @@
package cmd

var TRUE string = "true"
var FALSE string = "false"
@@ -88,9 +88,9 @@ func DescribeBackup(
DescribeResourcePolicies(d, backup.Spec.ResourcePolicy)
}

if backup.Spec.UploaderConfigForBackup != nil && backup.Spec.UploaderConfigForBackup.ParallelFilesUpload > 0 {
if backup.Spec.UploaderConfig != nil && backup.Spec.UploaderConfig.ParallelFilesUpload > 0 {
d.Println()
DescribeUploaderConfig(d, backup.Spec)
DescribeUploaderConfigForBackup(d, backup.Spec)
}

status := backup.Status
@@ -135,10 +135,10 @@ func DescribeResourcePolicies(d *Describer, resPolicies *v1.TypedLocalObjectRefe
d.Printf("\tName:\t%s\n", resPolicies.Name)
}

// DescribeUploaderConfig describes uploader config in human-readable format
func DescribeUploaderConfig(d *Describer, spec velerov1api.BackupSpec) {
// DescribeUploaderConfigForBackup describes uploader config in human-readable format
func DescribeUploaderConfigForBackup(d *Describer, spec velerov1api.BackupSpec) {
d.Printf("Uploader config:\n")
d.Printf("\tParallel files upload:\t%d\n", spec.UploaderConfigForBackup.ParallelFilesUpload)
d.Printf("\tParallel files upload:\t%d\n", spec.UploaderConfig.ParallelFilesUpload)
}

// DescribeBackupSpec describes a backup spec in human-readable format.
@@ -28,7 +28,7 @@ func TestDescribeUploaderConfig(t *testing.T) {
buf: &bytes.Buffer{},
}
d.out.Init(d.buf, 0, 8, 2, ' ', 0)
DescribeUploaderConfig(d, input)
DescribeUploaderConfigForBackup(d, input)
d.out.Flush()
expect := `Uploader config:
Parallel files upload: 10
@@ -32,6 +32,7 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/cmd/util/downloadrequest"
"github.com/vmware-tanzu/velero/pkg/itemoperation"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/results"
)
@@ -177,9 +178,9 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
d.Println()
d.Printf("Preserve Service NodePorts:\t%s\n", BoolPointerString(restore.Spec.PreserveNodePorts, "false", "true", "auto"))

if restore.Spec.UploaderConfigForRestore != nil && restore.Spec.UploaderConfigForRestore.WriteSparseFiles {
if restore.Spec.UploaderConfig != nil && boolptr.IsSetToTrue(restore.Spec.UploaderConfig.WriteSparseFiles) {
d.Println()
d.Printf("Write Sparse Files:\t%T\n", restore.Spec.UploaderConfigForRestore.WriteSparseFiles)
DescribeUploaderConfigForRestore(d, restore.Spec)
}

d.Println()
@@ -198,6 +199,12 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
})
}

// DescribeUploaderConfigForRestore describes uploader config in human-readable format
func DescribeUploaderConfigForRestore(d *Describer, spec velerov1api.RestoreSpec) {
d.Printf("Uploader config:\n")
d.Printf("\tWrite Sparse Files:\t%T\n", boolptr.IsSetToTrue(spec.UploaderConfig.WriteSparseFiles))
}

func describeRestoreItemOperations(ctx context.Context, kbClient kbclient.Client, d *Describer, restore *velerov1api.Restore, details bool, insecureSkipTLSVerify bool, caCertPath string) {
status := restore.Status
if status.RestoreItemOperationsAttempted > 0 {
@@ -48,9 +48,9 @@ func DescribeSchedule(schedule *v1.Schedule) string {
DescribeResourcePolicies(d, schedule.Spec.Template.ResourcePolicy)
}

if schedule.Spec.Template.UploaderConfigForBackup != nil && schedule.Spec.Template.UploaderConfigForBackup.ParallelFilesUpload > 0 {
if schedule.Spec.Template.UploaderConfig != nil && schedule.Spec.Template.UploaderConfig.ParallelFilesUpload > 0 {
d.Println()
DescribeUploaderConfig(d, schedule.Spec.Template)
DescribeUploaderConfigForBackup(d, schedule.Spec.Template)
}

status := schedule.Status
@@ -332,7 +332,7 @@ func (r *DataDownloadReconciler) runCancelableDataPath(ctx context.Context, fsRe
}
log.WithField("path", path.ByPath).Info("fs init")

if err := fsRestore.StartRestore(dd.Spec.SnapshotID, path, &dd.Spec.DataMoverConfig); err != nil {
if err := fsRestore.StartRestore(dd.Spec.SnapshotID, path, dd.Spec.DataMoverConfig); err != nil {
return r.errorOut(ctx, dd, err, fmt.Sprintf("error starting data path %s restore", path.ByPath), log)
}
@@ -296,7 +296,7 @@ func (f *fakeDataUploadFSBR) Init(ctx context.Context, bslName string, sourceNam
return nil
}

func (f *fakeDataUploadFSBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfigs *map[string]string) error {
func (f *fakeDataUploadFSBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfigs map[string]string) error {
du := f.du
original := f.du.DeepCopy()
du.Status.Phase = velerov2alpha1api.DataUploadPhaseCompleted
@@ -306,7 +306,7 @@ func (f *fakeDataUploadFSBR) StartBackup(source datapath.AccessPoint, realSource
return nil
}

func (f *fakeDataUploadFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs *map[string]string) error {
func (f *fakeDataUploadFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs map[string]string) error {
return nil
}
@@ -103,7 +103,7 @@ func (b *fakeFSBR) Init(ctx context.Context, bslName string, sourceNamespace str
return nil
}

func (b *fakeFSBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfigs *map[string]string) error {
func (b *fakeFSBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfigs map[string]string) error {
pvb := b.pvb

original := b.pvb.DeepCopy()
@@ -115,7 +115,7 @@ func (b *fakeFSBR) StartBackup(source datapath.AccessPoint, realSource string, p
return nil
}

func (b *fakeFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs *map[string]string) error {
func (b *fakeFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs map[string]string) error {
return nil
}
@@ -129,7 +129,7 @@ func (fs *fileSystemBR) Close(ctx context.Context) {
fs.log.WithField("user", fs.jobName).Info("FileSystemBR is closed")
}

func (fs *fileSystemBR) StartBackup(source AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfig *map[string]string) error {
func (fs *fileSystemBR) StartBackup(source AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfig map[string]string) error {
if !fs.initialized {
return errors.New("file system data path is not initialized")
}
@@ -150,7 +150,7 @@ func (fs *fileSystemBR) StartBackup(source AccessPoint, realSource string, paren
return nil
}

func (fs *fileSystemBR) StartRestore(snapshotID string, target AccessPoint, uploaderConfigs *map[string]string) error {
func (fs *fileSystemBR) StartRestore(snapshotID string, target AccessPoint, uploaderConfigs map[string]string) error {
if !fs.initialized {
return errors.New("file system data path is not initialized")
}
@@ -100,7 +100,7 @@ func TestAsyncBackup(t *testing.T) {
fs.initialized = true
fs.callbacks = test.callbacks

err := fs.StartBackup(AccessPoint{ByPath: test.path}, "", "", false, nil, &map[string]string{})
err := fs.StartBackup(AccessPoint{ByPath: test.path}, "", "", false, nil, map[string]string{})
require.Equal(t, nil, err)

<-finish
@@ -183,7 +183,7 @@ func TestAsyncRestore(t *testing.T) {
fs.initialized = true
fs.callbacks = test.callbacks

err := fs.StartRestore(test.snapshot, AccessPoint{ByPath: test.path}, &map[string]string{})
err := fs.StartRestore(test.snapshot, AccessPoint{ByPath: test.path}, map[string]string{})
require.Equal(t, nil, err)

<-finish
@@ -43,11 +43,11 @@ func (_m *AsyncBR) Init(ctx context.Context, bslName string, sourceNamespace str
}

// StartBackup provides a mock function with given fields: source, realSource, parentSnapshot, forceFull, tags, dataMoverConfig
func (_m *AsyncBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, dataMoverConfig *map[string]string) error {
func (_m *AsyncBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, dataMoverConfig map[string]string) error {
ret := _m.Called(source, realSource, parentSnapshot, forceFull, tags, dataMoverConfig)

var r0 error
if rf, ok := ret.Get(0).(func(datapath.AccessPoint, string, string, bool, map[string]string, *map[string]string) error); ok {
if rf, ok := ret.Get(0).(func(datapath.AccessPoint, string, string, bool, map[string]string, map[string]string) error); ok {
r0 = rf(source, realSource, parentSnapshot, forceFull, tags, dataMoverConfig)
} else {
r0 = ret.Error(0)
@@ -57,11 +57,11 @@ func (_m *AsyncBR) StartBackup(source datapath.AccessPoint, realSource string, p
}

// StartRestore provides a mock function with given fields: snapshotID, target, dataMoverConfig
func (_m *AsyncBR) StartRestore(snapshotID string, target datapath.AccessPoint, dataMoverConfig *map[string]string) error {
func (_m *AsyncBR) StartRestore(snapshotID string, target datapath.AccessPoint, dataMoverConfig map[string]string) error {
ret := _m.Called(snapshotID, target, dataMoverConfig)

var r0 error
if rf, ok := ret.Get(0).(func(string, datapath.AccessPoint, *map[string]string) error); ok {
if rf, ok := ret.Get(0).(func(string, datapath.AccessPoint, map[string]string) error); ok {
r0 = rf(snapshotID, target, dataMoverConfig)
} else {
r0 = ret.Error(0)
@@ -62,10 +62,10 @@ type AsyncBR interface {
Init(ctx context.Context, bslName string, sourceNamespace string, uploaderType string, repositoryType string, repoIdentifier string, repositoryEnsurer *repository.Ensurer, credentialGetter *credentials.CredentialGetter) error

// StartBackup starts an asynchronous data path instance for backup
StartBackup(source AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, dataMoverConfig *map[string]string) error
StartBackup(source AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, dataMoverConfig map[string]string) error

// StartRestore starts an asynchronous data path instance for restore
StartRestore(snapshotID string, target AccessPoint, dataMoverConfig *map[string]string) error
StartRestore(snapshotID string, target AccessPoint, dataMoverConfig map[string]string) error

// Cancel cancels an asynchronous data path instance
Cancel()
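A minimal sketch of the standard Go behaviour that makes dropping the *map[string]string indirection safe — a nil map reads like an empty map, so callers can simply pass nil:

package main

import "fmt"

func main() {
	var cfg map[string]string // nil, no allocation needed

	fmt.Println(len(cfg))       // 0
	fmt.Println(cfg["missing"]) // "" – reading a nil map is safe
	for k, v := range cfg {     // ranging over a nil map runs zero iterations
		fmt.Println(k, v)
	}
}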
@@ -36,9 +36,9 @@ import (
"github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
"github.com/vmware-tanzu/velero/pkg/repository"
uploaderutil "github.com/vmware-tanzu/velero/pkg/uploader/util"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/uploaderconfig"
)

// Backupper can execute pod volume backups of volumes in a pod.
@@ -411,8 +411,8 @@ func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume c
pvb.Spec.Tags["pvc-uid"] = string(pvc.UID)
}

if backup.Spec.UploaderConfigForBackup != nil {
pvb.Spec.UploaderSettings = uploaderconfig.StoreBackupConfig(backup.Spec.UploaderConfigForBackup)
if backup.Spec.UploaderConfig != nil {
pvb.Spec.UploaderSettings = uploaderutil.StoreBackupConfig(backup.Spec.UploaderConfig)
}

return pvb
@@ -36,9 +36,9 @@ import (
"github.com/vmware-tanzu/velero/pkg/label"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
"github.com/vmware-tanzu/velero/pkg/repository"
uploaderutil "github.com/vmware-tanzu/velero/pkg/uploader/util"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/uploaderconfig"
)

type RestoreData struct {
@@ -286,8 +286,8 @@ func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backu
pvr.Labels[velerov1api.PVCUIDLabel] = string(pvc.UID)
}

if restore.Spec.UploaderConfigForRestore != nil {
pvr.Spec.UploaderSettings = uploaderconfig.StoreRestoreConfig(restore.Spec.UploaderConfigForRestore)
if restore.Spec.UploaderConfig != nil {
pvr.Spec.UploaderSettings = uploaderutil.StoreRestoreConfig(restore.Spec.UploaderConfig)
}

return pvr
@@ -38,11 +38,10 @@ import (
"github.com/kopia/kopia/snapshot/snapshotfs"
"github.com/pkg/errors"

"github.com/vmware-tanzu/velero/pkg/util/uploaderconfig"

"github.com/vmware-tanzu/velero/pkg/kopia"
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
"github.com/vmware-tanzu/velero/pkg/uploader"
uploaderutil "github.com/vmware-tanzu/velero/pkg/uploader/util"
)

// All function mainly used to make testing more convenient
@@ -106,17 +105,17 @@ func getDefaultPolicy() *policy.Policy {
}
}

func setupPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snapshot.SourceInfo, uploaderCfg *map[string]string) (*policy.Tree, error) {
func setupPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snapshot.SourceInfo, uploaderCfg map[string]string) (*policy.Tree, error) {
// some internal operations from Kopia code retrieves policies from repo directly, so we need to persist the policy to repo
curPolicy := getDefaultPolicy()

if uploaderCfg != nil {
uploaderConfig, err := uploaderconfig.GetBackupConfig(uploaderCfg)
if len(uploaderCfg) > 0 {
parallelUpload, err := uploaderutil.GetParallelFilesUpload(uploaderCfg)
if err != nil {
return nil, errors.Wrap(err, "failed to get uploader config")
}
if uploaderConfig.ParallelFilesUpload > 0 {
curPolicy.UploadPolicy.MaxParallelFileReads = newOptionalInt(uploaderConfig.ParallelFilesUpload)
if parallelUpload > 0 {
curPolicy.UploadPolicy.MaxParallelFileReads = newOptionalInt(parallelUpload)
}
}
@@ -141,7 +140,7 @@ func setupPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snap

// Backup backup specific sourcePath and update progress
func Backup(ctx context.Context, fsUploader SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string,
forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
if fsUploader == nil {
return nil, false, errors.New("get empty kopia uploader")
}
@@ -237,7 +236,7 @@ func SnapshotSource(
forceFull bool,
parentSnapshot string,
snapshotTags map[string]string,
uploaderCfg *map[string]string,
uploaderCfg map[string]string,
log logrus.FieldLogger,
description string,
) (string, int64, error) {
@@ -369,7 +368,7 @@ func findPreviousSnapshotManifest(ctx context.Context, rep repo.Repository, sour
}

// Restore restore specific sourcePath with given snapshotID and update progress
func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string,
func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string,
log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) {
log.Info("Start to restore...")
@@ -400,13 +399,16 @@ func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *Progress,
IgnorePermissionErrors: true,
}

if uploaderCfg != nil {
restoreCfg, err := uploaderconfig.GetRestoreConfig(uploaderCfg)
if len(uploaderCfg) > 0 {
writeSparseFiles, err := uploaderutil.GetWriteSparseFiles(uploaderCfg)
if err != nil {
return 0, 0, errors.Wrap(err, "failed to get uploader config")
}
fsOutput.WriteSparseFiles = restoreCfg.WriteSparseFiles
if writeSparseFiles {
fsOutput.WriteSparseFiles = true
}
}

log.Debugf("Restore filesystem output %v", fsOutput)

err = fsOutput.Init(ctx)
@@ -96,7 +96,7 @@ func TestSnapshotSource(t *testing.T) {
testCases := []struct {
name string
args []mockArgs
uploaderCfg *map[string]string
uploaderCfg map[string]string
notError bool
}{
{
@@ -162,7 +162,7 @@ func TestSnapshotSource(t *testing.T) {
{methodName: "Upload", returns: []interface{}{manifest, nil}},
{methodName: "Flush", returns: []interface{}{nil}},
},
uploaderCfg: &map[string]string{
uploaderCfg: map[string]string{
"ParallelFilesUpload": "10",
},
notError: true,
@@ -647,9 +647,9 @@ func TestBackup(t *testing.T) {
var snapshotInfo *uploader.SnapshotInfo
var err error
if tc.isEmptyUploader {
snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), nil, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, &map[string]string{}, tc.tags, &logrus.Logger{})
snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), nil, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, map[string]string{}, tc.tags, &logrus.Logger{})
} else {
snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), s.uploderMock, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, &map[string]string{}, tc.tags, &logrus.Logger{})
snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), s.uploderMock, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, map[string]string{}, tc.tags, &logrus.Logger{})
}
// Check if the returned error matches the expected error
if tc.expectedError != nil {
@@ -788,7 +788,7 @@ func TestRestore(t *testing.T) {
repoWriterMock.On("OpenObject", mock.Anything, mock.Anything).Return(em, nil)

progress := new(Progress)
bytesRestored, fileCount, err := Restore(context.Background(), repoWriterMock, progress, tc.snapshotID, tc.dest, tc.volMode, &map[string]string{}, logrus.New(), nil)
bytesRestored, fileCount, err := Restore(context.Background(), repoWriterMock, progress, tc.snapshotID, tc.dest, tc.volMode, map[string]string{}, logrus.New(), nil)

// Check if the returned error matches the expected error
if tc.expectedError != nil {
@@ -119,7 +119,7 @@ func (kp *kopiaProvider) RunBackup(
forceFull bool,
parentSnapshot string,
volMode uploader.PersistentVolumeMode,
uploaderCfg *map[string]string,
uploaderCfg map[string]string,
updater uploader.ProgressUpdater) (string, bool, error) {
if updater == nil {
return "", false, errors.New("Need to initial backup progress updater first")
@@ -204,7 +204,7 @@ func (kp *kopiaProvider) RunRestore(
snapshotID string,
volumePath string,
volMode uploader.PersistentVolumeMode,
uploaderCfg *map[string]string,
uploaderCfg map[string]string,
updater uploader.ProgressUpdater) error {
log := kp.log.WithFields(logrus.Fields{
"snapshotID": snapshotID,
@@ -68,34 +68,34 @@ func TestRunBackup(t *testing.T) {

testCases := []struct {
name string
hookBackupFunc func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error)
hookBackupFunc func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error)
volMode uploader.PersistentVolumeMode
notError bool
}{
{
name: "success to backup",
hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
return &uploader.SnapshotInfo{}, false, nil
},
notError: true,
},
{
name: "get error to backup",
hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
return &uploader.SnapshotInfo{}, false, errors.New("failed to backup")
},
notError: false,
},
{
name: "got empty snapshot",
hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
return nil, true, errors.New("snapshot is empty")
},
notError: false,
},
{
name: "success to backup block mode volume",
hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) {
return &uploader.SnapshotInfo{}, false, nil
},
volMode: uploader.PersistentVolumeBlock,
@@ -108,7 +108,7 @@ func TestRunBackup(t *testing.T) {
tc.volMode = uploader.PersistentVolumeFilesystem
}
BackupFunc = tc.hookBackupFunc
_, _, err := kp.RunBackup(context.Background(), "var", "", nil, false, "", tc.volMode, &map[string]string{}, &updater)
_, _, err := kp.RunBackup(context.Background(), "var", "", nil, false, "", tc.volMode, map[string]string{}, &updater)
if tc.notError {
assert.NoError(t, err)
} else {
@@ -125,27 +125,27 @@ func TestRunRestore(t *testing.T) {

testCases := []struct {
name string
hookRestoreFunc func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error)
hookRestoreFunc func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error)
notError bool
volMode uploader.PersistentVolumeMode
}{
{
name: "normal restore",
hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) {
hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) {
return 0, 0, nil
},
notError: true,
},
{
name: "failed to restore",
hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) {
hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) {
return 0, 0, errors.New("failed to restore")
},
notError: false,
},
{
name: "normal block mode restore",
hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) {
hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) {
return 0, 0, nil
},
volMode: uploader.PersistentVolumeBlock,
@@ -159,7 +159,7 @@ func TestRunRestore(t *testing.T) {
tc.volMode = uploader.PersistentVolumeFilesystem
}
RestoreFunc = tc.hookRestoreFunc
err := kp.RunRestore(context.Background(), "", "/var", tc.volMode, &map[string]string{}, &updater)
err := kp.RunRestore(context.Background(), "", "/var", tc.volMode, map[string]string{}, &updater)
if tc.notError {
assert.NoError(t, err)
} else {
@@ -30,28 +30,28 @@ func (_m *Provider) Close(ctx context.Context) error {
}

// RunBackup provides a mock function with given fields: ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater
func (_m *Provider) RunBackup(ctx context.Context, path string, realSource string, tags map[string]string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, updater uploader.ProgressUpdater) (string, bool, error) {
func (_m *Provider) RunBackup(ctx context.Context, path string, realSource string, tags map[string]string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, updater uploader.ProgressUpdater) (string, bool, error) {
ret := _m.Called(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater)

var r0 string
var r1 bool
var r2 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) (string, bool, error)); ok {
if rf, ok := ret.Get(0).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) (string, bool, error)); ok {
return rf(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater)
}
if rf, ok := ret.Get(0).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) string); ok {
if rf, ok := ret.Get(0).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) string); ok {
r0 = rf(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater)
} else {
r0 = ret.Get(0).(string)
}

if rf, ok := ret.Get(1).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) bool); ok {
if rf, ok := ret.Get(1).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) bool); ok {
r1 = rf(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater)
} else {
r1 = ret.Get(1).(bool)
}

if rf, ok := ret.Get(2).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) error); ok {
if rf, ok := ret.Get(2).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) error); ok {
r2 = rf(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater)
} else {
r2 = ret.Error(2)
@@ -61,11 +61,11 @@ func (_m *Provider) RunBackup(ctx context.Context, path string, realSource strin
}

// RunRestore provides a mock function with given fields: ctx, snapshotID, volumePath, volMode, uploaderConfig, updater
func (_m *Provider) RunRestore(ctx context.Context, snapshotID string, volumePath string, volMode uploader.PersistentVolumeMode, uploaderConfig *map[string]string, updater uploader.ProgressUpdater) error {
func (_m *Provider) RunRestore(ctx context.Context, snapshotID string, volumePath string, volMode uploader.PersistentVolumeMode, uploaderConfig map[string]string, updater uploader.ProgressUpdater) error {
ret := _m.Called(ctx, snapshotID, volumePath, volMode, uploaderConfig, updater)

var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) error); ok {
if rf, ok := ret.Get(0).(func(context.Context, string, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) error); ok {
r0 = rf(ctx, snapshotID, volumePath, volMode, uploaderConfig, updater)
} else {
r0 = ret.Error(0)
@@ -49,7 +49,7 @@ type Provider interface {
forceFull bool,
parentSnapshot string,
volMode uploader.PersistentVolumeMode,
uploaderCfg *map[string]string,
uploaderCfg map[string]string,
updater uploader.ProgressUpdater) (string, bool, error)
// RunRestore which will do restore for one specific volume with given snapshot id and return error
// updater is used for updating backup progress which implement by third-party
@@ -58,7 +58,7 @@ type Provider interface {
snapshotID string,
volumePath string,
volMode uploader.PersistentVolumeMode,
uploaderConfig *map[string]string,
uploaderConfig map[string]string,
updater uploader.ProgressUpdater) error
// Close which will close related repository
Close(ctx context.Context) error
@@ -30,8 +30,8 @@ import (
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/uploader"
uploaderutil "github.com/vmware-tanzu/velero/pkg/uploader/util"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/uploaderconfig"
)

// resticBackupCMDFunc and resticRestoreCMDFunc are mainly used to make testing more convenient
@@ -123,7 +123,7 @@ func (rp *resticProvider) RunBackup(
forceFull bool,
parentSnapshot string,
volMode uploader.PersistentVolumeMode,
uploaderCfg *map[string]string,
uploaderCfg map[string]string,
updater uploader.ProgressUpdater) (string, bool, error) {
if updater == nil {
return "", false, errors.New("Need to initial backup progress updater first")
@@ -146,14 +146,13 @@ func (rp *resticProvider) RunBackup(
"parentSnapshot": parentSnapshot,
})

if uploaderCfg != nil {
uploaderConfig, err := uploaderconfig.GetBackupConfig(uploaderCfg)
if len(uploaderCfg) > 0 {
parallelFilesUpload, err := uploaderutil.GetParallelFilesUpload(uploaderCfg)
if err != nil {
return "", false, errors.Wrap(err, "failed to get uploader config")
}

if uploaderConfig.ParallelFilesUpload > 0 {
log.Warnf("ParallelFilesUpload is set to %d, but restic does not support parallel file uploads. Ignoring.", uploaderConfig.ParallelFilesUpload)
if parallelFilesUpload > 0 {
log.Warnf("ParallelFilesUpload is set to %d, but restic does not support parallel file uploads. Ignoring.", parallelFilesUpload)
}
}
@@ -198,7 +197,7 @@ func (rp *resticProvider) RunRestore(
snapshotID string,
volumePath string,
volMode uploader.PersistentVolumeMode,
uploaderCfg *map[string]string,
uploaderCfg map[string]string,
updater uploader.ProgressUpdater) error {
if updater == nil {
return errors.New("Need to initial backup progress updater first")
@@ -219,13 +218,11 @@ func (rp *resticProvider) RunRestore(
restoreCmd.ExtraFlags = append(restoreCmd.ExtraFlags, rp.extraFlags...)
}

if uploaderCfg != nil {
extraFlags, err := rp.parseRestoreExtraFlags(uploaderCfg)
if err != nil {
return errors.Wrap(err, "failed to parse uploader config")
} else if len(extraFlags) != 0 {
restoreCmd.ExtraFlags = append(restoreCmd.ExtraFlags, extraFlags...)
}
extraFlags, err := rp.parseRestoreExtraFlags(uploaderCfg)
if err != nil {
return errors.Wrap(err, "failed to parse uploader config")
} else if len(extraFlags) != 0 {
restoreCmd.ExtraFlags = append(restoreCmd.ExtraFlags, extraFlags...)
}

stdout, stderr, err := restic.RunRestore(restoreCmd, log, updater)
@@ -234,15 +231,20 @@ func (rp *resticProvider) RunRestore(
return err
}

func (rp *resticProvider) parseRestoreExtraFlags(uploaderCfg *map[string]string) ([]string, error) {
func (rp *resticProvider) parseRestoreExtraFlags(uploaderCfg map[string]string) ([]string, error) {
extraFlags := []string{}
uploaderConfig, err := uploaderconfig.GetRestoreConfig(uploaderCfg)
if len(uploaderCfg) == 0 {
return extraFlags, nil
}

writeSparseFiles, err := uploaderutil.GetWriteSparseFiles(uploaderCfg)
if err != nil {
return extraFlags, errors.Wrap(err, "failed to get uploader config")
}

if uploaderConfig.WriteSparseFiles {
if writeSparseFiles {
extraFlags = append(extraFlags, "--sparse")
}

return extraFlags, nil
}
@@ -150,9 +150,9 @@ func TestResticRunBackup(t *testing.T) {
}
if !tc.nilUpdater {
updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()}
_, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, &map[string]string{}, &updater)
_, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, map[string]string{}, &updater)
} else {
_, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, &map[string]string{}, nil)
_, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, map[string]string{}, nil)
}

tc.rp.log.Infof("test name %v error %v", tc.name, err)
@@ -223,9 +223,9 @@ func TestResticRunRestore(t *testing.T) {
var err error
if !tc.nilUpdater {
updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()}
err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, &map[string]string{}, &updater)
err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, map[string]string{}, &updater)
} else {
err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, &map[string]string{}, nil)
err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, map[string]string{}, nil)
}

tc.rp.log.Infof("test name %v error %v", tc.name, err)
@@ -417,19 +417,19 @@ func TestParseUploaderConfig(t *testing.T) {

testCases := []struct {
name string
uploaderConfig *map[string]string
uploaderConfig map[string]string
expectedFlags []string
}{
{
name: "SparseFilesEnabled",
uploaderConfig: &map[string]string{
uploaderConfig: map[string]string{
"WriteSparseFiles": "true",
},
expectedFlags: []string{"--sparse"},
},
{
name: "SparseFilesDisabled",
uploaderConfig: &map[string]string{
uploaderConfig: map[string]string{
"writeSparseFiles": "false",
},
expectedFlags: []string{},
@@ -0,0 +1,70 @@
/*
Copyright The Velero Contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"strconv"

"github.com/pkg/errors"

velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

const (
parallelFilesUpload = "ParallelFilesUpload"
writeSparseFiles = "WriteSparseFiles"
)

func StoreBackupConfig(config *velerov1api.UploaderConfigForBackup) map[string]string {
data := make(map[string]string)
data[parallelFilesUpload] = strconv.Itoa(config.ParallelFilesUpload)
return data
}

func StoreRestoreConfig(config *velerov1api.UploaderConfigForRestore) map[string]string {
data := make(map[string]string)
if config.WriteSparseFiles != nil {
data[writeSparseFiles] = strconv.FormatBool(*config.WriteSparseFiles)
} else {
data[writeSparseFiles] = strconv.FormatBool(false)
}
return data
}

func GetParallelFilesUpload(uploaderCfg map[string]string) (int, error) {
parallelFilesUpload, ok := uploaderCfg[parallelFilesUpload]
if ok {
parallelFilesUploadInt, err := strconv.Atoi(parallelFilesUpload)
if err != nil {
return 0, errors.Wrap(err, "failed to parse ParallelFilesUpload config")
}
return parallelFilesUploadInt, nil
}
return 0, nil
}

func GetWriteSparseFiles(uploaderCfg map[string]string) (bool, error) {
writeSparseFiles, ok := uploaderCfg[writeSparseFiles]
if ok {
writeSparseFilesBool, err := strconv.ParseBool(writeSparseFiles)
if err != nil {
return false, errors.Wrap(err, "failed to parse WriteSparseFiles config")
}
return writeSparseFilesBool, nil
}
return false, nil
}
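A minimal round-trip sketch for the new helpers, assuming the uploaderutil alias for github.com/vmware-tanzu/velero/pkg/uploader/util used elsewhere in this diff:

package main

import (
	"fmt"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	uploaderutil "github.com/vmware-tanzu/velero/pkg/uploader/util"
)

func main() {
	// Encode the typed uploader config into the flat key/value settings map...
	settings := uploaderutil.StoreBackupConfig(&velerov1api.UploaderConfigForBackup{
		ParallelFilesUpload: 4,
	})

	// ...and decode it again on the uploader side.
	parallel, err := uploaderutil.GetParallelFilesUpload(settings)
	fmt.Println(parallel, err) // 4 <nil>
}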
@ -0,0 +1,149 @@
|
|||
/*
Copyright The Velero Contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"reflect"
	"testing"

	"github.com/pkg/errors"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

func TestStoreBackupConfig(t *testing.T) {
	config := &velerov1api.UploaderConfigForBackup{
		ParallelFilesUpload: 3,
	}

	expectedData := map[string]string{
		parallelFilesUpload: "3",
	}

	result := StoreBackupConfig(config)

	if !reflect.DeepEqual(result, expectedData) {
		t.Errorf("Expected: %v, but got: %v", expectedData, result)
	}
}

func TestStoreRestoreConfig(t *testing.T) {
	boolTrue := true
	config := &velerov1api.UploaderConfigForRestore{
		WriteSparseFiles: &boolTrue,
	}

	expectedData := map[string]string{
		writeSparseFiles: "true",
	}

	result := StoreRestoreConfig(config)

	if !reflect.DeepEqual(result, expectedData) {
		t.Errorf("Expected: %v, but got: %v", expectedData, result)
	}
}

func TestGetParallelFilesUpload(t *testing.T) {
	tests := []struct {
		name           string
		uploaderCfg    map[string]string
		expectedResult int
		expectedError  error
	}{
		{
			name:           "Valid ParallelFilesUpload",
			uploaderCfg:    map[string]string{parallelFilesUpload: "5"},
			expectedResult: 5,
			expectedError:  nil,
		},
		{
			name:           "Missing ParallelFilesUpload",
			uploaderCfg:    map[string]string{},
			expectedResult: 0,
			expectedError:  nil,
		},
		{
			name:           "Invalid ParallelFilesUpload (not a number)",
			uploaderCfg:    map[string]string{parallelFilesUpload: "invalid"},
			expectedResult: 0,
			expectedError:  errors.Wrap(errors.New("strconv.Atoi: parsing \"invalid\": invalid syntax"), "failed to parse ParallelFilesUpload config"),
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			result, err := GetParallelFilesUpload(test.uploaderCfg)

			if result != test.expectedResult {
				t.Errorf("Expected result %d, but got %d", test.expectedResult, result)
			}

			if (err == nil && test.expectedError != nil) || (err != nil && test.expectedError == nil) || (err != nil && test.expectedError != nil && err.Error() != test.expectedError.Error()) {
				t.Errorf("Expected error '%v', but got '%v'", test.expectedError, err)
			}
		})
	}
}

func TestGetWriteSparseFiles(t *testing.T) {
	tests := []struct {
		name           string
		uploaderCfg    map[string]string
		expectedResult bool
		expectedError  error
	}{
		{
			name:           "Valid WriteSparseFiles (true)",
			uploaderCfg:    map[string]string{writeSparseFiles: "true"},
			expectedResult: true,
			expectedError:  nil,
		},
		{
			name:           "Valid WriteSparseFiles (false)",
			uploaderCfg:    map[string]string{writeSparseFiles: "false"},
			expectedResult: false,
			expectedError:  nil,
		},
		{
			name:           "Invalid WriteSparseFiles (not a boolean)",
			uploaderCfg:    map[string]string{writeSparseFiles: "invalid"},
			expectedResult: false,
			expectedError:  errors.Wrap(errors.New("strconv.ParseBool: parsing \"invalid\": invalid syntax"), "failed to parse WriteSparseFiles config"),
		},
		{
			name:           "Missing WriteSparseFiles",
			uploaderCfg:    map[string]string{},
			expectedResult: false,
			expectedError:  nil,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			result, err := GetWriteSparseFiles(test.uploaderCfg)

			if result != test.expectedResult {
				t.Errorf("Expected result %t, but got %t", test.expectedResult, result)
			}

			if (err == nil && test.expectedError != nil) || (err != nil && test.expectedError == nil) || (err != nil && test.expectedError != nil && err.Error() != test.expectedError.Error()) {
				t.Errorf("Expected error '%v', but got '%v'", test.expectedError, err)
			}
		})
	}
}
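These are standard table-driven go test cases. Assuming the file sits alongside the helpers it exercises (the directory is not shown in this diff), something like the following would run just these tests:

go test ./pkg/uploader/util/ -run 'TestStore|TestGet' -v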
@ -1,50 +0,0 @@
package uploaderconfig

import (
	"strconv"

	"github.com/pkg/errors"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

const (
	parallelFilesUpload = "ParallelFilesUpload"
	writeSparseFiles    = "WriteSparseFiles"
)

func StoreBackupConfig(config *velerov1api.UploaderConfigForBackup) *map[string]string {
	data := make(map[string]string)
	data[parallelFilesUpload] = strconv.Itoa(config.ParallelFilesUpload)
	return &data
}

func StoreRestoreConfig(config *velerov1api.UploaderConfigForRestore) *map[string]string {
	data := make(map[string]string)
	data[writeSparseFiles] = strconv.FormatBool(config.WriteSparseFiles)
	return &data
}

func GetBackupConfig(data *map[string]string) (velerov1api.UploaderConfigForBackup, error) {
	config := velerov1api.UploaderConfigForBackup{}
	var err error
	if item, ok := (*data)[parallelFilesUpload]; ok {
		config.ParallelFilesUpload, err = strconv.Atoi(item)
		if err != nil {
			return velerov1api.UploaderConfigForBackup{}, errors.Wrap(err, "failed to parse ParallelFilesUpload")
		}
	}
	return config, nil
}

func GetRestoreConfig(data *map[string]string) (velerov1api.UploaderConfigForRestore, error) {
	config := velerov1api.UploaderConfigForRestore{}
	var err error
	if item, ok := (*data)[writeSparseFiles]; ok {
		config.WriteSparseFiles, err = strconv.ParseBool(item)
		if err != nil {
			return velerov1api.UploaderConfigForRestore{}, errors.Wrap(err, "failed to parse WriteSparseFiles")
		}
	}
	return config, nil
}
@ -1,95 +0,0 @@
package uploaderconfig

import (
	"reflect"
	"strings"
	"testing"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

func TestStoreBackupConfig(t *testing.T) {
	config := &velerov1api.UploaderConfigForBackup{
		ParallelFilesUpload: 3,
	}

	expectedData := map[string]string{
		parallelFilesUpload: "3",
	}

	result := StoreBackupConfig(config)

	if !reflect.DeepEqual(*result, expectedData) {
		t.Errorf("Expected: %v, but got: %v", expectedData, *result)
	}
}

func TestStoreRestoreConfig(t *testing.T) {
	config := &velerov1api.UploaderConfigForRestore{
		WriteSparseFiles: true,
	}

	expectedData := map[string]string{
		writeSparseFiles: "true",
	}

	result := StoreRestoreConfig(config)

	if !reflect.DeepEqual(*result, expectedData) {
		t.Errorf("Expected: %v, but got: %v", expectedData, *result)
	}
}

func TestGetBackupConfig(t *testing.T) {
	data := &map[string]string{
		parallelFilesUpload: "3",
	}

	expectedConfig := velerov1api.UploaderConfigForBackup{
		ParallelFilesUpload: 3,
	}

	result, err := GetBackupConfig(data)

	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	if !reflect.DeepEqual(result, expectedConfig) {
		t.Errorf("Expected: %v, but got: %v", expectedConfig, result)
	}

	// Test error case
	(*data)[parallelFilesUpload] = "invalid"
	_, err = GetBackupConfig(data)
	if !strings.Contains(err.Error(), "failed to parse ParallelFilesUpload") {
		t.Errorf("Expected error message containing 'failed to parse ParallelFilesUpload', but got: %v", err)
	}
}

func TestGetRestoreConfig(t *testing.T) {
	data := &map[string]string{
		writeSparseFiles: "true",
	}

	expectedConfig := velerov1api.UploaderConfigForRestore{
		WriteSparseFiles: true,
	}

	result, err := GetRestoreConfig(data)

	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	if !reflect.DeepEqual(result, expectedConfig) {
		t.Errorf("Expected: %v, but got: %v", expectedConfig, result)
	}

	// Test error case
	(*data)[writeSparseFiles] = "invalid"
	_, err = GetRestoreConfig(data)
	if !strings.Contains(err.Error(), "failed to parse WriteSparseFiles") {
		t.Errorf("Expected error message containing 'failed to parse WriteSparseFiles', but got: %v", err)
	}
}