repo maintenance for windows

Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
pull/8626/head

parent 5b1738abf8
commit 0a4b05cb6e
@@ -0,0 +1 @@
+Fix issue #8419, support repo maintenance job to run on Windows nodes
@@ -26,6 +26,7 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/util/logging"

 	repokey "github.com/vmware-tanzu/velero/pkg/repository/keys"
+	"github.com/vmware-tanzu/velero/pkg/repository/maintenance"
 	repomanager "github.com/vmware-tanzu/velero/pkg/repository/manager"
 )

@@ -78,17 +79,7 @@ func (o *Options) Run(f velerocli.Factory) {
 	}()

 	if pruneError != nil {
-		logger.WithError(pruneError).Error("An error occurred when running repo prune")
-		terminationLogFile, err := os.Create("/dev/termination-log")
-		if err != nil {
-			logger.WithError(err).Error("Failed to create termination log file")
-			return
-		}
-		defer terminationLogFile.Close()
-
-		if _, errWrite := terminationLogFile.WriteString(fmt.Sprintf("An error occurred: %v", err)); errWrite != nil {
-			logger.WithError(errWrite).Error("Failed to write error to termination log file")
-		}
+		os.Stdout.WriteString(fmt.Sprintf("%s%v", maintenance.TerminationLogIndicator, pruneError))
 	}
 }
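The old path wrote the prune error to /dev/termination-log directly; that path does not exist on Windows nodes, so the job now prints the error to stdout behind a well-known marker and lets the kubelet surface it through the termination-message fallback policy set on the job's container later in this commit. A minimal, self-contained sketch of the convention; the failure value is hypothetical:

// Sketch of the stdout-marker convention, assuming a hypothetical failure.
package main

import (
	"fmt"
	"os"
	"strings"
)

// Mirrors the TerminationLogIndicator constant added later in this commit.
const TerminationLogIndicator = "Repo maintenance error: "

func main() {
	// Producer side (the maintenance job): on failure, print the marker plus
	// the error to stdout as the last output.
	pruneError := fmt.Errorf("fake-error") // hypothetical failure
	os.Stdout.WriteString(fmt.Sprintf("%s%v", TerminationLogIndicator, pruneError))

	// Consumer side (the Velero server): with the fallback policy, the kubelet
	// copies the tail of the container log into the termination message, so
	// the reader searches for the marker instead of assuming a clean message.
	message := "some earlier log line\n" + TerminationLogIndicator + "fake-error"
	if idx := strings.Index(message, TerminationLogIndicator); idx != -1 {
		fmt.Println(message[idx+len(TerminationLogIndicator):]) // prints "fake-error"
	}
}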
@@ -163,22 +154,38 @@ func (o *Options) runRepoPrune(f velerocli.Factory, namespace string, logger log
 		return err
 	}

+	var repo *velerov1api.BackupRepository
+	retry := 10
+	for {
+		repo, err = repository.GetBackupRepository(context.Background(), cli, namespace,
+			repository.BackupRepositoryKey{
+				VolumeNamespace: o.RepoName,
+				BackupLocation:  o.BackupStorageLocation,
+				RepositoryType:  o.RepoType,
+			}, true)
+		if err == nil {
+			break
+		}
+
+		retry--
+		if retry == 0 {
+			break
+		}
+
+		logger.WithError(err).Warn("Failed to retrieve backup repo, need retry")
+
+		time.Sleep(time.Second)
+	}
+
+	if err != nil {
+		return errors.Wrap(err, "failed to get backup repository")
+	}
+
 	manager, err := initRepoManager(namespace, cli, kubeClient, logger)
 	if err != nil {
 		return err
 	}

-	// backupRepository
-	repo, err := repository.GetBackupRepository(context.Background(), cli, namespace,
-		repository.BackupRepositoryKey{
-			VolumeNamespace: o.RepoName,
-			BackupLocation:  o.BackupStorageLocation,
-			RepositoryType:  o.RepoType,
-		}, true)
-	if err != nil {
-		return errors.Wrap(err, "failed to get backup repository")
-	}
-
 	err = manager.PruneRepo(repo)
 	if err != nil {
 		return errors.Wrap(err, "failed to prune repo")
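The new retry loop covers the window right after job startup where the BackupRepository CR may not yet be readable from the API server; it gives up after ten one-second attempts and returns the last error. For comparison only, the same behavior could be expressed with apimachinery's wait helpers; this sketch assumes the surrounding identifiers (cli, namespace, o, logger) from the hunk above, and note the wait helper would return a timeout error rather than the last Get error:

// Comparison sketch using k8s.io/apimachinery/pkg/util/wait; the commit
// itself uses the hand-rolled loop above.
var repo *velerov1api.BackupRepository
err = wait.PollUntilContextTimeout(context.Background(), time.Second, 10*time.Second, true,
	func(ctx context.Context) (bool, error) {
		r, getErr := repository.GetBackupRepository(ctx, cli, namespace,
			repository.BackupRepositoryKey{
				VolumeNamespace: o.RepoName,
				BackupLocation:  o.BackupStorageLocation,
				RepositoryType:  o.RepoType,
			}, true)
		if getErr != nil {
			logger.WithError(getErr).Warn("Failed to retrieve backup repo, need retry")
			return false, nil // not ready yet, keep polling
		}
		repo = r
		return true, nil
	})
if err != nil {
	return errors.Wrap(err, "failed to get backup repository")
}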
@@ -131,10 +131,15 @@ func waitMaintenanceJobCompleteFail(client.Client, context.Context, string, stri
 }

 func waitMaintenanceJobCompleteFunc(now time.Time, result velerov1api.BackupRepositoryMaintenanceResult, message string) func(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
+	completionTimeStamp := &metav1.Time{Time: now.Add(time.Hour)}
+	if result == velerov1api.BackupRepositoryMaintenanceFailed {
+		completionTimeStamp = nil
+	}
+
 	return func(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
 		return velerov1api.BackupRepositoryMaintenanceStatus{
 			StartTimestamp:    &metav1.Time{Time: now},
-			CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour)},
+			CompleteTimestamp: completionTimeStamp,
 			Result:            result,
 			Message:           message,
 		}, nil
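This helper change, together with the fixture flips in the hunks that follow, encodes a new invariant: a failed maintenance run carries no CompleteTimestamp, because a failed batchv1.Job never gets a CompletionTime. Roughly, the statuses the tests now expect look like this sketch:

// Succeeded run: both timestamps set.
succeeded := velerov1api.BackupRepositoryMaintenanceStatus{
	StartTimestamp:    &metav1.Time{Time: now},
	CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour)},
	Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
}

// Failed run: CompleteTimestamp deliberately left nil.
failed := velerov1api.BackupRepositoryMaintenanceStatus{
	StartTimestamp: &metav1.Time{Time: now},
	Result:         velerov1api.BackupRepositoryMaintenanceFailed,
	Message:        "fake-maintenance-message",
}
_, _ = succeeded, failed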
@@ -316,10 +321,9 @@ func TestRunMaintenanceIfDue(t *testing.T) {
 					Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
 				},
 				{
 					StartTimestamp:    &metav1.Time{Time: now},
-					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour)},
 					Result:            velerov1api.BackupRepositoryMaintenanceFailed,
 					Message:           "fake-maintenance-message",
 				},
 			},
 		},
@@ -893,7 +897,7 @@ func TestUpdateRepoMaintenanceHistory(t *testing.T) {
 		{
 			name:       "full history",
 			backupRepo: backupRepoWithFullHistory,
-			result:     velerov1api.BackupRepositoryMaintenanceFailed,
+			result:     velerov1api.BackupRepositoryMaintenanceSucceeded,
 			expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{
 				{
 					StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 22)},
@@ -915,7 +919,7 @@ func TestUpdateRepoMaintenanceHistory(t *testing.T) {
 		{
 			name:       "over full history",
 			backupRepo: backupRepoWithOverFullHistory,
-			result:     velerov1api.BackupRepositoryMaintenanceFailed,
+			result:     velerov1api.BackupRepositoryMaintenanceSucceeded,
 			expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{
 				{
 					StartTimestamp: &metav1.Time{Time: standardTime.Add(-time.Hour * 20)},
@@ -1127,7 +1131,7 @@ func TestConsolidateHistory(t *testing.T) {
 				{
 					StartTimestamp:    &metav1.Time{Time: now.Add(time.Hour)},
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
-					Result:            velerov1api.BackupRepositoryMaintenanceFailed,
+					Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
 					Message:           "fake-maintenance-message-2",
 				},
 			},
@@ -1149,7 +1153,7 @@ func TestConsolidateHistory(t *testing.T) {
 				{
 					StartTimestamp:    &metav1.Time{Time: now.Add(time.Hour)},
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
-					Result:            velerov1api.BackupRepositoryMaintenanceFailed,
+					Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
 					Message:           "fake-maintenance-message-2",
 				},
 				{
@@ -1172,7 +1176,7 @@ func TestConsolidateHistory(t *testing.T) {
 				{
 					StartTimestamp:    &metav1.Time{Time: now.Add(time.Hour)},
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
-					Result:            velerov1api.BackupRepositoryMaintenanceFailed,
+					Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
 					Message:           "fake-maintenance-message-2",
 				},
 			},
@@ -1194,7 +1198,7 @@ func TestConsolidateHistory(t *testing.T) {
 				{
 					StartTimestamp:    &metav1.Time{Time: now.Add(time.Hour)},
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
-					Result:            velerov1api.BackupRepositoryMaintenanceFailed,
+					Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
 					Message:           "fake-maintenance-message-2",
 				},
 				{
@@ -1223,7 +1227,7 @@ func TestConsolidateHistory(t *testing.T) {
 				{
 					StartTimestamp:    &metav1.Time{Time: now.Add(time.Hour)},
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
-					Result:            velerov1api.BackupRepositoryMaintenanceFailed,
+					Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
 					Message:           "fake-maintenance-message-2",
 				},
 				{
@@ -1237,7 +1241,7 @@ func TestConsolidateHistory(t *testing.T) {
 				{
 					StartTimestamp:    &metav1.Time{Time: now.Add(time.Hour)},
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
-					Result:            velerov1api.BackupRepositoryMaintenanceFailed,
+					Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
 					Message:           "fake-maintenance-message-2",
 				},
 				{
@@ -1257,7 +1261,7 @@ func TestConsolidateHistory(t *testing.T) {
 				{
 					StartTimestamp:    &metav1.Time{Time: now.Add(time.Hour)},
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
-					Result:            velerov1api.BackupRepositoryMaintenanceFailed,
+					Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
 					Message:           "fake-maintenance-message-2",
 				},
 				{
@@ -1339,13 +1343,13 @@ func TestGetLastMaintenanceTimeFromHistory(t *testing.T) {
 			history: []velerov1api.BackupRepositoryMaintenanceStatus{
 				{
 					StartTimestamp: &metav1.Time{Time: now},
-					Result:         velerov1api.BackupRepositoryMaintenanceSucceeded,
+					Result:         velerov1api.BackupRepositoryMaintenanceFailed,
 					Message:        "fake-maintenance-message",
 				},
 				{
 					StartTimestamp:    &metav1.Time{Time: now.Add(time.Hour)},
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
-					Result:            velerov1api.BackupRepositoryMaintenanceFailed,
+					Result:            velerov1api.BackupRepositoryMaintenanceSucceeded,
 					Message:           "fake-maintenance-message-2",
 				},
 				{
@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"math"
 	"sort"
+	"strings"
 	"time"

 	"github.com/pkg/errors"
@@ -35,6 +36,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"

 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	"github.com/vmware-tanzu/velero/pkg/util"
 	"github.com/vmware-tanzu/velero/pkg/util/kube"

 	appsv1 "k8s.io/api/apps/v1"
|
||||||
const (
|
const (
|
||||||
RepositoryNameLabel = "velero.io/repo-name"
|
RepositoryNameLabel = "velero.io/repo-name"
|
||||||
GlobalKeyForRepoMaintenanceJobCM = "global"
|
GlobalKeyForRepoMaintenanceJobCM = "global"
|
||||||
|
TerminationLogIndicator = "Repo maintenance error: "
|
||||||
)
|
)
|
||||||
|
|
||||||
type JobConfigs struct {
|
type JobConfigs struct {
|
||||||
|
@@ -147,7 +150,7 @@ func getResultFromJob(cli client.Client, job *batchv1.Job) (string, error) {
 	}

 	if len(podList.Items) == 0 {
-		return "", fmt.Errorf("no pod found for job %s", job.Name)
+		return "", errors.Errorf("no pod found for job %s", job.Name)
 	}

 	// we only have one maintenance pod for the job
@@ -155,16 +158,29 @@ func getResultFromJob(cli client.Client, job *batchv1.Job) (string, error) {

 	statuses := pod.Status.ContainerStatuses
 	if len(statuses) == 0 {
-		return "", fmt.Errorf("no container statuses found for job %s", job.Name)
+		return "", errors.Errorf("no container statuses found for job %s", job.Name)
 	}

 	// we only have one maintenance container
 	terminated := statuses[0].State.Terminated
 	if terminated == nil {
-		return "", fmt.Errorf("container for job %s is not terminated", job.Name)
+		return "", errors.Errorf("container for job %s is not terminated", job.Name)
 	}

-	return terminated.Message, nil
+	if terminated.Message == "" {
+		return "", nil
+	}
+
+	idx := strings.Index(terminated.Message, TerminationLogIndicator)
+	if idx == -1 {
+		return "", errors.New("error to locate repo maintenance error indicator from termination message")
+	}
+
+	if idx+len(TerminationLogIndicator) >= len(terminated.Message) {
+		return "", errors.New("nothing after repo maintenance error indicator in termination message")
+	}
+
+	return terminated.Message[idx+len(TerminationLogIndicator):], nil
 }

 // getJobConfig is called to get the Maintenance Job Config for the
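Because the container's termination message may now be the tail of the pod log (via the fallback policy) rather than a clean error string, getResultFromJob anchors on the marker and returns only what follows it. A standalone restatement of the same parsing rules, with a hypothetical helper name, assuming the standard strings import and github.com/pkg/errors:

// parseTerminationMessage is a hypothetical standalone mirror of the logic above.
func parseTerminationMessage(message string) (string, error) {
	if message == "" {
		return "", nil // container exited without writing an error
	}
	idx := strings.Index(message, TerminationLogIndicator)
	if idx == -1 {
		return "", errors.New("error to locate repo maintenance error indicator from termination message")
	}
	if idx+len(TerminationLogIndicator) >= len(message) {
		return "", errors.New("nothing after repo maintenance error indicator in termination message")
	}
	return message[idx+len(TerminationLogIndicator):], nil
}

// parseTerminationMessage("kopia log tail\nRepo maintenance error: fake-error")
// returns ("fake-error", nil); a non-empty message without the marker, or with
// nothing after it, is an error.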
@@ -331,7 +347,7 @@ func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1a
 		if job.Status.Failed > 0 {
 			if msg, err := getResultFromJob(cli, job); err != nil {
 				log.WithError(err).Warnf("Failed to get result of maintenance job %s", job.Name)
-				message = "Repo maintenance failed but result is not retrieveable"
+				message = fmt.Sprintf("Repo maintenance failed but result is not retrieveable, err: %v", err)
 			} else {
 				message = msg
 			}
@@ -434,6 +450,16 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe
 		return nil, errors.Wrap(err, "failed to parse resource requirements for maintenance job")
 	}

+	podLabels := map[string]string{
+		RepositoryNameLabel: repo.Name,
+	}
+
+	for _, k := range util.ThirdPartyLabels {
+		if v := veleroutil.GetVeleroServerLabelValue(deployment, k); v != "" {
+			podLabels[k] = v
+		}
+	}
+
 	// Set arguments
 	args := []string{"repo-maintenance"}
 	args = append(args, fmt.Sprintf("--repo-name=%s", repo.Spec.VolumeNamespace))
|
||||||
BackoffLimit: new(int32), // Never retry
|
BackoffLimit: new(int32), // Never retry
|
||||||
Template: v1.PodTemplateSpec{
|
Template: v1.PodTemplateSpec{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "velero-repo-maintenance-pod",
|
Name: "velero-repo-maintenance-pod",
|
||||||
Labels: map[string]string{
|
Labels: podLabels,
|
||||||
RepositoryNameLabel: repo.Name,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
Spec: v1.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
Containers: []v1.Container{
|
Containers: []v1.Container{
|
||||||
|
@@ -468,17 +492,26 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe
 					Command: []string{
 						"/velero",
 					},
 					Args:            args,
 					ImagePullPolicy: v1.PullIfNotPresent,
 					Env:             envVars,
 					EnvFrom:         envFromSources,
 					VolumeMounts:    volumeMounts,
 					Resources:       resources,
+					TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
 				},
 			},
 			RestartPolicy:      v1.RestartPolicyNever,
 			Volumes:            volumes,
 			ServiceAccountName: serviceAccount,
+			Tolerations: []v1.Toleration{
+				{
+					Key:      "os",
+					Operator: "Equal",
+					Effect:   "NoSchedule",
+					Value:    "windows",
+				},
+			},
 		},
 	},
 },
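Two additions here do the heavy lifting for Windows support. TerminationMessageFallbackToLogsOnError tells the kubelet to populate the container's termination message from the tail of the log when the container exits with an error and wrote nothing to the termination-message file, which is exactly what the stdout-marker convention above relies on. The static toleration lets the job schedule onto Windows nodes carrying an os=windows:NoSchedule taint; a sketch of the taint shape this matches (whether a given cluster actually taints its Windows nodes with the key "os" is an assumption):

// The toleration added above matches node taints of this shape:
taint := v1.Taint{
	Key:    "os",
	Value:  "windows",
	Effect: v1.TaintEffectNoSchedule,
}
_ = taint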
@@ -489,22 +522,6 @@ func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRe
 		job.Spec.Template.Spec.Affinity = affinity
 	}

-	if tolerations := veleroutil.GetTolerationsFromVeleroServer(deployment); tolerations != nil {
-		job.Spec.Template.Spec.Tolerations = tolerations
-	}
-
-	if nodeSelector := veleroutil.GetNodeSelectorFromVeleroServer(deployment); nodeSelector != nil {
-		job.Spec.Template.Spec.NodeSelector = nodeSelector
-	}
-
-	if labels := veleroutil.GetVeleroServerLables(deployment); len(labels) > 0 {
-		job.Spec.Template.Labels = labels
-	}
-
-	if annotations := veleroutil.GetVeleroServerAnnotations(deployment); len(annotations) > 0 {
-		job.Spec.Template.Annotations = annotations
-	}
-
 	return job, nil
 }

@@ -516,8 +533,8 @@ func composeStatusFromJob(job *batchv1.Job, message string) velerov1api.BackupRe

 	return velerov1api.BackupRepositoryMaintenanceStatus{
 		Result:            result,
-		StartTimestamp:    &metav1.Time{Time: job.CreationTimestamp.Time},
-		CompleteTimestamp: &metav1.Time{Time: job.Status.CompletionTime.Time},
+		StartTimestamp:    &job.CreationTimestamp,
+		CompleteTimestamp: job.Status.CompletionTime,
 		Message:           message,
 	}
 }
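The timestamp change here is a nil-safety fix, not a cosmetic one: for a failed Job, Kubernetes leaves Status.CompletionTime nil, and the old expression dereferenced it. A minimal illustration:

// Old expression: panics when the job failed, because CompletionTime is nil.
//   CompleteTimestamp: &metav1.Time{Time: job.Status.CompletionTime.Time}
// New expression: copies the pointer, staying nil for failed jobs.
//   CompleteTimestamp: job.Status.CompletionTime
var job batchv1.Job // a failed job: job.Status.CompletionTime == nil
completeTimestamp := job.Status.CompletionTime
fmt.Println(completeTimestamp == nil) // true; no dereference, no panic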
@@ -284,11 +284,18 @@ func TestGetResultFromJob(t *testing.T) {
 	}

 	// Create a fake Kubernetes client
-	cli := fake.NewClientBuilder().WithObjects(job, pod).Build()
+	cli := fake.NewClientBuilder().Build()

 	// test an error should be returned
 	result, err := getResultFromJob(cli, job)
-	assert.Error(t, err)
+	assert.EqualError(t, err, "no pod found for job test-job")
+	assert.Equal(t, "", result)
+
+	cli = fake.NewClientBuilder().WithObjects(job, pod).Build()
+
+	// test an error should be returned
+	result, err = getResultFromJob(cli, job)
+	assert.EqualError(t, err, "no container statuses found for job test-job")
 	assert.Equal(t, "", result)

 	// Set a non-terminated container status to the pod
@@ -303,7 +310,7 @@ func TestGetResultFromJob(t *testing.T) {
 	// Test an error should be returned
 	cli = fake.NewClientBuilder().WithObjects(job, pod).Build()
 	result, err = getResultFromJob(cli, job)
-	assert.Error(t, err)
+	assert.EqualError(t, err, "container for job test-job is not terminated")
 	assert.Equal(t, "", result)

 	// Set a terminated container status to the pod
@@ -311,9 +318,7 @@ func TestGetResultFromJob(t *testing.T) {
 		ContainerStatuses: []v1.ContainerStatus{
 			{
 				State: v1.ContainerState{
-					Terminated: &v1.ContainerStateTerminated{
-						Message: "test message",
-					},
+					Terminated: &v1.ContainerStateTerminated{},
 				},
 			},
 		},
@@ -323,7 +328,61 @@ func TestGetResultFromJob(t *testing.T) {
 	cli = fake.NewClientBuilder().WithObjects(job, pod).Build()
 	result, err = getResultFromJob(cli, job)
 	assert.NoError(t, err)
-	assert.Equal(t, "test message", result)
+	assert.Equal(t, "", result)
+
+	// Set a terminated container status with invalidate message to the pod
+	pod.Status = v1.PodStatus{
+		ContainerStatuses: []v1.ContainerStatus{
+			{
+				State: v1.ContainerState{
+					Terminated: &v1.ContainerStateTerminated{
+						Message: "fake-message",
+					},
+				},
+			},
+		},
+	}
+
+	cli = fake.NewClientBuilder().WithObjects(job, pod).Build()
+	result, err = getResultFromJob(cli, job)
+	assert.EqualError(t, err, "error to locate repo maintenance error indicator from termination message")
+	assert.Equal(t, "", result)
+
+	// Set a terminated container status with empty maintenance error to the pod
+	pod.Status = v1.PodStatus{
+		ContainerStatuses: []v1.ContainerStatus{
+			{
+				State: v1.ContainerState{
+					Terminated: &v1.ContainerStateTerminated{
+						Message: "Repo maintenance error: ",
+					},
+				},
+			},
+		},
+	}
+
+	cli = fake.NewClientBuilder().WithObjects(job, pod).Build()
+	result, err = getResultFromJob(cli, job)
+	assert.EqualError(t, err, "nothing after repo maintenance error indicator in termination message")
+	assert.Equal(t, "", result)
+
+	// Set a terminated container status with maintenance error to the pod
+	pod.Status = v1.PodStatus{
+		ContainerStatuses: []v1.ContainerStatus{
+			{
+				State: v1.ContainerState{
+					Terminated: &v1.ContainerStateTerminated{
+						Message: "Repo maintenance error: fake-error",
+					},
+				},
+			},
+		},
+	}
+
+	cli = fake.NewClientBuilder().WithObjects(job, pod).Build()
+	result, err = getResultFromJob(cli, job)
+	assert.NoError(t, err)
+	assert.Equal(t, "fake-error", result)
 }

 func TestGetJobConfig(t *testing.T) {
@@ -565,16 +624,15 @@ func TestWaitAllJobsComplete(t *testing.T) {
 			CreationTimestamp: metav1.Time{Time: now.Add(time.Hour)},
 		},
 		Status: batchv1.JobStatus{
 			StartTime: &metav1.Time{Time: now.Add(time.Hour)},
-			CompletionTime: &metav1.Time{Time: now.Add(time.Hour * 2)},
 			Failed:    1,
 		},
 	}

 	jobPodFailed1 := builder.ForPod(veleroNamespace, "job2").Labels(map[string]string{"job-name": "job2"}).ContainerStatuses(&v1.ContainerStatus{
 		State: v1.ContainerState{
 			Terminated: &v1.ContainerStateTerminated{
-				Message: "fake-message-2",
+				Message: "Repo maintenance error: fake-message-2",
 			},
 		},
 	}).Result()
@@ -682,10 +740,9 @@ func TestWaitAllJobsComplete(t *testing.T) {
 			},
 			expectedStatus: []velerov1api.BackupRepositoryMaintenanceStatus{
 				{
 					Result:         velerov1api.BackupRepositoryMaintenanceFailed,
 					StartTimestamp: &metav1.Time{Time: now.Add(time.Hour)},
-					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
-					Message:           "Repo maintenance failed but result is not retrieveable",
+					Message:        "Repo maintenance failed but result is not retrieveable, err: no pod found for job job2",
 				},
 			},
 		},
@@ -706,10 +763,9 @@ func TestWaitAllJobsComplete(t *testing.T) {
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour)},
 				},
 				{
 					Result:         velerov1api.BackupRepositoryMaintenanceFailed,
 					StartTimestamp: &metav1.Time{Time: now.Add(time.Hour)},
-					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
 					Message:        "fake-message-2",
 				},
 			},
 		},
@@ -732,10 +788,9 @@ func TestWaitAllJobsComplete(t *testing.T) {
 					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour)},
 				},
 				{
 					Result:         velerov1api.BackupRepositoryMaintenanceFailed,
 					StartTimestamp: &metav1.Time{Time: now.Add(time.Hour)},
-					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
 					Message:        "fake-message-2",
 				},
 				{
 					Result: velerov1api.BackupRepositoryMaintenanceSucceeded,
@@ -760,10 +815,9 @@ func TestWaitAllJobsComplete(t *testing.T) {
 			},
 			expectedStatus: []velerov1api.BackupRepositoryMaintenanceStatus{
 				{
 					Result:         velerov1api.BackupRepositoryMaintenanceFailed,
 					StartTimestamp: &metav1.Time{Time: now.Add(time.Hour)},
-					CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)},
 					Message:        "fake-message-2",
 				},
 				{
 					Result: velerov1api.BackupRepositoryMaintenanceSucceeded,
@@ -799,7 +853,12 @@ func TestWaitAllJobsComplete(t *testing.T) {
 				assert.Equal(t, test.expectedStatus[i].Result, history[i].Result)
 				assert.Equal(t, test.expectedStatus[i].Message, history[i].Message)
 				assert.Equal(t, test.expectedStatus[i].StartTimestamp.Time, history[i].StartTimestamp.Time)
-				assert.Equal(t, test.expectedStatus[i].CompleteTimestamp.Time, history[i].CompleteTimestamp.Time)
+
+				if test.expectedStatus[i].CompleteTimestamp == nil {
+					assert.Nil(t, history[i].CompleteTimestamp)
+				} else {
+					assert.Equal(t, test.expectedStatus[i].CompleteTimestamp.Time, history[i].CompleteTimestamp.Time)
+				}
 			}
 		})
 	}
@@ -808,59 +867,36 @@ func TestWaitAllJobsComplete(t *testing.T) {
 }

 func TestBuildJob(t *testing.T) {
-	testCases := []struct {
-		name            string
-		m               *JobConfigs
-		deploy          *appsv1.Deployment
-		logLevel        logrus.Level
-		logFormat       *logging.FormatFlag
-		expectedJobName string
-		expectedError   bool
-		expectedEnv     []v1.EnvVar
-		expectedEnvFrom []v1.EnvFromSource
-	}{
-		{
-			name: "Valid maintenance job",
-			m: &JobConfigs{
-				PodResources: &kube.PodResources{
-					CPURequest:    "100m",
-					MemoryRequest: "128Mi",
-					CPULimit:      "200m",
-					MemoryLimit:   "256Mi",
-				},
-			},
-			deploy: &appsv1.Deployment{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "velero",
-					Namespace: "velero",
-				},
-				Spec: appsv1.DeploymentSpec{
-					Template: v1.PodTemplateSpec{
-						Spec: v1.PodSpec{
-							Containers: []v1.Container{
-								{
-									Name:  "velero-repo-maintenance-container",
-									Image: "velero-image",
-									Env: []v1.EnvVar{
-										{
-											Name:  "test-name",
-											Value: "test-value",
-										},
-									},
-									EnvFrom: []v1.EnvFromSource{
-										{
-											ConfigMapRef: &v1.ConfigMapEnvSource{
-												LocalObjectReference: v1.LocalObjectReference{
-													Name: "test-configmap",
-												},
-											},
-										},
-										{
-											SecretRef: &v1.SecretEnvSource{
-												LocalObjectReference: v1.LocalObjectReference{
-													Name: "test-secret",
-												},
-											},
-										},
+	deploy := appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "velero",
+			Namespace: "velero",
+		},
+		Spec: appsv1.DeploymentSpec{
+			Template: v1.PodTemplateSpec{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  "velero-repo-maintenance-container",
+							Image: "velero-image",
+							Env: []v1.EnvVar{
+								{
+									Name:  "test-name",
+									Value: "test-value",
+								},
+							},
+							EnvFrom: []v1.EnvFromSource{
+								{
+									ConfigMapRef: &v1.ConfigMapEnvSource{
+										LocalObjectReference: v1.LocalObjectReference{
+											Name: "test-configmap",
+										},
+									},
+								},
+								{
+									SecretRef: &v1.SecretEnvSource{
+										LocalObjectReference: v1.LocalObjectReference{
+											Name: "test-secret",
+										},
 									},
 								},
 							},
@@ -869,6 +905,36 @@ func TestBuildJob(t *testing.T) {
 					},
 				},
 			},
+		},
+	}
+
+	deploy2 := deploy
+	deploy2.Spec.Template.Labels = map[string]string{"azure.workload.identity/use": "fake-label-value"}
+
+	testCases := []struct {
+		name             string
+		m                *JobConfigs
+		deploy           *appsv1.Deployment
+		logLevel         logrus.Level
+		logFormat        *logging.FormatFlag
+		thirdPartyLabel  map[string]string
+		expectedJobName  string
+		expectedError    bool
+		expectedEnv      []v1.EnvVar
+		expectedEnvFrom  []v1.EnvFromSource
+		expectedPodLabel map[string]string
+	}{
+		{
+			name: "Valid maintenance job without third party labels",
+			m: &JobConfigs{
+				PodResources: &kube.PodResources{
+					CPURequest:    "100m",
+					MemoryRequest: "128Mi",
+					CPULimit:      "200m",
+					MemoryLimit:   "256Mi",
+				},
+			},
+			deploy: &deploy,
 			logLevel:        logrus.InfoLevel,
 			logFormat:       logging.NewFormatFlag(),
 			expectedJobName: "test-123-maintain-job",
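A note on deploy2 := deploy in the hunk above: assigning a struct copies it, but any maps and slices inside are shared between the two values. Assigning a brand-new Labels map, as the test does, is safe; mutating a shared map in place would also change deploy. A small illustration of the semantics:

// Struct assignment copies the value; interior maps/slices remain shared.
d1 := appsv1.Deployment{}
d2 := d1
d2.Spec.Template.Labels = map[string]string{"k": "v"} // fresh map: d1 unaffected
fmt.Println(d1.Spec.Template.Labels == nil)           // true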
@@ -895,6 +961,51 @@ func TestBuildJob(t *testing.T) {
 					},
 				},
 			},
+			expectedPodLabel: map[string]string{
+				RepositoryNameLabel: "test-123",
+			},
+		},
+		{
+			name: "Valid maintenance job with third party labels",
+			m: &JobConfigs{
+				PodResources: &kube.PodResources{
+					CPURequest:    "100m",
+					MemoryRequest: "128Mi",
+					CPULimit:      "200m",
+					MemoryLimit:   "256Mi",
+				},
+			},
+			deploy:          &deploy2,
+			logLevel:        logrus.InfoLevel,
+			logFormat:       logging.NewFormatFlag(),
+			expectedJobName: "test-123-maintain-job",
+			expectedError:   false,
+			expectedEnv: []v1.EnvVar{
+				{
+					Name:  "test-name",
+					Value: "test-value",
+				},
+			},
+			expectedEnvFrom: []v1.EnvFromSource{
+				{
+					ConfigMapRef: &v1.ConfigMapEnvSource{
+						LocalObjectReference: v1.LocalObjectReference{
+							Name: "test-configmap",
+						},
+					},
+				},
+				{
+					SecretRef: &v1.SecretEnvSource{
+						LocalObjectReference: v1.LocalObjectReference{
+							Name: "test-secret",
+						},
+					},
+				},
+			},
+			expectedPodLabel: map[string]string{
+				RepositoryNameLabel:            "test-123",
+				"azure.workload.identity/use": "fake-label-value",
+			},
 		},
 		{
 			name: "Error getting Velero server deployment",
@@ -996,14 +1107,7 @@ func TestBuildJob(t *testing.T) {
 			}
 			assert.Equal(t, expectedArgs, container.Args)

-			// Check affinity
-			assert.Nil(t, job.Spec.Template.Spec.Affinity)
-
-			// Check tolerations
-			assert.Nil(t, job.Spec.Template.Spec.Tolerations)
-
-			// Check node selector
-			assert.Nil(t, job.Spec.Template.Spec.NodeSelector)
+			assert.Equal(t, tc.expectedPodLabel, job.Spec.Template.Labels)
 			}
 		})
 	}
@@ -87,3 +87,12 @@ func GetVeleroServerLables(deployment *appsv1.Deployment) map[string]string {
 func GetVeleroServerAnnotations(deployment *appsv1.Deployment) map[string]string {
 	return deployment.Spec.Template.Annotations
 }
+
+// GetVeleroServerLabelValue returns the value of specified label of Velero server deployment
+func GetVeleroServerLabelValue(deployment *appsv1.Deployment, key string) string {
+	if deployment.Spec.Template.Labels == nil {
+		return ""
+	}
+
+	return deployment.Spec.Template.Labels[key]
+}
@@ -711,3 +711,51 @@ func TestGetVeleroServerAnnotations(t *testing.T) {
 		})
 	}
 }
+
+func TestGetVeleroServerLabelValue(t *testing.T) {
+	tests := []struct {
+		name       string
+		deployment *appsv1.Deployment
+		expected   string
+	}{
+		{
+			name:       "nil Labels",
+			deployment: &appsv1.Deployment{},
+			expected:   "",
+		},
+		{
+			name: "no label key",
+			deployment: &appsv1.Deployment{
+				Spec: appsv1.DeploymentSpec{
+					Template: v1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{},
+						},
+					},
+				},
+			},
+			expected: "",
+		},
+		{
+			name: "with label key",
+			deployment: &appsv1.Deployment{
+				Spec: appsv1.DeploymentSpec{
+					Template: v1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{"fake-key": "fake-value"},
+						},
+					},
+				},
+			},
+			expected: "fake-value",
+		},
+	}
+
+	// Run tests
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := GetVeleroServerLabelValue(tt.deployment, "fake-key")
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}