Enable stylecheck linter and resolve found issues.
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>

branch: pull/6185/head
parent 980106dc39
commit bbc1e2e151
@@ -0,0 +1 @@
+Enable stylecheck linter and resolve found issues.

@@ -298,6 +298,7 @@ linters:
   - gosec
   - govet
   - misspell
+  - stylecheck
   - typecheck
   - unparam
   - unused

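Note (not part of the diff): stylecheck is staticcheck's style-rule subset in golangci-lint; the rest of this commit fixes what it reported. A minimal, hypothetical sketch of the kind of code its ST1003 naming check flags:

package main

import "fmt"

// Flagged: Go identifiers should be camelCase/PascalCase, not snake_case,
// and initialisms should keep one case (serverURL, not server_url or serverUrl).
var server_url = "https://example.com" // ST1003: should be serverURL

func main() {
	fmt.Println(server_url)
}
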
@@ -29,19 +29,19 @@ import (
 // minor
 // patch
 // prerelease (this will be alpha/beta/rc followed by a ".", followed by 1 or more digits (alpha.5)
-var release_regex *regexp.Regexp = regexp.MustCompile(`^v(?P<major>[[:digit:]]+)\.(?P<minor>[[:digit:]]+)\.(?P<patch>[[:digit:]]+)(-{1}(?P<prerelease>(alpha|beta|rc)\.[[:digit:]]+))*`)
+var releaseRegex = regexp.MustCompile(`^v(?P<major>[[:digit:]]+)\.(?P<minor>[[:digit:]]+)\.(?P<patch>[[:digit:]]+)(-{1}(?P<prerelease>(alpha|beta|rc)\.[[:digit:]]+))*`)

 // This small program exists because checking the VELERO_VERSION rules in bash is difficult, and difficult to test for correctness.
 // Calling it with --verify will verify whether or not the VELERO_VERSION environment variable is a valid version string, without parsing for its components.
 // Calling it without --verify will try to parse the version into its component pieces.
 func main() {
-velero_version := os.Getenv("VELERO_VERSION")
+veleroVersion := os.Getenv("VELERO_VERSION")

-submatches := reSubMatchMap(release_regex, velero_version)
+submatches := reSubMatchMap(releaseRegex, veleroVersion)

 // Didn't match the regex, exit.
 if len(submatches) == 0 {
-fmt.Printf("VELERO_VERSION of %s was not valid. Please correct the value and retry.", velero_version)
+fmt.Printf("VELERO_VERSION of %s was not valid. Please correct the value and retry.", veleroVersion)
 os.Exit(1)
 }

@@ -56,7 +56,7 @@ func TestRegexMatching(t *testing.T) {
 for _, test := range tests {
 name := fmt.Sprintf("Testing version string %s", test.version)
 t.Run(name, func(t *testing.T) {
-results := reSubMatchMap(release_regex, test.version)
+results := reSubMatchMap(releaseRegex, test.version)

 if len(results) == 0 && test.expectMatch {
 t.Fail()

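Note (not part of the diff): the body of reSubMatchMap is not shown above; a sketch of what a helper like it presumably does — mapping the regex's named capture groups to their matches:

package main

import (
	"fmt"
	"regexp"
)

var releaseRegex = regexp.MustCompile(`^v(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)`)

// subMatchMap is a hypothetical stand-in for reSubMatchMap: it returns an
// empty map when the input does not match, and group-name -> match otherwise.
func subMatchMap(re *regexp.Regexp, s string) map[string]string {
	m := map[string]string{}
	match := re.FindStringSubmatch(s)
	if match == nil {
		return m
	}
	for i, name := range re.SubexpNames() {
		if name != "" {
			m[name] = match[i]
		}
	}
	return m
}

func main() {
	fmt.Println(subMatchMap(releaseRegex, "v1.11.0")) // map[major:1 minor:11 patch:0]
}
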
@@ -54,7 +54,7 @@ func unmarshalResourcePolicies(yamlData *string) (*resourcePolicies, error) {
 }
 }

-func (policies *Policies) buildPolicy(resPolicies *resourcePolicies) error {
+func (p *Policies) buildPolicy(resPolicies *resourcePolicies) error {
 for _, vp := range resPolicies.VolumePolicies {
 con, err := unmarshalVolConditions(vp.Conditions)
 if err != nil {

@@ -64,18 +64,18 @@ func (policies *Policies) buildPolicy(resPolicies *resourcePolicies) error {
 if err != nil {
 return errors.WithStack(err)
 }
-var p volPolicy
-p.action = vp.Action
-p.conditions = append(p.conditions, &capacityCondition{capacity: *volCap})
-p.conditions = append(p.conditions, &storageClassCondition{storageClass: con.StorageClass})
-p.conditions = append(p.conditions, &nfsCondition{nfs: con.NFS})
-p.conditions = append(p.conditions, &csiCondition{csi: con.CSI})
-policies.volumePolicies = append(policies.volumePolicies, p)
+var volP volPolicy
+volP.action = vp.Action
+volP.conditions = append(volP.conditions, &capacityCondition{capacity: *volCap})
+volP.conditions = append(volP.conditions, &storageClassCondition{storageClass: con.StorageClass})
+volP.conditions = append(volP.conditions, &nfsCondition{nfs: con.NFS})
+volP.conditions = append(volP.conditions, &csiCondition{csi: con.CSI})
+p.volumePolicies = append(p.volumePolicies, volP)
 }

 // Other resource policies

-policies.version = resPolicies.Version
+p.version = resPolicies.Version
 return nil
 }

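Note (not part of the diff): the hunk above is stylecheck's ST1016 check — every method of a type should use the same receiver name. Renaming the receiver to p collided with the local variable p, hence the second rename to volP. A minimal sketch of the compliant shape, with illustrative names:

package main

import "fmt"

type Policies struct{ version string }

// ST1016: keep one receiver name (p) across all methods of Policies;
// mixing, say, `policies` on one method and `p` on another is what gets flagged.
func (p *Policies) SetVersion(v string) { p.version = v }
func (p *Policies) Version() string     { return p.version }

func main() {
	var pol Policies
	pol.SetVersion("v1")
	fmt.Println(pol.Version())
}
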
@@ -52,7 +52,6 @@ import (
 vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1"
 "github.com/vmware-tanzu/velero/pkg/podvolume"
 "github.com/vmware-tanzu/velero/pkg/test"
-testutil "github.com/vmware-tanzu/velero/pkg/test"
 kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
 "github.com/vmware-tanzu/velero/pkg/volume"
 )

@@ -107,7 +106,6 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) {
 if item.namespace != "" {
 fileWithVersion = fileWithVersion + "/v1-preferredversion/" + "namespaces/" + item.namespace
 } else {
 file = file + "/cluster"
 fileWithVersion = fileWithVersion + "/v1-preferredversion" + "/cluster"
 }
 fileWithVersion = fileWithVersion + "/" + item.name + ".json"

@@ -2845,7 +2843,7 @@ func TestBackupWithHooks(t *testing.T) {
 h = newHarness(t)
 req = &Request{Backup: tc.backup}
 backupFile = bytes.NewBuffer([]byte{})
-podCommandExecutor = new(testutil.MockPodCommandExecutor)
+podCommandExecutor = new(test.MockPodCommandExecutor)
 )

 h.backupper.podCommandExecutor = podCommandExecutor

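Note (not part of the diff): dropping the testutil alias above is stylecheck's ST1019 check — the same package imported more than once. A hypothetical minimal example of what it reports; the duplicate is legal Go, just redundant:

package main

import (
	"fmt"
	f "fmt" // ST1019: package "fmt" is being imported more than once
)

func main() {
	fmt.Println("a")
	f.Println("b") // same package, second name — remove the alias and use fmt
}
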
@@ -48,17 +48,17 @@ func (c *CustomResourceDefinitionV1Beta1Builder) Condition(cond apiextv1beta1.Cu
 }

 // Result returns the built CustomResourceDefinition.
-func (b *CustomResourceDefinitionV1Beta1Builder) Result() *apiextv1beta1.CustomResourceDefinition {
-return b.object
+func (c *CustomResourceDefinitionV1Beta1Builder) Result() *apiextv1beta1.CustomResourceDefinition {
+return c.object
 }

 // ObjectMeta applies functional options to the CustomResourceDefinition's ObjectMeta.
-func (b *CustomResourceDefinitionV1Beta1Builder) ObjectMeta(opts ...ObjectMetaOpt) *CustomResourceDefinitionV1Beta1Builder {
+func (c *CustomResourceDefinitionV1Beta1Builder) ObjectMeta(opts ...ObjectMetaOpt) *CustomResourceDefinitionV1Beta1Builder {
 for _, opt := range opts {
-opt(b.object)
+opt(c.object)
 }

-return b
+return c
 }

 // CustomResourceDefinitionV1Beta1ConditionBuilder builds CustomResourceDefinitionV1Beta1Condition objects.

@@ -98,8 +98,8 @@ func (c *V1CustomResourceDefinitionConditionBuilder) Status(cs apiextv1.Conditio
 }

 // Result returns the built CustomResourceDefinitionCondition.
-func (b *V1CustomResourceDefinitionConditionBuilder) Result() apiextv1.CustomResourceDefinitionCondition {
-return b.object
+func (c *V1CustomResourceDefinitionConditionBuilder) Result() apiextv1.CustomResourceDefinitionCondition {
+return c.object
 }

 // V1CustomResourceDefinitionVersionBuilder builds CustomResourceDefinitionVersion objects.

@@ -115,26 +115,26 @@ func ForV1CustomResourceDefinitionVersion(name string) *V1CustomResourceDefiniti
 }

 // Served sets the Served field on a CustomResourceDefinitionVersion.
-func (b *V1CustomResourceDefinitionVersionBuilder) Served(s bool) *V1CustomResourceDefinitionVersionBuilder {
-b.object.Served = s
-return b
+func (c *V1CustomResourceDefinitionVersionBuilder) Served(s bool) *V1CustomResourceDefinitionVersionBuilder {
+c.object.Served = s
+return c
 }

 // Storage sets the Storage field on a CustomResourceDefinitionVersion.
-func (b *V1CustomResourceDefinitionVersionBuilder) Storage(s bool) *V1CustomResourceDefinitionVersionBuilder {
-b.object.Storage = s
-return b
+func (c *V1CustomResourceDefinitionVersionBuilder) Storage(s bool) *V1CustomResourceDefinitionVersionBuilder {
+c.object.Storage = s
+return c
 }

-func (b *V1CustomResourceDefinitionVersionBuilder) Schema(s *apiextv1.JSONSchemaProps) *V1CustomResourceDefinitionVersionBuilder {
-if b.object.Schema == nil {
-b.object.Schema = new(apiextv1.CustomResourceValidation)
+func (c *V1CustomResourceDefinitionVersionBuilder) Schema(s *apiextv1.JSONSchemaProps) *V1CustomResourceDefinitionVersionBuilder {
+if c.object.Schema == nil {
+c.object.Schema = new(apiextv1.CustomResourceValidation)
 }
-b.object.Schema.OpenAPIV3Schema = s
-return b
+c.object.Schema.OpenAPIV3Schema = s
+return c
 }

 // Result returns the built CustomResourceDefinitionVersion.
-func (b *V1CustomResourceDefinitionVersionBuilder) Result() apiextv1.CustomResourceDefinitionVersion {
-return b.object
+func (c *V1CustomResourceDefinitionVersionBuilder) Result() apiextv1.CustomResourceDefinitionVersion {
+return c.object
 }

@@ -61,7 +61,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
 cmd.CheckError(err)

 if outputFormat != "plaintext" && outputFormat != "json" {
-cmd.CheckError(fmt.Errorf("Invalid output format '%s'. Valid value are 'plaintext, json'", outputFormat))
+cmd.CheckError(fmt.Errorf("invalid output format '%s'. valid value are 'plaintext, json'", outputFormat))
 }

 var backups *velerov1api.BackupList

@@ -58,7 +58,7 @@ func (o *DeleteOptions) Complete(f client.Factory, args []string) error {
 // Validate validates the fields of the DeleteOptions struct.
 func (o *DeleteOptions) Validate(c *cobra.Command, f client.Factory, args []string) error {
 if o.Client == nil {
-return errors.New("Velero client is not set; unable to proceed")
+return errors.New("velero client is not set; unable to proceed")
 }

 return o.SelectOptions.Validate()

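Note (not part of the diff): the two hunks above are stylecheck's ST1005 check — error strings should not be capitalized or end with punctuation, because they are routinely wrapped mid-sentence. A minimal sketch:

package main

import (
	"errors"
	"fmt"
)

func main() {
	bad := errors.New("Velero client is not set.") // ST1005: capitalized, trailing period
	good := errors.New("velero client is not set")
	// Lowercase reads naturally once wrapped: "validating options: velero client is not set"
	fmt.Println(fmt.Errorf("validating options: %w", good), bad)
}
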
@@ -23,7 +23,6 @@ import (

 "github.com/stretchr/testify/assert"
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/client-go/kubernetes/fake"

@@ -51,7 +50,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 name: "no error when pod volumes are present and there are mirror pods",
 pods: []*corev1.Pod{
 builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
-builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(v1.MirrorPodAnnotationKey, "baz")).Result(),
+builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(corev1.MirrorPodAnnotationKey, "baz")).Result(),
 },
 dirs: []string{"foo", "baz"},
 wantErr: false,

@@ -22,7 +22,6 @@ import (

 "github.com/pkg/errors"
 "k8s.io/apimachinery/pkg/util/wait"
-"sigs.k8s.io/controller-runtime/pkg/client"
 kbclient "sigs.k8s.io/controller-runtime/pkg/client"

 velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

@@ -48,7 +47,7 @@ func (g *DefaultServerStatusGetter) GetServerStatus(kbClient kbclient.Client) (*
 ctx, cancel := context.WithCancel(g.Context)
 defer cancel()

-key := client.ObjectKey{Name: created.Name, Namespace: g.Namespace}
+key := kbclient.ObjectKey{Name: created.Name, Namespace: g.Namespace}
 checkFunc := func() {
 updated := &velerov1api.ServerStatusRequest{}
 if err := kbClient.Get(ctx, key, updated); err != nil {

@@ -60,11 +60,11 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin

 key := kbclient.ObjectKey{Name: created.Name, Namespace: namespace}
 timeStreamFirstCheck := time.Now()
-downloadUrlTimeout := false
+downloadURLTimeout := false
 checkFunc := func() {
 // if timeout has been reached, cancel request
 if time.Now().After(timeStreamFirstCheck.Add(timeout)) {
-downloadUrlTimeout = true
+downloadURLTimeout = true
 cancel()
 }
 updated := &velerov1api.DownloadRequest{}

@@ -85,7 +85,7 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin
 }

 wait.Until(checkFunc, 25*time.Millisecond, ctx.Done())
-if downloadUrlTimeout {
+if downloadURLTimeout {
 return ErrDownloadRequestDownloadURLTimeout
 }

@@ -149,7 +149,7 @@ func NewStructuredDescriber(format string) *StructuredDescriber {
 func DescribeInSF(fn func(d *StructuredDescriber), format string) string {
 d := NewStructuredDescriber(format)
 fn(d)
-return d.JsonEncode()
+return d.JSONEncode()
 }

 // Describe adds all types of argument to d.output.

@@ -167,8 +167,8 @@ func (d *StructuredDescriber) DescribeMetadata(metadata metav1.ObjectMeta) {
 d.Describe("metadata", metadataInfo)
 }

-// JsonEncode encodes d.output to json
-func (d *StructuredDescriber) JsonEncode() string {
+// JSONEncode encodes d.output to json
+func (d *StructuredDescriber) JSONEncode() string {
 byteBuffer := &bytes.Buffer{}
 encoder := json.NewEncoder(byteBuffer)
 encoder.SetEscapeHTML(false)

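Note (not part of the diff): the downloadURLTimeout and JSONEncode renames above are the initialism half of ST1003 — URL, ID, JSON, TLS, API and similar keep a uniform case. A minimal sketch with illustrative names:

package main

import "fmt"

type describer struct{}

// JSONEncode, not JsonEncode: initialisms stay fully upper- or lower-cased.
func (d describer) JSONEncode() string { return "{}" }

func main() {
	downloadURLTimeout := false // not downloadUrlTimeout
	fmt.Println(describer{}.JSONEncode(), downloadURLTimeout)
}
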
@@ -31,7 +31,6 @@ import (
 "github.com/sirupsen/logrus"
 "golang.org/x/sync/errgroup"
 corev1api "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"

@@ -468,7 +467,7 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg
 }

 if request.Spec.ResourcePolicy != nil && request.Spec.ResourcePolicy.Kind == resourcepolicies.ConfigmapRefType {
-policiesConfigmap := &v1.ConfigMap{}
+policiesConfigmap := &corev1api.ConfigMap{}
 err := b.kbClient.Get(context.Background(), kbclient.ObjectKey{Namespace: request.Namespace, Name: request.Spec.ResourcePolicy.Name}, policiesConfigmap)
 if err != nil {
 request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("failed to get resource policies %s/%s configmap with err %v", request.Namespace, request.Spec.ResourcePolicy.Name, err))

@@ -1125,7 +1124,7 @@ func (b *backupReconciler) recreateVolumeSnapshotContent(vsc snapshotv1api.Volum
 // validation webhook will check whether name and namespace are nil.
 // external-snapshotter needs Source pointing to snapshot and VolumeSnapshot
 // reference's UID to nil to determine the VolumeSnapshotContent is deletable.
-vsc.Spec.VolumeSnapshotRef = v1.ObjectReference{
+vsc.Spec.VolumeSnapshotRef = corev1api.ObjectReference{
 APIVersion: snapshotv1api.SchemeGroupVersion.String(),
 Kind: "VolumeSnapshot",
 Namespace: "ns-" + string(vsc.UID),

@@ -298,8 +298,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) {
 name string
 backup *velerov1api.Backup
 backupLocationNameInBackup string
-backupLocationInApiServer *velerov1api.BackupStorageLocation
-defaultBackupLocationInApiServer *velerov1api.BackupStorageLocation
+backupLocationInAPIServer *velerov1api.BackupStorageLocation
+defaultBackupLocationInAPIServer *velerov1api.BackupStorageLocation
 expectedBackupLocation string
 expectedSuccess bool
 expectedValidationError string

@@ -308,8 +308,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) {
 name: "BackupLocation is specified in backup CR'spec and it can be found in ApiServer",
 backup: builder.ForBackup("velero", "backup-1").Result(),
 backupLocationNameInBackup: "test-backup-location",
-backupLocationInApiServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(),
-defaultBackupLocationInApiServer: builder.ForBackupStorageLocation("velero", "default-location").Result(),
+backupLocationInAPIServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(),
+defaultBackupLocationInAPIServer: builder.ForBackupStorageLocation("velero", "default-location").Result(),
 expectedBackupLocation: "test-backup-location",
 expectedSuccess: true,
 },

@@ -317,8 +317,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) {
 name: "BackupLocation is specified in backup CR'spec and it can't be found in ApiServer",
 backup: builder.ForBackup("velero", "backup-1").Result(),
 backupLocationNameInBackup: "test-backup-location",
-backupLocationInApiServer: nil,
-defaultBackupLocationInApiServer: nil,
+backupLocationInAPIServer: nil,
+defaultBackupLocationInAPIServer: nil,
 expectedSuccess: false,
 expectedValidationError: "an existing backup storage location wasn't specified at backup creation time and the default 'test-backup-location' wasn't found. Please address this issue (see `velero backup-location -h` for options) and create a new backup. Error: backupstoragelocations.velero.io \"test-backup-location\" not found",
 },

@@ -326,8 +326,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) {
 name: "Using default BackupLocation and it can be found in ApiServer",
 backup: builder.ForBackup("velero", "backup-1").Result(),
 backupLocationNameInBackup: "",
-backupLocationInApiServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(),
-defaultBackupLocationInApiServer: builder.ForBackupStorageLocation("velero", "default-location").Result(),
+backupLocationInAPIServer: builder.ForBackupStorageLocation("velero", "test-backup-location").Result(),
+defaultBackupLocationInAPIServer: builder.ForBackupStorageLocation("velero", "default-location").Result(),
 expectedBackupLocation: defaultBackupLocation,
 expectedSuccess: true,
 },

@@ -335,8 +335,8 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) {
 name: "Using default BackupLocation and it can't be found in ApiServer",
 backup: builder.ForBackup("velero", "backup-1").Result(),
 backupLocationNameInBackup: "",
-backupLocationInApiServer: nil,
-defaultBackupLocationInApiServer: nil,
+backupLocationInAPIServer: nil,
+defaultBackupLocationInAPIServer: nil,
 expectedSuccess: false,
 expectedValidationError: fmt.Sprintf("an existing backup storage location wasn't specified at backup creation time and the server default '%s' doesn't exist. Please address this issue (see `velero backup-location -h` for options) and create a new backup. Error: backupstoragelocations.velero.io \"%s\" not found", defaultBackupLocation, defaultBackupLocation),
 },

@@ -353,11 +353,11 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) {

 // objects that should init with client
 objects := make([]runtime.Object, 0)
-if test.backupLocationInApiServer != nil {
-objects = append(objects, test.backupLocationInApiServer)
+if test.backupLocationInAPIServer != nil {
+objects = append(objects, test.backupLocationInAPIServer)
 }
-if test.defaultBackupLocationInApiServer != nil {
-objects = append(objects, test.defaultBackupLocationInApiServer)
+if test.defaultBackupLocationInAPIServer != nil {
+objects = append(objects, test.defaultBackupLocationInAPIServer)
 }
 fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objects...)

@@ -24,7 +24,6 @@ import (
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
-kuberrs "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"

@@ -174,10 +173,10 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 // attempt to create backup custom resource via API
 err = b.client.Create(ctx, backup, &client.CreateOptions{})
 switch {
-case err != nil && kuberrs.IsAlreadyExists(err):
+case err != nil && apierrors.IsAlreadyExists(err):
 log.Debug("Backup already exists in cluster")
 continue
-case err != nil && !kuberrs.IsAlreadyExists(err):
+case err != nil && !apierrors.IsAlreadyExists(err):
 log.WithError(errors.WithStack(err)).Error("Error syncing backup into cluster")
 continue
 default:

@@ -211,10 +210,10 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request)

 err = b.client.Create(ctx, podVolumeBackup, &client.CreateOptions{})
 switch {
-case err != nil && kuberrs.IsAlreadyExists(err):
+case err != nil && apierrors.IsAlreadyExists(err):
 log.Debug("Pod volume backup already exists in cluster")
 continue
-case err != nil && !kuberrs.IsAlreadyExists(err):
+case err != nil && !apierrors.IsAlreadyExists(err):
 log.WithError(errors.WithStack(err)).Error("Error syncing pod volume backup into cluster")
 continue
 default:

@@ -235,10 +234,10 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 vsClass.ResourceVersion = ""
 err := b.client.Create(ctx, vsClass, &client.CreateOptions{})
 switch {
-case err != nil && kuberrs.IsAlreadyExists(err):
+case err != nil && apierrors.IsAlreadyExists(err):
 log.Debugf("VolumeSnapshotClass %s already exists in cluster", vsClass.Name)
 continue
-case err != nil && !kuberrs.IsAlreadyExists(err):
+case err != nil && !apierrors.IsAlreadyExists(err):
 log.WithError(errors.WithStack(err)).Errorf("Error syncing VolumeSnapshotClass %s into cluster", vsClass.Name)
 continue
 default:

@@ -259,10 +258,10 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 snapCont.ResourceVersion = ""
 err := b.client.Create(ctx, snapCont, &client.CreateOptions{})
 switch {
-case err != nil && kuberrs.IsAlreadyExists(err):
+case err != nil && apierrors.IsAlreadyExists(err):
 log.Debugf("volumesnapshotcontent %s already exists in cluster", snapCont.Name)
 continue
-case err != nil && !kuberrs.IsAlreadyExists(err):
+case err != nil && !apierrors.IsAlreadyExists(err):
 log.WithError(errors.WithStack(err)).Errorf("Error syncing volumesnapshotcontent %s into cluster", snapCont.Name)
 continue
 default:

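Note (not part of the diff): the file above had imported k8s.io/apimachinery/pkg/api/errors twice under two aliases (apierrors and kuberrs); keeping only apierrors fixes ST1019. A minimal sketch of the same create-or-skip pattern with the single alias; handleCreateErr is an illustrative name:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// handleCreateErr mirrors the switch used in the reconciler above:
// skip on AlreadyExists, log other errors, proceed on success.
func handleCreateErr(err error) {
	switch {
	case err != nil && apierrors.IsAlreadyExists(err):
		fmt.Println("already exists in cluster, skipping")
	case err != nil:
		fmt.Println("error syncing into cluster:", err)
	default:
		fmt.Println("created")
	}
}

func main() {
	gr := schema.GroupResource{Group: "velero.io", Resource: "backups"}
	handleCreateErr(apierrors.NewAlreadyExists(gr, "backup-1"))
	handleCreateErr(nil)
}
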
@@ -43,7 +43,7 @@ import (
 "github.com/vmware-tanzu/velero/pkg/util/kube"
 )

-// For unit test to mock function
+// NewUploaderProviderFunc is used for unit test to mock function
 var NewUploaderProviderFunc = provider.NewUploaderProvider

 // PodVolumeBackupReconciler reconciles a PodVolumeBackup object

@@ -314,19 +314,19 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve
 return nil
 }

-func (r *PodVolumeRestoreReconciler) NewRestoreProgressUpdater(pvr *velerov1api.PodVolumeRestore, log logrus.FieldLogger, ctx context.Context) *RestoreProgressUpdater {
-return &RestoreProgressUpdater{pvr, log, ctx, r.Client}
+func (c *PodVolumeRestoreReconciler) NewRestoreProgressUpdater(pvr *velerov1api.PodVolumeRestore, log logrus.FieldLogger, ctx context.Context) *RestoreProgressUpdater {
+return &RestoreProgressUpdater{pvr, log, ctx, c.Client}
 }

 // UpdateProgress which implement ProgressUpdater interface to update pvr progress status
-func (r *RestoreProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) {
-original := r.PodVolumeRestore.DeepCopy()
-r.PodVolumeRestore.Status.Progress = velerov1api.PodVolumeOperationProgress{TotalBytes: p.TotalBytes, BytesDone: p.BytesDone}
-if r.Cli == nil {
-r.Log.Errorf("failed to update restore pod %s volume %s progress with uninitailize client", r.PodVolumeRestore.Spec.Pod.Name, r.PodVolumeRestore.Spec.Volume)
+func (c *RestoreProgressUpdater) UpdateProgress(p *uploader.UploaderProgress) {
+original := c.PodVolumeRestore.DeepCopy()
+c.PodVolumeRestore.Status.Progress = velerov1api.PodVolumeOperationProgress{TotalBytes: p.TotalBytes, BytesDone: p.BytesDone}
+if c.Cli == nil {
+c.Log.Errorf("failed to update restore pod %s volume %s progress with uninitailize client", c.PodVolumeRestore.Spec.Pod.Name, c.PodVolumeRestore.Spec.Volume)
 return
 }
-if err := r.Cli.Patch(r.Ctx, r.PodVolumeRestore, client.MergeFrom(original)); err != nil {
-r.Log.Errorf("update restore pod %s volume %s progress with %v", r.PodVolumeRestore.Spec.Pod.Name, r.PodVolumeRestore.Spec.Volume, err)
+if err := c.Cli.Patch(c.Ctx, c.PodVolumeRestore, client.MergeFrom(original)); err != nil {
+c.Log.Errorf("update restore pod %s volume %s progress with %v", c.PodVolumeRestore.Spec.Pod.Name, c.PodVolumeRestore.Spec.Volume, err)
 }
 }

@@ -36,7 +36,6 @@ import (
 "github.com/vmware-tanzu/velero/pkg/builder"
 "github.com/vmware-tanzu/velero/pkg/metrics"
 "github.com/vmware-tanzu/velero/pkg/util/kube"
-kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
 )

 const (

@@ -151,7 +150,7 @@ func parseCronSchedule(itm *velerov1.Schedule, logger logrus.FieldLogger) (cron.
 return nil, validationErrors
 }

-log := logger.WithField("schedule", kubeutil.NamespaceAndName(itm))
+log := logger.WithField("schedule", kube.NamespaceAndName(itm))

 // adding a recover() around cron.Parse because it panics on empty string and is possible
 // that it panics under other scenarios as well.

@@ -183,7 +182,7 @@ func parseCronSchedule(itm *velerov1.Schedule, logger logrus.FieldLogger) (cron.

 // checkIfBackupInNewOrProgress check whether there are backups created by this schedule still in New or InProgress state
 func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Schedule) bool {
-log := c.logger.WithField("schedule", kubeutil.NamespaceAndName(schedule))
+log := c.logger.WithField("schedule", kube.NamespaceAndName(schedule))
 backupList := &velerov1.BackupList{}
 options := &client.ListOptions{
 Namespace: schedule.Namespace,

@@ -211,7 +210,7 @@ func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Sch
 // ifDue check whether schedule is due to create a new backup.
 func (c *scheduleReconciler) ifDue(schedule *velerov1.Schedule, cronSchedule cron.Schedule) bool {
 isDue, nextRunTime := getNextRunTime(schedule, cronSchedule, c.clock.Now())
-log := c.logger.WithField("schedule", kubeutil.NamespaceAndName(schedule))
+log := c.logger.WithField("schedule", kube.NamespaceAndName(schedule))

 if !isDue {
 log.WithField("nextRunTime", nextRunTime).Debug("Schedule is not due, skipping")

@@ -122,19 +122,19 @@ type OperationsForBackup struct {
 ErrsSinceUpdate []string
 }

-func (in *OperationsForBackup) DeepCopy() *OperationsForBackup {
-if in == nil {
+func (m *OperationsForBackup) DeepCopy() *OperationsForBackup {
+if m == nil {
 return nil
 }
 out := new(OperationsForBackup)
-in.DeepCopyInto(out)
+m.DeepCopyInto(out)
 return out
 }

-func (in *OperationsForBackup) DeepCopyInto(out *OperationsForBackup) {
-*out = *in
-if in.Operations != nil {
-in, out := &in.Operations, &out.Operations
+func (m *OperationsForBackup) DeepCopyInto(out *OperationsForBackup) {
+*out = *m
+if m.Operations != nil {
+in, out := &m.Operations, &out.Operations
 *out = make([]*itemoperation.BackupOperation, len(*in))
 for i := range *in {
 if (*in)[i] != nil {

@@ -144,17 +144,17 @@ func (in *OperationsForBackup) DeepCopyInto(out *OperationsForBackup) {
 }
 }
 }
-if in.ErrsSinceUpdate != nil {
-in, out := &in.ErrsSinceUpdate, &out.ErrsSinceUpdate
+if m.ErrsSinceUpdate != nil {
+in, out := &m.ErrsSinceUpdate, &out.ErrsSinceUpdate
 *out = make([]string, len(*in))
 copy(*out, *in)
 }
 }

-func (o *OperationsForBackup) uploadProgress(backupStore persistence.BackupStore, backupName string) error {
-if len(o.Operations) > 0 {
+func (m *OperationsForBackup) uploadProgress(backupStore persistence.BackupStore, backupName string) error {
+if len(m.Operations) > 0 {
 var backupItemOperations *bytes.Buffer
-backupItemOperations, errs := encode.EncodeToJSONGzip(o.Operations, "backup item operations list")
+backupItemOperations, errs := encode.EncodeToJSONGzip(m.Operations, "backup item operations list")
 if errs != nil {
 return errors.Wrap(errs[0], "error encoding item operations json")
 }

@@ -163,7 +163,7 @@ func (o *OperationsForBackup) uploadProgress(backupStore persistence.BackupStore
 return errors.Wrap(err, "error uploading item operations json")
 }
 }
-o.ChangesSinceUpdate = false
-o.ErrsSinceUpdate = nil
+m.ChangesSinceUpdate = false
+m.ErrsSinceUpdate = nil
 return nil
 }

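Note (not part of the diff): the hunks above unify the receivers (in/o -> m) for ST1016 without changing the hand-written deep-copy logic. A self-contained sketch of that DeepCopy/DeepCopyInto pattern with one receiver name; the Ops type is illustrative:

package main

import "fmt"

type Ops struct {
	Errs []string
}

func (m *Ops) DeepCopy() *Ops {
	if m == nil {
		return nil
	}
	out := new(Ops)
	m.DeepCopyInto(out)
	return out
}

func (m *Ops) DeepCopyInto(out *Ops) {
	*out = *m
	if m.Errs != nil {
		// Shadowing in/out locally keeps the slice copy idiom readable.
		in, out := &m.Errs, &out.Errs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

func main() {
	a := &Ops{Errs: []string{"x"}}
	b := a.DeepCopy()
	b.Errs[0] = "y"
	fmt.Println(a.Errs[0], b.Errs[0]) // x y — the copies are independent
}
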
@@ -122,19 +122,19 @@ type OperationsForRestore struct {
 ErrsSinceUpdate []string
 }

-func (in *OperationsForRestore) DeepCopy() *OperationsForRestore {
-if in == nil {
+func (m *OperationsForRestore) DeepCopy() *OperationsForRestore {
+if m == nil {
 return nil
 }
 out := new(OperationsForRestore)
-in.DeepCopyInto(out)
+m.DeepCopyInto(out)
 return out
 }

-func (in *OperationsForRestore) DeepCopyInto(out *OperationsForRestore) {
-*out = *in
-if in.Operations != nil {
-in, out := &in.Operations, &out.Operations
+func (m *OperationsForRestore) DeepCopyInto(out *OperationsForRestore) {
+*out = *m
+if m.Operations != nil {
+in, out := &m.Operations, &out.Operations
 *out = make([]*itemoperation.RestoreOperation, len(*in))
 for i := range *in {
 if (*in)[i] != nil {

@@ -144,17 +144,17 @@ func (in *OperationsForRestore) DeepCopyInto(out *OperationsForRestore) {
 }
 }
 }
-if in.ErrsSinceUpdate != nil {
-in, out := &in.ErrsSinceUpdate, &out.ErrsSinceUpdate
+if m.ErrsSinceUpdate != nil {
+in, out := &m.ErrsSinceUpdate, &out.ErrsSinceUpdate
 *out = make([]string, len(*in))
 copy(*out, *in)
 }
 }

-func (o *OperationsForRestore) uploadProgress(backupStore persistence.BackupStore, restoreName string) error {
-if len(o.Operations) > 0 {
+func (m *OperationsForRestore) uploadProgress(backupStore persistence.BackupStore, restoreName string) error {
+if len(m.Operations) > 0 {
 var restoreItemOperations *bytes.Buffer
-restoreItemOperations, errs := encode.EncodeToJSONGzip(o.Operations, "restore item operations list")
+restoreItemOperations, errs := encode.EncodeToJSONGzip(m.Operations, "restore item operations list")
 if errs != nil {
 return errors.Wrap(errs[0], "error encoding item operations json")
 }

@@ -163,7 +163,7 @@ func (o *OperationsForRestore) uploadProgress(backupStore persistence.BackupStor
 return errors.Wrap(err, "error uploading item operations json")
 }
 }
-o.ChangesSinceUpdate = false
-o.ErrsSinceUpdate = nil
+m.ChangesSinceUpdate = false
+m.ErrsSinceUpdate = nil
 return nil
 }

@@ -26,7 +26,6 @@ import (
 api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
 proto "github.com/vmware-tanzu/velero/pkg/plugin/generated"
-protobiav1 "github.com/vmware-tanzu/velero/pkg/plugin/generated"
 "github.com/vmware-tanzu/velero/pkg/plugin/velero"
 biav1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v1"
 )

@@ -52,8 +51,8 @@ func (s *BackupItemActionGRPCServer) getImpl(name string) (biav1.BackupItemActio
 }

 func (s *BackupItemActionGRPCServer) AppliesTo(
-ctx context.Context, req *protobiav1.BackupItemActionAppliesToRequest) (
-response *protobiav1.BackupItemActionAppliesToResponse, err error) {
+ctx context.Context, req *proto.BackupItemActionAppliesToRequest) (
+response *proto.BackupItemActionAppliesToResponse, err error) {
 defer func() {
 if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil {
 err = recoveredErr

@@ -70,7 +69,7 @@ func (s *BackupItemActionGRPCServer) AppliesTo(
 return nil, common.NewGRPCError(err)
 }

-return &protobiav1.BackupItemActionAppliesToResponse{
+return &proto.BackupItemActionAppliesToResponse{
 ResourceSelector: &proto.ResourceSelector{
 IncludedNamespaces: resourceSelector.IncludedNamespaces,
 ExcludedNamespaces: resourceSelector.ExcludedNamespaces,

@@ -82,7 +81,7 @@ func (s *BackupItemActionGRPCServer) AppliesTo(
 }

 func (s *BackupItemActionGRPCServer) Execute(
-ctx context.Context, req *protobiav1.ExecuteRequest) (response *protobiav1.ExecuteResponse, err error) {
+ctx context.Context, req *proto.ExecuteRequest) (response *proto.ExecuteResponse, err error) {
 defer func() {
 if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil {
 err = recoveredErr

@@ -121,7 +120,7 @@ func (s *BackupItemActionGRPCServer) Execute(
 }
 }

-res := &protobiav1.ExecuteResponse{
+res := &proto.ExecuteResponse{
 Item: updatedItemJSON,
 }

@@ -31,7 +31,6 @@ import (
 v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
 proto "github.com/vmware-tanzu/velero/pkg/plugin/generated"
-protobiav1 "github.com/vmware-tanzu/velero/pkg/plugin/generated"
 "github.com/vmware-tanzu/velero/pkg/plugin/velero"
 mocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks/backupitemaction/v1"
 velerotest "github.com/vmware-tanzu/velero/pkg/test"

@@ -162,7 +161,7 @@ func TestBackupItemActionGRPCServerExecute(t *testing.T) {
 },
 }}

-req := &protobiav1.ExecuteRequest{
+req := &proto.ExecuteRequest{
 Plugin: "xyz",
 Item: test.item,
 Backup: test.backup,

@@ -216,6 +216,6 @@ func backupResourceIdentifierToProto(id velero.ResourceIdentifier) *proto.Resour

 // This shouldn't be called on the GRPC server since the server won't ever receive this request, as
 // the RestartableBackupItemAction in Velero won't delegate this to the server
-func (c *BackupItemActionGRPCServer) Name() string {
+func (s *BackupItemActionGRPCServer) Name() string {
 return ""
 }

@@ -51,7 +51,7 @@ const (
 PluginKindPluginLister PluginKind = "PluginLister"
 )

-// If there are plugin kinds that are adaptable to newer API versions, list them here.
+// PluginKindsAdaptableTo if there are plugin kinds that are adaptable to newer API versions, list them here.
 // The older (adaptable) version is the key, and the value is the full list of newer
 // plugin kinds that are capable of adapting it.
 var PluginKindsAdaptableTo = map[PluginKind][]PluginKind{

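Note (not part of the diff): the comment rewrite above satisfies stylecheck's doc-comment checks (ST1020/ST1021/ST1022) — a comment on an exported name should begin with that name. A minimal sketch with an illustrative constant:

package main

import "fmt"

// MaxRetries is the number of attempts before giving up.
// (Starting with anything else, e.g. "If retries are needed, ...", gets flagged.)
const MaxRetries = 3

func main() { fmt.Println(MaxRetries) }
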
@@ -266,6 +266,6 @@ func restoreResourceIdentifierToProto(id velero.ResourceIdentifier) *proto.Resou

 // This shouldn't be called on the GRPC server since the server won't ever receive this request, as
 // the RestartableRestoreItemAction in Velero won't delegate this to the server
-func (c *RestoreItemActionGRPCServer) Name() string {
+func (s *RestoreItemActionGRPCServer) Name() string {
 return ""
 }

@@ -211,11 +211,11 @@ func TestExecutePodCommand(t *testing.T) {
 defer streamExecutorFactory.AssertExpectations(t)
 podCommandExecutor.streamExecutorFactory = streamExecutorFactory

-baseUrl, _ := url.Parse("https://some.server")
+baseURL, _ := url.Parse("https://some.server")
 contentConfig := rest.ClientContentConfig{
 GroupVersion: schema.GroupVersion{Group: "", Version: "v1"},
 }
-poster.On("Post").Return(rest.NewRequestWithClient(baseUrl, "/api/v1", contentConfig, nil))
+poster.On("Post").Return(rest.NewRequestWithClient(baseURL, "/api/v1", contentConfig, nil))

 streamExecutor := &mockStreamExecutor{}
 defer streamExecutor.AssertExpectations(t)

@@ -422,7 +422,7 @@ func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, cr
 if err != nil {
 return map[string]string{}, errors.Wrap(err, "error get s3 credentials")
 }
-result[udmrepo.StoreOptionS3KeyId] = credValue.AccessKeyID
+result[udmrepo.StoreOptionS3KeyID] = credValue.AccessKeyID
 result[udmrepo.StoreOptionS3Provider] = credValue.ProviderName
 result[udmrepo.StoreOptionS3SecretKey] = credValue.SecretAccessKey
 result[udmrepo.StoreOptionS3Token] = credValue.SessionToken

@@ -467,35 +467,35 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo
 region := config["region"]

 if backendType == repoconfig.AWSBackend {
-s3Url := config["s3Url"]
-disableTls := false
+s3URL := config["s3Url"]
+disableTLS := false

 var err error
-if s3Url == "" {
+if s3URL == "" {
 region, err = getS3BucketRegion(bucket)
 if err != nil {
 return map[string]string{}, errors.Wrap(err, "error get s3 bucket region")
 }

-s3Url = fmt.Sprintf("s3-%s.amazonaws.com", region)
-disableTls = false
+s3URL = fmt.Sprintf("s3-%s.amazonaws.com", region)
+disableTLS = false
 } else {
-url, err := url.Parse(s3Url)
+url, err := url.Parse(s3URL)
 if err != nil {
-return map[string]string{}, errors.Wrapf(err, "error to parse s3Url %s", s3Url)
+return map[string]string{}, errors.Wrapf(err, "error to parse s3Url %s", s3URL)
 }

 if url.Path != "" && url.Path != "/" {
-return map[string]string{}, errors.Errorf("path is not expected in s3Url %s", s3Url)
+return map[string]string{}, errors.Errorf("path is not expected in s3Url %s", s3URL)
 }

-s3Url = url.Host
-disableTls = (url.Scheme == "http")
+s3URL = url.Host
+disableTLS = (url.Scheme == "http")
 }

-result[udmrepo.StoreOptionS3Endpoint] = strings.Trim(s3Url, "/")
-result[udmrepo.StoreOptionS3DisableTlsVerify] = config["insecureSkipTLSVerify"]
-result[udmrepo.StoreOptionS3DisableTls] = strconv.FormatBool(disableTls)
+result[udmrepo.StoreOptionS3Endpoint] = strings.Trim(s3URL, "/")
+result[udmrepo.StoreOptionS3DisableTLSVerify] = config["insecureSkipTLSVerify"]
+result[udmrepo.StoreOptionS3DisableTLS] = strconv.FormatBool(disableTLS)
 } else if backendType == repoconfig.AzureBackend {
 domain, err := getAzureStorageDomain(config)
 if err != nil {

@@ -36,7 +36,7 @@ func (c *S3Backend) Setup(ctx context.Context, flags map[string]string) error {
 return err
 }

-c.options.AccessKeyID, err = mustHaveString(udmrepo.StoreOptionS3KeyId, flags)
+c.options.AccessKeyID, err = mustHaveString(udmrepo.StoreOptionS3KeyID, flags)
 if err != nil {
 return err
 }

@@ -49,8 +49,8 @@ func (c *S3Backend) Setup(ctx context.Context, flags map[string]string) error {
 c.options.Endpoint = optionalHaveString(udmrepo.StoreOptionS3Endpoint, flags)
 c.options.Region = optionalHaveString(udmrepo.StoreOptionOssRegion, flags)
 c.options.Prefix = optionalHaveString(udmrepo.StoreOptionPrefix, flags)
-c.options.DoNotUseTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTls, flags)
-c.options.DoNotVerifyTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTlsVerify, flags)
+c.options.DoNotUseTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTLS, flags)
+c.options.DoNotVerifyTLS = optionalHaveBool(ctx, udmrepo.StoreOptionS3DisableTLSVerify, flags)
 c.options.SessionToken = optionalHaveString(udmrepo.StoreOptionS3Token, flags)

 c.options.Limits = setupLimits(ctx, flags)

@@ -41,13 +41,13 @@ func TestS3Setup(t *testing.T) {
 flags: map[string]string{
 udmrepo.StoreOptionOssBucket: "fake-bucket",
 },
-expectedErr: "key " + udmrepo.StoreOptionS3KeyId + " not found",
+expectedErr: "key " + udmrepo.StoreOptionS3KeyID + " not found",
 },
 {
 name: "must have access key",
 flags: map[string]string{
 udmrepo.StoreOptionOssBucket: "fake-bucket",
-udmrepo.StoreOptionS3KeyId: "fake-key-id",
+udmrepo.StoreOptionS3KeyID: "fake-key-id",
 },
 expectedErr: "key " + udmrepo.StoreOptionS3SecretKey + " not found",
 },

@@ -35,13 +35,13 @@ const (
 GenOptionOwnerName = "username"
 GenOptionOwnerDomain = "domainname"

-StoreOptionS3KeyId = "accessKeyID"
+StoreOptionS3KeyID = "accessKeyID"
 StoreOptionS3Provider = "providerName"
 StoreOptionS3SecretKey = "secretAccessKey"
 StoreOptionS3Token = "sessionToken"
 StoreOptionS3Endpoint = "endpoint"
-StoreOptionS3DisableTls = "doNotUseTLS"
-StoreOptionS3DisableTlsVerify = "skipTLSVerify"
+StoreOptionS3DisableTLS = "doNotUseTLS"
+StoreOptionS3DisableTLSVerify = "skipTLSVerify"

 StoreOptionAzureKey = "storageKey"
 StoreOptionAzureDomain = "storageDomain"

@@ -47,8 +47,8 @@ type backupStatusLine struct {

 // GetSnapshotID runs provided 'restic snapshots' command to get the ID of a snapshot
 // and an error if a unique snapshot cannot be identified.
-func GetSnapshotID(snapshotIdCmd *Command) (string, error) {
-stdout, stderr, err := exec.RunCommand(snapshotIdCmd.Cmd())
+func GetSnapshotID(snapshotIDCmd *Command) (string, error) {
+stdout, stderr, err := exec.RunCommand(snapshotIDCmd.Cmd())
 if err != nil {
 return "", errors.Wrapf(err, "error running command, stderr=%s", stderr)
 }

@@ -63,7 +63,7 @@ func GetSnapshotID(snapshotIdCmd *Command) (string, error) {
 }

 if len(snapshots) != 1 {
-return "", errors.Errorf("expected one matching snapshot by command: %s, got %d", snapshotIdCmd.String(), len(snapshots))
+return "", errors.Errorf("expected one matching snapshot by command: %s, got %d", snapshotIDCmd.String(), len(snapshots))
 }

 return snapshots[0].ShortID, nil

@@ -33,7 +33,7 @@ import (
 )

 const (
-DELIMITER_VALUE = ","
+delimiterValue = ","
 )

 // ChangeImageNameAction updates a deployment or Pod's image name

@@ -194,13 +194,13 @@ func (a *ChangeImageNameAction) isImageReplaceRuleExist(log *logrus.Entry, oldIm
 //"case3":"abc:test,edf:test"
 //"case4":"1.1.1.1:5000/abc:test,2.2.2.2:3000/edf:test"
 for _, row := range cm.Data {
-if !strings.Contains(row, DELIMITER_VALUE) {
+if !strings.Contains(row, delimiterValue) {
 continue
 }
-if strings.Contains(oldImageName, strings.TrimSpace(row[0:strings.Index(row, DELIMITER_VALUE)])) && len(row[strings.Index(row, DELIMITER_VALUE):]) > len(DELIMITER_VALUE) {
+if strings.Contains(oldImageName, strings.TrimSpace(row[0:strings.Index(row, delimiterValue)])) && len(row[strings.Index(row, delimiterValue):]) > len(delimiterValue) {
 log.Infoln("match specific case:", row)
-oldImagePart := strings.TrimSpace(row[0:strings.Index(row, DELIMITER_VALUE)])
-newImagePart := strings.TrimSpace(row[strings.Index(row, DELIMITER_VALUE)+len(DELIMITER_VALUE):])
+oldImagePart := strings.TrimSpace(row[0:strings.Index(row, delimiterValue)])
+newImagePart := strings.TrimSpace(row[strings.Index(row, delimiterValue)+len(delimiterValue):])
 newImageName = strings.Replace(oldImageName, oldImagePart, newImagePart, -1)
 return true, newImageName, nil
 }

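Note (not part of the diff): the DELIMITER_VALUE rename above is ST1003 again, applied to constants — Go does not use SCREAMING_SNAKE_CASE; constants are named like any other identifier. A minimal sketch:

package main

import (
	"fmt"
	"strings"
)

// delimiterValue, not DELIMITER_VALUE: unexported constants are camelCase.
const delimiterValue = ","

func main() {
	fmt.Println(strings.Split("abc:test,edf:test", delimiterValue))
}
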
@@ -24,8 +24,6 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 corev1 "k8s.io/api/core/v1"
-corev1api "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/runtime"

@@ -43,7 +41,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
 tests := []struct {
 name string
 podOrObj interface{}
-configMap *corev1api.ConfigMap
+configMap *corev1.ConfigMap
 freshedImageName string
 imageNameSlice []string
 want interface{}

@@ -52,7 +50,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
 {
 name: "a valid mapping with spaces for a new image repository is applied correctly",
 podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
-Containers(&v1.Container{
+Containers(&corev1.Container{
 Name: "container1",
 Image: "1.1.1.1:5000/abc:test",
 }).Result(),

@@ -67,7 +65,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
 {
 name: "a valid mapping for a new image repository is applied correctly",
 podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
-Containers(&v1.Container{
+Containers(&corev1.Container{
 Name: "container2",
 Image: "1.1.1.1:5000/abc:test",
 }).Result(),

@@ -82,7 +80,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
 {
 name: "a valid mapping for a new image name is applied correctly",
 podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
-Containers(&v1.Container{
+Containers(&corev1.Container{
 Name: "container3",
 Image: "1.1.1.1:5000/abc:test",
 }).Result(),

@@ -97,7 +95,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
 {
 name: "a valid mapping for a new image repository port is applied correctly",
 podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
-Containers(&v1.Container{
+Containers(&corev1.Container{
 Name: "container4",
 Image: "1.1.1.1:5000/abc:test",
 }).Result(),

@@ -112,7 +110,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
 {
 name: "a valid mapping for a new image tag is applied correctly",
 podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
-Containers(&v1.Container{
+Containers(&corev1.Container{
 Name: "container5",
 Image: "1.1.1.1:5000/abc:test",
 }).Result(),

@@ -127,7 +125,7 @@ func TestChangeImageRepositoryActionExecute(t *testing.T) {
 {
 name: "image name contains more than one part that matching the replacing words.",
 podOrObj: builder.ForPod("default", "pod1").ObjectMeta().
-Containers(&v1.Container{
+Containers(&corev1.Container{
 Name: "container6",
 Image: "dev/image1:dev",
 }).Result(),

@@ -167,8 +167,8 @@ func TestChangePVCNodeSelectorActionExecute(t *testing.T) {
 res, err := a.Execute(input)

 // Make sure mapped selected-node exists.
-log_output := buf.String()
-assert.Equal(t, strings.Contains(log_output, "Selected-node's mapped node doesn't exist"), false)
+logOutput := buf.String()
+assert.Equal(t, strings.Contains(logOutput, "Selected-node's mapped node doesn't exist"), false)

 // validate for both error and non-error cases
 switch {

@ -66,7 +66,7 @@ import (
|
|||
"github.com/vmware-tanzu/velero/pkg/util/collections"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/kube"
|
||||
. "github.com/vmware-tanzu/velero/pkg/util/results"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/results"
|
||||
"github.com/vmware-tanzu/velero/pkg/volume"
|
||||
)
|
||||
|
||||
|
@ -80,12 +80,12 @@ type Restorer interface {
|
|||
Restore(req *Request,
|
||||
actions []riav2.RestoreItemAction,
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter,
|
||||
) (Result, Result)
|
||||
) (results.Result, results.Result)
|
||||
RestoreWithResolvers(
|
||||
req *Request,
|
||||
restoreItemActionResolver framework.RestoreItemActionResolverV2,
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter,
|
||||
) (Result, Result)
|
||||
) (results.Result, results.Result)
|
||||
}
|
||||
|
||||
// kubernetesRestorer implements Restorer for restoring into a Kubernetes cluster.
|
||||
|
@ -134,11 +134,11 @@ func NewKubernetesRestorer(
|
|||
resourcePriorities: resourcePriorities,
|
||||
logger: logger,
|
||||
pvRenamer: func(string) (string, error) {
|
||||
veleroCloneUuid, err := uuid.NewRandom()
|
||||
veleroCloneUUID, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
return "", errors.WithStack(err)
|
||||
}
|
||||
veleroCloneName := "velero-clone-" + veleroCloneUuid.String()
|
||||
veleroCloneName := "velero-clone-" + veleroCloneUUID.String()
|
||||
return veleroCloneName, nil
|
||||
},
|
||||
fileSystem: filesystem.NewFileSystem(),
|
||||
|
@ -156,7 +156,7 @@ func (kr *kubernetesRestorer) Restore(
|
|||
req *Request,
|
||||
actions []riav2.RestoreItemAction,
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter,
|
||||
) (Result, Result) {
|
||||
) (results.Result, results.Result) {
|
||||
resolver := framework.NewRestoreItemActionResolverV2(actions)
|
||||
return kr.RestoreWithResolvers(req, resolver, volumeSnapshotterGetter)
|
||||
}
|
||||
|
@ -165,7 +165,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
|
|||
req *Request,
|
||||
restoreItemActionResolver framework.RestoreItemActionResolverV2,
|
||||
volumeSnapshotterGetter VolumeSnapshotterGetter,
|
||||
) (Result, Result) {
|
||||
) (results.Result, results.Result) {
|
||||
// metav1.LabelSelectorAsSelector converts a nil LabelSelector to a
|
||||
// Nothing Selector, i.e. a selector that matches nothing. We want
|
||||
// a selector that matches everything. This can be accomplished by
|
||||
|
@ -180,7 +180,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
|
|||
for _, s := range req.Restore.Spec.OrLabelSelectors {
|
||||
labelAsSelector, err := metav1.LabelSelectorAsSelector(s)
|
||||
if err != nil {
|
||||
return Result{}, Result{Velero: []string{err.Error()}}
|
||||
return results.Result{}, results.Result{Velero: []string{err.Error()}}
|
||||
}
|
||||
OrSelectors = append(OrSelectors, labelAsSelector)
|
||||
}
|
||||
|
@ -188,7 +188,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
|
|||
|
||||
selector, err := metav1.LabelSelectorAsSelector(ls)
|
||||
if err != nil {
|
||||
return Result{}, Result{Velero: []string{err.Error()}}
|
||||
return results.Result{}, results.Result{Velero: []string{err.Error()}}
|
||||
}
|
||||
|
||||
// Get resource includes-excludes.
|
||||
|
@ -216,7 +216,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
|
|||
|
||||
resolvedActions, err := restoreItemActionResolver.ResolveActions(kr.discoveryHelper, kr.logger)
|
||||
if err != nil {
|
||||
return Result{}, Result{Velero: []string{err.Error()}}
|
||||
return results.Result{}, results.Result{Velero: []string{err.Error()}}
|
||||
}
|
||||
|
||||
podVolumeTimeout := kr.podVolumeTimeout
|
||||
|
@ -239,13 +239,13 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
|
|||
if kr.podVolumeRestorerFactory != nil {
|
||||
podVolumeRestorer, err = kr.podVolumeRestorerFactory.NewRestorer(ctx, req.Restore)
|
||||
if err != nil {
|
||||
return Result{}, Result{Velero: []string{err.Error()}}
|
||||
return results.Result{}, results.Result{Velero: []string{err.Error()}}
|
||||
}
|
||||
}
|
||||
|
||||
resourceRestoreHooks, err := hook.GetRestoreHooksFromSpec(&req.Restore.Spec.Hooks)
|
||||
if err != nil {
|
||||
return Result{}, Result{Velero: []string{err.Error()}}
|
||||
return results.Result{}, results.Result{Velero: []string{err.Error()}}
|
||||
}
|
||||
hooksCtx, hooksCancelFunc := go_context.WithCancel(go_context.Background())
|
||||
waitExecHookHandler := &hook.DefaultWaitExecHookHandler{
|
||||
|
@ -390,8 +390,8 @@ type progressUpdate struct {
|
|||
totalItems, itemsRestored int
|
||||
}
|
||||
|
||||
func (ctx *restoreContext) execute() (Result, Result) {
|
||||
warnings, errs := Result{}, Result{}
|
||||
func (ctx *restoreContext) execute() (results.Result, results.Result) {
|
||||
warnings, errs := results.Result{}, results.Result{}
|
||||
|
||||
ctx.log.Infof("Starting restore of backup %s", kube.NamespaceAndName(ctx.backup))
|
||||
|
||||
|
@ -482,7 +482,7 @@ func (ctx *restoreContext) execute() (Result, Result) {
|
|||
}
|
||||
|
||||
for _, selectedResource := range crdResourceCollection {
|
||||
var w, e Result
|
||||
var w, e results.Result
|
||||
// Restore this resource
|
||||
processedItems, w, e = ctx.processSelectedResource(
|
||||
selectedResource,
|
||||
|
@ -514,7 +514,7 @@ func (ctx *restoreContext) execute() (Result, Result) {
|
|||
}
|
||||
|
||||
for _, selectedResource := range selectedResourceCollection {
|
||||
var w, e Result
|
||||
var w, e results.Result
|
||||
// Restore this resource
|
||||
processedItems, w, e = ctx.processSelectedResource(
|
||||
selectedResource,
|
||||
|
@ -590,8 +590,8 @@ func (ctx *restoreContext) processSelectedResource(
|
|||
processedItems int,
|
||||
existingNamespaces sets.String,
|
||||
update chan progressUpdate,
|
||||
) (int, Result, Result) {
|
||||
warnings, errs := Result{}, Result{}
|
||||
) (int, results.Result, results.Result) {
|
||||
warnings, errs := results.Result{}, results.Result{}
|
||||
groupResource := schema.ParseGroupResource(selectedResource.resource)
|
||||
|
||||
for namespace, selectedItems := range selectedResource.selectedItemsByNamespace {
|
||||
|
@ -939,8 +939,8 @@ func getResourceID(groupResource schema.GroupResource, namespace, name string) s
|
|||
return fmt.Sprintf("%s/%s/%s", groupResource.String(), namespace, name)
|
||||
}
|
||||
|
||||
func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (Result, Result, bool) {
|
||||
warnings, errs := Result{}, Result{}
|
||||
func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (results.Result, results.Result, bool) {
|
||||
warnings, errs := results.Result{}, results.Result{}
|
||||
// itemExists bool is used to determine whether to include this item in the "wait for additional items" list
|
||||
itemExists := false
|
||||
resourceID := getResourceID(groupResource, namespace, obj.GetName())
|
||||
|
@ -1281,7 +1281,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
|
|||
if err != nil {
|
||||
errs.Add(namespace, errors.Wrapf(err, "error verifying additional items are ready to use"))
|
||||
} else if !available {
|
||||
errs.Add(namespace, fmt.Errorf("Additional items for %s are not ready to use.", resourceID))
|
||||
errs.Add(namespace, fmt.Errorf("additional items for %s are not ready to use", resourceID))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1548,7 +1548,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
|
|||
if err != nil {
|
||||
errs.Add(namespace, errors.Wrapf(err, "error verifying custom resource definition is ready to use"))
|
||||
} else if !available {
|
||||
errs.Add(namespace, fmt.Errorf("CRD %s is not available to use for custom resources.", name))
|
||||
errs.Add(namespace, fmt.Errorf("the CRD %s is not available to use for custom resources", name))
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1911,8 +1911,8 @@ func (ctx *restoreContext) getOrderedResourceCollection(
    processedResources sets.String,
    resourcePriorities Priorities,
    includeAllResources bool,
) ([]restoreableResource, sets.String, Result, Result) {
    var warnings, errs Result
) ([]restoreableResource, sets.String, results.Result, results.Result) {
    var warnings, errs results.Result
    // Iterate through an ordered list of resources to restore, checking each
    // one to see if it should be restored. Note that resources *may* be in this
    // list twice, i.e. once due to being a prioritized resource, and once due

@@ -2012,8 +2012,8 @@ func (ctx *restoreContext) getOrderedResourceCollection(
// getSelectedRestoreableItems applies Kubernetes selectors on individual items
// of each resource type to create a list of items which will be actually
// restored.
func (ctx *restoreContext) getSelectedRestoreableItems(resource, targetNamespace, originalNamespace string, items []string) (restoreableResource, Result, Result) {
    warnings, errs := Result{}, Result{}
func (ctx *restoreContext) getSelectedRestoreableItems(resource, targetNamespace, originalNamespace string, items []string) (restoreableResource, results.Result, results.Result) {
    warnings, errs := results.Result{}, results.Result{}

    restorable := restoreableResource{
        resource: resource,

@@ -2112,7 +2112,7 @@ func removeRestoreLabels(obj metav1.Object) {
}

// updates the backup/restore labels
func (ctx *restoreContext) updateBackupRestoreLabels(fromCluster, fromClusterWithLabels *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs Result) {
func (ctx *restoreContext) updateBackupRestoreLabels(fromCluster, fromClusterWithLabels *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs results.Result) {
    patchBytes, err := generatePatch(fromCluster, fromClusterWithLabels)
    if err != nil {
        ctx.log.Errorf("error generating patch for %s %s: %v", fromCluster.GroupVersionKind().Kind, kube.NamespaceAndName(fromCluster), err)

@@ -2140,7 +2140,7 @@ func (ctx *restoreContext) updateBackupRestoreLabels(fromCluster, fromClusterWit

// function to process existingResourcePolicy as update, tries to patch the diff between in-cluster and restore obj first
// if the patch fails then tries to update the backup/restore labels for the in-cluster version
func (ctx *restoreContext) processUpdateResourcePolicy(fromCluster, fromClusterWithLabels, obj *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs Result) {
func (ctx *restoreContext) processUpdateResourcePolicy(fromCluster, fromClusterWithLabels, obj *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs results.Result) {
    ctx.log.Infof("restore API has existingResourcePolicy defined as update , executing restore workflow accordingly for changed resource %s %s ", obj.GroupVersionKind().Kind, kube.NamespaceAndName(fromCluster))
    ctx.log.Infof("attempting patch on %s %q", fromCluster.GetKind(), fromCluster.GetName())
    // remove restore labels so that we apply the latest backup/restore names on the object via patch
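Lint note: the repeated Result -> results.Result rewrites qualify the type with its package instead of relying on the bare name (stylecheck ST1001 discourages the dot imports that make unqualified cross-package names possible; whether a dot import was the trigger is not visible in these hunks). A hedged sketch of the resulting style, where collect is an illustrative stand-in and only the results package path is real:

package restore

import (
    "errors"

    "github.com/vmware-tanzu/velero/pkg/util/results"
)

// collect mirrors the functions above: every reference to Result now names
// the results package explicitly.
func collect() (results.Result, results.Result) {
    warnings, errs := results.Result{}, results.Result{}
    warnings.Add("ns-1", errors.New("example warning"))
    return warnings, errs
}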
@@ -53,9 +53,6 @@ import (
    "github.com/vmware-tanzu/velero/pkg/podvolume"
    uploadermocks "github.com/vmware-tanzu/velero/pkg/podvolume/mocks"
    "github.com/vmware-tanzu/velero/pkg/test"
    testutil "github.com/vmware-tanzu/velero/pkg/test"
    velerotest "github.com/vmware-tanzu/velero/pkg/test"
    "github.com/vmware-tanzu/velero/pkg/util/kube"
    kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
    . "github.com/vmware-tanzu/velero/pkg/util/results"
    "github.com/vmware-tanzu/velero/pkg/volume"

@@ -3203,13 +3200,13 @@ func TestIsCompleted(t *testing.T) {
            groupResource: schema.GroupResource{Group: "", Resource: "namespaces"},
        },
    }
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            u := testutil.UnstructuredOrDie(test.content)
            backup, err := isCompleted(u, test.groupResource)
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            u := test.UnstructuredOrDie(tt.content)
            backup, err := isCompleted(u, tt.groupResource)

            if assert.Equal(t, test.expectedErr, err != nil) {
                assert.Equal(t, test.expected, backup)
            if assert.Equal(t, tt.expectedErr, err != nil) {
                assert.Equal(t, tt.expected, backup)
            }
        })
    }

@@ -3392,7 +3389,7 @@ func newHarness(t *testing.T) *harness {

    apiServer := test.NewAPIServer(t)
    log := logrus.StandardLogger()
    kbClient := velerotest.NewFakeControllerRuntimeClient(t)
    kbClient := test.NewFakeControllerRuntimeClient(t)

    discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, log)
    require.NoError(t, err)

@@ -3405,7 +3402,7 @@ func newHarness(t *testing.T) *harness {
        namespaceClient: apiServer.KubeClient.CoreV1().Namespaces(),
        resourceTerminatingTimeout: time.Minute,
        logger: log,
        fileSystem: testutil.NewFakeFileSystem(),
        fileSystem: test.NewFakeFileSystem(),

        // unsupported
        podVolumeRestorerFactory: nil,

@@ -3452,9 +3449,9 @@ func Test_resetVolumeBindingInfo(t *testing.T) {
            name: "PVs that are bound have their binding and dynamic provisioning annotations removed",
            obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolume").
                WithName("pv-1").WithAnnotations(
                    kube.KubeAnnBindCompleted,
                    kube.KubeAnnBoundByController,
                    kube.KubeAnnDynamicallyProvisioned,
                    kubeutil.KubeAnnBindCompleted,
                    kubeutil.KubeAnnBoundByController,
                    kubeutil.KubeAnnDynamicallyProvisioned,
                ).WithSpecField("claimRef", map[string]interface{}{
                    "namespace": "ns-1",
                    "name": "pvc-1",

@@ -3462,7 +3459,7 @@ func Test_resetVolumeBindingInfo(t *testing.T) {
                    "resourceVersion": "1"}).Unstructured,
            expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolume").
                WithName("pv-1").
                WithAnnotations(kube.KubeAnnDynamicallyProvisioned).
                WithAnnotations(kubeutil.KubeAnnDynamicallyProvisioned).
                WithSpecField("claimRef", map[string]interface{}{
                    "namespace": "ns-1", "name": "pvc-1"}).Unstructured,
        },

@@ -3470,8 +3467,8 @@ func Test_resetVolumeBindingInfo(t *testing.T) {
            name: "PVCs that are bound have their binding annotations removed, but the volume name stays",
            obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim").
                WithName("pvc-1").WithAnnotations(
                    kube.KubeAnnBindCompleted,
                    kube.KubeAnnBoundByController,
                    kubeutil.KubeAnnBindCompleted,
                    kubeutil.KubeAnnBoundByController,
                ).WithSpecField("volumeName", "pv-1").Unstructured,
            expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim").
                WithName("pvc-1").WithAnnotations().
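Lint note: the import hunk above drops duplicate aliases for the same packages (stylecheck ST1019 reports a package imported more than once), and the table-driven loop renames its range variable from test to tt so it no longer shadows the remaining test package import. A short sketch of the resulting shape; TestSketch and its table are illustrative, while test.UnstructuredOrDie is the real helper used above:

package restore

import (
    "testing"

    "github.com/vmware-tanzu/velero/pkg/test"
)

// TestSketch mirrors the renamed loops: with the range variable called tt,
// the identifier test is free to refer to the single pkg/test import.
func TestSketch(t *testing.T) {
    tests := []struct {
        name    string
        content string
    }{
        {name: "pod", content: `{"apiVersion": "v1", "kind": "Pod"}`},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            obj := test.UnstructuredOrDie(tt.content)
            if obj.GetKind() != "Pod" {
                t.Fatalf("unexpected kind %q", obj.GetKind())
            }
        })
    }
}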
@@ -149,7 +149,7 @@ func (dh *FakeDiscoveryHelper) APIGroups() []metav1.APIGroup {

type FakeServerResourcesInterface struct {
    ResourceList []*metav1.APIResourceList
    ApiGroup []*metav1.APIGroup
    APIGroup []*metav1.APIGroup
    FailedGroups map[schema.GroupVersion]error
    ReturnError error
}

@@ -166,18 +166,18 @@ func (di *FakeServerResourcesInterface) ServerPreferredResources() ([]*metav1.AP

func (di *FakeServerResourcesInterface) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
    if di.ReturnError != nil {
        return di.ApiGroup, di.ResourceList, di.ReturnError
        return di.APIGroup, di.ResourceList, di.ReturnError
    }
    if di.FailedGroups == nil || len(di.FailedGroups) == 0 {
        return di.ApiGroup, di.ResourceList, nil
        return di.APIGroup, di.ResourceList, nil
    }
    return di.ApiGroup, di.ResourceList, &discovery.ErrGroupDiscoveryFailed{Groups: di.FailedGroups}
    return di.APIGroup, di.ResourceList, &discovery.ErrGroupDiscoveryFailed{Groups: di.FailedGroups}
}

func NewFakeServerResourcesInterface(resourceList []*metav1.APIResourceList, apiGroup []*metav1.APIGroup, failedGroups map[schema.GroupVersion]error, returnError error) *FakeServerResourcesInterface {
    helper := &FakeServerResourcesInterface{
        ResourceList: resourceList,
        ApiGroup: apiGroup,
        APIGroup: apiGroup,
        FailedGroups: failedGroups,
        ReturnError: returnError,
    }
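Lint note: the ApiGroup -> APIGroup rename follows stylecheck ST1003: common initialisms such as API, ID, URL, and HTTP keep a uniform case in Go identifiers. A minimal sketch of the convention (the ServerInfo type is illustrative, not from this change):

package main

import "fmt"

// ServerInfo uses ST1003-style names: APIGroup rather than ApiGroup,
// SnapshotID rather than SnapshotId.
type ServerInfo struct {
    APIGroup   string
    SnapshotID string
}

func main() {
    info := ServerInfo{APIGroup: "velero.io/v1", SnapshotID: "abc123"}
    fmt.Printf("%+v\n", info)
}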
@@ -33,7 +33,7 @@ import (
    "github.com/vmware-tanzu/velero/pkg/util/filesystem"
)

// mainly used to make testing more convenient
// ResticBackupCMDFunc and ResticRestoreCMDFunc are mainly used to make testing more convenient
var ResticBackupCMDFunc = restic.BackupCommand
var ResticRestoreCMDFunc = restic.RestoreCommand

@@ -144,13 +144,13 @@ func (rp *resticProvider) RunBackup(
        return "", false, errors.WithStack(fmt.Errorf("error running restic backup command %s with error: %v stderr: %v", backupCmd.String(), err, stderrBuf))
    }
    // GetSnapshotID
    snapshotIdCmd := restic.GetSnapshotCommand(rp.repoIdentifier, rp.credentialsFile, tags)
    snapshotIdCmd.Env = rp.cmdEnv
    snapshotIdCmd.CACertFile = rp.caCertFile
    snapshotIDCmd := restic.GetSnapshotCommand(rp.repoIdentifier, rp.credentialsFile, tags)
    snapshotIDCmd.Env = rp.cmdEnv
    snapshotIDCmd.CACertFile = rp.caCertFile
    if len(rp.extraFlags) != 0 {
        snapshotIdCmd.ExtraFlags = append(snapshotIdCmd.ExtraFlags, rp.extraFlags...)
        snapshotIDCmd.ExtraFlags = append(snapshotIDCmd.ExtraFlags, rp.extraFlags...)
    }
    snapshotID, err := restic.GetSnapshotID(snapshotIdCmd)
    snapshotID, err := restic.GetSnapshotID(snapshotIDCmd)
    if err != nil {
        return "", false, errors.WithStack(fmt.Errorf("error getting snapshot id with error: %v", err))
    }
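Lint note: two rules meet in this file. The local snapshotIdCmd -> snapshotIDCmd rename is ST1003 again, and the expanded comment on the exported command funcs satisfies ST1022, which expects the documentation of an exported identifier to begin with its name. A hedged sketch, where BackupCommandFunc is an illustrative stand-in rather than the real restic helper:

package main

import "fmt"

// BackupCommandFunc is mainly used to make testing more convenient: tests can
// swap in a stub. Per ST1022, this comment begins with the identifier's name.
var BackupCommandFunc = func(repo string) string {
    return fmt.Sprintf("restic backup --repo=%s", repo)
}

func main() {
    fmt.Println(BackupCommandFunc("s3:s3.amazonaws.com/bucket/restic"))
}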