Fix usestdlibvars and whitespace linters issues. (#6162)

Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
Co-authored-by: Xun Jiang <blackpiglet@gmail.com>

parent 14f31eed8c
commit 1fd28e8a36
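For context, the two linters enabled by this change do the following: usestdlibvars replaces string literals such as "GET" with the equivalent standard-library constants (e.g. http.MethodGet), and whitespace flags unnecessary blank lines directly after an opening brace or before a closing one. A minimal, hypothetical sketch of both findings (illustration only, not taken from the Velero codebase):

package main

import (
    "fmt"
    "net/http"
)

func main() {
    // usestdlibvars: prefer http.MethodGet over the raw "GET" literal.
    req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
    if err != nil {
        panic(err)
    }
    fmt.Println(req.Method)
    // whitespace: a blank line immediately after "func main() {" or
    // immediately before this closing brace would be reported.
}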
@@ -0,0 +1 @@
+Fix usestdlibvars and whitespace linters issues.
@@ -300,6 +300,8 @@ linters:
     - typecheck
     - unparam
     - unused
+    - usestdlibvars
+    - whitespace
   fast: false
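Assuming the standard golangci-lint workflow (an assumption; the exact invocation is not part of this commit), the newly enabled checks can be exercised locally with:

golangci-lint run ./...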
@@ -35,7 +35,6 @@ var release_regex *regexp.Regexp = regexp.MustCompile(`^v(?P<major>[[:digit:]]+)
 // Calling it with --verify will verify whether or not the VELERO_VERSION environment variable is a valid version string, without parsing for its components.
 // Calling it without --verify will try to parse the version into its component pieces.
 func main() {
-
     velero_version := os.Getenv("VELERO_VERSION")

     submatches := reSubMatchMap(release_regex, velero_version)
@@ -121,7 +121,6 @@ func InvokeDeleteActions(ctx *Context) error {
             // Since we want to keep looping even on errors, log them instead of just returning.
             if err != nil {
                 itemLog.WithError(err).Error("plugin error")
-
             }
         }
     }
@@ -283,7 +283,6 @@ func (h *NoOpItemHookHandler) HandleHooks(
     resourceHooks []ResourceHook,
     phase hookPhase,
 ) error {
-
     return nil
 }

@@ -121,7 +121,6 @@ func (c *nfsCondition) match(v *structuredVolume) bool {
         return false
     }
     return true
-
 }

 type csiCondition struct {
@@ -39,7 +39,6 @@ func (c *capacityCondition) validate() error {
         return nil
     }
     return errors.Errorf("illegal values for capacity %v", c.capacity)
-
 }

 func (s *storageClassCondition) validate() error {
@@ -528,7 +528,6 @@ func (kb *kubernetesBackupper) FinalizeBackup(log logrus.FieldLogger,
     outBackupFile io.Writer,
     backupItemActionResolver framework.BackupItemActionResolverV2,
     asyncBIAOperations []*itemoperation.BackupOperation) error {
-
     gzw := gzip.NewWriter(outBackupFile)
     defer gzw.Close()
     tw := tar.NewWriter(gzw)
@@ -623,7 +622,6 @@ func (kb *kubernetesBackupper) FinalizeBackup(log logrus.FieldLogger,
                 updateFiles[itemFile.FilePath] = itemFile
             }
         }
-
     }()

     // updated total is computed as "how many items we've backed up so far, plus
@@ -694,7 +692,6 @@ func buildFinalTarball(tr *tar.Reader, tw *tar.Writer, updateFiles map[string]Fi
         }
     }
     return nil
-
 }

 type tarWriter interface {
@@ -150,7 +150,6 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti
             log.Info("Excluding item because resource is excluded")
             return false, itemFiles, nil
         }
-
     }

     if metadata.GetDeletionTimestamp() != nil {
@@ -62,7 +62,6 @@ type kubernetesResource struct {
 // getItemsFromResourceIdentifiers converts ResourceIdentifiers to
 // kubernetesResources
 func (r *itemCollector) getItemsFromResourceIdentifiers(resourceIDs []velero.ResourceIdentifier) []*kubernetesResource {
-
     grResourceIDsMap := make(map[schema.GroupResource][]velero.ResourceIdentifier)
     for _, resourceID := range resourceIDs {
         grResourceIDsMap[resourceID.GroupResource] = append(grResourceIDsMap[resourceID.GroupResource], resourceID)
@@ -82,7 +82,6 @@ CheckVersion:
                     break CheckVersion
                 }
             }
-
         }
     }
     if !supportv1b1 {
@@ -142,7 +141,6 @@ func fetchV1beta1CRD(name string, betaCRDClient apiextv1beta1client.CustomResour
     item := &unstructured.Unstructured{Object: m}

     return item, nil
-
 }

 // hasPreserveUnknownFields determines whether or not a CRD is set to preserve unknown fields or not.
@@ -130,7 +130,6 @@ func (c VeleroConfig) Colorized() bool {
     }

     return colorized
-
 }

 func (c VeleroConfig) CACertFile() string {
@@ -121,7 +121,6 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
                     fmt.Printf("\n\n%s", s)
                 }
             }
-
             }
             cmd.CheckError(err)
         },
@@ -81,7 +81,6 @@ func (o *DownloadOptions) BindFlags(flags *pflag.FlagSet) {
     flags.DurationVar(&o.Timeout, "timeout", o.Timeout, "Maximum time to wait to process download request.")
     flags.BoolVar(&o.InsecureSkipTLSVerify, "insecure-skip-tls-verify", o.InsecureSkipTLSVerify, "If true, the object store's TLS certificate will not be checked for validity. This is insecure and susceptible to man-in-the-middle attacks. Not recommended for production.")
     flags.StringVar(&o.caCertFile, "cacert", o.caCertFile, "Path to a certificate bundle to use when verifying TLS connections.")
-
 }

 func (o *DownloadOptions) Validate(c *cobra.Command, args []string, f client.Factory) error {
@@ -355,7 +355,6 @@ func (o *InstallOptions) Validate(c *cobra.Command, args []string, f client.Fact
     }

     if o.NoDefaultBackupLocation {
-
         if o.BucketName != "" {
             return errors.New("Cannot use both --bucket and --no-default-backup-location at the same time")
         }
@@ -375,7 +374,6 @@ func (o *InstallOptions) Validate(c *cobra.Command, args []string, f client.Fact
         if o.BucketName == "" {
             return errors.New("--bucket is required")
         }
-
     }

     if o.UseVolumeSnapshots {
@@ -57,7 +57,6 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
             cmd.CheckError(o.Complete(f, args))
             cmd.CheckError(o.Validate(c, f, args))
             cmd.CheckError(Run(o))
-
         },
     }
     o.BindFlags(c.Flags())
@@ -123,7 +123,7 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin
         ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
     }

-    httpReq, err := http.NewRequest("GET", created.Status.DownloadURL, nil)
+    httpReq, err := http.NewRequest(http.MethodGet, created.Status.DownloadURL, nil)
     if err != nil {
         return err
     }
@@ -242,7 +242,6 @@ func DescribeBackupStatusInSF(ctx context.Context, kbClient kbclient.Client, d *
     }
     if status.CompletionTimestamp == nil || status.CompletionTimestamp.Time.IsZero() {
         backupStatusInfo["completed"] = "<n/a>"
-
     } else {
         backupStatusInfo["completed"] = status.CompletionTimestamp.Time.String()
     }
@@ -258,7 +257,6 @@ func DescribeBackupStatusInSF(ctx context.Context, kbClient kbclient.Client, d *
     if backup.Status.Phase == velerov1api.BackupPhaseInProgress {
         backupStatusInfo["estimatedTotalItemsToBeBackedUp"] = backup.Status.Progress.TotalItems
         backupStatusInfo["itemsBackedUpSoFar"] = backup.Status.Progress.ItemsBackedUp
-
     } else {
         backupStatusInfo["totalItemsToBeBackedUp"] = backup.Status.Progress.TotalItems
         backupStatusInfo["itemsBackedUp"] = backup.Status.Progress.ItemsBackedUp
@@ -298,7 +296,6 @@ func DescribeBackupStatusInSF(ctx context.Context, kbClient kbclient.Client, d *
         backupStatusInfo["veleroNativeSnapshotsDetail"] = snapshotDetails
         return
     }
-
 }

 func describeBackupResourceListInSF(ctx context.Context, kbClient kbclient.Client, backupStatusInfo map[string]interface{}, backup *velerov1api.Backup, insecureSkipTLSVerify bool, caCertPath string) {
@@ -340,7 +337,6 @@ func describeSnapshotInSF(pvName, snapshotID, volumeType, volumeAZ string, iops
     snapshotInfo["availabilityZone"] = volumeAZ
     snapshotInfo["IOPS"] = iopsString
     snapshotDetails[pvName] = snapshotInfo
-
 }

 // DescribeDeleteBackupRequestsInSF describes delete backup requests in structured format.
@@ -457,7 +453,6 @@ func DescribeVSCInSF(details bool, vsc snapshotv1api.VolumeSnapshotContent, vscD

     if vsc.Status.RestoreSize != nil {
         content["snapshotSize(bytes)"] = *vsc.Status.RestoreSize
-
     }

     if vsc.Status.ReadyToUse != nil {
@@ -118,7 +118,6 @@ func NewBackupReconciler(
     credentialStore credentials.FileStore,
     maxConcurrentK8SConnections int,
 ) *backupReconciler {
-
     b := &backupReconciler{
         ctx:             ctx,
         discoveryHelper: discoveryHelper,
@@ -215,7 +215,6 @@ func (c *backupOperationsReconciler) updateBackupAndOperationsJSON(
     operations *itemoperationmap.OperationsForBackup,
     changes bool,
     completionChanges bool) error {
-
     backupScheduleName := backup.GetLabels()[velerov1api.ScheduleNameLabel]

     if len(operations.ErrsSinceUpdate) > 0 {
@@ -229,7 +228,6 @@ func (c *backupOperationsReconciler) updateBackupAndOperationsJSON(
         backup.Status.Phase == velerov1api.BackupPhasePartiallyFailed ||
         backup.Status.Phase == velerov1api.BackupPhaseFinalizing ||
         backup.Status.Phase == velerov1api.BackupPhaseFinalizingPartiallyFailed) {
-
         c.itemOperationsMap.DeleteOperationsForBackup(backup.Name)
     } else if changes {
         c.itemOperationsMap.PutOperationsForBackup(operations, backup.Name)
@@ -152,7 +152,6 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request)
             backup.Status.Phase == velerov1api.BackupPhaseWaitingForPluginOperationsPartiallyFailed ||
             backup.Status.Phase == velerov1api.BackupPhaseFinalizing ||
             backup.Status.Phase == velerov1api.BackupPhaseFinalizingPartiallyFailed {
-
             if backup.Status.Expiration == nil || backup.Status.Expiration.After(time.Now()) {
                 log.Debugf("Skipping non-expired incomplete backup %v", backup.Name)
                 continue
@@ -104,7 +104,6 @@ func (r *downloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ

     if downloadRequest.Status != (velerov1api.DownloadRequestStatus{}) && downloadRequest.Status.Expiration != nil {
         if downloadRequest.Status.Expiration.Time.Before(r.clock.Now()) {
-
             // Delete any request that is expired, regardless of the phase: it is not
             // worth proceeding and trying/retrying to find it.
             log.Debug("DownloadRequest has expired - deleting")
@@ -113,9 +112,7 @@ func (r *downloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ
                 return ctrl.Result{}, errors.WithStack(err)
             }
             return ctrl.Result{Requeue: false}, nil
-
         } else if downloadRequest.Status.Phase == velerov1api.DownloadRequestPhaseProcessed {
-
             // Requeue the request if is not yet expired and has already been processed before,
             // since it might still be in use by the logs streaming and shouldn't
             // be deleted until after its expiration.
@@ -127,7 +124,6 @@ func (r *downloadRequestReconciler) Reconcile(ctx context.Context, req ctrl.Requ
     // Process a brand new request.
     backupName := downloadRequest.Spec.Target.Name
     if downloadRequest.Status.Phase == "" || downloadRequest.Status.Phase == velerov1api.DownloadRequestPhaseNew {
-
         // Update the expiration.
         downloadRequest.Status.Expiration = &metav1.Time{Time: r.clock.Now().Add(persistence.DownloadURLTTL)}

@@ -216,7 +216,6 @@ func (r *restoreOperationsReconciler) updateRestoreAndOperationsJSON(
     operations *itemoperationmap.OperationsForRestore,
     changes bool,
     completionChanges bool) error {
-
     if len(operations.ErrsSinceUpdate) > 0 {
         // FIXME: download/upload results
     }
@@ -225,7 +224,6 @@ func (r *restoreOperationsReconciler) updateRestoreAndOperationsJSON(
     // remove local operations list if complete
     if removeIfComplete && (restore.Status.Phase == velerov1api.RestorePhaseCompleted ||
         restore.Status.Phase == velerov1api.RestorePhasePartiallyFailed) {
-
         r.itemOperationsMap.DeleteOperationsForRestore(restore.Name)
     } else if changes {
         r.itemOperationsMap.PutOperationsForRestore(operations, restore.Name)
@@ -40,7 +40,6 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
     imageParts := strings.Split(c.image, ":")
     if len(imageParts) == 2 && imageParts[1] != "latest" {
         pullPolicy = corev1.PullIfNotPresent
-
     }

     daemonSetArgs := []string{
@@ -156,7 +156,6 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
     imageParts := strings.Split(c.image, ":")
    if len(imageParts) == 2 && imageParts[1] != "latest" {
         pullPolicy = corev1.PullIfNotPresent
-
     }

     args := []string{"server"}
@@ -308,7 +307,6 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
             container := *builder.ForPluginContainer(image, pullPolicy).Result()
             deployment.Spec.Template.Spec.InitContainers = append(deployment.Spec.Template.Spec.InitContainers, container)
         }
-
     }

     return deployment
@@ -84,7 +84,6 @@ func (m *BackupItemOperationsMap) UploadProgressAndPutOperationsForBackup(
     backupStore persistence.BackupStore,
     operations *OperationsForBackup,
     backupName string) error {
-
     m.opsLock.Lock()
     defer m.opsLock.Unlock()

@@ -84,7 +84,6 @@ func (m *RestoreItemOperationsMap) UploadProgressAndPutOperationsForRestore(
     backupStore persistence.BackupStore,
     operations *OperationsForRestore,
     restoreName string) error {
-
     m.opsLock.Lock()
     defer m.opsLock.Unlock()

@@ -429,7 +429,6 @@ func (s *objectBackupStore) GetCSIVolumeSnapshotClasses(name string) ([]*snapsho
         return nil, err
     }
     return csiVSClasses, nil
-
 }

 func (s *objectBackupStore) GetCSIVolumeSnapshots(name string) ([]*snapshotv1api.VolumeSnapshot, error) {
@@ -36,7 +36,6 @@ type ObjectStorePlugin struct {
 // GRPCClient returns an ObjectStore gRPC client.
 func (p *ObjectStorePlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, clientConn *grpc.ClientConn) (interface{}, error) {
     return common.NewClientDispenser(p.ClientLogger, clientConn, newObjectStoreGRPCClient), nil
-
 }

 // GRPCServer registers an ObjectStore gRPC server.
@@ -102,7 +102,6 @@ func (a *ChangeImageNameAction) Execute(input *velero.RestoreItemActionExecuteIn
         return nil, errors.Errorf("object was of unexpected type %T", input.Item)
     }
     if obj.GetKind() == "Pod" {
-
         err = a.replaceImageName(obj, config, "spec", "containers")
         if err != nil {
             a.logger.Infof("replace image name meet error: %v", err)
@@ -114,7 +113,6 @@ func (a *ChangeImageNameAction) Execute(input *velero.RestoreItemActionExecuteIn
             a.logger.Infof("replace image name meet error: %v", err)
             return nil, errors.Wrap(err, "error getting item's spec.containers")
         }
-
     } else if obj.GetKind() == "CronJob" {
         //handle containers
         err = a.replaceImageName(obj, config, "spec", "jobTemplate", "spec", "template", "spec", "containers")
@@ -128,7 +126,6 @@ func (a *ChangeImageNameAction) Execute(input *velero.RestoreItemActionExecuteIn
             a.logger.Infof("replace image name meet error: %v", err)
             return nil, errors.Wrap(err, "error getting item's spec.containers")
         }
-
     } else {
         //handle containers
         err = a.replaceImageName(obj, config, "spec", "template", "spec", "containers")
@@ -148,7 +145,6 @@ func (a *ChangeImageNameAction) Execute(input *velero.RestoreItemActionExecuteIn
 }

 func (a *ChangeImageNameAction) replaceImageName(obj *unstructured.Unstructured, config *corev1.ConfigMap, filed ...string) error {
-
     log := a.logger.WithFields(map[string]interface{}{
         "kind":      obj.GetKind(),
         "namespace": obj.GetNamespace(),
@@ -33,7 +33,6 @@ func mergeServiceAccounts(fromCluster, fromBackup *unstructured.Unstructured) (*
     desired := new(corev1api.ServiceAccount)
     if err := runtime.DefaultUnstructuredConverter.FromUnstructured(fromCluster.UnstructuredContent(), desired); err != nil {
         return nil, errors.Wrap(err, "unable to convert from-cluster service account from unstructured to serviceaccount")
-
     }

     backupSA := new(corev1api.ServiceAccount)
@@ -1283,7 +1283,6 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
         } else if !available {
             errs.Add(namespace, fmt.Errorf("Additional items for %s are not ready to use.", resourceID))
         }
-
     }

     // This comes after running item actions because we have built-in actions that restore
@@ -130,7 +130,6 @@ func deleteNodePorts(service *corev1api.Service) error {
             } else {
                 explicitNodePorts.Insert(portName.(string))
             }
-
         }
     }
 }
@@ -53,7 +53,6 @@ func (tw *TarWriter) AddItems(groupResource string, items ...metav1.Object) *Tar
     tw.t.Helper()

     for _, obj := range items {
-
         var path string
         if obj.GetNamespace() == "" {
             path = fmt.Sprintf("resources/%s/cluster/%s.json", groupResource, obj.GetName())