Merge pull request #6208 from blackpiglet/linter_errcheck

Enable errcheck linter and resolve found issues.
pull/6232/head
qiuming 2023-05-05 14:43:55 +08:00 committed by GitHub
commit 4db1a781fc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 124 additions and 41 deletions

View File

@ -0,0 +1 @@
Enable errcheck linter and resolve found issues

View File

@ -302,6 +302,7 @@ linters:
- bodyclose
- dogsled
- durationcheck
- errcheck
- exportloopref
- goconst
- gofmt

View File

@ -63,7 +63,12 @@ func InvokeDeleteActions(ctx *Context) error {
if err != nil {
return errors.Wrapf(err, "error extracting backup")
}
defer ctx.Filesystem.RemoveAll(dir)
defer func() {
if err := ctx.Filesystem.RemoveAll(dir); err != nil {
ctx.Log.Errorf("error removing temporary directory %s: %s", dir, err.Error())
}
}()
ctx.Log.Debugf("Downloaded and extracted the backup file to: %s", dir)
backupResources, err := archive.NewParser(ctx.Log, ctx.Filesystem).Parse(dir)

View File

@ -635,7 +635,11 @@ func (kb *kubernetesBackupper) FinalizeBackup(log logrus.FieldLogger,
}
// write new tar archive replacing files in original with content updateFiles for matches
buildFinalTarball(tr, tw, updateFiles)
if err := buildFinalTarball(tr, tw, updateFiles); err != nil {
log.Errorf("Error building final tarball: %s", err.Error())
return err
}
log.WithField("progress", "").Infof("Updated a total of %d items", len(backupRequest.BackedUpItems))
return nil

View File

@ -153,10 +153,18 @@ func (f *factory) KubebuilderClient() (kbclient.Client, error) {
}
scheme := runtime.NewScheme()
velerov1api.AddToScheme(scheme)
k8scheme.AddToScheme(scheme)
apiextv1beta1.AddToScheme(scheme)
apiextv1.AddToScheme(scheme)
if err := velerov1api.AddToScheme(scheme); err != nil {
return nil, err
}
if err := k8scheme.AddToScheme(scheme); err != nil {
return nil, err
}
if err := apiextv1beta1.AddToScheme(scheme); err != nil {
return nil, err
}
if err := apiextv1.AddToScheme(scheme); err != nil {
return nil, err
}
kubebuilderClient, err := kbclient.New(clientConfig, kbclient.Options{
Scheme: scheme,
})

View File

@ -162,7 +162,9 @@ func getKubectlVersion() (string, error) {
case <-time.After(kubectlTimeout):
// we don't care about the possible error returned from Kill() here,
// just return an empty string
kubectlCmd.Process.Kill()
if err := kubectlCmd.Process.Kill(); err != nil {
return "", fmt.Errorf("error killing kubectl process: %w", err)
}
return "", errors.New("timeout waiting for kubectl version")
case err := <-done:

View File

@ -64,7 +64,10 @@ $ velero completion fish > ~/.config/fish/completions/velero.fish
shell := args[0]
switch shell {
case "bash":
cmd.Root().GenBashCompletion(os.Stdout)
if err := cmd.Root().GenBashCompletion(os.Stdout); err != nil {
fmt.Println("fail to generate bash completion script", err)
os.Exit(1)
}
case "zsh":
// # fix #4912
// cobra does not support zsh completion output used by source command
@ -72,11 +75,20 @@ $ velero completion fish > ~/.config/fish/completions/velero.fish
// Need to append compdef manually to do that.
zshHead := "#compdef velero\ncompdef _velero velero\n"
out := os.Stdout
out.Write([]byte(zshHead))
if _, err := out.Write([]byte(zshHead)); err != nil {
fmt.Println("fail to append compdef command into zsh completion script: ", err)
os.Exit(1)
}
cmd.Root().GenZshCompletion(out)
if err := cmd.Root().GenZshCompletion(out); err != nil {
fmt.Println("fail to generate zsh completion script: ", err)
os.Exit(1)
}
case "fish":
cmd.Root().GenFishCompletion(os.Stdout, true)
if err := cmd.Root().GenFishCompletion(os.Stdout, true); err != nil {
fmt.Println("fail to generate fish completion script: ", err)
os.Exit(1)
}
default:
fmt.Println("Invalid shell specified, specify bash, zsh, or fish")
os.Exit(1)

View File

@ -121,9 +121,18 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, metri
ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
velerov1api.AddToScheme(scheme)
v1.AddToScheme(scheme)
storagev1api.AddToScheme(scheme)
if err := velerov1api.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, err
}
if err := v1.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, err
}
if err := storagev1api.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, err
}
nodeName := os.Getenv("NODE_NAME")

View File

@ -313,9 +313,18 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s
}
scheme := runtime.NewScheme()
velerov1api.AddToScheme(scheme)
corev1api.AddToScheme(scheme)
snapshotv1api.AddToScheme(scheme)
if err := velerov1api.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, err
}
if err := corev1api.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, err
}
if err := snapshotv1api.AddToScheme(scheme); err != nil {
cancelFunc()
return nil, err
}
ctrl.SetLogger(logrusr.New(logger))

View File

@ -61,7 +61,9 @@ func ClearOutputFlagDefault(cmd *cobra.Command) {
return
}
f.DefValue = ""
f.Value.Set("")
if err := f.Value.Set(""); err != nil {
fmt.Printf("error clear the default value of output flag: %s\n", err.Error())
}
}
// GetOutputFlagValue returns the value of the "output" flag

View File

@ -561,8 +561,7 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B
continue
}
location := &velerov1api.VolumeSnapshotLocation{}
b.kbClient.Get(context.Background(), kbclient.ObjectKey{Namespace: backup.Namespace, Name: defaultLocation}, location)
if err != nil {
if err := b.kbClient.Get(context.Background(), kbclient.ObjectKey{Namespace: backup.Namespace, Name: defaultLocation}, location); err != nil {
errors = append(errors, fmt.Sprintf("error getting volume snapshot location named %s: %v", defaultLocation, err))
continue
}

View File

@ -96,7 +96,9 @@ func (r *BackupRepoReconciler) invalidateBackupReposForBSL(bslObj client.Object)
for i := range list.Items {
r.logger.WithField("BSL", bsl.Name).Infof("Invalidating Backup Repository %s", list.Items[i].Name)
r.patchBackupRepository(context.Background(), &list.Items[i], repoNotReady("re-establish on BSL change"))
if err := r.patchBackupRepository(context.Background(), &list.Items[i], repoNotReady("re-establish on BSL change")); err != nil {
r.logger.WithField("BSL", bsl.Name).WithError(err).Errorf("fail to patch BackupRepository %s", list.Items[i].Name)
}
}
return []reconcile.Request{}

View File

@ -18,6 +18,7 @@ package controller
import (
"context"
"fmt"
"time"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
@ -397,7 +398,10 @@ func backupSyncSourceOrderFunc(objList client.ObjectList) client.ObjectList {
cpBsl := bsl
bslArray = append(bslArray, &cpBsl)
}
meta.SetList(resultBSLList, bslArray)
if err := meta.SetList(resultBSLList, bslArray); err != nil {
fmt.Printf("fail to sort BSL list: %s", err.Error())
return &velerov1api.BackupStorageLocationList{}
}
return resultBSLList
}

View File

@ -305,10 +305,7 @@ func (r *restoreReconciler) validateAndComplete(restore *api.Restore) backupInfo
}))
backupList := &api.BackupList{}
r.kbClient.List(context.Background(), backupList, &client.ListOptions{
LabelSelector: selector,
})
if err != nil {
if err := r.kbClient.List(context.Background(), backupList, &client.ListOptions{LabelSelector: selector}); err != nil {
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "Unable to list backups for schedule")
return backupInfo{}
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package install
import (
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
@ -250,7 +251,9 @@ func AllCRDs() *unstructured.UnstructuredList {
for _, crd := range v1crds.CRDs {
crd.SetLabels(Labels())
appendUnstructured(resources, crd)
if err := appendUnstructured(resources, crd); err != nil {
fmt.Printf("error appending CRD %s: %s\n", crd.GetName(), err.Error())
}
}
return resources
@ -262,32 +265,44 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
resources := AllCRDs()
ns := Namespace(o.Namespace)
appendUnstructured(resources, ns)
if err := appendUnstructured(resources, ns); err != nil {
fmt.Printf("error appending Namespace %s: %s\n", ns.GetName(), err.Error())
}
serviceAccountName := defaultServiceAccountName
if o.ServiceAccountName == "" {
crb := ClusterRoleBinding(o.Namespace)
appendUnstructured(resources, crb)
if err := appendUnstructured(resources, crb); err != nil {
fmt.Printf("error appending ClusterRoleBinding %s: %s\n", crb.GetName(), err.Error())
}
sa := ServiceAccount(o.Namespace, o.ServiceAccountAnnotations)
appendUnstructured(resources, sa)
if err := appendUnstructured(resources, sa); err != nil {
fmt.Printf("error appending ServiceAccount %s: %s\n", sa.GetName(), err.Error())
}
} else {
serviceAccountName = o.ServiceAccountName
}
if o.SecretData != nil {
sec := Secret(o.Namespace, o.SecretData)
appendUnstructured(resources, sec)
if err := appendUnstructured(resources, sec); err != nil {
fmt.Printf("error appending Secret %s: %s\n", sec.GetName(), err.Error())
}
}
if !o.NoDefaultBackupLocation {
bsl := BackupStorageLocation(o.Namespace, o.ProviderName, o.Bucket, o.Prefix, o.BSLConfig, o.CACertData)
appendUnstructured(resources, bsl)
if err := appendUnstructured(resources, bsl); err != nil {
fmt.Printf("error appending BackupStorageLocation %s: %s\n", bsl.GetName(), err.Error())
}
}
// A snapshot location may not be desirable for users relying on pod volume backup/restore
if o.UseVolumeSnapshots {
vsl := VolumeSnapshotLocation(o.Namespace, o.ProviderName, o.VSLConfig)
appendUnstructured(resources, vsl)
if err := appendUnstructured(resources, vsl); err != nil {
fmt.Printf("error appending VolumeSnapshotLocation %s: %s\n", vsl.GetName(), err.Error())
}
}
secretPresent := o.SecretData != nil
@ -322,7 +337,9 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
deploy := Deployment(o.Namespace, deployOpts...)
appendUnstructured(resources, deploy)
if err := appendUnstructured(resources, deploy); err != nil {
fmt.Printf("error appending Deployment %s: %s\n", deploy.GetName(), err.Error())
}
if o.UseNodeAgent {
dsOpts := []podTemplateOption{
@ -337,7 +354,9 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
dsOpts = append(dsOpts, WithFeatures(o.Features))
}
ds := DaemonSet(o.Namespace, dsOpts...)
appendUnstructured(resources, ds)
if err := appendUnstructured(resources, ds); err != nil {
fmt.Printf("error appending DaemonSet %s: %s\n", ds.GetName(), err.Error())
}
}
return resources

View File

@ -87,7 +87,9 @@ func (c *ObjectStoreGRPCClient) PutObject(bucket, key string, body io.Reader) er
return nil
}
if err != nil {
stream.CloseSend()
if err := stream.CloseSend(); err != nil {
return common.FromGRPCError(err)
}
return errors.WithStack(err)
}

View File

@ -232,7 +232,10 @@ func getNames(command string, kind common.PluginKind, plugin Interface) []Plugin
func (s *server) Serve() {
if s.flagSet != nil && !s.flagSet.Parsed() {
s.log.Debugf("Parsing flags")
s.flagSet.Parse(os.Args[1:])
if err := s.flagSet.Parse(os.Args[1:]); err != nil {
s.log.Errorf("fail to parse the flags: %s", err.Error())
return
}
}
s.log.Level = s.logLevelFlag.Parse()

View File

@ -401,7 +401,11 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) {
errs.AddVeleroError(err)
return warnings, errs
}
defer ctx.fileSystem.RemoveAll(dir)
defer func() {
if err := ctx.fileSystem.RemoveAll(dir); err != nil {
ctx.log.Errorf("error removing temporary directory %s: %s", dir, err.Error())
}
}()
// Need to set this for additionalItems to be restored.
ctx.restoreDir = dir

View File

@ -81,7 +81,7 @@ func (fs *FakeFileSystem) Stat(path string) (os.FileInfo, error) {
func (fs *FakeFileSystem) WithFile(path string, data []byte) *FakeFileSystem {
file, _ := fs.fs.Create(path)
file.Write(data)
_, _ = file.Write(data)
file.Close()
return fs
@ -89,14 +89,14 @@ func (fs *FakeFileSystem) WithFile(path string, data []byte) *FakeFileSystem {
func (fs *FakeFileSystem) WithFileAndMode(path string, data []byte, mode os.FileMode) *FakeFileSystem {
file, _ := fs.fs.OpenFile(path, os.O_CREATE|os.O_RDWR, mode)
file.Write(data)
_, _ = file.Write(data)
file.Close()
return fs
}
func (fs *FakeFileSystem) WithDirectory(path string) *FakeFileSystem {
fs.fs.MkdirAll(path, 0755)
_ = fs.fs.MkdirAll(path, 0755)
return fs
}