Resolve linter issues.

Signed-off-by: Xun Jiang <jxun@vmware.com>
pull/7151/head
Xun Jiang 2023-11-28 15:23:13 +08:00
parent d70535b6d2
commit f5c159ce56
19 changed files with 31 additions and 22 deletions

@@ -0,0 +1 @@
Add more linters part 2.

@@ -56,7 +56,7 @@ RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v1.1
chmod +x /usr/bin/goreleaser
# get golangci-lint
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.51.0
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.54.2
# install kubectl
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl

@@ -84,7 +84,7 @@ func (e *Extractor) readBackup(tarRdr *tar.Reader) (string, error) {
return "", err
}
target := filepath.Join(dir, header.Name) //nolint:gosec
target := filepath.Join(dir, header.Name) //nolint:gosec // Internal usage. No need to check.
switch header.Typeflag {
case tar.TypeDir:
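
Note on the gosec suppression above: rule G305 flags filepath.Join(dir, header.Name) during tar extraction because a crafted header.Name such as "../../etc/passwd" can escape dir. The backup tarball here is produced by Velero itself, hence the "internal usage" justification. For untrusted archives the usual fix is a containment check; a minimal sketch, where isWithinDir is a hypothetical helper and not Velero code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// isWithinDir reports whether target, once cleaned, still lives under dir.
// This is the check G305 is asking for when the archive is untrusted.
func isWithinDir(dir, target string) bool {
	rel, err := filepath.Rel(dir, target)
	if err != nil {
		return false
	}
	return rel != ".." && !strings.HasPrefix(rel, ".."+string(os.PathSeparator))
}

func main() {
	fmt.Println(isWithinDir("/restore", filepath.Join("/restore", "../etc/passwd"))) // false
}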

@@ -439,11 +439,11 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress and hook status")
}
var skippedPVSummary []byte
if skippedPVSummary, err = json.Marshal(backupRequest.SkippedPVTracker.Summary()); err != nil {
if skippedPVSummary, err := json.Marshal(backupRequest.SkippedPVTracker.Summary()); err != nil {
log.WithError(errors.WithStack(err)).Warn("Fail to generate skipped PV summary.")
} else {
log.Infof("Summary for skipped PVs: %s", skippedPVSummary)
}
log.Infof("Summary for skipped PVs: %s", skippedPVSummary)
backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(backupRequest.BackedUpItems), ItemsBackedUp: len(backupRequest.BackedUpItems)}
log.WithField("progress", "").Infof("Backed up a total of %d items", len(backupRequest.BackedUpItems))
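
This hunk is more than a style fix: the old code logged "Summary for skipped PVs" even when Marshal failed, printing whatever was left in skippedPVSummary. With :=, both values are scoped to the if/else, the up-front var declaration goes away, and the success log can only run on the success path. A standalone sketch of the idiom (names and payload are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// summary and err exist only inside the if/else chain, so the
	// success branch is the only place summary can be read.
	if summary, err := json.Marshal(map[string]int{"skippedPVs": 2}); err != nil {
		fmt.Println("failed to generate summary:", err)
	} else {
		fmt.Printf("summary for skipped PVs: %s\n", summary)
	}
}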

@@ -29,7 +29,7 @@ type ServerStatusRequestBuilder struct {
object *velerov1api.ServerStatusRequest
}
// ForServerStatusRequest is the constructor for for a ServerStatusRequestBuilder.
// ForServerStatusRequest is the constructor for a ServerStatusRequestBuilder.
func ForServerStatusRequest(ns, name, resourceVersion string) *ServerStatusRequestBuilder {
return &ServerStatusRequestBuilder{
object: &velerov1api.ServerStatusRequest{

@@ -486,7 +486,7 @@ func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int {
concurrentNum := math.MaxInt32
for _, rule := range configs.DataPathConcurrency.PerNodeConfig {
selector, err := metav1.LabelSelectorAsSelector(&rule.NodeSelector)
selector, err := metav1.LabelSelectorAsSelector(&(rule.NodeSelector))
if err != nil {
s.logger.WithError(err).Warnf("Failed to parse rule with label selector %s, skip it", rule.NodeSelector.String())
continue

@@ -248,7 +248,7 @@ type server struct {
discoveryHelper velerodiscovery.Helper
dynamicClient dynamic.Interface
// controller-runtime client. the difference from the controller-manager's client
// is that the the controller-manager's client is limited to list namespaced-scoped
// is that the controller-manager's client is limited to list namespaced-scoped
// resources in the namespace where Velero is installed, or the cluster-scoped
// resources. The crClient doesn't have the limitation.
crClient ctrlclient.Client

@@ -111,7 +111,7 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin
httpClient := new(http.Client)
httpClient.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: insecureSkipTLSVerify, //nolint:gosec
InsecureSkipVerify: insecureSkipTLSVerify, //nolint:gosec // This parameter is useful for some scenarios.
RootCAs: caPool,
},
IdleConnTimeout: timeout,

@@ -164,6 +164,11 @@ func (d *StructuredDescriber) JSONEncode() string {
encoder := json.NewEncoder(byteBuffer)
encoder.SetEscapeHTML(false)
encoder.SetIndent("", " ")
encoder.Encode(d.output)
err := encoder.Encode(d.output)
if err != nil {
fmt.Printf("fail to encode %s", err.Error())
return ""
}
return byteBuffer.String()
}
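
errcheck is the likely driver of this hunk: Encoder.Encode returns an error that was previously dropped. Since JSONEncode's string-only signature is fixed by its callers, the fix prints and returns "". Where the signature is yours to change, propagating the error is the more idiomatic shape; a hedged alternative sketch, not the Velero API:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// encodeJSON is a hypothetical variant of JSONEncode that surfaces the
// error instead of printing it.
func encodeJSON(v any) (string, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	enc.SetIndent("", "    ")
	if err := enc.Encode(v); err != nil {
		return "", fmt.Errorf("encode: %w", err)
	}
	return buf.String(), nil
}

func main() {
	s, err := encodeJSON(map[string]string{"phase": "Completed"})
	if err != nil {
		panic(err)
	}
	fmt.Print(s)
}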

@@ -1080,7 +1080,7 @@ func generateVolumeInfoForCSIVolumeSnapshot(backup *pkgbackup.Request, csiVolume
SnapshotDataMoved: false,
PreserveLocalSnapshot: true,
OperationID: operation.Spec.OperationID,
StartTimestamp: &volumeSnapshot.CreationTimestamp,
StartTimestamp: &(volumeSnapshot.CreationTimestamp),
CSISnapshotInfo: volume.CSISnapshotInfo{
VSCName: *volumeSnapshot.Status.BoundVolumeSnapshotContentName,
Size: size,

@@ -546,7 +546,8 @@ func (r *backupDeletionReconciler) deleteMovedSnapshots(ctx context.Context, bac
snapshot := repository.SnapshotIdentifier{}
b, err := json.Marshal(cm.Data)
if err != nil {
r.logger.WithError(err).Infof("Fail to encode JSON: %v", cm.Data)
errs = append(errs, errors.Wrapf(err, "fail to marshal the snapshot info into JSON"))
continue
}
if err := json.Unmarshal(b, &snapshot); err != nil {
errs = append(errs, errors.Wrapf(err, "failed to unmarshal snapshot info"))
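
Two things happen in this hunk: a marshal failure is promoted from an Info log to a collected error, and the surrounding code shows the JSON round-trip trick for turning a ConfigMap's map[string]string Data into a typed struct. A standalone sketch of that round trip (field names are illustrative, not the real SnapshotIdentifier):

package main

import (
	"encoding/json"
	"fmt"
)

type snapshotIdentifier struct {
	VolumeNamespace string `json:"volumeNamespace"`
	SnapshotID      string `json:"snapshotID"`
}

func main() {
	// Stand-in for ConfigMap.Data.
	data := map[string]string{"volumeNamespace": "ns1", "snapshotID": "snap-01"}
	b, err := json.Marshal(data)
	if err != nil {
		panic(err) // the case the hunk now records in errs
	}
	var s snapshotIdentifier
	if err := json.Unmarshal(b, &s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s)
}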

@@ -303,7 +303,7 @@ func (c *PodVolumeRestoreReconciler) OnDataPathCompleted(ctx context.Context, na
// Write a done file with name=<restore-uid> into the just-created .velero dir
// within the volume. The velero init container on the pod is waiting
// for this file to exist in each restored volume before completing.
if err := os.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil { //nolint:gosec
if err := os.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil { //nolint:gosec // Internal usage. No need to check.
_, _ = c.errorOut(ctx, &pvr, err, "error writing done file", log)
return
}

@@ -63,7 +63,9 @@ func genConfigmap(bak *velerov1.Backup, du velerov2alpha1.DataUpload) *corev1api
return nil
}
data := make(map[string]string)
json.Unmarshal(b, &data)
if err := json.Unmarshal(b, &data); err != nil {
return nil
}
return &corev1api.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1api.SchemeGroupVersion.String(),

@@ -69,7 +69,7 @@ const (
// data mover metrics
DataUploadSuccessTotal = "data_upload_success_total"
DataUploadFailureTotal = "data_upload_failure_total"
DataUploadCancelTotal = "data_upload_cancel_total"
DataUploadCancelTotal = "data_upload_cancel_total" //nolint:gosec // Not a hard code secret.
DataDownloadSuccessTotal = "data_download_success_total"
DataDownloadFailureTotal = "data_download_failure_total"
DataDownloadCancelTotal = "data_download_cancel_total"

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
//nolint:gosec
//nolint:gosec // Internal usage. No need to check.
package config
import (

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
//nolint:gosec
//nolint:gosec // Internal usage. No need to check.
package config
import "os"

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
//nolint:gosec
//nolint:gosec // Internal call. No need to check.
package keys
import (

@@ -1900,7 +1900,7 @@ func shouldRenamePV(ctx *restoreContext, obj *unstructured.Unstructured, client
// remapClaimRefNS remaps a PersistentVolume's claimRef.Namespace based on a
// restore's NamespaceMappings, if necessary. Returns true if the namespace was
// remapped, false if it was not required.
func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) (bool, error) { //nolint:unparam
func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) (bool, error) { //nolint:unparam // ignore the result 0 (bool) is never used warning.
if len(ctx.restore.Spec.NamespaceMapping) == 0 {
ctx.log.Debug("Persistent volume does not need to have the claimRef.namespace remapped because restore is not remapping any namespaces")
return false, nil
@@ -2315,7 +2315,7 @@ func (ctx *restoreContext) getOrderedResourceCollection(
// getSelectedRestoreableItems applies Kubernetes selectors on individual items
// of each resource type to create a list of items which will be actually
// restored.
func (ctx *restoreContext) getSelectedRestoreableItems(resource string, namespaceMapping map[string]string, originalNamespace string, items []string) (restoreableResource, results.Result, results.Result) {
func (ctx *restoreContext) getSelectedRestoreableItems(resource string, namespaceMapping map[string]string, originalNamespace string, items []string) (restoreableResource, results.Result, results.Result) { //nolint:unparam // Ignore the warnings is always nil warning.
warnings, errs := results.Result{}, results.Result{}
restorable := restoreableResource{
@@ -2430,7 +2430,7 @@ func removeRestoreLabels(obj metav1.Object) {
}
// updates the backup/restore labels
func (ctx *restoreContext) updateBackupRestoreLabels(fromCluster, fromClusterWithLabels *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs results.Result) {
func (ctx *restoreContext) updateBackupRestoreLabels(fromCluster, fromClusterWithLabels *unstructured.Unstructured, namespace string, resourceClient client.Dynamic) (warnings, errs results.Result) { //nolint:unparam // Ignore the warnings is nil warning.
patchBytes, err := generatePatch(fromCluster, fromClusterWithLabels)
if err != nil {
ctx.log.Errorf("error generating patch for %s %s: %v", fromCluster.GroupVersionKind().Kind, kube.NamespaceAndName(fromCluster), err)
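
The three //nolint:unparam directives in this file share a cause: unparam reports results (or parameters) that are identical on every code path, such as remapClaimRefNS's bool being unused by callers and the warnings results that are always nil. Suppressing rather than changing the signatures keeps the call sites stable. A toy example of what trips the linter:

package main

import "fmt"

// double trips unparam: ok is true on every return path, so callers
// can never observe anything else. Toy code, not Velero's.
func double(n int) (result int, ok bool) {
	return n * 2, true
}

func main() {
	v, _ := double(21)
	fmt.Println(v)
}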

@@ -33,7 +33,7 @@ type Throttle struct {
func (t *Throttle) ShouldOutput() bool {
nextOutputTimeUnixNano := atomic.LoadInt64(&t.throttle)
if nowNano := time.Now().UnixNano(); nowNano > nextOutputTimeUnixNano { //nolint:forbidigo
if nowNano := time.Now().UnixNano(); nowNano > nextOutputTimeUnixNano {
if atomic.CompareAndSwapInt64(&t.throttle, nextOutputTimeUnixNano, nowNano+t.interval.Nanoseconds()) {
return true
}
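
Dropping the //nolint:forbidigo here suggests the configured forbidden-pattern list no longer matches this time.Now call. The code itself is a lock-free throttle: when several goroutines pass the deadline at once, CompareAndSwapInt64 lets exactly one of them win the right to output and push the deadline forward. A standalone sketch of the same pattern (field names are my own):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type throttle struct {
	next     int64 // unix nanos before which output is suppressed
	interval time.Duration
}

func (t *throttle) shouldOutput() bool {
	deadline := atomic.LoadInt64(&t.next)
	now := time.Now().UnixNano()
	if now <= deadline {
		return false
	}
	// Of the goroutines racing here, only the one whose CAS succeeds
	// gets to output; the rest see the updated deadline and back off.
	return atomic.CompareAndSwapInt64(&t.next, deadline, now+t.interval.Nanoseconds())
}

func main() {
	t := &throttle{interval: time.Second}
	fmt.Println(t.shouldOutput(), t.shouldOutput()) // true false
}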