Merge pull request #6194 from blackpiglet/linter_more
Enable more linters, and remove mal-functioned milestoned issue action.
commit a8a17d725a
@@ -1,18 +0,0 @@
-name: Add issues with a milestone to the milestone's board
-
-on:
-  issues:
-    types: [milestoned]
-
-jobs:
-  automate-project-columns:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: alex-page/github-project-automation-plus@v0.3.0
-        with:
-          # Do NOT add PRs to the board, as that's duplication. Their corresponding issue should be on the board.
-          if: ${{ !github.event.issue.pull_request }}
-          project: "${{ github.event.issue.milestone.title }}"
-          column: "To Do"
-          repo-token: ${{ secrets.GH_TOKEN }}
-
@@ -0,0 +1 @@
+Enable more linters, and remove mal-functioned milestoned issue action.
@@ -255,8 +255,6 @@ linters-settings:
  rowserrcheck:
    packages:
      - github.com/jmoiron/sqlx
  staticcheck:

  testpackage:
    # regexp pattern to skip files
    skip-regexp: (export|internal)_test\.go
@@ -298,18 +296,31 @@ linters-settings:
linters:
  disable-all: true
  enable:
    - asasalint
    - asciicheck
    - bidichk
    - bodyclose
    - dogsled
    - durationcheck
    - exportloopref
    - goconst
    - gofmt
    - goheader
    - goimports
    - goprintffuncname
    - gosec
    - gosimple
    - govet
    - importas
    - ineffassign
    - misspell
    - nakedret
    - nosprintfhostport
    - staticcheck
    - stylecheck
    - revive
    - typecheck
    - unconvert
    - unparam
    - unused
    - usestdlibvars
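Note on two of the newly enabled linters: usestdlibvars asks for standard-library constants (for example http.MethodGet) instead of equivalent string literals, and nosprintfhostport asks for net.JoinHostPort instead of building "host:port" with fmt.Sprintf, since the Sprintf form mangles IPv6 addresses. The snippet below is a hypothetical illustration of both patterns, not code from this repository:

package example

import (
	"fmt"
	"net"
	"net/http"
)

// buildRequest is a made-up helper showing the patterns the two linters target.
func buildRequest(host string, port int) (*http.Request, error) {
	// nosprintfhostport: prefer net.JoinHostPort over fmt.Sprintf("%s:%d", host, port).
	addr := net.JoinHostPort(host, fmt.Sprint(port))

	// usestdlibvars: prefer http.MethodGet over the raw "GET" literal.
	return http.NewRequest(http.MethodGet, "http://"+addr, nil)
}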
@@ -89,7 +89,7 @@ type FileForArchive struct {
func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource, preferredGVR schema.GroupVersionResource, mustInclude, finalize bool) (bool, []FileForArchive, error) {
	selectedForBackup, files, err := ib.backupItemInternal(logger, obj, groupResource, preferredGVR, mustInclude, finalize)
	// return if not selected, an error occurred, there are no files to add, or for finalize
-	if selectedForBackup == false || err != nil || len(files) == 0 || finalize {
+	if !selectedForBackup || err != nil || len(files) == 0 || finalize {
		return selectedForBackup, files, err
	}
	for _, file := range files {
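The `selectedForBackup == false` to `!selectedForBackup` change matches gosimple's S1002 check ("omit comparison with boolean constant"); the same rewrite recurs below in the boolptr helpers and the include/exclude filters. A minimal, hypothetical example of what the check reports:

package example

// shouldSkip is a made-up function; S1002 flags the commented form and
// suggests using the boolean directly.
func shouldSkip(selected bool) bool {
	// if selected == false { ... }  // flagged by S1002
	return !selected
}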
@@ -340,9 +340,7 @@ func (ib *itemBackupper) executeActions(
	mustInclude := u.GetAnnotations()[mustIncludeAdditionalItemAnnotation] == "true" || finalize
	// remove the annotation as it's for communication between BIA and velero server,
	// we don't want the resource be restored with this annotation.
-	if _, ok := u.GetAnnotations()[mustIncludeAdditionalItemAnnotation]; ok {
-		delete(u.GetAnnotations(), mustIncludeAdditionalItemAnnotation)
-	}
+	delete(u.GetAnnotations(), mustIncludeAdditionalItemAnnotation)
	obj = u

	// If async plugin started async operation, add it to the ItemOperations list
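`delete` on a key that is absent from a map is already a no-op, so the `if _, ok := ...; ok` guard around it is redundant; staticcheck/gosimple reports this pattern (S1033, "unnecessary guard around call to delete", if I have the check ID right), and the same guard removal appears in the two DeleteOperations helpers further down. A hypothetical sketch:

package example

// forget is a made-up helper: the lookup guard adds nothing, the bare
// delete is equivalent.
func forget(ops map[string]int, name string) {
	// if _, ok := ops[name]; ok {
	// 	delete(ops, name)
	// }
	delete(ops, name)
}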
@@ -83,7 +83,7 @@ func (b *BackupBuilder) ObjectMeta(opts ...ObjectMetaOpt) *BackupBuilder {

// FromSchedule sets the Backup's spec and labels from the Schedule template
func (b *BackupBuilder) FromSchedule(schedule *velerov1api.Schedule) *BackupBuilder {
-	var labels = make(map[string]string)
+	var labels map[string]string

	// Check if there's explicit Labels defined in the Schedule object template
	// and if present then copy it to the backup object.
@@ -617,7 +617,7 @@ func (s *server) getCSIVolumeSnapshotListers() (vsLister snapshotv1listers.Volum
		s.logger.Errorf("fail to find snapshot v1 schema: %s", err)
	}

-	return
+	return vsLister, err
}

func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string) error {
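The bare `return` here relied on the named results (vsLister, err); nakedret, one of the linters enabled above, flags naked returns in longer functions because the values being returned are hidden back in the signature. A hypothetical sketch of the preferred style:

package example

import "errors"

// parsePort is a made-up example: with named results, an explicit
// `return port, err` is clearer than a naked `return`.
func parsePort(s string) (port int, err error) {
	if s == "" {
		err = errors.New("empty port")
		return port, err // rather than a bare `return`
	}
	// parsing elided in this sketch
	return port, err
}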
@@ -149,71 +149,71 @@ func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) {
	// 1. generate table
	var table *metav1.Table

-	switch obj.(type) {
+	switch objType := obj.(type) {
	case *velerov1api.Backup:
		table = &metav1.Table{
			ColumnDefinitions: backupColumns,
-			Rows: printBackup(obj.(*velerov1api.Backup)),
+			Rows: printBackup(objType),
		}
	case *velerov1api.BackupList:
		table = &metav1.Table{
			ColumnDefinitions: backupColumns,
-			Rows: printBackupList(obj.(*velerov1api.BackupList)),
+			Rows: printBackupList(objType),
		}
	case *velerov1api.Restore:
		table = &metav1.Table{
			ColumnDefinitions: restoreColumns,
-			Rows: printRestore(obj.(*velerov1api.Restore)),
+			Rows: printRestore(objType),
		}
	case *velerov1api.RestoreList:
		table = &metav1.Table{
			ColumnDefinitions: restoreColumns,
-			Rows: printRestoreList(obj.(*velerov1api.RestoreList)),
+			Rows: printRestoreList(objType),
		}
	case *velerov1api.Schedule:
		table = &metav1.Table{
			ColumnDefinitions: scheduleColumns,
-			Rows: printSchedule(obj.(*velerov1api.Schedule)),
+			Rows: printSchedule(objType),
		}
	case *velerov1api.ScheduleList:
		table = &metav1.Table{
			ColumnDefinitions: scheduleColumns,
-			Rows: printScheduleList(obj.(*velerov1api.ScheduleList)),
+			Rows: printScheduleList(objType),
		}
	case *velerov1api.BackupRepository:
		table = &metav1.Table{
			ColumnDefinitions: backupRepoColumns,
-			Rows: printBackupRepo(obj.(*velerov1api.BackupRepository)),
+			Rows: printBackupRepo(objType),
		}
	case *velerov1api.BackupRepositoryList:
		table = &metav1.Table{
			ColumnDefinitions: backupRepoColumns,
-			Rows: printBackupRepoList(obj.(*velerov1api.BackupRepositoryList)),
+			Rows: printBackupRepoList(objType),
		}
	case *velerov1api.BackupStorageLocation:
		table = &metav1.Table{
			ColumnDefinitions: backupStorageLocationColumns,
-			Rows: printBackupStorageLocation(obj.(*velerov1api.BackupStorageLocation)),
+			Rows: printBackupStorageLocation(objType),
		}
	case *velerov1api.BackupStorageLocationList:
		table = &metav1.Table{
			ColumnDefinitions: backupStorageLocationColumns,
-			Rows: printBackupStorageLocationList(obj.(*velerov1api.BackupStorageLocationList)),
+			Rows: printBackupStorageLocationList(objType),
		}
	case *velerov1api.VolumeSnapshotLocation:
		table = &metav1.Table{
			ColumnDefinitions: volumeSnapshotLocationColumns,
-			Rows: printVolumeSnapshotLocation(obj.(*velerov1api.VolumeSnapshotLocation)),
+			Rows: printVolumeSnapshotLocation(objType),
		}
	case *velerov1api.VolumeSnapshotLocationList:
		table = &metav1.Table{
			ColumnDefinitions: volumeSnapshotLocationColumns,
-			Rows: printVolumeSnapshotLocationList(obj.(*velerov1api.VolumeSnapshotLocationList)),
+			Rows: printVolumeSnapshotLocationList(objType),
		}
	case *velerov1api.ServerStatusRequest:
		table = &metav1.Table{
			ColumnDefinitions: pluginColumns,
-			Rows: printPluginList(obj.(*velerov1api.ServerStatusRequest)),
+			Rows: printPluginList(objType),
		}
	default:
		return false, errors.Errorf("type %T is not supported", obj)
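Binding the switch variable (`switch objType := obj.(type)`) hands each case the value already narrowed to that case's type, which removes all of the repeated `obj.(*velerov1api.Backup)`-style assertions; staticcheck suggests this rewrite (S1034, "use result of type assertion to simplify cases", as far as I recall). A small hypothetical example:

package example

import "fmt"

// describe is a made-up function: inside each case, v already has the
// concrete type, so no further assertion on x is needed.
func describe(x interface{}) string {
	switch v := x.(type) {
	case string:
		return "string of length " + fmt.Sprint(len(v))
	case error:
		return "error: " + v.Error()
	default:
		return fmt.Sprintf("unhandled type %T", v)
	}
}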
@@ -463,7 +463,7 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg

	// validate that only one exists orLabelSelector or just labelSelector (singular)
	if request.Spec.OrLabelSelectors != nil && request.Spec.LabelSelector != nil {
-		request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("encountered labelSelector as well as orLabelSelectors in backup spec, only one can be specified"))
+		request.Status.ValidationErrors = append(request.Status.ValidationErrors, "encountered labelSelector as well as orLabelSelectors in backup spec, only one can be specified")
	}

	if request.Spec.ResourcePolicy != nil && request.Spec.ResourcePolicy.Kind == resourcepolicies.ConfigmapRefType {
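fmt.Sprintf with a constant format string and no arguments is a no-op wrapper around the string literal, which is what one of the gosimple checks reports (S1039, to the best of my recollection); the identical fix lands in the restore reconciler in the next hunk. A hypothetical sketch:

package example

import "fmt"

// appendValidationError is a made-up helper contrasting the flagged call
// with a Sprintf that actually formats something.
func appendValidationError(errs []string, got int) []string {
	// errs = append(errs, fmt.Sprintf("only one selector can be specified")) // flagged: no verbs
	errs = append(errs, "only one selector can be specified")
	errs = append(errs, fmt.Sprintf("got %d selectors, want at most 1", got))
	return errs
}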
@@ -270,7 +270,7 @@ func (r *restoreReconciler) validateAndComplete(restore *api.Restore) backupInfo

	// validate that only one exists orLabelSelector or just labelSelector (singular)
	if restore.Spec.OrLabelSelectors != nil && restore.Spec.LabelSelector != nil {
-		restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("encountered labelSelector as well as orLabelSelectors in restore spec, only one can be specified"))
+		restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "encountered labelSelector as well as orLabelSelectors in restore spec, only one can be specified")
	}

	// validate that exactly one of BackupName and ScheduleName have been specified
@@ -72,10 +72,7 @@ func (m *BackupItemOperationsMap) DeleteOperationsForBackup(backupName string) {
	// lock operations map
	m.opsLock.Lock()
	defer m.opsLock.Unlock()
-	if _, ok := m.opsMap[backupName]; ok {
-		delete(m.opsMap, backupName)
-	}
-	return
+	delete(m.opsMap, backupName)
}

// UploadProgressAndPutOperationsForBackup will upload the item operations for this backup to
@@ -72,10 +72,7 @@ func (m *RestoreItemOperationsMap) DeleteOperationsForRestore(restoreName string
	// lock operations map
	m.opsLock.Lock()
	defer m.opsLock.Unlock()
-	if _, ok := m.opsMap[restoreName]; ok {
-		delete(m.opsMap, restoreName)
-	}
-	return
+	delete(m.opsMap, restoreName)
}

// UploadProgressAndPutOperationsForRestore will upload the item operations for this restore to
@@ -161,7 +161,6 @@ func (l *logrusAdapter) StandardLogger(opts *hclog.StandardLoggerOptions) *log.L
// Updates the level. This should affect all sub-loggers as well. If an
// implementation cannot update the level on the fly, it should no-op.
func (l *logrusAdapter) SetLevel(_ hclog.Level) {
-	return
}

// ImpliedArgs returns With key/value pairs
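A bare `return` as the last statement of a function with no results changes nothing about control flow; staticcheck's "omit redundant control flow" check (S1023, I believe) reports it, which is also why the trailing `return` disappears from the two DeleteOperations helpers above. A hypothetical sketch:

package example

import "log"

// logIfVerbose is a made-up function; the commented return at the end is the
// kind of statement the check removes.
func logIfVerbose(verbose bool, msg string) {
	if verbose {
		log.Println(msg)
	}
	// return  // redundant: execution falls off the end anyway
}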
@@ -17,7 +17,6 @@ limitations under the License.
package common

import (
	//lint:ignore SA1019 grpc-go still depends on github.com/golang/protobuf/proto
	goproto "github.com/golang/protobuf/proto"
	"github.com/pkg/errors"
	"google.golang.org/grpc/codes"
@@ -156,7 +156,7 @@ func decodeBackupStatusLine(lastLine []byte) (backupStatusLine, error) {
// have a newline at the end of it, so this returns the substring between the
// last two newlines.
func getLastLine(b []byte) []byte {
-	if b == nil || len(b) == 0 {
+	if len(b) == 0 {
		return []byte("")
	}
	// subslice the byte array to ignore the newline at the end of the string
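The `b == nil` clause is subsumed by the length check, because len of a nil slice is defined to be zero; staticcheck reports this as S1009 ("should omit nil check; len() for nil slices is zero"). A hypothetical sketch:

package example

// firstByte is a made-up helper: the nil check adds nothing on top of the
// length check.
func firstByte(b []byte) (byte, bool) {
	// if b == nil || len(b) == 0 {  // flagged: nil check is redundant
	if len(b) == 0 {
		return 0, false
	}
	return b[0], true
}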
@@ -111,13 +111,13 @@ func deleteNodePorts(service *corev1api.Service) error {
		}
		if nodePortBool {
			nodePortInt := 0
-			switch nodePort.(type) {
+			switch nodePort := nodePort.(type) {
			case int32:
-				nodePortInt = int(nodePort.(int32))
+				nodePortInt = int(nodePort)
			case float64:
-				nodePortInt = int(nodePort.(float64))
+				nodePortInt = int(nodePort)
			case string:
-				nodePortInt, err = strconv.Atoi(nodePort.(string))
+				nodePortInt, err = strconv.Atoi(nodePort)
				if err != nil {
					return errors.WithStack(err)
				}
@@ -72,11 +72,11 @@ func (tw *TarWriter) Add(name string, obj interface{}) *TarWriter {
	var data []byte
	var err error

-	switch obj.(type) {
+	switch objType := obj.(type) {
	case runtime.Object:
-		data, err = encode.Encode(obj.(runtime.Object), "json")
+		data, err = encode.Encode(objType, "json")
	case []byte:
-		data = obj.([]byte)
+		data = objType
	default:
		data, err = json.Marshal(obj)
	}
@@ -232,7 +232,7 @@ func reportSnapshotStatus(manifest *snapshot.Manifest, policyTree *policy.Tree)
	if ds := manifest.RootEntry.DirSummary; ds != nil {
		for _, ent := range ds.FailedEntries {
			policy := policyTree.DefinedPolicy()
-			if !(policy != nil && *policy.ErrorHandlingPolicy.IgnoreUnknownTypes == true && strings.Contains(ent.Error, fs.ErrUnknown.Error())) {
+			if !(policy != nil && bool(*policy.ErrorHandlingPolicy.IgnoreUnknownTypes) && strings.Contains(ent.Error, fs.ErrUnknown.Error())) {
				errs = append(errs, fmt.Sprintf("Error when processing %v: %v", ent.EntryPath, ent.Error))
			}
		}
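Dropping `== true` here is not purely cosmetic: IgnoreUnknownTypes appears to be a pointer to a named boolean type defined by the snapshot library's policy package rather than a plain *bool, and Go refuses to mix a named boolean type with ordinary bool operands in a && chain, hence the explicit bool(...) conversion. A hypothetical illustration of that language rule (OptionalBool below is a stand-in, not the library's real type):

package example

// OptionalBool mimics a library-defined boolean type.
type OptionalBool bool

// shouldIgnore is a made-up function showing why the conversion is needed.
func shouldIgnore(enabled *OptionalBool, matched bool) bool {
	// return enabled != nil && *enabled && matched   // does not compile: OptionalBool mixed with bool
	return enabled != nil && bool(*enabled) && matched
}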
@@ -18,12 +18,12 @@ package boolptr

// IsSetToTrue returns true if and only if the bool pointer is non-nil and set to true.
func IsSetToTrue(b *bool) bool {
-	return b != nil && *b == true
+	return b != nil && *b
}

// IsSetToFalse returns true if and only if the bool pointer is non-nil and set to false.
func IsSetToFalse(b *bool) bool {
-	return b != nil && *b == false
+	return b != nil && !*b
}

// True returns a *bool whose underlying value is true.
@@ -138,7 +138,7 @@ func (ie *GlobalIncludesExcludes) ShouldInclude(typeName string) bool {
		return false
	}

-	if resource.Namespaced == false && boolptr.IsSetToFalse(ie.includeClusterResources) {
+	if !resource.Namespaced && boolptr.IsSetToFalse(ie.includeClusterResources) {
		ie.logger.Info("Skipping resource %s, because it's cluster-scoped, and IncludeClusterResources is set to false.", typeName)
		return false
	}
@@ -150,7 +150,7 @@ func (ie *GlobalIncludesExcludes) ShouldInclude(typeName string) bool {
	// may still be backed up if triggered by a custom action (e.g. PVC->PV).
	// If we're processing namespaces themselves, we will not skip here, they may be
	// filtered out later.
-	if typeName != kuberesource.Namespaces.String() && resource.Namespaced == false &&
+	if typeName != kuberesource.Namespaces.String() && !resource.Namespaced &&
		ie.includeClusterResources == nil && !ie.namespaceFilter.IncludeEverything() {
		ie.logger.Infof("Skipping resource %s, because it's cluster-scoped and only specific namespaces or namespace scope types are included in the backup.", typeName)
		return false