Bump up golang version to 1.18.8

Signed-off-by: Ming <mqiu@vmware.com>
pull/5558/head
Ming 2022-11-07 03:02:00 +00:00
parent 3ca580b7d9
commit bd646b14b9
42 changed files with 115 additions and 121 deletions


@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17
go-version: 1.18
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI


@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17
go-version: 1.18
id: go
# Look for a CLI that's made for this PR
- name: Fetch built CLI
@ -76,7 +76,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17
go-version: 1.18
id: go
- name: Check out the code
uses: actions/checkout@v2


@ -8,7 +8,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17
go-version: 1.18
id: go
- name: Check out the code
uses: actions/checkout@v2


@ -18,7 +18,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17
go-version: 1.18
id: go
- name: Check out code into the Go module directory


@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM --platform=$BUILDPLATFORM golang:1.17.13 as builder-env
FROM --platform=$BUILDPLATFORM golang:1.18.8 as builder-env
ARG GOPROXY
ARG PKG


@ -50,7 +50,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(
tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.17 as tilt-helper
FROM golang:1.18 as tilt-helper
# Support live reloading with Tilt
RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \


@ -14,7 +14,7 @@ https://velero.io/docs/v1.9/
https://velero.io/docs/v1.9/upgrade-to-1.9/
### All changes
* Bump up golang version to 1.18.8 (#5558, @qiuming-best)
* Enhance the restore priorities list to support specifying low-priority resources that should be restored last (#5529, @ywk253100)
* Fix v1.9.3 CSI VolumeSnapshot status duplicate issue. (#5518, @blackpiglet)
* Bump up the distroless image to the latest version (#5500, @ywk253100)

go.mod

@ -1,6 +1,6 @@
module github.com/vmware-tanzu/velero
go 1.17
go 1.18
require (
cloud.google.com/go/storage v1.10.0
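Raising the go directive is the module-level half of the upgrade: it opts the module into Go 1.18 language and module-resolution semantics. Re-tidying under the new directive is presumably also what produced the go.sum deletions in the next file. A hedged sketch of that workflow:

```sh
# Presumed workflow for this part of the change:
go mod edit -go=1.18   # bump the directive in go.mod
go mod tidy            # re-resolve dependencies; drops stale go.sum entries
```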

go.sum

@ -142,7 +142,6 @@ github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx2
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
@ -190,7 +189,6 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
@ -416,7 +414,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
@ -731,7 +728,6 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
@ -967,8 +963,6 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220614162138-6c1b26c55098 h1:PgOr27OhUx2IRqGJ2RxAWI4dJQ7bi9cSrB82uzFzfUA=
golang.org/x/sys v0.0.0-20220614162138-6c1b26c55098/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@ -1258,7 +1252,6 @@ k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I=
k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY=
k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ=
k8s.io/apiextensions-apiserver v0.24.0/go.mod h1:iuVe4aEpe6827lvO6yWQVxiPSpPoSKVjkq+MIdg84cM=
k8s.io/apiextensions-apiserver v0.24.1 h1:5yBh9+ueTq/kfnHQZa0MAo6uNcPrtxPMpNQgorBaKS0=
k8s.io/apiextensions-apiserver v0.24.1/go.mod h1:A6MHfaLDGfjOc/We2nM7uewD5Oa/FnEbZ6cD7g2ca4Q=
k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
@ -1268,7 +1261,6 @@ k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2U
k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I=
k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apiserver v0.19.12/go.mod h1:ldZAZTNIKfMMv/UUEhk6UyTXC0/34iRdNFHo+MJOPc4=
k8s.io/apiserver v0.24.0/go.mod h1:WFx2yiOMawnogNToVvUYT9nn1jaIkMKj41ZYCVycsBA=
k8s.io/apiserver v0.24.1/go.mod h1:dQWNMx15S8NqJMp0gpYfssyvhYnkilc1LpExd/dkLh0=
k8s.io/cli-runtime v0.22.2/go.mod h1:tkm2YeORFpbgQHEK/igqttvPTRIHFRz5kATlw53zlMI=
k8s.io/cli-runtime v0.24.0 h1:ot3Qf49T852uEyNApABO1UHHpFIckKK/NqpheZYN2gM=
@ -1281,10 +1273,8 @@ k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E=
k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8=
k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
k8s.io/code-generator v0.19.12/go.mod h1:ADrDvaUQWGn4a8lX0ONtzb7uFmDRQOMSYIMk1qWIAx8=
k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.19.12/go.mod h1:tpwExE0sY3A7CwtlxGL7SnQOdQfUlnFybT6GmAD+z/s=
k8s.io/component-base v0.24.0/go.mod h1:Dgazgon0i7KYUsS8krG8muGiMVtUZxG037l1MKyXgrA=
k8s.io/component-base v0.24.1 h1:APv6W/YmfOWZfo+XJ1mZwep/f7g7Tpwvdbo9CQLDuts=
k8s.io/component-base v0.24.1/go.mod h1:DW5vQGYVCog8WYpNob3PMmmsY8A3L9QZNg4j/dV3s38=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=


@ -25,8 +25,8 @@ run:
# from this option's value (see skip-dirs-use-default).
# "/" will be replaced by current OS file path separator to properly work
# on Windows.
#skip-dirs:
# - src/external_libs
skip-dirs:
- test/e2e/*
# - autogenerated_by_my_lib
# default is true. Enables skipping of directories:
@ -39,8 +39,8 @@ run:
# autogenerated files. If it's not please let us know.
# "/" will be replaced by current OS file path separator to properly work
# on Windows.
# skip-files:
# - ".*\\.my\\.go$"
skip-files:
- ".*_test.go$"
# - lib/bad.go
# by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
@ -117,7 +117,7 @@ linters-settings:
# minimal length of string constant, 3 by default
min-len: 3
# minimal occurrences count to trigger, 3 by default
min-occurrences: 3
min-occurrences: 5
gocritic:
# Which checks should be enabled; can't be combined with 'disabled-checks';
# See https://go-critic.github.io/overview#checks-overview
@ -320,7 +320,7 @@ linters:
fast: false
#issues:
issues:
# # List of regexps of issue texts to exclude, empty list by default.
# # But independently from this option we use default exclude patterns,
# # it can be disabled by `exclude-use-default: false`. To list all
@ -359,7 +359,7 @@ linters:
# it can be disabled by this option. To list all
# excluded by default patterns execute `golangci-lint run --help`.
# Default value for this option is true.
exclude-use-default: false
exclude-use-default: true
# The default value is false. If set to true exclude and exclude-rules
# regular expressions become case sensitive.


@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.17.13
FROM golang:1.18.8
ARG GOPROXY
@ -36,11 +36,11 @@ RUN wget --quiet https://github.com/kubernetes-sigs/kubebuilder/releases/downloa
chmod +x /usr/local/kubebuilder/bin/kubebuilder
# get controller-tools
RUN go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0
# get goimports (the revision is pinned so we don't indiscriminately update, but the particular commit
# is not important)
RUN go get golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d9295508e7d
RUN go install golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d9295508e7d
# get protoc compiler and golang plugin
WORKDIR /root
@ -49,7 +49,7 @@ RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v
unzip protoc-3.9.1-linux-x86_64.zip && \
mv bin/protoc /usr/bin/protoc && \
chmod +x /usr/bin/protoc
RUN go get github.com/golang/protobuf/protoc-gen-go@v1.0.0
RUN go install github.com/golang/protobuf/protoc-gen-go@v1.0.0
# get goreleaser
RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v0.120.8/goreleaser_Linux_x86_64.tar.gz && \
@ -58,7 +58,7 @@ RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v0.1
chmod +x /usr/bin/goreleaser
# get golangci-lint
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.27.0
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.45.0
# install kubectl
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
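The go get → go install swaps in this build image are forced by the toolchain bump: Go 1.18 removed go get's legacy ability to build and install executables, so tool binaries must now be installed with go install <package>@<version>, which builds the named version without touching the enclosing module's go.mod. (The Makefile's ginkgo target at the end of this diff gets the same treatment.) A sketch of the migration, reusing a command from the hunk above:

```sh
# Go 1.17 and earlier (an error under the Go 1.18 toolchain):
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0

# Go 1.18+: build and install the executable, leaving go.mod untouched:
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0
```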


@ -84,7 +84,7 @@ func (e *Extractor) readBackup(tarRdr *tar.Reader) (string, error) {
return "", err
}
target := filepath.Join(dir, header.Name)
target := filepath.Join(dir, header.Name) //nolint:gosec
switch header.Typeflag {
case tar.TypeDir:


@ -151,7 +151,7 @@ func sortResourcesByOrder(log logrus.FieldLogger, items []*kubernetesResource, o
}
// getOrderedResourcesForType gets order of resourceType from orderResources.
func getOrderedResourcesForType(log logrus.FieldLogger, orderedResources map[string]string, resourceType string) []string {
func getOrderedResourcesForType(orderedResources map[string]string, resourceType string) []string {
if orderedResources == nil {
return nil
}
@ -175,7 +175,7 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
clusterScoped = !resource.Namespaced
)
orders := getOrderedResourcesForType(log, r.backupRequest.Backup.Spec.OrderedResources, resource.Name)
orders := getOrderedResourcesForType(r.backupRequest.Backup.Spec.OrderedResources, resource.Name)
// Getting the preferred group version of this resource
preferredGVR, _, err := r.discoveryHelper.ResourceFor(gr.WithVersion(""))
if err != nil {
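This hunk starts a family of signature cleanups that recurs through the commit: parameters or results that no caller uses are dropped (the log parameters of getOrderedResourcesForType, persistBackup, putResults, and newRestoreLogger; the map result of getRequiredValues; the bool result of remapClaimRefNS). The //nolint:unparam suppressions added elsewhere in the diff suggest golangci-lint's unparam check drove these. A minimal, hypothetical illustration of the finding and the fix:

```go
package main

import "fmt"

// Before: unparam would report that log is never used on any code
// path, so every caller is paying for a dead parameter.
func renderBefore(name string, log func(string)) string {
	return fmt.Sprintf("resource: %s", name)
}

// After: the dead parameter is dropped and call sites get simpler.
func renderAfter(name string) string {
	return fmt.Sprintf("resource: %s", name)
}

func main() {
	fmt.Println(renderBefore("pods", nil))
	fmt.Println(renderAfter("pods"))
}
```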


@ -73,7 +73,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}
first := true
for _, backup := range backups.Items {
for i, backup := range backups.Items {
deleteRequestListOptions := pkgbackup.NewDeleteBackupRequestListOptions(backup.Name, string(backup.UID))
deleteRequestList, err := veleroClient.VeleroV1().DeleteBackupRequests(f.Namespace()).List(context.TODO(), deleteRequestListOptions)
if err != nil {
@ -102,7 +102,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}
}
s := output.DescribeBackup(context.Background(), kbClient, &backup, deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
s := output.DescribeBackup(context.Background(), kbClient, &backups.Items[i], deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
if first {
first = false
fmt.Print(s)
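The &backup → &backups.Items[i] rewrite here, repeated across many loops in this commit, presumably answers gosec's G601 "implicit memory aliasing in for loop" finding: a Go range loop (under the semantics in effect through Go 1.21) reuses one loop variable for every iteration, so a pointer taken to it is silently rewritten each time around. Indexing into the backing slice yields a stable address per element. In most of these call sites the pointer never escapes the iteration, so the old code happened to behave; the rewrite keeps the linter quiet and guards against later escapes. A self-contained sketch of the bug class:

```go
package main

import "fmt"

type Backup struct{ Name string }

func main() {
	items := []Backup{{"daily"}, {"weekly"}, {"monthly"}}

	// Risky (gosec G601): &item aliases the single loop variable,
	// which is overwritten on every iteration.
	var bad []*Backup
	for _, item := range items {
		bad = append(bad, &item)
	}
	fmt.Println(bad[0].Name, bad[1].Name, bad[2].Name) // monthly monthly monthly

	// Safe: index into the slice for a stable per-element address.
	var good []*Backup
	for i := range items {
		good = append(good, &items[i])
	}
	fmt.Println(good[0].Name, good[1].Name, good[2].Name) // daily weekly monthly
}
```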


@ -209,10 +209,10 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
return errors.WithStack(err)
}
for _, location := range locations.Items {
for i, location := range locations.Items {
if location.Spec.Default {
location.Spec.Default = false
if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil {
if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
return errors.WithStack(err)
}
break


@ -115,8 +115,8 @@ func Run(f client.Factory, o *cli.DeleteOptions) error {
}
// Create a backup-location deletion request for each
for _, location := range locations.Items {
if err := kbClient.Delete(context.Background(), &location, &kbclient.DeleteOptions{}); err != nil {
for i, location := range locations.Items {
if err := kbClient.Delete(context.Background(), &locations.Items[i], &kbclient.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(err))
continue
}
@ -162,8 +162,8 @@ func findAssociatedResticRepos(client kbclient.Client, bslName, ns string) (vele
func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []error {
var errs []error
for _, backup := range backups.Items {
if err := client.Delete(context.Background(), &backup, &kbclient.DeleteOptions{}); err != nil {
for i, backup := range backups.Items {
if err := client.Delete(context.Background(), &backups.Items[i], &kbclient.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(fmt.Errorf("delete backup %q associated with deleted BSL: %w", backup.Name, err)))
continue
}
@ -174,8 +174,8 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err
func deleteResticRepos(client kbclient.Client, repos velerov1api.ResticRepositoryList) []error {
var errs []error
for _, repo := range repos.Items {
if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil {
for i, repo := range repos.Items {
if err := client.Delete(context.Background(), &repos.Items[i], &kbclient.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(fmt.Errorf("delete Restic repository %q associated with deleted BSL: %w", repo.Name, err)))
continue
}


@ -120,7 +120,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
return errors.WithStack(err)
}
for _, location := range locations.Items {
for i, location := range locations.Items {
if !location.Spec.Default {
continue
}
@ -129,7 +129,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
break
}
location.Spec.Default = false
if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil {
if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
return errors.WithStack(err)
}
break


@ -175,7 +175,7 @@ func (s *resticServer) run() {
metricsMux := http.NewServeMux()
metricsMux.Handle("/metrics", promhttp.Handler())
s.logger.Infof("Starting metric server for restic at address [%s]", s.metricsAddress)
if err := http.ListenAndServe(s.metricsAddress, metricsMux); err != nil {
if err := http.ListenAndServe(s.metricsAddress, metricsMux); err != nil { //nolint:gosec
s.logger.Fatalf("Failed to start metric server for restic at [%s]: %v", s.metricsAddress, err)
}
}()
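The //nolint:gosec added to http.ListenAndServe here (and to the main server and profiler listeners later in the diff) presumably silences gosec's complaint that this helper serves with no timeouts configured, which leaves a listener open to slow-client exhaustion; for an in-cluster metrics endpoint the authors suppress rather than restructure. The shape the linter nudges toward is an explicit http.Server; a minimal sketch, where the address and timeout values are assumptions:

```go
package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n")) // stand-in for promhttp.Handler()
	})

	// An explicit server with a read-header timeout bounds how long
	// a client may dribble its request headers.
	srv := &http.Server{
		Addr:              ":8085",
		Handler:           mux,
		ReadHeaderTimeout: 3 * time.Second,
	}
	log.Fatal(srv.ListenAndServe())
}
```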
@ -291,7 +291,7 @@ func (s *resticServer) markInProgressPVBsFailed(client ctrlclient.Client) {
log.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups")
return
}
for _, pvb := range pvbs.Items {
for i, pvb := range pvbs.Items {
if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseInProgress {
log.Debugf("the status of podvolumebackup %q is %q, skip", pvb.GetName(), pvb.Status.Phase)
continue
@ -304,7 +304,7 @@ func (s *resticServer) markInProgressPVBsFailed(client ctrlclient.Client) {
pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed
pvb.Status.Message = fmt.Sprintf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase)
pvb.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil {
if err := client.Patch(s.ctx, &pvbs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName())
continue
}
@ -318,7 +318,7 @@ func (s *resticServer) markInProgressPVRsFailed(client ctrlclient.Client) {
log.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores")
return
}
for _, pvr := range pvrs.Items {
for i, pvr := range pvrs.Items {
if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress {
log.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase)
continue
@ -342,7 +342,7 @@ func (s *resticServer) markInProgressPVRsFailed(client ctrlclient.Client) {
pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed
pvr.Status.Message = fmt.Sprintf("get a podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, pvr.Status.Phase)
pvr.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil {
if err := client.Patch(s.ctx, &pvrs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName())
continue
}


@ -68,14 +68,14 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}
first := true
for _, restore := range restores.Items {
for i, restore := range restores.Items {
opts := restic.NewPodVolumeRestoreListOptions(restore.Name)
podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(f.Namespace()).List(context.TODO(), opts)
if err != nil {
fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err)
}
s := output.DescribeRestore(context.Background(), kbClient, &restore, podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
s := output.DescribeRestore(context.Background(), kbClient, &restores.Items[i], podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
if first {
first = false
fmt.Print(s)


@ -53,8 +53,8 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}
first := true
for _, schedule := range schedules.Items {
s := output.DescribeSchedule(&schedule)
for i := range schedules.Items {
s := output.DescribeSchedule(&schedules.Items[i])
if first {
first = false
fmt.Print(s)


@ -594,7 +594,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
metricsMux := http.NewServeMux()
metricsMux.Handle("/metrics", promhttp.Handler())
s.logger.Infof("Starting metric server at address [%s]", s.metricsAddress)
if err := http.ListenAndServe(s.metricsAddress, metricsMux); err != nil {
if err := http.ListenAndServe(s.metricsAddress, metricsMux); err != nil { //nolint:gosec
s.logger.Fatalf("Failed to start metric server at [%s]: %v", s.metricsAddress, err)
}
}()
@ -611,7 +611,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
csiVSLister, csiVSCLister, csiVSClassLister := s.getCSISnapshotListers()
backupSyncControllerRunInfo := func() controllerRunInfo {
backupSyncControllerRunInfo := func() controllerRunInfo { //nolint:typecheck
backupSyncContoller := controller.NewBackupSyncController(
s.veleroClient.VeleroV1(),
s.mgr.GetClient(),
@ -636,7 +636,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
backupTracker := controller.NewBackupTracker()
backupControllerRunInfo := func() controllerRunInfo {
backupControllerRunInfo := func() controllerRunInfo { //nolint:typecheck
backupper, err := backup.NewKubernetesBackupper(
s.veleroClient.VeleroV1(),
s.discoveryHelper,
@ -680,7 +680,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
}
gcControllerRunInfo := func() controllerRunInfo {
gcControllerRunInfo := func() controllerRunInfo { //nolint:typecheck
gcController := controller.NewGCController(
s.logger,
s.sharedInformerFactory.Velero().V1().Backups(),
@ -696,7 +696,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}
}
restoreControllerRunInfo := func() controllerRunInfo {
restoreControllerRunInfo := func() controllerRunInfo { //nolint:typecheck
restorer, err := restore.NewKubernetesRestorer(
s.veleroClient.VeleroV1(),
s.discoveryHelper,
@ -906,7 +906,7 @@ func (s *server) runProfiler() {
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
if err := http.ListenAndServe(s.config.profilerAddress, mux); err != nil {
if err := http.ListenAndServe(s.config.profilerAddress, mux); err != nil { //nolint:gosec
s.logger.WithError(errors.WithStack(err)).Error("error running profiler http server")
}
}
@ -964,7 +964,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client,
return
}
for _, backup := range backups.Items {
for i, backup := range backups.Items {
if backup.Status.Phase != velerov1api.BackupPhaseInProgress {
log.Debugf("the status of backup %q is %q, skip", backup.GetName(), backup.Status.Phase)
continue
@ -973,7 +973,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client,
updated.Status.Phase = velerov1api.BackupPhaseFailed
updated.Status.FailureReason = fmt.Sprintf("get a backup with status %q during the server starting, mark it as %q", velerov1api.BackupPhaseInProgress, updated.Status.Phase)
updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backup)); err != nil {
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backups.Items[i])); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch backup %q", backup.GetName())
continue
}
@ -987,7 +987,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client,
log.WithError(errors.WithStack(err)).Error("failed to list restores")
return
}
for _, restore := range restores.Items {
for i, restore := range restores.Items {
if restore.Status.Phase != velerov1api.RestorePhaseInProgress {
log.Debugf("the status of restore %q is %q, skip", restore.GetName(), restore.Status.Phase)
continue
@ -996,7 +996,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client,
updated.Status.Phase = velerov1api.RestorePhaseFailed
updated.Status.FailureReason = fmt.Sprintf("get a restore with status %q during the server starting, mark it as %q", velerov1api.RestorePhaseInProgress, updated.Status.Phase)
updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restore)); err != nil {
if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restores.Items[i])); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch restore %q", restore.GetName())
continue
}


@ -104,7 +104,7 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin
httpClient := new(http.Client)
httpClient.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: insecureSkipTLSVerify,
InsecureSkipVerify: insecureSkipTLSVerify, //nolint:gosec
RootCAs: caPool,
},
IdleConnTimeout: timeout,
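gosec also flags tls.Config values whose InsecureSkipVerify is not the constant false (presumably its TLS check, G402). Here verification is skipped only when the user explicitly asks for it on the command line, so the finding is suppressed rather than "fixed". A self-contained sketch of the same pattern:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"time"
)

// newDownloadClient mirrors the hunk above: TLS verification is only
// disabled on an explicit, user-controlled opt-out.
func newDownloadClient(insecureSkipTLSVerify bool, caPool *x509.CertPool, timeout time.Duration) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: insecureSkipTLSVerify, //nolint:gosec // deliberate opt-out
				RootCAs:            caPool,
			},
			IdleConnTimeout: timeout,
		},
	}
}

func main() {
	_ = newDownloadClient(false, nil, 30*time.Second)
}
```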


@ -126,7 +126,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
}
d.Printf("\tIncluded:\t%s\n", s)
if len(spec.ExcludedNamespaces) == 0 {
s = "<none>"
s = emptyDisplay
} else {
s = strings.Join(spec.ExcludedNamespaces, ", ")
}
@ -141,7 +141,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
}
d.Printf("\tIncluded:\t%s\n", s)
if len(spec.ExcludedResources) == 0 {
s = "<none>"
s = emptyDisplay
} else {
s = strings.Join(spec.ExcludedResources, ", ")
}
@ -150,7 +150,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
d.Printf("\tCluster-scoped:\t%s\n", BoolPointerString(spec.IncludeClusterResources, "excluded", "included", "auto"))
d.Println()
s = "<none>"
s = emptyDisplay
if spec.LabelSelector != nil {
s = metav1.FormatLabelSelector(spec.LabelSelector)
}
@ -167,7 +167,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
d.Println()
if len(spec.Hooks.Resources) == 0 {
d.Printf("Hooks:\t<none>\n")
d.Printf("Hooks:\t" + emptyDisplay + "\n")
} else {
d.Printf("Hooks:\n")
d.Printf("\tResources:\n")
@ -182,7 +182,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
}
d.Printf("\t\t\t\tIncluded:\t%s\n", s)
if len(spec.ExcludedNamespaces) == 0 {
s = "<none>"
s = emptyDisplay
} else {
s = strings.Join(spec.ExcludedNamespaces, ", ")
}
@ -197,14 +197,14 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
}
d.Printf("\t\t\t\tIncluded:\t%s\n", s)
if len(spec.ExcludedResources) == 0 {
s = "<none>"
s = emptyDisplay
} else {
s = strings.Join(spec.ExcludedResources, ", ")
}
d.Printf("\t\t\t\tExcluded:\t%s\n", s)
d.Println()
s = "<none>"
s = emptyDisplay
if backupResourceHookSpec.LabelSelector != nil {
s = metav1.FormatLabelSelector(backupResourceHookSpec.LabelSelector)
}


@ -34,7 +34,10 @@ import (
"github.com/vmware-tanzu/velero/pkg/util/encode"
)
const downloadRequestTimeout = 30 * time.Second
const (
downloadRequestTimeout = 30 * time.Second
emptyDisplay = "<none>"
)
// BindFlags defines a set of output-specific flags within the provided
// FlagSet.
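The new emptyDisplay constant replaces a "<none>" string literal that was repeated throughout the backup and restore describers; goconst (configured earlier in this diff with min-len 3 and, after this commit, min-occurrences 5) presumably flagged it. An illustrative miniature of the refactor:

```go
package main

import (
	"fmt"
	"strings"
)

// One named constant instead of many scattered "<none>" literals.
const emptyDisplay = "<none>"

func formatList(items []string) string {
	if len(items) == 0 {
		return emptyDisplay
	}
	return strings.Join(items, ", ")
}

func main() {
	fmt.Println(formatList(nil))                      // <none>
	fmt.Println(formatList([]string{"pods", "pvcs"})) // pods, pvcs
}
```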


@ -107,7 +107,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
}
d.Printf("\tIncluded:\t%s\n", s)
if len(restore.Spec.ExcludedNamespaces) == 0 {
s = "<none>"
s = emptyDisplay
} else {
s = strings.Join(restore.Spec.ExcludedNamespaces, ", ")
}
@ -122,7 +122,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
}
d.Printf("\tIncluded:\t%s\n", s)
if len(restore.Spec.ExcludedResources) == 0 {
s = "<none>"
s = emptyDisplay
} else {
s = strings.Join(restore.Spec.ExcludedResources, ", ")
}
@ -134,7 +134,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
d.DescribeMap("Namespace mappings", restore.Spec.NamespaceMapping)
d.Println()
s = "<none>"
s = emptyDisplay
if restore.Spec.LabelSelector != nil {
s = metav1.FormatLabelSelector(restore.Spec.LabelSelector)
}
@ -149,7 +149,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
}
d.Println()
s = "<none>"
s = emptyDisplay
if restore.Spec.ExistingResourcePolicy != "" {
s = string(restore.Spec.ExistingResourcePolicy)
}
@ -194,7 +194,7 @@ func describeRestoreResult(d *Describer, name string, result pkgrestore.Result)
d.DescribeSlice(1, "Velero", result.Velero)
d.DescribeSlice(1, "Cluster", result.Cluster)
if len(result.Namespaces) == 0 {
d.Printf("\tNamespaces: <none>\n")
d.Printf("\tNamespaces:" + emptyDisplay + "\n")
} else {
d.Printf("\tNamespaces:\n")
for ns, warnings := range result.Namespaces {


@ -720,7 +720,7 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error {
return err
}
if errs := persistBackup(backup, backupFile, logFile, backupStore, c.logger.WithField(Backup, kubeutil.NamespaceAndName(backup)), volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses); len(errs) > 0 {
if errs := persistBackup(backup, backupFile, logFile, backupStore, volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses); len(errs) > 0 {
fatalErrs = append(fatalErrs, errs...)
}
@ -764,7 +764,6 @@ func recordBackupMetrics(log logrus.FieldLogger, backup *velerov1api.Backup, bac
func persistBackup(backup *pkgbackup.Request,
backupContents, backupLog *os.File,
backupStore persistence.BackupStore,
log logrus.FieldLogger,
csiVolumeSnapshots []snapshotv1api.VolumeSnapshot,
csiVolumeSnapshotContents []*snapshotv1api.VolumeSnapshotContent,
csiVolumesnapshotClasses []*snapshotv1api.VolumeSnapshotClass,


@ -323,11 +323,11 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
} else {
for _, restore := range restoreList.Items {
for i, restore := range restoreList.Items {
if restore.Spec.BackupName != backup.Name {
continue
}
restoreLog := log.WithField("restore", kube.NamespaceAndName(&restore))
restoreLog := log.WithField("restore", kube.NamespaceAndName(&restoreList.Items[i]))
restoreLog.Info("Deleting restore log/results from backup storage")
if err := backupStore.DeleteRestore(restore.Name); err != nil {
@ -337,8 +337,8 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
restoreLog.Info("Deleting restore referencing backup")
if err := r.Delete(ctx, &restore); err != nil {
errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restore)).Error())
if err := r.Delete(ctx, &restoreList.Items[i]); err != nil {
errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restoreList.Items[i])).Error())
}
}
}
@ -421,11 +421,11 @@ func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Co
return []error{errors.Wrap(err, "error listing existing DeleteBackupRequests for backup")}
}
var errs []error
for _, dbr := range dbrList.Items {
for i, dbr := range dbrList.Items {
if dbr.Name == req.Name {
continue
}
if err := r.Delete(ctx, &dbr); err != nil {
if err := r.Delete(ctx, &dbrList.Items[i]); err != nil {
errs = append(errs, errors.WithStack(err))
} else {
log.Infof("deletion request '%s' removed.", dbr.Name)


@ -149,7 +149,7 @@ func (c *backupSyncController) run() {
pluginManager := c.newPluginManager(c.logger)
defer pluginManager.CleanupClients()
for _, location := range locations {
for i, location := range locations {
log := c.logger.WithField("backupLocation", location.Name)
syncPeriod := c.defaultBackupSyncPeriod
@ -177,7 +177,7 @@ func (c *backupSyncController) run() {
log.Debug("Checking backup location for backups to sync into cluster")
backupStore, err := c.backupStoreGetter.Get(&location, pluginManager, log)
backupStore, err := c.backupStoreGetter.Get(&locations[i], pluginManager, log)
if err != nil {
log.WithError(err).Error("Error getting backup store for this location")
continue
@ -337,7 +337,7 @@ func (c *backupSyncController) run() {
// update the location's last-synced time field
statusPatch := client.MergeFrom(location.DeepCopy())
location.Status.LastSyncedTime = &metav1.Time{Time: time.Now().UTC()}
if err := c.kbClient.Patch(context.Background(), &location, statusPatch); err != nil {
if err := c.kbClient.Patch(context.Background(), &locations[i], statusPatch); err != nil {
log.WithError(errors.WithStack(err)).Error("Error patching backup location's last-synced time")
continue
}


@ -310,7 +310,7 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve
// Write a done file with name=<restore-uid> into the just-created .velero dir
// within the volume. The velero restic init container on the pod is waiting
// for this file to exist in each restored volume before completing.
if err := ioutil.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil {
if err := ioutil.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil { //nolint:gosec
return errors.Wrap(err, "error writing done file")
}


@ -442,7 +442,7 @@ func (c *restoreController) fetchBackupInfo(backupName string, pluginManager cli
func (c *restoreController) runValidatedRestore(restore *api.Restore, info backupInfo) error {
// instantiate the per-restore logger that will output both to a temp file
// (for upload to object storage) and to stdout.
restoreLog, err := newRestoreLogger(restore, c.logger, c.restoreLogLevel, c.logFormat)
restoreLog, err := newRestoreLogger(restore, c.restoreLogLevel, c.logFormat)
if err != nil {
return err
}
@ -556,14 +556,14 @@ func (c *restoreController) runValidatedRestore(restore *api.Restore, info backu
"errors": restoreErrors,
}
if err := putResults(restore, m, info.backupStore, c.logger); err != nil {
if err := putResults(restore, m, info.backupStore); err != nil {
c.logger.WithError(err).Error("Error uploading restore results to backup storage")
}
return nil
}
func putResults(restore *api.Restore, results map[string]pkgrestore.Result, backupStore persistence.BackupStore, log logrus.FieldLogger) error {
func putResults(restore *api.Restore, results map[string]pkgrestore.Result, backupStore persistence.BackupStore) error {
buf := new(bytes.Buffer)
gzw := gzip.NewWriter(buf)
defer gzw.Close()
@ -648,7 +648,7 @@ type restoreLogger struct {
w *gzip.Writer
}
func newRestoreLogger(restore *api.Restore, baseLogger logrus.FieldLogger, logLevel logrus.Level, logFormat logging.Format) (*restoreLogger, error) {
func newRestoreLogger(restore *api.Restore, logLevel logrus.Level, logFormat logging.Format) (*restoreLogger, error) {
file, err := ioutil.TempFile("", "")
if err != nil {
return nil, errors.Wrap(err, "error creating temp file")


@ -76,7 +76,7 @@ func (b *clientBuilder) clientConfig() *hcplugin.ClientConfig {
string(framework.PluginKindItemSnapshotter): framework.NewItemSnapshotterPlugin(framework.ClientLogger(b.clientLogger)),
},
Logger: b.pluginLogger,
Cmd: exec.Command(b.commandName, b.commandArgs...),
Cmd: exec.Command(b.commandName, b.commandArgs...), //nolint
}
}


@ -54,7 +54,7 @@ func newGRPCErrorWithCode(err error, code codes.Code, details ...goproto.Message
// newGRPCError is a convenience function for creating a new gRPC error
// with code = codes.Unknown
func newGRPCError(err error, details ...goproto.Message) error {
func newGRPCError(err error, details ...goproto.Message) error { //nolint:unparam
return newGRPCErrorWithCode(err, codes.Unknown, details...)
}


@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
//nolint:unparam
package restic
const (
// AWS specific environment variable
awsProfileEnvVar = "AWS_PROFILE"
awsProfileKey = "profile"
awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" //nolint:gosec
)
// getS3ResticEnvVars gets the environment variables that restic
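The //nolint:gosec on this constant (and on the GCP one below) works around a false positive: gosec's hardcoded-credentials heuristic (G101) pattern-matches identifiers and string literals containing words like "credentials", so the names of environment variables such as AWS_SHARED_CREDENTIALS_FILE trip it even though no secret value is present. In miniature:

```go
// The value is an environment-variable *name*, not a credential,
// but the word "credentials" matches gosec's G101 pattern.
const awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" //nolint:gosec
```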


@ -81,7 +81,7 @@ func getStorageAccountKey(config map[string]string) (string, *azure.Environment,
}
// we need config["resourceGroup"], config["storageAccount"]
if _, err := getRequiredValues(mapLookup(config), resourceGroupConfigKey, storageAccountConfigKey); err != nil {
if err := getRequiredValues(mapLookup(config), resourceGroupConfigKey, storageAccountConfigKey); err != nil {
return "", env, errors.Wrap(err, "unable to get all required config values")
}
@ -140,7 +140,7 @@ func getAzureResticEnvVars(config map[string]string) (map[string]string, error)
return nil, err
}
if _, err := getRequiredValues(mapLookup(config), storageAccountConfigKey); err != nil {
if err := getRequiredValues(mapLookup(config), storageAccountConfigKey); err != nil {
return nil, errors.Wrap(err, "unable to get all required config values")
}
@ -190,7 +190,7 @@ func parseAzureEnvironment(cloudName string) (*azure.Environment, error) {
return &env, errors.WithStack(err)
}
func getRequiredValues(getValue func(string) string, keys ...string) (map[string]string, error) {
func getRequiredValues(getValue func(string) string, keys ...string) error {
missing := []string{}
results := map[string]string{}
@ -203,8 +203,8 @@ func getRequiredValues(getValue func(string) string, keys ...string) (map[string
}
if len(missing) > 0 {
return nil, errors.Errorf("the following keys do not have values: %s", strings.Join(missing, ", "))
return errors.Errorf("the following keys do not have values: %s", strings.Join(missing, ", "))
}
return results, nil
return nil
}


@ -77,7 +77,7 @@ func (c *Command) String() string {
// Cmd returns an exec.Cmd for the command.
func (c *Command) Cmd() *exec.Cmd {
parts := c.StringSlice()
cmd := exec.Command(parts[0], parts[1:]...)
cmd := exec.Command(parts[0], parts[1:]...) //nolint:gosec
cmd.Dir = c.Dir
if len(c.Env) > 0 {


@ -18,12 +18,12 @@ package restic
const (
// GCP specific environment variable
gcpCredentialsFileEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
gcpCredentialsFileEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" //nolint:gosec
)
// getGCPResticEnvVars gets the environment variables that restic relies
// on based on info in the provided object storage location config map.
func getGCPResticEnvVars(config map[string]string) (map[string]string, error) {
func getGCPResticEnvVars(config map[string]string) (map[string]string, error) { //nolint:unparam
result := make(map[string]string)
if credentialsFile, ok := config[credentialsFileKey]; ok {


@ -34,4 +34,4 @@ func (exec FakeResticBackupExec) RunBackup(cmd *restic.Command, log logrus.Field
// GetSnapshotID gets the Restic snapshot ID.
func (exec FakeResticBackupExec) GetSnapshotID(cmd *restic.Command) (string, error) {
return "", nil
}
}


@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
//nolint:gosec
package restic
import (


@ -70,9 +70,9 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA
return velero.NewRestoreItemActionExecuteOutput(input.Item), nil
}
newWebhooks := make([]interface{}, 0)
for i, entry := range webhooks {
for i := range webhooks {
logger2 := logger.WithField("index", i)
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&entry)
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhooks[i])
if err != nil {
logger2.Errorf("failed to convert the webhook entry, error: %v, it will be dropped", err)
continue


@ -1029,7 +1029,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
// Check to see if the claimRef.namespace field needs to be remapped,
// and do so if necessary.
_, err = remapClaimRefNS(ctx, obj)
err = remapClaimRefNS(ctx, obj)
if err != nil {
errs.Add(namespace, err)
return warnings, errs
@ -1119,7 +1119,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
ctx.log.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.")
// Check to see if the claimRef.namespace field needs to be remapped, and do so if necessary.
_, err = remapClaimRefNS(ctx, obj)
err = remapClaimRefNS(ctx, obj)
if err != nil {
errs.Add(namespace, err)
return warnings, errs
@ -1529,37 +1529,37 @@ func shouldRenamePV(ctx *restoreContext, obj *unstructured.Unstructured, client
// remapClaimRefNS remaps a PersistentVolume's claimRef.Namespace based on a
// restore's NamespaceMappings, if necessary. Returns true if the namespace was
// remapped, false if it was not required.
func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) (bool, error) {
func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) error {
if len(ctx.restore.Spec.NamespaceMapping) == 0 {
ctx.log.Debug("Persistent volume does not need to have the claimRef.namespace remapped because restore is not remapping any namespaces")
return false, nil
return nil
}
// Conversion to the real type here is more readable than all the error checking
// involved with reading each field individually.
pv := new(v1.PersistentVolume)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, pv); err != nil {
return false, errors.Wrapf(err, "error converting persistent volume to structured")
return errors.Wrapf(err, "error converting persistent volume to structured")
}
if pv.Spec.ClaimRef == nil {
ctx.log.Debugf("Persistent volume does not need to have the claimRef.namespace remapped because it's not claimed")
return false, nil
return nil
}
targetNS, ok := ctx.restore.Spec.NamespaceMapping[pv.Spec.ClaimRef.Namespace]
if !ok {
ctx.log.Debugf("Persistent volume does not need to have the claimRef.namespace remapped because it's not claimed by a PVC in a namespace that's being remapped")
return false, nil
return nil
}
err := unstructured.SetNestedField(obj.Object, targetNS, "spec", "claimRef", "namespace")
if err != nil {
return false, err
return err
}
ctx.log.Debug("Persistent volume's namespace was updated")
return true, nil
return nil
}
// restorePodVolumeBackups restores the PodVolumeBackups for the given restored pod


@ -123,9 +123,9 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core
func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) (string, error) {
var volume *corev1api.Volume
for _, item := range pod.Spec.Volumes {
for i, item := range pod.Spec.Volumes {
if item.Name == volumeName {
volume = &item
volume = &pod.Spec.Volumes[i]
break
}
}


@ -88,7 +88,7 @@ DEBUG_E2E_TEST ?= false
.PHONY:ginkgo
ginkgo: # Make sure ginkgo is in $GOPATH/bin
go get github.com/onsi/ginkgo/ginkgo
go install github.com/onsi/ginkgo/ginkgo@v1.16.5
.PHONY: run
run: ginkgo