Modify golangci.yaml file. Resolve found lint issues. (#6008)

Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
Co-authored-by: Xun Jiang <blackpiglet@gmail.com>
pull/6028/head
Xun Jiang/Bruce Jiang 2023-03-24 12:15:08 +08:00 committed by GitHub
parent 82358666c8
commit 15d44724e7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 103 additions and 243 deletions

View File

@ -96,9 +96,6 @@ else
GIT_TREE_STATE ?= clean
endif
# The default linters used by lint and local-lint
LINTERS ?= "gosec,goconst,gofmt,goimports,unparam"
###
### These variables should not need tweaking.
###
@ -221,22 +218,12 @@ endif
lint:
ifneq ($(SKIP_TESTS), 1)
@$(MAKE) shell CMD="-c 'hack/lint.sh $(LINTERS)'"
@$(MAKE) shell CMD="-c 'hack/lint.sh'"
endif
local-lint:
ifneq ($(SKIP_TESTS), 1)
@hack/lint.sh $(LINTERS)
endif
lint-all:
ifneq ($(SKIP_TESTS), 1)
@$(MAKE) shell CMD="-c 'hack/lint.sh $(LINTERS) true'"
endif
local-lint-all:
ifneq ($(SKIP_TESTS), 1)
@hack/lint.sh $(LINTERS) true
@hack/lint.sh
endif
update:

View File

@ -0,0 +1 @@
Modify golangci.yaml file. Resolve found lint issues.

View File

@ -56,7 +56,7 @@ spec:
nullable: true
type: array
existingResourcePolicy:
description: ExistingResourcePolicy specifies the restore behaviour
description: ExistingResourcePolicy specifies the restore behavior
for the kubernetes resource to be restored
nullable: true
type: string

File diff suppressed because one or more lines are too long

View File

@ -7,18 +7,11 @@ run:
concurrency: 4
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 5m
timeout: 20m
# exit code when at least one issue was found, default is 1
issues-exit-code: 1
# include test files or not, default is true
tests: true
# list of build tags, all linters use it. Default is empty list.
#build-tags:
# - mytag
# which dirs to skip: issues from them won't be reported;
# can use regexp here: generated.*, regexp is applied on full path;
# default value is empty list, but default dirs are skipped independently
@ -294,77 +287,23 @@ linters-settings:
# Allow leading comments to be separated with empty lines
allow-separated-leading-comment: false
# The custom section can be used to define linter plugins to be loaded at runtime. See README doc
# for more info.
# custom:
# Each custom linter should have a unique name.
# example:
# The path to the plugin *.so. Can be absolute or local. Required for each custom linter
# path: /path/to/example.so
# The description of the linter. Optional, just for documentation purposes.
# description: This is an example usage of a plugin linter.
# Intended to point to the repo location of the linter. Optional, just for documentation purposes.
# original-url: github.com/golangci/example-linter
linters:
# enable:
# - megacheck
# - govet
# disable:
# - maligned
# - prealloc
disable-all: true
presets:
# - bugs
# - unused
enable:
- bodyclose
- goconst
- gofmt
- goheader
- goimports
- gosec
- misspell
- typecheck
- unparam
- unused
fast: false
issues:
# # List of regexps of issue texts to exclude, empty list by default.
# # But independently from this option we use default exclude patterns,
# # it can be disabled by `exclude-use-default: false`. To list all
# # excluded by default patterns execute `golangci-lint run --help`
# exclude:
# - abcdef
#
# # Excluding configuration per-path, per-linter, per-text and per-source
# exclude-rules:
# # Exclude some linters from running on tests files.
# - path: _test\.go
# linters:
# - gocyclo
# - errcheck
# - dupl
# - gosec
#
# # Exclude known linters from partially hard-vendored code,
# # which is impossible to exclude via "nolint" comments.
# - path: internal/hmac/
# text: "weak cryptographic primitive"
# linters:
# - gosec
#
# # Exclude some staticcheck messages
# - linters:
# - staticcheck
# text: "SA9003:"
#
# # Exclude lll issues for long lines with go:generate
# - linters:
# - lll
# source: "^//go:generate "
# Independently from option `exclude` we use default exclude patterns,
# it can be disabled by this option. To list all
# excluded by default patterns execute `golangci-lint run --help`.
# Default value for this option is true.
exclude-use-default: true
# The default value is false. If set to true exclude and exclude-rules
# regular expressions become case sensitive.
exclude-case-sensitive: false
# The list of ids of default excludes to include or disable. By default it's empty.
include:
- EXC0002 # disable excluding of issues about comments from golint
@ -375,19 +314,8 @@ issues:
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
# Show only new issues: if there are unstaged changes or untracked files,
# only those changes are analyzed, else only changes in HEAD~ are analyzed.
# It's a super-useful option for integration of golangci-lint into existing
# large codebase. It's not practical to fix all existing issues at the moment
# of integration: much better don't allow issues in new code.
# Default is false.
new: false
# Show only new issues created after git revision `REV`
new-from-rev: REV
# Show only new issues created in git patch with set file path.
new-from-patch: path/to/patch/file
# new-from-rev: origin/main
severity:
# Default value is empty string.

View File

@ -14,23 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
LINTERS=${1:-"gosec,goconst,gofmt,goimports,unparam"}
ALL=${2:-false}
HACK_DIR=$(dirname "${BASH_SOURCE[0]}")
# Printing out cache status
golangci-lint cache status
if [[ $ALL == true ]] ; then
action=""
else
action="-n"
fi
# Enable GL_DEBUG line below for debug messages for golangci-lint
# export GL_DEBUG=loader,gocritic,env
CMD="golangci-lint run -E ${LINTERS} $action -c $HACK_DIR/../golangci.yaml"
CMD="golangci-lint run -c $HACK_DIR/../golangci.yaml"
echo "Running $CMD"
eval $CMD

View File

@ -24,6 +24,7 @@ import (
// This regex should match both our GA format (example: v1.4.3) and pre-release formats (v1.2.4-beta.2, v1.5.0-rc.1)
// The following sub-capture groups are defined:
//
// major
// minor
// patch

View File

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -108,7 +108,7 @@ type RestoreSpec struct {
// +optional
Hooks RestoreHooks `json:"hooks,omitempty"`
// ExistingResourcePolicy specifies the restore behaviour for the kubernetes resource to be restored
// ExistingResourcePolicy specifies the restore behavior for the kubernetes resource to be restored
// +optional
// +nullable
ExistingResourcePolicy PolicyType `json:"existingResourcePolicy,omitempty"`

View File

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -51,7 +51,7 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
# Delete backup storage locations named "backup-location-1" and "backup-location-2".
velero backup-location delete backup-location-1 backup-location-2
# Delete all backup storage locations labelled with "foo=bar".
# Delete all backup storage locations labeled with "foo=bar".
velero backup-location delete --selector foo=bar
# Delete all backup storage locations.

View File

@ -173,7 +173,12 @@ func (s *nodeAgentServer) run() {
metricsMux := http.NewServeMux()
metricsMux.Handle("/metrics", promhttp.Handler())
s.logger.Infof("Starting metric server for node agent at address [%s]", s.metricsAddress)
if err := http.ListenAndServe(s.metricsAddress, metricsMux); err != nil {
server := &http.Server{
Addr: s.metricsAddress,
Handler: metricsMux,
ReadHeaderTimeout: 3 * time.Second,
}
if err := server.ListenAndServe(); err != nil {
s.logger.Fatalf("Failed to start metric server for node agent at [%s]: %v", s.metricsAddress, err)
}
}()

View File

@ -48,7 +48,7 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
# Delete restores named "restore-1" and "restore-2".
velero restore delete restore-1 restore-2
# Delete all restores labelled with "foo=bar".
# Delete all restores labeled with "foo=bar".
velero restore delete --selector foo=bar
# Delete all restores.

View File

@ -48,7 +48,7 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
# Delete schedules named "schedule-1" and "schedule-2".
velero schedule delete schedule-1 schedule-2
# Delete all schedules labelled with "foo=bar".
# Delete all schedules labeled with "foo=bar".
velero schedule delete --selector foo=bar
# Delete all schedules.

View File

@ -45,7 +45,7 @@ func NewPauseCommand(f client.Factory, use string) *cobra.Command {
# Pause schedules named "schedule-1" and "schedule-2".
velero schedule pause schedule-1 schedule-2
# Pause all schedules labelled with "foo=bar".
# Pause all schedules labeled with "foo=bar".
velero schedule pause --selector foo=bar
# Pause all schedules.

View File

@ -37,7 +37,7 @@ func NewUnpauseCommand(f client.Factory, use string) *cobra.Command {
# Unpause schedules named "schedule-1" and "schedule-2".
velero schedule unpause schedule-1 schedule-2
# Unpause all schedules labelled with "foo=bar".
# Unpause all schedules labeled with "foo=bar".
velero schedule unpause --selector foo=bar
# Unpause all schedules.

View File

@ -97,7 +97,6 @@ const (
defaultProfilerAddress = "localhost:6060"
defaultControllerWorkers = 1
// the default TTL for a backup
defaultBackupTTL = 30 * 24 * time.Hour
@ -136,11 +135,6 @@ type serverConfig struct {
maxConcurrentK8SConnections int
}
type controllerRunInfo struct {
controller controller.Interface
numWorkers int
}
func NewCommand(f client.Factory) *cobra.Command {
var (
volumeSnapshotLocations = flag.NewMap().WithKeyValueDelimiter(':')
@ -307,7 +301,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s
}
// cancelFunc is not deferred here because if it was, then ctx would immediately
// be cancelled once this function exited, making it useless to any informers using later.
// be canceled once this function exited, making it useless to any informers using later.
// That, in turn, causes the velero server to halt when the first informer tries to use it.
// Therefore, we must explicitly call it on the error paths in this function.
ctx, cancelFunc := context.WithCancel(context.Background())
@ -625,7 +619,12 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
metricsMux := http.NewServeMux()
metricsMux.Handle("/metrics", promhttp.Handler())
s.logger.Infof("Starting metric server at address [%s]", s.metricsAddress)
if err := http.ListenAndServe(s.metricsAddress, metricsMux); err != nil {
server := &http.Server{
Addr: s.metricsAddress,
Handler: metricsMux,
ReadHeaderTimeout: 3 * time.Second,
}
if err := server.ListenAndServe(); err != nil {
s.logger.Fatalf("Failed to start metric server at [%s]: %v", s.metricsAddress, err)
}
}()
@ -973,7 +972,12 @@ func (s *server) runProfiler() {
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
if err := http.ListenAndServe(s.config.profilerAddress, mux); err != nil {
server := &http.Server{
Addr: s.config.profilerAddress,
Handler: mux,
ReadHeaderTimeout: 3 * time.Second,
}
if err := server.ListenAndServe(); err != nil {
s.logger.WithError(errors.WithStack(err)).Error("error running profiler http server")
}
}

View File

@ -484,14 +484,14 @@ func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *vele
// Record original json
oldData, err := json.Marshal(backup)
if err != nil {
return nil, errors.Wrap(err, "error marshalling original Backup")
return nil, errors.Wrap(err, "error marshaling original Backup")
}
newBackup := backup.DeepCopy()
mutate(newBackup)
newData, err := json.Marshal(newBackup)
if err != nil {
return nil, errors.Wrap(err, "error marshalling updated Backup")
return nil, errors.Wrap(err, "error marshaling updated Backup")
}
patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
if err != nil {

View File

@ -1,58 +0,0 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"k8s.io/apimachinery/pkg/runtime/schema"
proto "github.com/vmware-tanzu/velero/pkg/plugin/generated"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
)
func packResourceIdentifiers(resourcesIDs []velero.ResourceIdentifier) (protoIDs []*proto.ResourceIdentifier) {
for _, item := range resourcesIDs {
protoIDs = append(protoIDs, resourceIdentifierToProto(item))
}
return
}
func unpackResourceIdentifiers(protoIDs []*proto.ResourceIdentifier) (resourceIDs []velero.ResourceIdentifier) {
for _, itm := range protoIDs {
resourceIDs = append(resourceIDs, protoToResourceIdentifier(itm))
}
return
}
func protoToResourceIdentifier(proto *proto.ResourceIdentifier) velero.ResourceIdentifier {
return velero.ResourceIdentifier{
GroupResource: schema.GroupResource{
Group: proto.Group,
Resource: proto.Resource,
},
Namespace: proto.Namespace,
Name: proto.Name,
}
}
func resourceIdentifierToProto(id velero.ResourceIdentifier) *proto.ResourceIdentifier {
return &proto.ResourceIdentifier{
Group: id.Group,
Resource: id.Resource,
Namespace: id.Namespace,
Name: id.Name,
}
}

View File

@ -59,7 +59,7 @@ func GetSnapshotID(snapshotIdCmd *Command) (string, error) {
var snapshots []snapshotID
if err := json.Unmarshal([]byte(stdout), &snapshots); err != nil {
return "", errors.Wrap(err, "error unmarshalling restic snapshots result")
return "", errors.Wrap(err, "error unmarshaling restic snapshots result")
}
if len(snapshots) != 1 {
@ -260,7 +260,7 @@ func getSnapshotSize(repoIdentifier, passwordFile, caCertFile, snapshotID string
}
if err := json.Unmarshal([]byte(stdout), &snapshotStats); err != nil {
return 0, errors.Wrapf(err, "error unmarshalling restic stats result, stdout=%s", stdout)
return 0, errors.Wrapf(err, "error unmarshaling restic stats result, stdout=%s", stdout)
}
return snapshotStats.TotalSize, nil

View File

@ -59,8 +59,9 @@ func (p *ChangePVCNodeSelectorAction) AppliesTo() (velero.ResourceSelector, erro
}
// Execute updates the pvc's selected-node annotation:
// a) if node mapping found in the config map for the plugin
// b) if node mentioned in annotation doesn't exist
//
// a) if node mapping found in the config map for the plugin
// b) if node mentioned in annotation doesn't exist
func (p *ChangePVCNodeSelectorAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
p.logger.Info("Executing ChangePVCNodeSelectorAction")
defer p.logger.Info("Done executing ChangePVCNodeSelectorAction")

View File

@ -26,9 +26,9 @@ const (
)
// Priorities defines the desired order of resource operations:
// Resources in the HighPriorities list will be handled first
// Resources in the LowPriorities list will be handled last
// Other resources will be handled alphabetically after the high prioritized resources and before the low prioritized resources
// Resources in the HighPriorities list will be handled first
// Resources in the LowPriorities list will be handled last
// Other resources will be handled alphabetically after the high prioritized resources and before the low prioritized resources
type Priorities struct {
HighPriorities []string
LowPriorities []string

View File

@ -704,7 +704,7 @@ func getNamespace(logger logrus.FieldLogger, path, remappedName string) *v1.Name
var backupNS v1.Namespace
if err := json.Unmarshal(nsBytes, &backupNS); err != nil {
logger.Warnf("Error unmarshalling namespace from backup, creating new one.")
logger.Warnf("Error unmarshaling namespace from backup, creating new one.")
return &v1.Namespace{
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",

View File

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@ -25,7 +25,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/uploader"
)
//Throttle throttles controlle the interval of output result
// Throttle controls the interval of output result
type Throttle struct {
throttle int64
interval time.Duration
@ -68,7 +68,7 @@ type KopiaProgress struct {
Log logrus.FieldLogger // output info into log when backup
}
//UploadedBytes the total bytes has uploaded currently
// UploadedBytes the total bytes has uploaded currently
func (p *KopiaProgress) UploadedBytes(numBytes int64) {
atomic.AddInt64(&p.uploadedBytes, numBytes)
atomic.AddInt32(&p.uploadedFiles, 1)
@ -76,7 +76,7 @@ func (p *KopiaProgress) UploadedBytes(numBytes int64) {
p.UpdateProgress()
}
//Error statistic the total Error has occurred
// Error statistic the total Error has occurred
func (p *KopiaProgress) Error(path string, err error, isIgnored bool) {
if isIgnored {
atomic.AddInt32(&p.ignoredErrorCount, 1)
@ -87,7 +87,7 @@ func (p *KopiaProgress) Error(path string, err error, isIgnored bool) {
}
}
//EstimatedDataSize statistic the total size of files to be processed and total files to be processed
// EstimatedDataSize statistic the total size of files to be processed and total files to be processed
func (p *KopiaProgress) EstimatedDataSize(fileCount int, totalBytes int64) {
atomic.StoreInt64(&p.estimatedTotalBytes, totalBytes)
atomic.StoreInt32(&p.estimatedFileCount, int32(fileCount))
@ -95,57 +95,57 @@ func (p *KopiaProgress) EstimatedDataSize(fileCount int, totalBytes int64) {
p.UpdateProgress()
}
//UpdateProgress which calls Updater UpdateProgress interface, update progress by third-party implementation
// UpdateProgress which calls Updater UpdateProgress interface, update progress by third-party implementation
func (p *KopiaProgress) UpdateProgress() {
if p.outputThrottle.ShouldOutput() {
p.Updater.UpdateProgress(&uploader.UploaderProgress{TotalBytes: p.estimatedTotalBytes, BytesDone: p.processedBytes})
}
}
//UploadStarted statistic the total Error has occurred
// UploadStarted is called when the upload starts
func (p *KopiaProgress) UploadStarted() {}
//CachedFile statistic the total bytes been cached currently
// CachedFile statistic the total bytes been cached currently
func (p *KopiaProgress) CachedFile(fname string, numBytes int64) {
atomic.AddInt64(&p.cachedBytes, numBytes)
p.UpdateProgress()
}
//HashedBytes statistic the total bytes been hashed currently
// HashedBytes statistic the total bytes been hashed currently
func (p *KopiaProgress) HashedBytes(numBytes int64) {
atomic.AddInt64(&p.processedBytes, numBytes)
atomic.AddInt64(&p.hashededBytes, numBytes)
p.UpdateProgress()
}
//HashingFile statistic the file been hashed currently
// HashingFile statistic the file been hashed currently
func (p *KopiaProgress) HashingFile(fname string) {}
//ExcludedFile statistic the file been excluded currently
// ExcludedFile statistic the file been excluded currently
func (p *KopiaProgress) ExcludedFile(fname string, numBytes int64) {}
//ExcludedDir statistic the dir been excluded currently
// ExcludedDir statistic the dir been excluded currently
func (p *KopiaProgress) ExcludedDir(dirname string) {}
//FinishedHashingFile which will called when specific file finished hash
// FinishedHashingFile which will called when specific file finished hash
func (p *KopiaProgress) FinishedHashingFile(fname string, numBytes int64) {
p.UpdateProgress()
}
//StartedDirectory called when begin to upload one directory
// StartedDirectory called when begin to upload one directory
func (p *KopiaProgress) StartedDirectory(dirname string) {}
//FinishedDirectory called when finish to upload one directory
// FinishedDirectory called when finish to upload one directory
func (p *KopiaProgress) FinishedDirectory(dirname string) {
p.UpdateProgress()
}
//UploadFinished which report the files flushed after the Upload has completed.
// UploadFinished which report the files flushed after the Upload has completed.
func (p *KopiaProgress) UploadFinished() {
p.UpdateProgress()
}
//ProgressBytes which statistic all bytes has been processed currently
// ProgressBytes which statistic all bytes has been processed currently
func (p *KopiaProgress) ProgressBytes(processedBytes int64, totalBytes int64) {
atomic.StoreInt64(&p.processedBytes, processedBytes)
atomic.StoreInt64(&p.estimatedTotalBytes, totalBytes)

View File

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@ -31,18 +31,18 @@ import (
"github.com/kopia/kopia/repo/object"
)
//shimRepository which is one adapter for unifited repo and kopia.
//it implement kopia RepositoryWriter interfaces
// shimRepository which is one adapter for unified repo and kopia.
// it implements the kopia RepositoryWriter interfaces
type shimRepository struct {
udmRepo udmrepo.BackupRepo
}
//shimObjectWriter object writer for unifited repo
// shimObjectWriter object writer for unified repo
type shimObjectWriter struct {
repoWriter udmrepo.ObjectWriter
}
//shimObjectReader object reader for unifited repo
// shimObjectReader object reader for unified repo
type shimObjectReader struct {
repoReader udmrepo.ObjectReader
}
@ -53,7 +53,7 @@ func NewShimRepo(repo udmrepo.BackupRepo) repo.RepositoryWriter {
}
}
//OpenObject open specific object
// OpenObject open specific object
func (sr *shimRepository) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) {
reader, err := sr.udmRepo.OpenObject(ctx, udmrepo.ID(id))
if err != nil {
@ -94,7 +94,7 @@ func (sr *shimRepository) FindManifests(ctx context.Context, labels map[string]s
}
}
//GetKopiaManifestEntries get metadata from specific ManifestEntryMetadata
// GetKopiaManifestEntry gets metadata from specific ManifestEntryMetadata
func GetKopiaManifestEntry(uMani *udmrepo.ManifestEntryMetadata) *manifest.EntryMetadata {
var ret manifest.EntryMetadata
@ -106,7 +106,7 @@ func GetKopiaManifestEntry(uMani *udmrepo.ManifestEntryMetadata) *manifest.Entry
return &ret
}
//GetKopiaManifestEntries get metadata list from specific ManifestEntryMetadata
// GetKopiaManifestEntries get metadata list from specific ManifestEntryMetadata
func GetKopiaManifestEntries(uMani []*udmrepo.ManifestEntryMetadata) []*manifest.EntryMetadata {
var ret []*manifest.EntryMetadata
@ -123,12 +123,12 @@ func GetKopiaManifestEntries(uMani []*udmrepo.ManifestEntryMetadata) []*manifest
return ret
}
//Time Get the local time of the unified repo
// Time Get the local time of the unified repo
func (sr *shimRepository) Time() time.Time {
return sr.udmRepo.Time()
}
//ClientOptions is not supported by unified repo
// ClientOptions is not supported by unified repo
func (sr *shimRepository) ClientOptions() repo.ClientOptions {
return repo.ClientOptions{}
}
@ -143,26 +143,26 @@ func (sr *shimRepository) ContentInfo(ctx context.Context, contentID content.ID)
return nil, errors.New("not supported")
}
//PrefetchContents is not supported by unified repo
// PrefetchContents is not supported by unified repo
func (sr *shimRepository) PrefetchContents(ctx context.Context, contentIDs []content.ID, hint string) []content.ID {
return nil
}
//PrefetchObjects is not supported by unified repo
// PrefetchObjects is not supported by unified repo
func (sr *shimRepository) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]content.ID, error) {
return nil, errors.New("not supported")
}
//UpdateDescription is not supported by unified repo
// UpdateDescription is not supported by unified repo
func (sr *shimRepository) UpdateDescription(d string) {
}
//NewWriter is not supported by unified repo
// NewWriter is not supported by unified repo
func (sr *shimRepository) NewWriter(ctx context.Context, option repo.WriteSessionOptions) (context.Context, repo.RepositoryWriter, error) {
return nil, nil, errors.New("not supported")
}
//Close will close unified repo
// Close will close unified repo
func (sr *shimRepository) Close(ctx context.Context) error {
return sr.udmRepo.Close(ctx)
}
@ -222,7 +222,7 @@ func (sr *shimObjectReader) Seek(offset int64, whence int) (int64, error) {
return sr.repoReader.Seek(offset, whence)
}
//Close current io for ObjectReader
// Close current io for ObjectReader
func (sr *shimObjectReader) Close() error {
return sr.repoReader.Close()
}

View File

@ -35,18 +35,18 @@ import (
"github.com/vmware-tanzu/velero/pkg/repository/udmrepo/service"
)
//BackupFunc mainly used to make testing more convenient
// BackupFunc mainly used to make testing more convenient
var BackupFunc = kopia.Backup
var RestoreFunc = kopia.Restore
//kopiaProvider recorded info related with kopiaProvider
// kopiaProvider recorded info related with kopiaProvider
type kopiaProvider struct {
bkRepo udmrepo.BackupRepo
credGetter *credentials.CredentialGetter
log logrus.FieldLogger
}
//NewKopiaUploaderProvider initialized with open or create a repository
// NewKopiaUploaderProvider initialized with open or create a repository
func NewKopiaUploaderProvider(
ctx context.Context,
credGetter *credentials.CredentialGetter,
@ -78,7 +78,7 @@ func NewKopiaUploaderProvider(
return kp, nil
}
//CheckContext check context status check if context is timeout or cancel and backup restore once finished it will quit and return
// CheckContext check context status check if context is timeout or cancel and backup restore once finished it will quit and return
func (kp *kopiaProvider) CheckContext(ctx context.Context, finishChan chan struct{}, restoreChan chan struct{}, uploader *snapshotfs.Uploader) {
select {
case <-finishChan:
@ -166,7 +166,7 @@ func (kp *kopiaProvider) GetPassword(param interface{}) (string, error) {
return strings.TrimSpace(rawPass), nil
}
//RunRestore which will restore specific path and update restore progress
// RunRestore which will restore specific path and update restore progress
func (kp *kopiaProvider) RunRestore(
ctx context.Context,
snapshotID string,

View File

@ -41,13 +41,13 @@ type SnapshotInfo struct {
Size int64 `json:"Size"`
}
//UploaderProgress which defined two variables to record progress
// UploaderProgress defines two variables to record progress
type UploaderProgress struct {
TotalBytes int64 `json:"totalBytes,omitempty"`
BytesDone int64 `json:"doneBytes,omitempty"`
}
//UploaderProgress which defined generic interface to update progress
// ProgressUpdater defines a generic interface to update progress
type ProgressUpdater interface {
UpdateProgress(p *UploaderProgress)
}

View File

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,