switch to logrus.FieldLogger and arktest.NewLogger() everywhere

Signed-off-by: Steve Kriss <steve@heptio.com>
pull/241/head
Steve Kriss 2017-12-12 15:22:46 -08:00
parent 4aea9b9a2c
commit 56c9d68137
24 changed files with 91 additions and 193 deletions
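For context on the switch: logrus.FieldLogger is an interface that both *logrus.Logger and *logrus.Entry satisfy, so functions that accept it no longer care which concrete logger the caller holds. A minimal, self-contained illustration (not code from this commit; names are made up):

    package main

    import "github.com/sirupsen/logrus"

    // doWork accepts the logrus.FieldLogger interface rather than a concrete
    // *logrus.Entry or *logrus.Logger, mirroring the signature changes below.
    func doWork(log logrus.FieldLogger) {
        log.WithField("step", "example").Info("doing work")
    }

    func main() {
        base := logrus.New()
        doWork(base)                                  // *logrus.Logger satisfies FieldLogger
        doWork(base.WithField("component", "backup")) // and so does *logrus.Entry
    }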

Gopkg.lock (generated)

@@ -283,7 +283,7 @@
 [[projects]]
 name = "github.com/sirupsen/logrus"
-packages = [".","hooks/test"]
+packages = ["."]
 revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
 version = "v1.0.3"
@@ -426,6 +426,6 @@
 [solve-meta]
 analyzer-name = "dep"
 analyzer-version = 1
-inputs-digest = "6287197115277ba882d5bb5dc20d74a8cb8e13d90c4e783c518a4e4aed55245f"
+inputs-digest = "c3cd1b703421685e5b2343ced6eaa6ec958b9c44d62277322f4c93de164c2d04"
 solver-name = "gps-cdcl"
 solver-version = 1


@@ -37,7 +37,7 @@ type itemHookHandler interface {
 // to specify a hook, that is executed. Otherwise, this looks at the backup context's Backup to
 // determine if there are any hooks relevant to the item, taking into account the hook spec's
 // namespaces, resources, and label selector.
-handleHooks(log *logrus.Entry, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error
+handleHooks(log logrus.FieldLogger, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error
 }
 // defaultItemHookHandler is the default itemHookHandler.
@@ -46,7 +46,7 @@ type defaultItemHookHandler struct {
 }
 func (h *defaultItemHookHandler) handleHooks(
-log *logrus.Entry,
+log logrus.FieldLogger,
 groupResource schema.GroupResource,
 obj runtime.Unstructured,
 resourceHooks []resourceHook,


@@ -38,7 +38,7 @@ type mockItemHookHandler struct {
 mock.Mock
 }
-func (h *mockItemHookHandler) handleHooks(log *logrus.Entry, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error {
+func (h *mockItemHookHandler) handleHooks(log logrus.FieldLogger, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error {
 args := h.Called(log, groupResource, obj, resourceHooks)
 return args.Error(0)
 }
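A hedged sketch of how a backup test might drive this mock after the change (the test name and arguments are illustrative, and the testify mock/require, apimachinery schema, and arktest imports are assumed from the surrounding test file); mock.Anything matches whichever FieldLogger implementation the caller supplies:

    func TestMockHandleHooksSketch(t *testing.T) {
        handler := &mockItemHookHandler{}
        // The logger argument is matched loosely, so a *logrus.Entry, a
        // *logrus.Logger, or the discarding logger from pkg/util/test all work.
        handler.On("handleHooks", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)

        err := handler.handleHooks(arktest.NewLogger(), schema.GroupResource{Resource: "pods"}, nil, nil)
        require.NoError(t, err)
        handler.AssertExpectations(t)
    }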


@@ -35,7 +35,7 @@ import (
 type podCommandExecutor interface {
 // executePodCommand executes a command in a container in a pod. If the command takes longer than
 // the specified timeout, an error is returned.
-executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error
+executePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error
 }
 type poster interface {
@@ -63,7 +63,7 @@ func NewPodCommandExecutor(restClientConfig *rest.Config, restClient poster) pod
 // command takes longer than the specified timeout, an error is returned (NOTE: it is not currently
 // possible to ensure the command is terminated when the timeout occurs, so it may continue to run
 // in the background).
-func (e *defaultPodCommandExecutor) executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error {
+func (e *defaultPodCommandExecutor) executePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error {
 if item == nil {
 return errors.New("item is required")
 }


@@ -270,7 +270,7 @@ type mockPodCommandExecutor struct {
 mock.Mock
 }
-func (e *mockPodCommandExecutor) executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error {
+func (e *mockPodCommandExecutor) executePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error {
 args := e.Called(log, item, namespace, name, hookName, hook)
 return args.Error(0)
 }


@@ -41,13 +41,13 @@ type backupCache struct {
 // This doesn't really need to be a map right now, but if we ever move to supporting multiple
 // buckets, this will be ready for it.
 buckets map[string]*backupCacheBucket
-logger *logrus.Logger
+logger logrus.FieldLogger
 }
 var _ BackupGetter = &backupCache{}
 // NewBackupCache returns a new backup cache that refreshes from delegate every resyncPeriod.
-func NewBackupCache(ctx context.Context, delegate BackupGetter, resyncPeriod time.Duration, logger *logrus.Logger) BackupGetter {
+func NewBackupCache(ctx context.Context, delegate BackupGetter, resyncPeriod time.Duration, logger logrus.FieldLogger) BackupGetter {
 c := &backupCache{
 delegate: delegate,
 buckets: make(map[string]*backupCacheBucket),
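As a usage note, any logrus.FieldLogger can now be handed to the cache. A sketch, assuming it sits in the same package as backupCache with context, time, and logrus imported (the helper name and one-minute period are illustrative, not from this commit):

    // newCachedGetter wraps an existing BackupGetter so lookups are served from
    // an in-memory cache that is refreshed from the delegate every minute.
    func newCachedGetter(ctx context.Context, delegate BackupGetter, logger logrus.FieldLogger) BackupGetter {
        return NewBackupCache(ctx, delegate, time.Minute, logger)
    }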


@@ -22,7 +22,6 @@ import (
 "testing"
 "time"
-testlogger "github.com/sirupsen/logrus/hooks/test"
 "github.com/stretchr/testify/assert"
 "github.com/heptio/ark/pkg/apis/ark/v1"
@@ -34,7 +33,7 @@ func TestNewBackupCache(t *testing.T) {
 var (
 delegate = &test.FakeBackupService{}
 ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
-logger, _ = testlogger.NewNullLogger()
+logger = test.NewLogger()
 )
 defer cancel()
@@ -104,8 +103,8 @@ func TestNewBackupCache(t *testing.T) {
 func TestBackupCacheRefresh(t *testing.T) {
 var (
-delegate = &test.FakeBackupService{}
-logger, _ = testlogger.NewNullLogger()
+delegate = &test.FakeBackupService{}
+logger = test.NewLogger()
 )
 c := &backupCache{
@@ -136,9 +135,9 @@ func TestBackupCacheRefresh(t *testing.T) {
 func TestBackupCacheGetAllBackupsUsesCacheIfPresent(t *testing.T) {
 var (
-delegate = &test.FakeBackupService{}
-logger, _ = testlogger.NewNullLogger()
-bucket1 = []*v1.Backup{
+delegate = &test.FakeBackupService{}
+logger = test.NewLogger()
+bucket1 = []*v1.Backup{
 test.NewTestBackup().WithName("backup1").Backup,
 test.NewTestBackup().WithName("backup2").Backup,
 }


@@ -101,14 +101,14 @@ func getRestoreResultsKey(backup, restore string) string {
 type backupService struct {
 objectStore ObjectStore
 decoder runtime.Decoder
-logger *logrus.Logger
+logger logrus.FieldLogger
 }
 var _ BackupService = &backupService{}
 var _ BackupGetter = &backupService{}
 // NewBackupService creates a backup service using the provided object store
-func NewBackupService(objectStore ObjectStore, logger *logrus.Logger) BackupService {
+func NewBackupService(objectStore ObjectStore, logger logrus.FieldLogger) BackupService {
 return &backupService{
 objectStore: objectStore,
 decoder: scheme.Codecs.UniversalDecoder(api.SchemeGroupVersion),
@@ -268,7 +268,7 @@ func NewBackupServiceWithCachedBackupGetter(
 ctx context.Context,
 delegate BackupService,
 resyncPeriod time.Duration,
-logger *logrus.Logger,
+logger logrus.FieldLogger,
 ) BackupService {
 return &cachedBackupService{
 BackupService: delegate,


@@ -27,7 +27,6 @@ import (
 "time"
 testutil "github.com/heptio/ark/pkg/util/test"
-testlogger "github.com/sirupsen/logrus/hooks/test"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
@@ -36,6 +35,7 @@ import (
 api "github.com/heptio/ark/pkg/apis/ark/v1"
 "github.com/heptio/ark/pkg/util/encode"
+arktest "github.com/heptio/ark/pkg/util/test"
 )
 func TestUploadBackup(t *testing.T) {
@@ -85,7 +85,7 @@ func TestUploadBackup(t *testing.T) {
 objStore = &testutil.ObjectStore{}
 bucket = "test-bucket"
 backupName = "test-backup"
-logger, _ = testlogger.NewNullLogger()
+logger = arktest.NewLogger()
 )
 if test.metadata != nil {
@@ -118,10 +118,10 @@ func TestUploadBackup(t *testing.T) {
 func TestDownloadBackup(t *testing.T) {
 var (
-o = &testutil.ObjectStore{}
-bucket = "b"
-backup = "bak"
-logger, _ = testlogger.NewNullLogger()
+o = &testutil.ObjectStore{}
+bucket = "b"
+backup = "bak"
+logger = arktest.NewLogger()
 )
 o.On("GetObject", bucket, backup+"/"+backup+".tar.gz").Return(ioutil.NopCloser(strings.NewReader("foo")), nil)
@@ -155,11 +155,11 @@ func TestDeleteBackup(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 var (
-bucket = "bucket"
-backup = "bak"
-objects = []string{"bak/ark-backup.json", "bak/bak.tar.gz", "bak/bak.log.gz"}
-objStore = &testutil.ObjectStore{}
-logger, _ = testlogger.NewNullLogger()
+bucket = "bucket"
+backup = "bak"
+objects = []string{"bak/ark-backup.json", "bak/bak.tar.gz", "bak/bak.log.gz"}
+objStore = &testutil.ObjectStore{}
+logger = arktest.NewLogger()
 )
 objStore.On("ListObjects", bucket, backup+"/").Return(objects, test.listObjectsError)
@@ -229,9 +229,9 @@ func TestGetAllBackups(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 var (
-bucket = "bucket"
-objStore = &testutil.ObjectStore{}
-logger, _ = testlogger.NewNullLogger()
+bucket = "bucket"
+objStore = &testutil.ObjectStore{}
+logger = arktest.NewLogger()
 )
 objStore.On("ListCommonPrefixes", bucket, "/").Return([]string{"backup-1", "backup-2"}, nil)
@@ -328,7 +328,7 @@ func TestCreateSignedURL(t *testing.T) {
 t.Run(test.name, func(t *testing.T) {
 var (
 objectStorage = &testutil.ObjectStore{}
-logger, _ = testlogger.NewNullLogger()
+logger = arktest.NewLogger()
 backupService = NewBackupService(objectStorage, logger)
 )


@@ -141,7 +141,7 @@ type server struct {
 sharedInformerFactory informers.SharedInformerFactory
 ctx context.Context
 cancelFunc context.CancelFunc
-logger *logrus.Logger
+logger logrus.FieldLogger
 pluginManager plugin.Manager
 }
@@ -272,7 +272,7 @@ var defaultResourcePriorities = []string{
 "configmaps",
 }
-func applyConfigDefaults(c *api.Config, logger *logrus.Logger) {
+func applyConfigDefaults(c *api.Config, logger logrus.FieldLogger) {
 if c.GCSyncPeriod.Duration == 0 {
 c.GCSyncPeriod.Duration = defaultGCSyncPeriod
 }
@@ -567,7 +567,7 @@ func newRestorer(
 resourcePriorities []string,
 backupClient arkv1client.BackupsGetter,
 kubeClient kubernetes.Interface,
-logger *logrus.Logger,
+logger logrus.FieldLogger,
 ) (restore.Restorer, error) {
 return restore.NewKubernetesRestorer(
 discoveryHelper,


@@ -20,16 +20,16 @@ import (
 "testing"
 "time"
-"github.com/sirupsen/logrus/hooks/test"
 "github.com/stretchr/testify/assert"
 "github.com/heptio/ark/pkg/apis/ark/v1"
+arktest "github.com/heptio/ark/pkg/util/test"
 )
 func TestApplyConfigDefaults(t *testing.T) {
 var (
-logger, _ = test.NewNullLogger()
-c = &v1.Config{}
+logger = arktest.NewLogger()
+c = &v1.Config{}
 )
 // test defaulting


@@ -36,7 +36,7 @@ type backupSyncController struct {
 backupService cloudprovider.BackupService
 bucket string
 syncPeriod time.Duration
-logger *logrus.Logger
+logger logrus.FieldLogger
 }
 func NewBackupSyncController(
@@ -44,7 +44,7 @@ func NewBackupSyncController(
 backupService cloudprovider.BackupService,
 bucket string,
 syncPeriod time.Duration,
-logger *logrus.Logger,
+logger logrus.FieldLogger,
 ) Interface {
 if syncPeriod < time.Minute {
 logger.Infof("Provided backup sync period %v is too short. Setting to 1 minute", syncPeriod)


@@ -21,14 +21,13 @@ import (
 "testing"
 "time"
-testlogger "github.com/sirupsen/logrus/hooks/test"
 "github.com/stretchr/testify/assert"
 core "k8s.io/client-go/testing"
 "github.com/heptio/ark/pkg/apis/ark/v1"
 "github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
-. "github.com/heptio/ark/pkg/util/test"
+arktest "github.com/heptio/ark/pkg/util/test"
 )
 func TestBackupSyncControllerRun(t *testing.T) {
@@ -47,9 +46,9 @@ func TestBackupSyncControllerRun(t *testing.T) {
 {
 name: "normal case",
 cloudBackups: []*v1.Backup{
-NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
-NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
-NewTestBackup().WithNamespace("ns-2").WithName("backup-3").Backup,
+arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
+arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
+arktest.NewTestBackup().WithNamespace("ns-2").WithName("backup-3").Backup,
 },
 },
 }
@@ -57,9 +56,9 @@ func TestBackupSyncControllerRun(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 var (
-bs = &BackupService{}
-client = fake.NewSimpleClientset()
-logger, _ = testlogger.NewNullLogger()
+bs = &arktest.BackupService{}
+client = fake.NewSimpleClientset()
+logger = arktest.NewLogger()
 )
 c := NewBackupSyncController(


@@ -52,7 +52,7 @@ type downloadRequestController struct {
 syncHandler func(key string) error
 queue workqueue.RateLimitingInterface
 clock clock.Clock
-logger *logrus.Logger
+logger logrus.FieldLogger
 }
 // NewDownloadRequestController creates a new DownloadRequestController.
@@ -61,7 +61,7 @@ func NewDownloadRequestController(
 downloadRequestInformer informers.DownloadRequestInformer,
 backupService cloudprovider.BackupService,
 bucket string,
-logger *logrus.Logger,
+logger logrus.FieldLogger,
 ) Interface {
 c := &downloadRequestController{
 downloadRequestClient: downloadRequestClient,


@@ -21,7 +21,6 @@ import (
 "testing"
 "time"
-testlogger "github.com/sirupsen/logrus/hooks/test"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
@@ -32,7 +31,7 @@ import (
 "github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
 informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
 "github.com/heptio/ark/pkg/util/collections"
-"github.com/heptio/ark/pkg/util/test"
+arktest "github.com/heptio/ark/pkg/util/test"
 )
 func TestProcessDownloadRequest(t *testing.T) {
@@ -99,8 +98,8 @@ func TestProcessDownloadRequest(t *testing.T) {
 client = fake.NewSimpleClientset()
 sharedInformers = informers.NewSharedInformerFactory(client, 0)
 downloadRequestsInformer = sharedInformers.Ark().V1().DownloadRequests()
-backupService = &test.BackupService{}
-logger, _ = testlogger.NewNullLogger()
+backupService = &arktest.BackupService{}
+logger = arktest.NewLogger()
 )
 defer backupService.AssertExpectations(t)


@@ -50,7 +50,7 @@ type gcController struct {
 restoreLister listers.RestoreLister
 restoreListerSynced cache.InformerSynced
 restoreClient arkv1client.RestoresGetter
-logger *logrus.Logger
+logger logrus.FieldLogger
 }
 // NewGCController constructs a new gcController.
@@ -63,7 +63,7 @@ func NewGCController(
 backupClient arkv1client.BackupsGetter,
 restoreInformer informers.RestoreInformer,
 restoreClient arkv1client.RestoresGetter,
-logger *logrus.Logger,
+logger logrus.FieldLogger,
 ) Interface {
 if syncPeriod < time.Minute {
 logger.WithField("syncPeriod", syncPeriod).Info("Provided GC sync period is too short. Setting to 1 minute")


@@ -20,7 +20,6 @@ import (
 "testing"
 "time"
-testlogger "github.com/sirupsen/logrus/hooks/test"
 "github.com/stretchr/testify/assert"
 "k8s.io/apimachinery/pkg/util/clock"
@@ -31,7 +30,7 @@ import (
 "github.com/heptio/ark/pkg/cloudprovider"
 "github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
 informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
-. "github.com/heptio/ark/pkg/util/test"
+arktest "github.com/heptio/ark/pkg/util/test"
 )
 type gcTest struct {
@@ -51,7 +50,7 @@ func TestGarbageCollect(t *testing.T) {
 {
 name: "basic-expired",
 backups: []*api.Backup{
-NewTestBackup().WithName("backup-1").
+arktest.NewTestBackup().WithName("backup-1").
 WithExpiration(fakeClock.Now().Add(-1*time.Second)).
 WithSnapshot("pv-1", "snapshot-1").
 WithSnapshot("pv-2", "snapshot-2").
@@ -64,7 +63,7 @@ func TestGarbageCollect(t *testing.T) {
 {
 name: "basic-unexpired",
 backups: []*api.Backup{
-NewTestBackup().WithName("backup-1").
+arktest.NewTestBackup().WithName("backup-1").
 WithExpiration(fakeClock.Now().Add(1*time.Minute)).
 WithSnapshot("pv-1", "snapshot-1").
 WithSnapshot("pv-2", "snapshot-2").
@@ -77,12 +76,12 @@ func TestGarbageCollect(t *testing.T) {
 {
 name: "one expired, one unexpired",
 backups: []*api.Backup{
-NewTestBackup().WithName("backup-1").
+arktest.NewTestBackup().WithName("backup-1").
 WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
 WithSnapshot("pv-1", "snapshot-1").
 WithSnapshot("pv-2", "snapshot-2").
 Backup,
-NewTestBackup().WithName("backup-2").
+arktest.NewTestBackup().WithName("backup-2").
 WithExpiration(fakeClock.Now().Add(1*time.Minute)).
 WithSnapshot("pv-3", "snapshot-3").
 WithSnapshot("pv-4", "snapshot-4").
@@ -95,7 +94,7 @@ func TestGarbageCollect(t *testing.T) {
 {
 name: "none expired in target bucket",
 backups: []*api.Backup{
-NewTestBackup().WithName("backup-2").
+arktest.NewTestBackup().WithName("backup-2").
 WithExpiration(fakeClock.Now().Add(1*time.Minute)).
 WithSnapshot("pv-3", "snapshot-3").
 WithSnapshot("pv-4", "snapshot-4").
@@ -108,7 +107,7 @@ func TestGarbageCollect(t *testing.T) {
 {
 name: "orphan snapshots",
 backups: []*api.Backup{
-NewTestBackup().WithName("backup-1").
+arktest.NewTestBackup().WithName("backup-1").
 WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
 WithSnapshot("pv-1", "snapshot-1").
 WithSnapshot("pv-2", "snapshot-2").
@@ -121,12 +120,12 @@ func TestGarbageCollect(t *testing.T) {
 {
 name: "no snapshot service only GC's backups without snapshots",
 backups: []*api.Backup{
-NewTestBackup().WithName("backup-1").
+arktest.NewTestBackup().WithName("backup-1").
 WithExpiration(fakeClock.Now().Add(-1*time.Second)).
 WithSnapshot("pv-1", "snapshot-1").
 WithSnapshot("pv-2", "snapshot-2").
 Backup,
-NewTestBackup().WithName("backup-2").
+arktest.NewTestBackup().WithName("backup-2").
 WithExpiration(fakeClock.Now().Add(-1 * time.Second)).
 Backup,
 },
@@ -138,12 +137,12 @@ func TestGarbageCollect(t *testing.T) {
 for _, test := range tests {
 var (
-backupService = &BackupService{}
-snapshotService *FakeSnapshotService
+backupService = &arktest.BackupService{}
+snapshotService *arktest.FakeSnapshotService
 )
 if !test.nilSnapshotService {
-snapshotService = &FakeSnapshotService{SnapshotsTaken: test.snapshots}
+snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
 }
 t.Run(test.name, func(t *testing.T) {
@@ -152,7 +151,7 @@ func TestGarbageCollect(t *testing.T) {
 sharedInformers = informers.NewSharedInformerFactory(client, 0)
 snapSvc cloudprovider.SnapshotService
 bucket = "bucket"
-logger, _ = testlogger.NewNullLogger()
+logger = arktest.NewLogger()
 )
 if snapshotService != nil {
@@ -204,7 +203,7 @@ func TestGarbageCollectBackup(t *testing.T) {
 }{
 {
 name: "deleteBackupFile=false, snapshot deletion fails, don't delete kube backup",
-backup: NewTestBackup().WithName("backup-1").
+backup: arktest.NewTestBackup().WithName("backup-1").
 WithSnapshot("pv-1", "snapshot-1").
 WithSnapshot("pv-2", "snapshot-2").
 Backup,
@@ -215,12 +214,12 @@ func TestGarbageCollectBackup(t *testing.T) {
 },
 {
 name: "related restores should be deleted",
-backup: NewTestBackup().WithName("backup-1").Backup,
+backup: arktest.NewTestBackup().WithName("backup-1").Backup,
 deleteBackupFile: true,
 snapshots: sets.NewString(),
 restores: []*api.Restore{
-NewTestRestore(api.DefaultNamespace, "restore-1", api.RestorePhaseCompleted).WithBackup("backup-1").Restore,
-NewTestRestore(api.DefaultNamespace, "restore-2", api.RestorePhaseCompleted).WithBackup("backup-2").Restore,
+arktest.NewTestRestore(api.DefaultNamespace, "restore-1", api.RestorePhaseCompleted).WithBackup("backup-1").Restore,
+arktest.NewTestRestore(api.DefaultNamespace, "restore-2", api.RestorePhaseCompleted).WithBackup("backup-2").Restore,
 },
 expectedRestoreDeletes: []string{"restore-1"},
 expectedBackupDelete: "backup-1",
@@ -232,12 +231,12 @@ func TestGarbageCollectBackup(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 var (
-backupService = &BackupService{}
-snapshotService = &FakeSnapshotService{SnapshotsTaken: test.snapshots}
+backupService = &arktest.BackupService{}
+snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
 client = fake.NewSimpleClientset()
 sharedInformers = informers.NewSharedInformerFactory(client, 0)
 bucket = "bucket-1"
-logger, _ = testlogger.NewNullLogger()
+logger = arktest.NewLogger()
 controller = NewGCController(
 backupService,
 snapshotService,
@@ -298,8 +297,8 @@ func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
 func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
 var (
-backupService = &BackupService{}
-snapshotService = &FakeSnapshotService{}
+backupService = &arktest.BackupService{}
+snapshotService = &arktest.FakeSnapshotService{}
 fakeClock = clock.NewFakeClock(time.Now())
 assert = assert.New(t)
 )
@@ -307,7 +306,7 @@ func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
 scenario := gcTest{
 name: "basic-expired",
 backups: []*api.Backup{
-NewTestBackup().WithName("backup-1").
+arktest.NewTestBackup().WithName("backup-1").
 WithExpiration(fakeClock.Now().Add(1*time.Second)).
 WithSnapshot("pv-1", "snapshot-1").
 WithSnapshot("pv-2", "snapshot-2").
@@ -321,7 +320,7 @@ func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
 var (
 client = fake.NewSimpleClientset()
 sharedInformers = informers.NewSharedInformerFactory(client, 0)
-logger, _ = testlogger.NewNullLogger()
+logger = arktest.NewLogger()
 )
 controller := NewGCController(


@@ -23,7 +23,6 @@ import (
 "time"
 "github.com/robfig/cron"
-testlogger "github.com/sirupsen/logrus/hooks/test"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
@@ -362,7 +361,7 @@ func TestParseCronSchedule(t *testing.T) {
 },
 }
-logger, _ := testlogger.NewNullLogger()
+logger := arktest.NewLogger()
 c, errs := parseCronSchedule(s, logger)
 require.Empty(t, errs)


@@ -49,7 +49,7 @@ type Helper interface {
 type helper struct {
 discoveryClient discovery.DiscoveryInterface
-logger *logrus.Logger
+logger logrus.FieldLogger
 // lock guards mapper, resources and resourcesMap
 lock sync.RWMutex
@@ -60,7 +60,7 @@ type helper struct {
 var _ Helper = &helper{}
-func NewHelper(discoveryClient discovery.DiscoveryInterface, logger *logrus.Logger) (Helper, error) {
+func NewHelper(discoveryClient discovery.DiscoveryInterface, logger logrus.FieldLogger) (Helper, error) {
 h := &helper{
 discoveryClient: discoveryClient,
 }


@@ -70,7 +70,7 @@ type kubernetesRestorer struct {
 namespaceClient corev1.NamespaceInterface
 resourcePriorities []string
 fileSystem FileSystem
-logger *logrus.Logger
+logger logrus.FieldLogger
 }
 // prioritizeResources returns an ordered, fully-resolved list of resources to restore based on
@@ -140,7 +140,7 @@ func NewKubernetesRestorer(
 resourcePriorities []string,
 backupClient arkv1client.BackupsGetter,
 namespaceClient corev1.NamespaceInterface,
-logger *logrus.Logger,
+logger logrus.FieldLogger,
 ) (Restorer, error) {
 return &kubernetesRestorer{
 discoveryHelper: discoveryHelper,


@@ -23,8 +23,6 @@ import (
 "testing"
 "github.com/pkg/errors"
-"github.com/sirupsen/logrus/hooks/test"
-testlogger "github.com/sirupsen/logrus/hooks/test"
 "github.com/spf13/afero"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
@@ -83,7 +81,7 @@ func TestPrioritizeResources(t *testing.T) {
 },
 }
-logger, _ := test.NewNullLogger()
+logger := arktest.NewLogger()
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
@@ -180,7 +178,7 @@ func TestRestoreNamespaceFiltering(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-log, _ := testlogger.NewNullLogger()
+log := arktest.NewLogger()
 ctx := &context{
 restore: test.restore,
@@ -272,7 +270,7 @@ func TestRestorePriority(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-log, _ := testlogger.NewNullLogger()
+log := arktest.NewLogger()
 ctx := &context{
 restore: test.restore,


@@ -22,7 +22,7 @@ import (
 "github.com/sirupsen/logrus"
 )
-func NewLogger() *logrus.Entry {
+func NewLogger() logrus.FieldLogger {
 logger := logrus.New()
 logger.Out = ioutil.Discard
 return logrus.NewEntry(logger)
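The returned value now satisfies logrus.FieldLogger directly, so callers no longer unpack a (logger, hook) pair. A hedged example of the intended test-side usage (the package and test names are hypothetical; arktest aliases github.com/heptio/ark/pkg/util/test as in the test diffs above):

    package example_test

    import (
        "testing"

        arktest "github.com/heptio/ark/pkg/util/test"
    )

    func TestSomethingThatLogs(t *testing.T) {
        // NewLogger returns a logrus.FieldLogger whose output goes to ioutil.Discard,
        // so it can be passed anywhere a FieldLogger is expected without noisy test output.
        logger := arktest.NewLogger()
        logger.WithField("backup", "backup-1").Info("never printed")
    }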


@@ -39,12 +39,12 @@ type shortcutExpander struct {
 RESTMapper meta.RESTMapper
 discoveryClient discovery.DiscoveryInterface
-logger *logrus.Logger
+logger logrus.FieldLogger
 }
 var _ meta.RESTMapper = &shortcutExpander{}
-func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface, logger *logrus.Logger) (shortcutExpander, error) {
+func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface, logger logrus.FieldLogger) (shortcutExpander, error) {
 if client == nil {
 return shortcutExpander{}, errors.New("Please provide discovery client to shortcut expander")
 }


@@ -1,95 +0,0 @@
// The Test package is used for testing logrus. It is here for backwards
// compatibility from when logrus' organization was upper-case. Please use
// lower-case logrus and the `null` package instead of this one.
package test
import (
"io/ioutil"
"sync"
"github.com/sirupsen/logrus"
)
// Hook is a hook designed for dealing with logs in test scenarios.
type Hook struct {
// Entries is an array of all entries that have been received by this hook.
// For safe access, use the AllEntries() method, rather than reading this
// value directly.
Entries []*logrus.Entry
mu sync.RWMutex
}
// NewGlobal installs a test hook for the global logger.
func NewGlobal() *Hook {
hook := new(Hook)
logrus.AddHook(hook)
return hook
}
// NewLocal installs a test hook for a given local logger.
func NewLocal(logger *logrus.Logger) *Hook {
hook := new(Hook)
logger.Hooks.Add(hook)
return hook
}
// NewNullLogger creates a discarding logger and installs the test hook.
func NewNullLogger() (*logrus.Logger, *Hook) {
logger := logrus.New()
logger.Out = ioutil.Discard
return logger, NewLocal(logger)
}
func (t *Hook) Fire(e *logrus.Entry) error {
t.mu.Lock()
defer t.mu.Unlock()
t.Entries = append(t.Entries, e)
return nil
}
func (t *Hook) Levels() []logrus.Level {
return logrus.AllLevels
}
// LastEntry returns the last entry that was logged or nil.
func (t *Hook) LastEntry() *logrus.Entry {
t.mu.RLock()
defer t.mu.RUnlock()
i := len(t.Entries) - 1
if i < 0 {
return nil
}
// Make a copy, for safety
e := *t.Entries[i]
return &e
}
// AllEntries returns all entries that were logged.
func (t *Hook) AllEntries() []*logrus.Entry {
t.mu.RLock()
defer t.mu.RUnlock()
// Make a copy so the returned value won't race with future log requests
entries := make([]*logrus.Entry, len(t.Entries))
for i, entry := range t.Entries {
// Make a copy, for safety
e := *entry
entries[i] = &e
}
return entries
}
// Reset removes all Entries from this test hook.
func (t *Hook) Reset() {
t.mu.Lock()
defer t.mu.Unlock()
t.Entries = make([]*logrus.Entry, 0)
}